summaryrefslogtreecommitdiffstats
path: root/src/cmd/compile
diff options
context:
space:
mode:
Diffstat (limited to 'src/cmd/compile')
-rw-r--r--src/cmd/compile/README.md316
-rw-r--r--src/cmd/compile/abi-internal.md973
-rw-r--r--src/cmd/compile/default.pgobin0 -> 286041 bytes
-rw-r--r--src/cmd/compile/doc.go321
-rw-r--r--src/cmd/compile/internal/abi/abiutils.go683
-rw-r--r--src/cmd/compile/internal/abt/avlint32.go832
-rw-r--r--src/cmd/compile/internal/abt/avlint32_test.go700
-rw-r--r--src/cmd/compile/internal/amd64/galign.go27
-rw-r--r--src/cmd/compile/internal/amd64/ggen.go135
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go1444
-rw-r--r--src/cmd/compile/internal/amd64/versions_test.go433
-rw-r--r--src/cmd/compile/internal/arm/galign.go25
-rw-r--r--src/cmd/compile/internal/arm/ggen.go60
-rw-r--r--src/cmd/compile/internal/arm/ssa.go981
-rw-r--r--src/cmd/compile/internal/arm64/galign.go27
-rw-r--r--src/cmd/compile/internal/arm64/ggen.go73
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go1371
-rw-r--r--src/cmd/compile/internal/base/base.go221
-rw-r--r--src/cmd/compile/internal/base/bootstrap_false.go11
-rw-r--r--src/cmd/compile/internal/base/bootstrap_true.go11
-rw-r--r--src/cmd/compile/internal/base/debug.go76
-rw-r--r--src/cmd/compile/internal/base/flag.go575
-rw-r--r--src/cmd/compile/internal/base/hashdebug.go417
-rw-r--r--src/cmd/compile/internal/base/hashdebug_test.go140
-rw-r--r--src/cmd/compile/internal/base/link.go53
-rw-r--r--src/cmd/compile/internal/base/mapfile_mmap.go45
-rw-r--r--src/cmd/compile/internal/base/mapfile_read.go21
-rw-r--r--src/cmd/compile/internal/base/print.go283
-rw-r--r--src/cmd/compile/internal/base/timings.go237
-rw-r--r--src/cmd/compile/internal/bitvec/bv.go201
-rw-r--r--src/cmd/compile/internal/compare/compare.go381
-rw-r--r--src/cmd/compile/internal/compare/compare_test.go101
-rw-r--r--src/cmd/compile/internal/coverage/cover.go200
-rw-r--r--src/cmd/compile/internal/devirtualize/devirtualize.go140
-rw-r--r--src/cmd/compile/internal/devirtualize/pgo.go820
-rw-r--r--src/cmd/compile/internal/devirtualize/pgo_test.go217
-rw-r--r--src/cmd/compile/internal/dwarfgen/dwarf.go594
-rw-r--r--src/cmd/compile/internal/dwarfgen/dwinl.go441
-rw-r--r--src/cmd/compile/internal/dwarfgen/marker.go94
-rw-r--r--src/cmd/compile/internal/dwarfgen/scope.go136
-rw-r--r--src/cmd/compile/internal/dwarfgen/scope_test.go527
-rw-r--r--src/cmd/compile/internal/escape/assign.go128
-rw-r--r--src/cmd/compile/internal/escape/call.go361
-rw-r--r--src/cmd/compile/internal/escape/escape.go509
-rw-r--r--src/cmd/compile/internal/escape/expr.go341
-rw-r--r--src/cmd/compile/internal/escape/graph.go376
-rw-r--r--src/cmd/compile/internal/escape/leaks.go126
-rw-r--r--src/cmd/compile/internal/escape/solve.go326
-rw-r--r--src/cmd/compile/internal/escape/stmt.go218
-rw-r--r--src/cmd/compile/internal/escape/utils.go222
-rw-r--r--src/cmd/compile/internal/gc/compile.go198
-rw-r--r--src/cmd/compile/internal/gc/export.go51
-rw-r--r--src/cmd/compile/internal/gc/main.go391
-rw-r--r--src/cmd/compile/internal/gc/obj.go284
-rw-r--r--src/cmd/compile/internal/gc/util.go117
-rw-r--r--src/cmd/compile/internal/importer/exportdata.go95
-rw-r--r--src/cmd/compile/internal/importer/gcimporter.go253
-rw-r--r--src/cmd/compile/internal/importer/gcimporter_test.go608
-rw-r--r--src/cmd/compile/internal/importer/iimport.go793
-rw-r--r--src/cmd/compile/internal/importer/support.go152
-rw-r--r--src/cmd/compile/internal/importer/testdata/a.go14
-rw-r--r--src/cmd/compile/internal/importer/testdata/b.go11
-rw-r--r--src/cmd/compile/internal/importer/testdata/exports.go91
-rw-r--r--src/cmd/compile/internal/importer/testdata/generics.go29
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue15920.go11
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue20046.go9
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue25301.go17
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue25596.go13
-rw-r--r--src/cmd/compile/internal/importer/testdata/p.go13
-rw-r--r--src/cmd/compile/internal/importer/testdata/versions/test.go28
-rw-r--r--src/cmd/compile/internal/importer/ureader.go535
-rw-r--r--src/cmd/compile/internal/inline/inl.go1217
-rw-r--r--src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go58
-rw-r--r--src/cmd/compile/internal/inline/inlheur/analyze.go370
-rw-r--r--src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go413
-rw-r--r--src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go356
-rw-r--r--src/cmd/compile/internal/inline/inlheur/analyze_func_params.go355
-rw-r--r--src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go277
-rw-r--r--src/cmd/compile/internal/inline/inlheur/callsite.go149
-rw-r--r--src/cmd/compile/internal/inline/inlheur/cspropbits_string.go56
-rw-r--r--src/cmd/compile/internal/inline/inlheur/debugflags_test.go65
-rw-r--r--src/cmd/compile/internal/inline/inlheur/dumpscores_test.go109
-rw-r--r--src/cmd/compile/internal/inline/inlheur/eclassify.go247
-rw-r--r--src/cmd/compile/internal/inline/inlheur/funcprop_string.go44
-rw-r--r--src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go58
-rw-r--r--src/cmd/compile/internal/inline/inlheur/funcprops_test.go530
-rw-r--r--src/cmd/compile/internal/inline/inlheur/function_properties.go98
-rw-r--r--src/cmd/compile/internal/inline/inlheur/names.go129
-rw-r--r--src/cmd/compile/internal/inline/inlheur/parampropbits_string.go70
-rw-r--r--src/cmd/compile/internal/inline/inlheur/pstate_string.go30
-rw-r--r--src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go68
-rw-r--r--src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go413
-rw-r--r--src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go80
-rw-r--r--src/cmd/compile/internal/inline/inlheur/scoring.go751
-rw-r--r--src/cmd/compile/internal/inline/inlheur/serialize.go80
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go45
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt77
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go214
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go240
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go341
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/params.go367
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go370
-rw-r--r--src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go231
-rw-r--r--src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go217
-rw-r--r--src/cmd/compile/internal/inline/inlheur/trace_off.go18
-rw-r--r--src/cmd/compile/internal/inline/inlheur/trace_on.go40
-rw-r--r--src/cmd/compile/internal/inline/inlheur/tserial_test.go65
-rw-r--r--src/cmd/compile/internal/inline/interleaved/interleaved.go132
-rw-r--r--src/cmd/compile/internal/ir/abi.go78
-rw-r--r--src/cmd/compile/internal/ir/bitset.go37
-rw-r--r--src/cmd/compile/internal/ir/cfg.go26
-rw-r--r--src/cmd/compile/internal/ir/check_reassign_no.go9
-rw-r--r--src/cmd/compile/internal/ir/check_reassign_yes.go9
-rw-r--r--src/cmd/compile/internal/ir/class_string.go30
-rw-r--r--src/cmd/compile/internal/ir/const.go161
-rw-r--r--src/cmd/compile/internal/ir/copy.go43
-rw-r--r--src/cmd/compile/internal/ir/dump.go256
-rw-r--r--src/cmd/compile/internal/ir/expr.go1256
-rw-r--r--src/cmd/compile/internal/ir/fmt.go1208
-rw-r--r--src/cmd/compile/internal/ir/func.go598
-rw-r--r--src/cmd/compile/internal/ir/func_test.go82
-rw-r--r--src/cmd/compile/internal/ir/ir.go5
-rw-r--r--src/cmd/compile/internal/ir/mini.go86
-rw-r--r--src/cmd/compile/internal/ir/mknode.go366
-rw-r--r--src/cmd/compile/internal/ir/name.go399
-rw-r--r--src/cmd/compile/internal/ir/node.go586
-rw-r--r--src/cmd/compile/internal/ir/node_gen.go1809
-rw-r--r--src/cmd/compile/internal/ir/op_string.go174
-rw-r--r--src/cmd/compile/internal/ir/package.go42
-rw-r--r--src/cmd/compile/internal/ir/reassign_consistency_check.go46
-rw-r--r--src/cmd/compile/internal/ir/reassignment.go205
-rw-r--r--src/cmd/compile/internal/ir/scc.go125
-rw-r--r--src/cmd/compile/internal/ir/sizeof_test.go37
-rw-r--r--src/cmd/compile/internal/ir/stmt.go505
-rw-r--r--src/cmd/compile/internal/ir/symtab.go82
-rw-r--r--src/cmd/compile/internal/ir/type.go69
-rw-r--r--src/cmd/compile/internal/ir/val.go107
-rw-r--r--src/cmd/compile/internal/ir/visit.go209
-rw-r--r--src/cmd/compile/internal/liveness/arg.go339
-rw-r--r--src/cmd/compile/internal/liveness/bvset.go98
-rw-r--r--src/cmd/compile/internal/liveness/plive.go1548
-rw-r--r--src/cmd/compile/internal/logopt/log_opts.go540
-rw-r--r--src/cmd/compile/internal/logopt/logopt_test.go250
-rw-r--r--src/cmd/compile/internal/loong64/galign.go25
-rw-r--r--src/cmd/compile/internal/loong64/ggen.go60
-rw-r--r--src/cmd/compile/internal/loong64/ssa.go830
-rw-r--r--src/cmd/compile/internal/loopvar/loopvar.go612
-rw-r--r--src/cmd/compile/internal/loopvar/loopvar_test.go383
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go115
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/for_esc_address.go45
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go51
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/for_esc_method.go51
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go48
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/for_nested.go47
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go20
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go21
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go14
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/inlines/main.go53
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/opt-121.go43
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/opt-122.go43
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/opt.go41
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/range_esc_address.go47
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go53
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/range_esc_method.go53
-rw-r--r--src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go50
-rw-r--r--src/cmd/compile/internal/mips/galign.go27
-rw-r--r--src/cmd/compile/internal/mips/ggen.go51
-rw-r--r--src/cmd/compile/internal/mips/ssa.go880
-rw-r--r--src/cmd/compile/internal/mips64/galign.go28
-rw-r--r--src/cmd/compile/internal/mips64/ggen.go55
-rw-r--r--src/cmd/compile/internal/mips64/ssa.go889
-rw-r--r--src/cmd/compile/internal/noder/codes.go91
-rw-r--r--src/cmd/compile/internal/noder/export.go30
-rw-r--r--src/cmd/compile/internal/noder/helpers.go140
-rw-r--r--src/cmd/compile/internal/noder/import.go374
-rw-r--r--src/cmd/compile/internal/noder/irgen.go238
-rw-r--r--src/cmd/compile/internal/noder/lex.go184
-rw-r--r--src/cmd/compile/internal/noder/lex_test.go122
-rw-r--r--src/cmd/compile/internal/noder/linker.go349
-rw-r--r--src/cmd/compile/internal/noder/noder.go449
-rw-r--r--src/cmd/compile/internal/noder/posmap.go74
-rw-r--r--src/cmd/compile/internal/noder/quirks.go79
-rw-r--r--src/cmd/compile/internal/noder/reader.go3941
-rw-r--r--src/cmd/compile/internal/noder/stencil.go16
-rw-r--r--src/cmd/compile/internal/noder/stmt.go24
-rw-r--r--src/cmd/compile/internal/noder/types.go53
-rw-r--r--src/cmd/compile/internal/noder/unified.go535
-rw-r--r--src/cmd/compile/internal/noder/writer.go3003
-rw-r--r--src/cmd/compile/internal/objw/objw.go102
-rw-r--r--src/cmd/compile/internal/objw/prog.go214
-rw-r--r--src/cmd/compile/internal/pgo/internal/graph/graph.go520
-rw-r--r--src/cmd/compile/internal/pgo/irgraph.go603
-rw-r--r--src/cmd/compile/internal/pkginit/init.go148
-rw-r--r--src/cmd/compile/internal/pkginit/initAsanGlobals.go236
-rw-r--r--src/cmd/compile/internal/ppc64/galign.go29
-rw-r--r--src/cmd/compile/internal/ppc64/ggen.go54
-rw-r--r--src/cmd/compile/internal/ppc64/opt.go12
-rw-r--r--src/cmd/compile/internal/ppc64/ssa.go2078
-rw-r--r--src/cmd/compile/internal/rangefunc/rangefunc_test.go1297
-rw-r--r--src/cmd/compile/internal/rangefunc/rewrite.go1334
-rw-r--r--src/cmd/compile/internal/reflectdata/alg.go667
-rw-r--r--src/cmd/compile/internal/reflectdata/alg_test.go147
-rw-r--r--src/cmd/compile/internal/reflectdata/helpers.go216
-rw-r--r--src/cmd/compile/internal/reflectdata/reflect.go1898
-rw-r--r--src/cmd/compile/internal/riscv64/galign.go26
-rw-r--r--src/cmd/compile/internal/riscv64/ggen.go59
-rw-r--r--src/cmd/compile/internal/riscv64/gsubr.go20
-rw-r--r--src/cmd/compile/internal/riscv64/ssa.go817
-rw-r--r--src/cmd/compile/internal/rttype/rttype.go283
-rw-r--r--src/cmd/compile/internal/s390x/galign.go23
-rw-r--r--src/cmd/compile/internal/s390x/ggen.go89
-rw-r--r--src/cmd/compile/internal/s390x/ssa.go959
-rw-r--r--src/cmd/compile/internal/ssa/README.md222
-rw-r--r--src/cmd/compile/internal/ssa/TODO24
-rw-r--r--src/cmd/compile/internal/ssa/_gen/386.rules941
-rw-r--r--src/cmd/compile/internal/ssa/_gen/386Ops.go590
-rw-r--r--src/cmd/compile/internal/ssa/_gen/386splitload.rules11
-rw-r--r--src/cmd/compile/internal/ssa/_gen/AMD64.rules1700
-rw-r--r--src/cmd/compile/internal/ssa/_gen/AMD64Ops.go1167
-rw-r--r--src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules8
-rw-r--r--src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules45
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM.rules1475
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM64.rules1999
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM64Ops.go803
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules21
-rw-r--r--src/cmd/compile/internal/ssa/_gen/ARMOps.go600
-rw-r--r--src/cmd/compile/internal/ssa/_gen/LOONG64.rules664
-rw-r--r--src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go486
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPS.rules716
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPS64.rules817
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go501
-rw-r--r--src/cmd/compile/internal/ssa/_gen/MIPSOps.go447
-rw-r--r--src/cmd/compile/internal/ssa/_gen/PPC64.rules1018
-rw-r--r--src/cmd/compile/internal/ssa/_gen/PPC64Ops.go755
-rw-r--r--src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules55
-rw-r--r--src/cmd/compile/internal/ssa/_gen/README11
-rw-r--r--src/cmd/compile/internal/ssa/_gen/RISCV64.rules821
-rw-r--r--src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go492
-rw-r--r--src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules19
-rw-r--r--src/cmd/compile/internal/ssa/_gen/S390X.rules1368
-rw-r--r--src/cmd/compile/internal/ssa/_gen/S390XOps.go819
-rw-r--r--src/cmd/compile/internal/ssa/_gen/Wasm.rules397
-rw-r--r--src/cmd/compile/internal/ssa/_gen/WasmOps.go277
-rw-r--r--src/cmd/compile/internal/ssa/_gen/allocators.go229
-rwxr-xr-xsrc/cmd/compile/internal/ssa/_gen/cover.bash26
-rw-r--r--src/cmd/compile/internal/ssa/_gen/dec.rules201
-rw-r--r--src/cmd/compile/internal/ssa/_gen/dec64.rules401
-rw-r--r--src/cmd/compile/internal/ssa/_gen/dec64Ops.go18
-rw-r--r--src/cmd/compile/internal/ssa/_gen/decOps.go18
-rw-r--r--src/cmd/compile/internal/ssa/_gen/generic.rules2756
-rw-r--r--src/cmd/compile/internal/ssa/_gen/genericOps.go675
-rw-r--r--src/cmd/compile/internal/ssa/_gen/main.go571
-rw-r--r--src/cmd/compile/internal/ssa/_gen/rulegen.go1885
-rw-r--r--src/cmd/compile/internal/ssa/addressingmodes.go518
-rw-r--r--src/cmd/compile/internal/ssa/allocators.go311
-rw-r--r--src/cmd/compile/internal/ssa/bench_test.go50
-rw-r--r--src/cmd/compile/internal/ssa/biasedsparsemap.go111
-rw-r--r--src/cmd/compile/internal/ssa/block.go428
-rw-r--r--src/cmd/compile/internal/ssa/branchelim.go470
-rw-r--r--src/cmd/compile/internal/ssa/branchelim_test.go172
-rw-r--r--src/cmd/compile/internal/ssa/cache.go62
-rw-r--r--src/cmd/compile/internal/ssa/check.go630
-rw-r--r--src/cmd/compile/internal/ssa/checkbce.go35
-rw-r--r--src/cmd/compile/internal/ssa/compile.go613
-rw-r--r--src/cmd/compile/internal/ssa/config.go420
-rw-r--r--src/cmd/compile/internal/ssa/copyelim.go84
-rw-r--r--src/cmd/compile/internal/ssa/copyelim_test.go41
-rw-r--r--src/cmd/compile/internal/ssa/critical.go111
-rw-r--r--src/cmd/compile/internal/ssa/cse.go378
-rw-r--r--src/cmd/compile/internal/ssa/cse_test.go130
-rw-r--r--src/cmd/compile/internal/ssa/deadcode.go366
-rw-r--r--src/cmd/compile/internal/ssa/deadcode_test.go161
-rw-r--r--src/cmd/compile/internal/ssa/deadstore.go397
-rw-r--r--src/cmd/compile/internal/ssa/deadstore_test.go129
-rw-r--r--src/cmd/compile/internal/ssa/debug.go1886
-rw-r--r--src/cmd/compile/internal/ssa/debug_lines_test.go269
-rw-r--r--src/cmd/compile/internal/ssa/debug_test.go1016
-rw-r--r--src/cmd/compile/internal/ssa/decompose.go479
-rw-r--r--src/cmd/compile/internal/ssa/dom.go275
-rw-r--r--src/cmd/compile/internal/ssa/dom_test.go608
-rw-r--r--src/cmd/compile/internal/ssa/expand_calls.go1035
-rw-r--r--src/cmd/compile/internal/ssa/export_test.go120
-rw-r--r--src/cmd/compile/internal/ssa/flagalloc.go270
-rw-r--r--src/cmd/compile/internal/ssa/flags_amd64_test.s29
-rw-r--r--src/cmd/compile/internal/ssa/flags_arm64_test.s30
-rw-r--r--src/cmd/compile/internal/ssa/flags_test.go108
-rw-r--r--src/cmd/compile/internal/ssa/fmahash_test.go52
-rw-r--r--src/cmd/compile/internal/ssa/func.go842
-rw-r--r--src/cmd/compile/internal/ssa/func_test.go482
-rw-r--r--src/cmd/compile/internal/ssa/fuse.go333
-rw-r--r--src/cmd/compile/internal/ssa/fuse_branchredirect.go112
-rw-r--r--src/cmd/compile/internal/ssa/fuse_comparisons.go157
-rw-r--r--src/cmd/compile/internal/ssa/fuse_test.go305
-rw-r--r--src/cmd/compile/internal/ssa/generate.go9
-rw-r--r--src/cmd/compile/internal/ssa/html.go1319
-rw-r--r--src/cmd/compile/internal/ssa/id.go28
-rw-r--r--src/cmd/compile/internal/ssa/layout.go185
-rw-r--r--src/cmd/compile/internal/ssa/lca.go127
-rw-r--r--src/cmd/compile/internal/ssa/lca_test.go88
-rw-r--r--src/cmd/compile/internal/ssa/likelyadjust.go580
-rw-r--r--src/cmd/compile/internal/ssa/location.go109
-rw-r--r--src/cmd/compile/internal/ssa/loopbce.go437
-rw-r--r--src/cmd/compile/internal/ssa/loopreschedchecks.go512
-rw-r--r--src/cmd/compile/internal/ssa/looprotate.go113
-rw-r--r--src/cmd/compile/internal/ssa/lower.go52
-rw-r--r--src/cmd/compile/internal/ssa/magic.go426
-rw-r--r--src/cmd/compile/internal/ssa/magic_test.go410
-rw-r--r--src/cmd/compile/internal/ssa/memcombine.go806
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck.go337
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck_test.go438
-rw-r--r--src/cmd/compile/internal/ssa/numberlines.go262
-rw-r--r--src/cmd/compile/internal/ssa/op.go529
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go41139
-rw-r--r--src/cmd/compile/internal/ssa/opt.go10
-rw-r--r--src/cmd/compile/internal/ssa/passbm_test.go101
-rw-r--r--src/cmd/compile/internal/ssa/phielim.go75
-rw-r--r--src/cmd/compile/internal/ssa/phiopt.go325
-rw-r--r--src/cmd/compile/internal/ssa/poset.go1358
-rw-r--r--src/cmd/compile/internal/ssa/poset_test.go800
-rw-r--r--src/cmd/compile/internal/ssa/print.go192
-rw-r--r--src/cmd/compile/internal/ssa/prove.go1813
-rw-r--r--src/cmd/compile/internal/ssa/regalloc.go2947
-rw-r--r--src/cmd/compile/internal/ssa/regalloc_test.go229
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go2211
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386.go11602
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386splitload.go159
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go31785
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64latelower.go134
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64splitload.go850
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM.go21838
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go27265
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64latelower.go288
-rw-r--r--src/cmd/compile/internal/ssa/rewriteCond_test.go635
-rw-r--r--src/cmd/compile/internal/ssa/rewriteLOONG64.go8037
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS.go7660
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS64.go8604
-rw-r--r--src/cmd/compile/internal/ssa/rewritePPC64.go16564
-rw-r--r--src/cmd/compile/internal/ssa/rewritePPC64latelower.go705
-rw-r--r--src/cmd/compile/internal/ssa/rewriteRISCV64.go9124
-rw-r--r--src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go246
-rw-r--r--src/cmd/compile/internal/ssa/rewriteS390X.go16638
-rw-r--r--src/cmd/compile/internal/ssa/rewriteWasm.go4877
-rw-r--r--src/cmd/compile/internal/ssa/rewrite_test.go220
-rw-r--r--src/cmd/compile/internal/ssa/rewritedec.go1094
-rw-r--r--src/cmd/compile/internal/ssa/rewritedec64.go2537
-rw-r--r--src/cmd/compile/internal/ssa/rewritegeneric.go33938
-rw-r--r--src/cmd/compile/internal/ssa/sccp.go585
-rw-r--r--src/cmd/compile/internal/ssa/sccp_test.go95
-rw-r--r--src/cmd/compile/internal/ssa/schedule.go575
-rw-r--r--src/cmd/compile/internal/ssa/schedule_test.go160
-rw-r--r--src/cmd/compile/internal/ssa/shift_test.go107
-rw-r--r--src/cmd/compile/internal/ssa/shortcircuit.go513
-rw-r--r--src/cmd/compile/internal/ssa/shortcircuit_test.go53
-rw-r--r--src/cmd/compile/internal/ssa/sizeof_test.go39
-rw-r--r--src/cmd/compile/internal/ssa/softfloat.go80
-rw-r--r--src/cmd/compile/internal/ssa/sparsemap.go89
-rw-r--r--src/cmd/compile/internal/ssa/sparsemappos.go79
-rw-r--r--src/cmd/compile/internal/ssa/sparseset.go79
-rw-r--r--src/cmd/compile/internal/ssa/sparsetree.go242
-rw-r--r--src/cmd/compile/internal/ssa/stackalloc.go454
-rw-r--r--src/cmd/compile/internal/ssa/stmtlines_test.go158
-rw-r--r--src/cmd/compile/internal/ssa/testdata/b53456.go19
-rw-r--r--src/cmd/compile/internal/ssa/testdata/convertline.go16
-rw-r--r--src/cmd/compile/internal/ssa/testdata/fma.go37
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts99
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts94
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts123
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts143
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.go106
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts11
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts11
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.go51
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts7
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts7
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.go27
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts12
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts4
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.go16
-rw-r--r--src/cmd/compile/internal/ssa/testdata/inline-dump.go17
-rw-r--r--src/cmd/compile/internal/ssa/testdata/pushback.go30
-rw-r--r--src/cmd/compile/internal/ssa/testdata/sayhi.go12
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts56
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts46
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts64
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts55
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.go107
-rw-r--r--src/cmd/compile/internal/ssa/tighten.go269
-rw-r--r--src/cmd/compile/internal/ssa/trim.go172
-rw-r--r--src/cmd/compile/internal/ssa/tuple.go71
-rw-r--r--src/cmd/compile/internal/ssa/value.go620
-rw-r--r--src/cmd/compile/internal/ssa/writebarrier.go804
-rw-r--r--src/cmd/compile/internal/ssa/writebarrier_test.go56
-rw-r--r--src/cmd/compile/internal/ssa/xposmap.go116
-rw-r--r--src/cmd/compile/internal/ssa/zcse.go79
-rw-r--r--src/cmd/compile/internal/ssa/zeroextension_test.go34
-rw-r--r--src/cmd/compile/internal/ssagen/abi.go440
-rw-r--r--src/cmd/compile/internal/ssagen/arch.go51
-rw-r--r--src/cmd/compile/internal/ssagen/nowb.go195
-rw-r--r--src/cmd/compile/internal/ssagen/pgen.go364
-rw-r--r--src/cmd/compile/internal/ssagen/phi.go557
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go8369
-rw-r--r--src/cmd/compile/internal/staticdata/data.go346
-rw-r--r--src/cmd/compile/internal/staticdata/embed.go174
-rw-r--r--src/cmd/compile/internal/staticinit/sched.go1210
-rw-r--r--src/cmd/compile/internal/syntax/branches.go339
-rw-r--r--src/cmd/compile/internal/syntax/dumper.go212
-rw-r--r--src/cmd/compile/internal/syntax/dumper_test.go21
-rw-r--r--src/cmd/compile/internal/syntax/error_test.go190
-rw-r--r--src/cmd/compile/internal/syntax/nodes.go487
-rw-r--r--src/cmd/compile/internal/syntax/nodes_test.go326
-rw-r--r--src/cmd/compile/internal/syntax/operator_string.go46
-rw-r--r--src/cmd/compile/internal/syntax/parser.go2849
-rw-r--r--src/cmd/compile/internal/syntax/parser_test.go395
-rw-r--r--src/cmd/compile/internal/syntax/pos.go211
-rw-r--r--src/cmd/compile/internal/syntax/positions.go364
-rw-r--r--src/cmd/compile/internal/syntax/printer.go1020
-rw-r--r--src/cmd/compile/internal/syntax/printer_test.go285
-rw-r--r--src/cmd/compile/internal/syntax/scanner.go881
-rw-r--r--src/cmd/compile/internal/syntax/scanner_test.go767
-rw-r--r--src/cmd/compile/internal/syntax/source.go218
-rw-r--r--src/cmd/compile/internal/syntax/syntax.go94
-rw-r--r--src/cmd/compile/internal/syntax/testdata/chans.go66
-rw-r--r--src/cmd/compile/internal/syntax/testdata/fallthrough.go55
-rw-r--r--src/cmd/compile/internal/syntax/testdata/interface.go74
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue20789.go9
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue23385.go17
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue23434.go31
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue31092.go16
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue43527.go23
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue43674.go13
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue46558.go14
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue47704.go17
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue48382.go16
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue49205.go27
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue49482.go31
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue52391.go17
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue56022.go10
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue60599.go11
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue63835.go9
-rw-r--r--src/cmd/compile/internal/syntax/testdata/linalg.go83
-rw-r--r--src/cmd/compile/internal/syntax/testdata/map.go112
-rw-r--r--src/cmd/compile/internal/syntax/testdata/map2.go146
-rw-r--r--src/cmd/compile/internal/syntax/testdata/sample.go33
-rw-r--r--src/cmd/compile/internal/syntax/testdata/slices.go68
-rw-r--r--src/cmd/compile/internal/syntax/testdata/smoketest.go73
-rw-r--r--src/cmd/compile/internal/syntax/testdata/tparams.go57
-rw-r--r--src/cmd/compile/internal/syntax/testdata/typeset.go91
-rw-r--r--src/cmd/compile/internal/syntax/testing.go69
-rw-r--r--src/cmd/compile/internal/syntax/testing_test.go48
-rw-r--r--src/cmd/compile/internal/syntax/token_string.go70
-rw-r--r--src/cmd/compile/internal/syntax/tokens.go159
-rw-r--r--src/cmd/compile/internal/syntax/type.go75
-rw-r--r--src/cmd/compile/internal/syntax/walk.go346
-rw-r--r--src/cmd/compile/internal/test/README4
-rw-r--r--src/cmd/compile/internal/test/abiutils_test.go398
-rw-r--r--src/cmd/compile/internal/test/abiutilsaux_test.go131
-rw-r--r--src/cmd/compile/internal/test/align_test.go96
-rw-r--r--src/cmd/compile/internal/test/bench_test.go124
-rw-r--r--src/cmd/compile/internal/test/clobberdead_test.go54
-rw-r--r--src/cmd/compile/internal/test/constFold_test.go18111
-rw-r--r--src/cmd/compile/internal/test/dep_test.go29
-rw-r--r--src/cmd/compile/internal/test/divconst_test.go325
-rw-r--r--src/cmd/compile/internal/test/fixedbugs_test.go86
-rw-r--r--src/cmd/compile/internal/test/float_test.go545
-rw-r--r--src/cmd/compile/internal/test/global_test.go106
-rw-r--r--src/cmd/compile/internal/test/iface_test.go138
-rw-r--r--src/cmd/compile/internal/test/inl_test.go411
-rw-r--r--src/cmd/compile/internal/test/inst_test.go60
-rw-r--r--src/cmd/compile/internal/test/intrinsics_test.go62
-rw-r--r--src/cmd/compile/internal/test/issue50182_test.go62
-rw-r--r--src/cmd/compile/internal/test/issue53888_test.go46
-rw-r--r--src/cmd/compile/internal/test/issue57434_test.go38
-rw-r--r--src/cmd/compile/internal/test/lang_test.go58
-rw-r--r--src/cmd/compile/internal/test/logic_test.go293
-rw-r--r--src/cmd/compile/internal/test/math_test.go171
-rw-r--r--src/cmd/compile/internal/test/memcombine_test.go199
-rw-r--r--src/cmd/compile/internal/test/mulconst_test.go242
-rw-r--r--src/cmd/compile/internal/test/pgo_devirtualize_test.go261
-rw-r--r--src/cmd/compile/internal/test/pgo_inl_test.go344
-rw-r--r--src/cmd/compile/internal/test/race.go64
-rw-r--r--src/cmd/compile/internal/test/reproduciblebuilds_test.go106
-rw-r--r--src/cmd/compile/internal/test/shift_test.go1152
-rw-r--r--src/cmd/compile/internal/test/ssa_test.go179
-rw-r--r--src/cmd/compile/internal/test/switch_test.go296
-rw-r--r--src/cmd/compile/internal/test/test.go5
-rw-r--r--src/cmd/compile/internal/test/testdata/addressed_test.go214
-rw-r--r--src/cmd/compile/internal/test/testdata/append_test.go61
-rw-r--r--src/cmd/compile/internal/test/testdata/arithBoundary_test.go694
-rw-r--r--src/cmd/compile/internal/test/testdata/arithConst_test.go9570
-rw-r--r--src/cmd/compile/internal/test/testdata/arith_test.go1564
-rw-r--r--src/cmd/compile/internal/test/testdata/array_test.go132
-rw-r--r--src/cmd/compile/internal/test/testdata/assert_test.go128
-rw-r--r--src/cmd/compile/internal/test/testdata/break_test.go250
-rw-r--r--src/cmd/compile/internal/test/testdata/chan_test.go63
-rw-r--r--src/cmd/compile/internal/test/testdata/closure_test.go32
-rw-r--r--src/cmd/compile/internal/test/testdata/cmpConst_test.go2209
-rw-r--r--src/cmd/compile/internal/test/testdata/cmp_test.go37
-rw-r--r--src/cmd/compile/internal/test/testdata/compound_test.go128
-rw-r--r--src/cmd/compile/internal/test/testdata/copy_test.go760
-rw-r--r--src/cmd/compile/internal/test/testdata/ctl_test.go148
-rw-r--r--src/cmd/compile/internal/test/testdata/deferNoReturn_test.go21
-rw-r--r--src/cmd/compile/internal/test/testdata/divbyzero_test.go48
-rw-r--r--src/cmd/compile/internal/test/testdata/dupLoad_test.go83
-rw-r--r--src/cmd/compile/internal/test/testdata/flowgraph_generator1.go315
-rw-r--r--src/cmd/compile/internal/test/testdata/fp_test.go1775
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go208
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/arithConstGen.go345
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go246
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/constFoldGen.go307
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/copyGen.go121
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/zeroGen.go143
-rw-r--r--src/cmd/compile/internal/test/testdata/loadstore_test.go205
-rw-r--r--src/cmd/compile/internal/test/testdata/map_test.go37
-rw-r--r--src/cmd/compile/internal/test/testdata/mysort/mysort.go40
-rw-r--r--src/cmd/compile/internal/test/testdata/namedReturn_test.go93
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go252
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprofbin0 -> 1345 bytes
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go73
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go72
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go90
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprofbin0 -> 943 bytes
-rw-r--r--src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go51
-rw-r--r--src/cmd/compile/internal/test/testdata/phi_test.go99
-rw-r--r--src/cmd/compile/internal/test/testdata/ptrsort.go30
-rw-r--r--src/cmd/compile/internal/test/testdata/ptrsort.out3
-rw-r--r--src/cmd/compile/internal/test/testdata/regalloc_test.go50
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue20272.go34
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue27013.go15
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue30202.go17
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue38068.go70
-rw-r--r--src/cmd/compile/internal/test/testdata/short_test.go57
-rw-r--r--src/cmd/compile/internal/test/testdata/slice_test.go46
-rw-r--r--src/cmd/compile/internal/test/testdata/sqrtConst_test.go50
-rw-r--r--src/cmd/compile/internal/test/testdata/string_test.go207
-rw-r--r--src/cmd/compile/internal/test/testdata/unsafe_test.go145
-rw-r--r--src/cmd/compile/internal/test/testdata/zero_test.go711
-rw-r--r--src/cmd/compile/internal/test/truncconst_test.go63
-rw-r--r--src/cmd/compile/internal/test/zerorange_test.go184
-rw-r--r--src/cmd/compile/internal/typebits/typebits.go96
-rw-r--r--src/cmd/compile/internal/typecheck/_builtin/coverage.go13
-rw-r--r--src/cmd/compile/internal/typecheck/_builtin/runtime.go286
-rw-r--r--src/cmd/compile/internal/typecheck/bexport.go16
-rw-r--r--src/cmd/compile/internal/typecheck/builtin.go408
-rw-r--r--src/cmd/compile/internal/typecheck/builtin_test.go31
-rw-r--r--src/cmd/compile/internal/typecheck/const.go486
-rw-r--r--src/cmd/compile/internal/typecheck/dcl.go125
-rw-r--r--src/cmd/compile/internal/typecheck/export.go33
-rw-r--r--src/cmd/compile/internal/typecheck/expr.go933
-rw-r--r--src/cmd/compile/internal/typecheck/func.go834
-rw-r--r--src/cmd/compile/internal/typecheck/iexport.go260
-rw-r--r--src/cmd/compile/internal/typecheck/iimport.go53
-rw-r--r--src/cmd/compile/internal/typecheck/mkbuiltin.go254
-rw-r--r--src/cmd/compile/internal/typecheck/stmt.go727
-rw-r--r--src/cmd/compile/internal/typecheck/subr.go792
-rw-r--r--src/cmd/compile/internal/typecheck/syms.go134
-rw-r--r--src/cmd/compile/internal/typecheck/target.go12
-rw-r--r--src/cmd/compile/internal/typecheck/type.go5
-rw-r--r--src/cmd/compile/internal/typecheck/typecheck.go1317
-rw-r--r--src/cmd/compile/internal/typecheck/universe.go197
-rw-r--r--src/cmd/compile/internal/types/alg.go169
-rw-r--r--src/cmd/compile/internal/types/algkind_string.go48
-rw-r--r--src/cmd/compile/internal/types/fmt.go650
-rw-r--r--src/cmd/compile/internal/types/goversion.go88
-rw-r--r--src/cmd/compile/internal/types/identity.go157
-rw-r--r--src/cmd/compile/internal/types/kind_string.go60
-rw-r--r--src/cmd/compile/internal/types/pkg.go131
-rw-r--r--src/cmd/compile/internal/types/size.go638
-rw-r--r--src/cmd/compile/internal/types/sizeof_test.go48
-rw-r--r--src/cmd/compile/internal/types/sort.go19
-rw-r--r--src/cmd/compile/internal/types/sym.go138
-rw-r--r--src/cmd/compile/internal/types/sym_test.go59
-rw-r--r--src/cmd/compile/internal/types/type.go1983
-rw-r--r--src/cmd/compile/internal/types/type_test.go27
-rw-r--r--src/cmd/compile/internal/types/universe.go154
-rw-r--r--src/cmd/compile/internal/types/utils.go17
-rw-r--r--src/cmd/compile/internal/types2/alias.go88
-rw-r--r--src/cmd/compile/internal/types2/api.go471
-rw-r--r--src/cmd/compile/internal/types2/api_predicates.go84
-rw-r--r--src/cmd/compile/internal/types2/api_test.go2939
-rw-r--r--src/cmd/compile/internal/types2/array.go25
-rw-r--r--src/cmd/compile/internal/types2/assignments.go575
-rw-r--r--src/cmd/compile/internal/types2/basic.go82
-rw-r--r--src/cmd/compile/internal/types2/builtins.go1047
-rw-r--r--src/cmd/compile/internal/types2/builtins_test.go250
-rw-r--r--src/cmd/compile/internal/types2/call.go999
-rw-r--r--src/cmd/compile/internal/types2/chan.go35
-rw-r--r--src/cmd/compile/internal/types2/check.go704
-rw-r--r--src/cmd/compile/internal/types2/check_test.go461
-rw-r--r--src/cmd/compile/internal/types2/compilersupport.go30
-rw-r--r--src/cmd/compile/internal/types2/const.go306
-rw-r--r--src/cmd/compile/internal/types2/context.go144
-rw-r--r--src/cmd/compile/internal/types2/context_test.go69
-rw-r--r--src/cmd/compile/internal/types2/conversions.go311
-rw-r--r--src/cmd/compile/internal/types2/decl.go910
-rw-r--r--src/cmd/compile/internal/types2/errorcalls_test.go95
-rw-r--r--src/cmd/compile/internal/types2/errors.go332
-rw-r--r--src/cmd/compile/internal/types2/errors_test.go44
-rw-r--r--src/cmd/compile/internal/types2/example_test.go252
-rw-r--r--src/cmd/compile/internal/types2/expr.go1699
-rw-r--r--src/cmd/compile/internal/types2/gccgosizes.go41
-rw-r--r--src/cmd/compile/internal/types2/gcsizes.go170
-rw-r--r--src/cmd/compile/internal/types2/hilbert_test.go206
-rw-r--r--src/cmd/compile/internal/types2/importer_test.go35
-rw-r--r--src/cmd/compile/internal/types2/index.go464
-rw-r--r--src/cmd/compile/internal/types2/infer.go790
-rw-r--r--src/cmd/compile/internal/types2/initorder.go328
-rw-r--r--src/cmd/compile/internal/types2/instantiate.go366
-rw-r--r--src/cmd/compile/internal/types2/instantiate_test.go232
-rw-r--r--src/cmd/compile/internal/types2/interface.go186
-rw-r--r--src/cmd/compile/internal/types2/issues_test.go1095
-rw-r--r--src/cmd/compile/internal/types2/labels.go269
-rw-r--r--src/cmd/compile/internal/types2/lookup.go603
-rw-r--r--src/cmd/compile/internal/types2/lookup_test.go55
-rw-r--r--src/cmd/compile/internal/types2/main_test.go17
-rw-r--r--src/cmd/compile/internal/types2/map.go24
-rw-r--r--src/cmd/compile/internal/types2/mono.go339
-rw-r--r--src/cmd/compile/internal/types2/mono_test.go82
-rw-r--r--src/cmd/compile/internal/types2/named.go658
-rw-r--r--src/cmd/compile/internal/types2/named_test.go114
-rw-r--r--src/cmd/compile/internal/types2/object.go619
-rw-r--r--src/cmd/compile/internal/types2/object_test.go156
-rw-r--r--src/cmd/compile/internal/types2/objset.go31
-rw-r--r--src/cmd/compile/internal/types2/operand.go396
-rw-r--r--src/cmd/compile/internal/types2/package.go80
-rw-r--r--src/cmd/compile/internal/types2/pointer.go19
-rw-r--r--src/cmd/compile/internal/types2/predicates.go546
-rw-r--r--src/cmd/compile/internal/types2/resolver.go776
-rw-r--r--src/cmd/compile/internal/types2/resolver_test.go218
-rw-r--r--src/cmd/compile/internal/types2/return.go184
-rw-r--r--src/cmd/compile/internal/types2/scope.go292
-rw-r--r--src/cmd/compile/internal/types2/selection.go180
-rw-r--r--src/cmd/compile/internal/types2/self_test.go118
-rw-r--r--src/cmd/compile/internal/types2/signature.go332
-rw-r--r--src/cmd/compile/internal/types2/sizeof_test.go64
-rw-r--r--src/cmd/compile/internal/types2/sizes.go340
-rw-r--r--src/cmd/compile/internal/types2/sizes_test.go194
-rw-r--r--src/cmd/compile/internal/types2/slice.go19
-rw-r--r--src/cmd/compile/internal/types2/stdlib_test.go488
-rw-r--r--src/cmd/compile/internal/types2/stmt.go1059
-rw-r--r--src/cmd/compile/internal/types2/struct.go230
-rw-r--r--src/cmd/compile/internal/types2/subst.go428
-rw-r--r--src/cmd/compile/internal/types2/termlist.go161
-rw-r--r--src/cmd/compile/internal/types2/termlist_test.go284
-rw-r--r--src/cmd/compile/internal/types2/testdata/local/issue47996.go8
-rw-r--r--src/cmd/compile/internal/types2/testdata/manual.go8
-rw-r--r--src/cmd/compile/internal/types2/tuple.go34
-rw-r--r--src/cmd/compile/internal/types2/type.go11
-rw-r--r--src/cmd/compile/internal/types2/typelists.go69
-rw-r--r--src/cmd/compile/internal/types2/typeparam.go156
-rw-r--r--src/cmd/compile/internal/types2/typeset.go415
-rw-r--r--src/cmd/compile/internal/types2/typeset_test.go80
-rw-r--r--src/cmd/compile/internal/types2/typestring.go504
-rw-r--r--src/cmd/compile/internal/types2/typestring_test.go166
-rw-r--r--src/cmd/compile/internal/types2/typeterm.go165
-rw-r--r--src/cmd/compile/internal/types2/typeterm_test.go239
-rw-r--r--src/cmd/compile/internal/types2/typexpr.go551
-rw-r--r--src/cmd/compile/internal/types2/under.go114
-rw-r--r--src/cmd/compile/internal/types2/unify.go796
-rw-r--r--src/cmd/compile/internal/types2/union.go199
-rw-r--r--src/cmd/compile/internal/types2/universe.go288
-rw-r--r--src/cmd/compile/internal/types2/util.go22
-rw-r--r--src/cmd/compile/internal/types2/util_test.go17
-rw-r--r--src/cmd/compile/internal/types2/validtype.go256
-rw-r--r--src/cmd/compile/internal/types2/version.go126
-rw-r--r--src/cmd/compile/internal/walk/assign.go733
-rw-r--r--src/cmd/compile/internal/walk/builtin.go888
-rw-r--r--src/cmd/compile/internal/walk/closure.go230
-rw-r--r--src/cmd/compile/internal/walk/compare.go514
-rw-r--r--src/cmd/compile/internal/walk/complit.go684
-rw-r--r--src/cmd/compile/internal/walk/convert.go536
-rw-r--r--src/cmd/compile/internal/walk/expr.go1096
-rw-r--r--src/cmd/compile/internal/walk/order.go1550
-rw-r--r--src/cmd/compile/internal/walk/range.go576
-rw-r--r--src/cmd/compile/internal/walk/select.go302
-rw-r--r--src/cmd/compile/internal/walk/stmt.go229
-rw-r--r--src/cmd/compile/internal/walk/switch.go966
-rw-r--r--src/cmd/compile/internal/walk/temp.go40
-rw-r--r--src/cmd/compile/internal/walk/walk.go393
-rw-r--r--src/cmd/compile/internal/wasm/ssa.go623
-rw-r--r--src/cmd/compile/internal/x86/galign.go39
-rw-r--r--src/cmd/compile/internal/x86/ggen.go50
-rw-r--r--src/cmd/compile/internal/x86/ssa.go958
-rw-r--r--src/cmd/compile/main.go59
-rwxr-xr-xsrc/cmd/compile/profile.sh21
684 files changed, 496309 insertions, 0 deletions
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
new file mode 100644
index 0000000..9b99a1b
--- /dev/null
+++ b/src/cmd/compile/README.md
@@ -0,0 +1,316 @@
+<!---
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+-->
+
+## Introduction to the Go compiler
+
+`cmd/compile` contains the main packages that form the Go compiler. The compiler
+may be logically split in four phases, which we will briefly describe alongside
+the list of packages that contain their code.
+
+You may sometimes hear the terms "front-end" and "back-end" when referring to
+the compiler. Roughly speaking, these translate to the first two and last two
+phases we are going to list here. A third term, "middle-end", often refers to
+much of the work that happens in the second phase.
+
+Note that the `go/*` family of packages, such as `go/parser` and
+`go/types`, are mostly unused by the compiler. Since the compiler was
+initially written in C, the `go/*` packages were developed to enable
+writing tools working with Go code, such as `gofmt` and `vet`.
+However, over time the compiler's internal APIs have slowly evolved to
+be more familiar to users of the `go/*` packages.
+
+It should be clarified that the name "gc" stands for "Go compiler", and has
+little to do with uppercase "GC", which stands for garbage collection.
+
+### 1. Parsing
+
+* `cmd/compile/internal/syntax` (lexer, parser, syntax tree)
+
+In the first phase of compilation, source code is tokenized (lexical analysis),
+parsed (syntax analysis), and a syntax tree is constructed for each source
+file.
+
+Each syntax tree is an exact representation of the respective source file, with
+nodes corresponding to the various elements of the source such as expressions,
+declarations, and statements. The syntax tree also includes position information
+which is used for error reporting and the creation of debugging information.
+
+### 2. Type checking
+
+* `cmd/compile/internal/types2` (type checking)
+
+The types2 package is a port of `go/types` to use the syntax package's
+AST instead of `go/ast`.
+
+### 3. IR construction ("noding")
+
+* `cmd/compile/internal/types` (compiler types)
+* `cmd/compile/internal/ir` (compiler AST)
+* `cmd/compile/internal/noder` (create compiler AST)
+
+The compiler middle end uses its own AST definition and representation of Go
+types carried over from when it was written in C. All of its code is written in
+terms of these, so the next step after type checking is to convert the syntax
+and types2 representations to ir and types. This process is referred to as
+"noding."
+
+Noding using a process called Unified IR, which builds a node representation
+using a serialized version of the typechecked code from step 2.
+Unified IR is also involved in import/export of packages and inlining.
+
+### 4. Middle end
+
+* `cmd/compile/internal/deadcode` (dead code elimination)
+* `cmd/compile/internal/inline` (function call inlining)
+* `cmd/compile/internal/devirtualize` (devirtualization of known interface method calls)
+* `cmd/compile/internal/escape` (escape analysis)
+
+Several optimization passes are performed on the IR representation:
+dead code elimination, (early) devirtualization, function call
+inlining, and escape analysis.
+
+### 5. Walk
+
+* `cmd/compile/internal/walk` (order of evaluation, desugaring)
+
+The final pass over the IR representation is "walk," which serves two purposes:
+
+1. It decomposes complex statements into individual, simpler statements,
+ introducing temporary variables and respecting order of evaluation. This step
+ is also referred to as "order."
+
+2. It desugars higher-level Go constructs into more primitive ones. For example,
+ `switch` statements are turned into binary search or jump tables, and
+ operations on maps and channels are replaced with runtime calls.
+
+### 6. Generic SSA
+
+* `cmd/compile/internal/ssa` (SSA passes and rules)
+* `cmd/compile/internal/ssagen` (converting IR to SSA)
+
+In this phase, IR is converted into Static Single Assignment (SSA) form, a
+lower-level intermediate representation with specific properties that make it
+easier to implement optimizations and to eventually generate machine code from
+it.
+
+During this conversion, function intrinsics are applied. These are special
+functions that the compiler has been taught to replace with heavily optimized
+code on a case-by-case basis.
+
+Certain nodes are also lowered into simpler components during the AST to SSA
+conversion, so that the rest of the compiler can work with them. For instance,
+the copy builtin is replaced by memory moves, and range loops are rewritten into
+for loops. Some of these currently happen before the conversion to SSA due to
+historical reasons, but the long-term plan is to move all of them here.
+
+Then, a series of machine-independent passes and rules are applied. These do not
+concern any single computer architecture, and thus run on all `GOARCH` variants.
+These passes include dead code elimination, removal of
+unneeded nil checks, and removal of unused branches. The generic rewrite rules
+mainly concern expressions, such as replacing some expressions with constant
+values, and optimizing multiplications and float operations.
+
+### 7. Generating machine code
+
+* `cmd/compile/internal/ssa` (SSA lowering and arch-specific passes)
+* `cmd/internal/obj` (machine code generation)
+
+The machine-dependent phase of the compiler begins with the "lower" pass, which
+rewrites generic values into their machine-specific variants. For example, on
+amd64 memory operands are possible, so many load-store operations may be combined.
+
+Note that the lower pass runs all machine-specific rewrite rules, and thus it
+currently applies lots of optimizations too.
+
+Once the SSA has been "lowered" and is more specific to the target architecture,
+the final code optimization passes are run. This includes yet another dead code
+elimination pass, moving values closer to their uses, the removal of local
+variables that are never read from, and register allocation.
+
+Other important pieces of work done as part of this step include stack frame
+layout, which assigns stack offsets to local variables, and pointer liveness
+analysis, which computes which on-stack pointers are live at each GC safe point.
+
+At the end of the SSA generation phase, Go functions have been transformed into
+a series of obj.Prog instructions. These are passed to the assembler
+(`cmd/internal/obj`), which turns them into machine code and writes out the
+final object file. The object file will also contain reflect data, export data,
+and debugging information.
+
+### 8. Tips
+
+#### Getting Started
+
+* If you have never contributed to the compiler before, a simple way to begin
+ can be adding a log statement or `panic("here")` to get some
+ initial insight into whatever you are investigating.
+
+* The compiler itself provides logging, debugging and visualization capabilities,
+ such as:
+ ```
+ $ go build -gcflags=-m=2 # print optimization info, including inlining, escape analysis
+ $ go build -gcflags=-d=ssa/check_bce/debug # print bounds check info
+ $ go build -gcflags=-W # print internal parse tree after type checking
+ $ GOSSAFUNC=Foo go build # generate ssa.html file for func Foo
+ $ go build -gcflags=-S # print assembly
+ $ go tool compile -bench=out.txt x.go # print timing of compiler phases
+ ```
+
+ Some flags alter the compiler behavior, such as:
+ ```
+ $ go tool compile -h file.go # panic on first compile error encountered
+ $ go build -gcflags=-d=checkptr=2 # enable additional unsafe pointer checking
+ ```
+
+ There are many additional flags. Some descriptions are available via:
+ ```
+ $ go tool compile -h # compiler flags, e.g., go build -gcflags='-m=1 -l'
+ $ go tool compile -d help # debug flags, e.g., go build -gcflags=-d=checkptr=2
+ $ go tool compile -d ssa/help # ssa flags, e.g., go build -gcflags=-d=ssa/prove/debug=2
+ ```
+
+ There are some additional details about `-gcflags` and the differences between `go build`
+ vs. `go tool compile` in a [section below](#-gcflags-and-go-build-vs-go-tool-compile).
+
+* In general, when investigating a problem in the compiler you usually want to
+ start with the simplest possible reproduction and understand exactly what is
+ happening with it.
+
+#### Testing your changes
+
+* Be sure to read the [Quickly testing your changes](https://go.dev/doc/contribute#quick_test)
+ section of the Go Contribution Guide.
+
+* Some tests live within the cmd/compile packages and can be run by `go test ./...` or similar,
+ but many cmd/compile tests are in the top-level
+ [test](https://github.com/golang/go/tree/master/test) directory:
+
+ ```
+ $ go test cmd/internal/testdir # all tests in 'test' dir
+ $ go test cmd/internal/testdir -run='Test/escape.*.go' # test specific files in 'test' dir
+ ```
+ For details, see the [testdir README](https://github.com/golang/go/tree/master/test#readme).
+ The `errorCheck` method in [testdir_test.go](https://github.com/golang/go/blob/master/src/cmd/internal/testdir/testdir_test.go)
+ is helpful for a description of the `ERROR` comments used in many of those tests.
+
+ In addition, the `go/types` package from the standard library and `cmd/compile/internal/types2`
+ have shared tests in `src/internal/types/testdata`, and both type checkers
+ should be checked if anything changes there.
+
+* The new [application-based coverage profiling](https://go.dev/testing/coverage/) can be used
+ with the compiler, such as:
+
+ ```
+ $ go install -cover -coverpkg=cmd/compile/... cmd/compile # build compiler with coverage instrumentation
+ $ mkdir /tmp/coverdir # pick location for coverage data
+ $ GOCOVERDIR=/tmp/coverdir go test [...] # use compiler, saving coverage data
+ $ go tool covdata textfmt -i=/tmp/coverdir -o coverage.out # convert to traditional coverage format
+ $ go tool cover -html coverage.out # view coverage via traditional tools
+ ```
+
+#### Juggling compiler versions
+
+* Many of the compiler tests use the version of the `go` command found in your PATH and
+ its corresponding `compile` binary.
+
+* If you are in a branch and your PATH includes `<go-repo>/bin`,
+ doing `go install cmd/compile` will build the compiler using the code from your
+ branch and install it to the proper location so that subsequent `go` commands
+ like `go build` or `go test ./...` will exercise your freshly built compiler.
+
+* [toolstash](https://pkg.go.dev/golang.org/x/tools/cmd/toolstash) provides a way
+ to save, run, and restore a known good copy of the Go toolchain. For example, it can be
+ a good practice to initially build your branch, save that version of
+ the toolchain, then restore the known good version of the tools to compile
+ your work-in-progress version of the compiler.
+
+ Sample set up steps:
+ ```
+ $ go install golang.org/x/tools/cmd/toolstash@latest
+ $ git clone https://go.googlesource.com/go
+ $ cd go
+ $ git checkout -b mybranch
+ $ ./src/all.bash # build and confirm good starting point
+ $ export PATH=$PWD/bin:$PATH
+ $ toolstash save # save current tools
+ ```
+ After that, your edit/compile/test cycle can be similar to:
+ ```
+ <... make edits to cmd/compile source ...>
+ $ toolstash restore && go install cmd/compile # restore known good tools to build compiler
+ <... 'go build', 'go test', etc. ...> # use freshly built compiler
+ ```
+
+* toolstash also allows comparing the installed vs. stashed copy of
+ the compiler, such as if you expect equivalent behavior after a refactor.
+ For example, to check that your changed compiler produces identical object files to
+ the stashed compiler while building the standard library:
+ ```
+ $ toolstash restore && go install cmd/compile # build latest compiler
+ $ go build -toolexec "toolstash -cmp" -a -v std # compare latest vs. saved compiler
+ ```
+
+* If versions appear to get out of sync (for example, with errors like
+ `linked object header mismatch` with version strings like
+ `devel go1.21-db3f952b1f`), you might need to do
+ `toolstash restore && go install cmd/...` to update all the tools under cmd.
+
+#### Additional helpful tools
+
+* [compilebench](https://pkg.go.dev/golang.org/x/tools/cmd/compilebench) benchmarks
+ the speed of the compiler.
+
+* [benchstat](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) is the standard tool
+ for reporting performance changes resulting from compiler modifications,
+ including whether any improvements are statistically significant:
+ ```
+ $ go test -bench=SomeBenchmarks -count=20 > new.txt # use new compiler
+ $ toolstash restore # restore old compiler
+ $ go test -bench=SomeBenchmarks -count=20 > old.txt # use old compiler
+ $ benchstat old.txt new.txt # compare old vs. new
+ ```
+
+* [bent](https://pkg.go.dev/golang.org/x/benchmarks/cmd/bent) facilitates running a
+ large set of benchmarks from various community Go projects inside a Docker container.
+
+* [perflock](https://github.com/aclements/perflock) helps obtain more consistent
+ benchmark results, including by manipulating CPU frequency scaling settings on Linux.
+
+* [view-annotated-file](https://github.com/loov/view-annotated-file) (from the community)
+ overlays inlining, bounds check, and escape info back onto the source code.
+
+* [godbolt.org](https://go.godbolt.org) is widely used to examine
+ and share assembly output from many compilers, including the Go compiler. It can also
+ [compare](https://go.godbolt.org/z/5Gs1G4bKG) assembly for different versions of
+ a function or across Go compiler versions, which can be helpful for investigations and
+ bug reports.
+
+#### -gcflags and 'go build' vs. 'go tool compile'
+
+* `-gcflags` is a go command [build flag](https://pkg.go.dev/cmd/go#hdr-Compile_packages_and_dependencies).
+ `go build -gcflags=<args>` passes the supplied `<args>` to the underlying
+ `compile` invocation(s) while still doing everything that the `go build` command
+ normally does (e.g., handling the build cache, modules, and so on). In contrast,
+ `go tool compile <args>` asks the `go` command to invoke `compile <args>` a single time
+ without involving the standard `go build` machinery. In some cases, it can be helpful to have
+ fewer moving parts by doing `go tool compile <args>`, such as if you have a
+ small standalone source file that can be compiled without any assistance from `go build`.
+ In other cases, it is more convenient to pass `-gcflags` to a build command like
+ `go build`, `go test`, or `go install`.
+
+* `-gcflags` by default applies to the packages named on the command line, but can
+ use package patterns such as `-gcflags='all=-m=1 -l'`, or multiple package patterns such as
+ `-gcflags='all=-m=1' -gcflags='fmt=-m=2'`. For details, see the
+ [cmd/go documentation](https://pkg.go.dev/cmd/go#hdr-Compile_packages_and_dependencies).
+
+### Further reading
+
+To dig deeper into how the SSA package works, including its passes and rules,
+head to [cmd/compile/internal/ssa/README.md](internal/ssa/README.md).
+
+Finally, if something in this README or the SSA README is unclear
+or if you have an idea for an improvement, feel free to leave a comment in
+[issue 30074](https://go.dev/issue/30074).
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
new file mode 100644
index 0000000..eae230d
--- /dev/null
+++ b/src/cmd/compile/abi-internal.md
@@ -0,0 +1,973 @@
+# Go internal ABI specification
+
+Self-link: [go.dev/s/regabi](https://go.dev/s/regabi)
+
+This document describes Go’s internal application binary interface
+(ABI), known as ABIInternal.
+Go's ABI defines the layout of data in memory and the conventions for
+calling between Go functions.
+This ABI is *unstable* and will change between Go versions.
+If you’re writing assembly code, please instead refer to Go’s
+[assembly documentation](/doc/asm.html), which describes Go’s stable
+ABI, known as ABI0.
+
+All functions defined in Go source follow ABIInternal.
+However, ABIInternal and ABI0 functions are able to call each other
+through transparent *ABI wrappers*, described in the [internal calling
+convention proposal](https://golang.org/design/27539-internal-abi).
+
+Go uses a common ABI design across all architectures.
+We first describe the common ABI, and then cover per-architecture
+specifics.
+
+*Rationale*: For the reasoning behind using a common ABI across
+architectures instead of the platform ABI, see the [register-based Go
+calling convention proposal](https://golang.org/design/40724-register-calling).
+
+## Memory layout
+
+Go's built-in types have the following sizes and alignments.
+Many, though not all, of these sizes are guaranteed by the [language
+specification](/doc/go_spec.html#Size_and_alignment_guarantees).
+Those that aren't guaranteed may change in future versions of Go (for
+example, we've considered changing the alignment of int64 on 32-bit).
+
+| Type | 64-bit | | 32-bit | |
+|-----------------------------|--------|-------|--------|-------|
+| | Size | Align | Size | Align |
+| bool, uint8, int8 | 1 | 1 | 1 | 1 |
+| uint16, int16 | 2 | 2 | 2 | 2 |
+| uint32, int32 | 4 | 4 | 4 | 4 |
+| uint64, int64 | 8 | 8 | 8 | 4 |
+| int, uint | 8 | 8 | 4 | 4 |
+| float32 | 4 | 4 | 4 | 4 |
+| float64 | 8 | 8 | 8 | 4 |
+| complex64 | 8 | 4 | 8 | 4 |
+| complex128 | 16 | 8 | 16 | 4 |
+| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |
+
+The types `byte` and `rune` are aliases for `uint8` and `int32`,
+respectively, and hence have the same size and alignment as these
+types.
+
+The layout of `map`, `chan`, and `func` types is equivalent to *T.
+
+To describe the layout of the remaining composite types, we first
+define the layout of a *sequence* S of N fields with types
+t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
+We define the byte offset at which each field begins relative to a
+base address of 0, as well as the size and alignment of the sequence
+as follows:
+
+```
+offset(S, i) = 0 if i = 1
+ = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
+alignof(S) = 1 if N = 0
+ = max(alignof(t_i) | 1 <= i <= N)
+sizeof(S) = 0 if N = 0
+ = align(offset(S, N) + sizeof(t_N), alignof(S))
+```
+
+Where sizeof(T) and alignof(T) are the size and alignment of type T,
+respectively, and align(x, y) rounds x up to a multiple of y.
+
+The `interface{}` type is a sequence of 1. a pointer to the runtime type
+description for the interface's dynamic type and 2. an `unsafe.Pointer`
+data field.
+Any other interface type (besides the empty interface) is a sequence
+of 1. a pointer to the runtime "itab" that gives the method pointers and
+the type of the data field and 2. an `unsafe.Pointer` data field.
+An interface can be "direct" or "indirect" depending on the dynamic
+type: a direct interface stores the value directly in the data field,
+and an indirect interface stores a pointer to the value in the data
+field.
+An interface can only be direct if the value consists of a single
+pointer word.
+
+An array type `[N]T` is a sequence of N fields of type T.
+
+The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
+backing store, an `int` giving the `len` of the slice, and an `int`
+giving the `cap` of the slice.
+
+The `string` type is a sequence of a `*[len]byte` pointer to the
+string backing store, and an `int` giving the `len` of the string.
+
+A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
+sequence t1, ..., tM, tP, where tP is either:
+
+- Type `byte` if sizeof(tM) = 0 and any of sizeof(t*i*) ≠ 0.
+- Empty (size 0 and align 1) otherwise.
+
+The padding byte prevents creating a past-the-end pointer by taking
+the address of the final, empty fN field.
+
+Note that user-written assembly code should generally not depend on Go
+type layout and should instead use the constants defined in
+[`go_asm.h`](/doc/asm.html#data-offsets).
+
+## Function call argument and result passing
+
+Function calls pass arguments and results using a combination of the
+stack and machine registers.
+Each argument or result is passed either entirely in registers or
+entirely on the stack.
+Because access to registers is generally faster than access to the
+stack, arguments and results are preferentially passed in registers.
+However, any argument or result that contains a non-trivial array or
+does not fit entirely in the remaining available registers is passed
+on the stack.
+
+Each architecture defines a sequence of integer registers and a
+sequence of floating-point registers.
+At a high level, arguments and results are recursively broken down
+into values of base types and these base values are assigned to
+registers from these sequences.
+
+Arguments and results can share the same registers, but do not share
+the same stack space.
+Beyond the arguments and results passed on the stack, the caller also
+reserves spill space on the stack for all register-based arguments
+(but does not populate this space).
+
+The receiver, arguments, and results of function or method F are
+assigned to registers or the stack using the following algorithm:
+
+1. Let NI and NFP be the length of integer and floating-point register
+ sequences defined by the architecture.
+ Let I and FP be 0; these are the indexes of the next integer and
+ floating-point register.
+ Let S, the type sequence defining the stack frame, be empty.
+1. If F is a method, assign F’s receiver.
+1. For each argument A of F, assign A.
+1. Add a pointer-alignment field to S. This has size 0 and the same
+ alignment as `uintptr`.
+1. Reset I and FP to 0.
+1. For each result R of F, assign R.
+1. Add a pointer-alignment field to S.
+1. For each register-assigned receiver and argument of F, let T be its
+ type and add T to the stack sequence S.
+ This is the argument's (or receiver's) spill space and will be
+ uninitialized at the call.
+1. Add a pointer-alignment field to S.
+
+Assigning a receiver, argument, or result V of underlying type T works
+as follows:
+
+1. Remember I and FP.
+1. If T has zero size, add T to the stack sequence S and return.
+1. Try to register-assign V.
+1. If step 3 failed, reset I and FP to the values from step 1, add T
+ to the stack sequence S, and assign V to this field in S.
+
+Register-assignment of a value V of underlying type T works as follows:
+
+1. If T is a boolean or integral type that fits in an integer
+ register, assign V to register I and increment I.
+1. If T is an integral type that fits in two integer registers, assign
+ the least significant and most significant halves of V to registers
+ I and I+1, respectively, and increment I by 2
+1. If T is a floating-point type and can be represented without loss
+ of precision in a floating-point register, assign V to register FP
+ and increment FP.
+1. If T is a complex type, recursively register-assign its real and
+ imaginary parts.
+1. If T is a pointer type, map type, channel type, or function type,
+ assign V to register I and increment I.
+1. If T is a string type, interface type, or slice type, recursively
+ register-assign V’s components (2 for strings and interfaces, 3 for
+ slices).
+1. If T is a struct type, recursively register-assign each field of V.
+1. If T is an array type of length 0, do nothing.
+1. If T is an array type of length 1, recursively register-assign its
+ one element.
+1. If T is an array type of length > 1, fail.
+1. If I > NI or FP > NFP, fail.
+1. If any recursive assignment above fails, fail.
+
+The above algorithm produces an assignment of each receiver, argument,
+and result to registers or to a field in the stack sequence.
+The final stack sequence looks like: stack-assigned receiver,
+stack-assigned arguments, pointer-alignment, stack-assigned results,
+pointer-alignment, spill space for each register-assigned argument,
+pointer-alignment.
+The following diagram shows what this stack frame looks like on the
+stack, using the typical convention where address 0 is at the bottom:
+
+ +------------------------------+
+ | . . . |
+ | 2nd reg argument spill space |
+ | 1st reg argument spill space |
+ | <pointer-sized alignment> |
+ | . . . |
+ | 2nd stack-assigned result |
+ | 1st stack-assigned result |
+ | <pointer-sized alignment> |
+ | . . . |
+ | 2nd stack-assigned argument |
+ | 1st stack-assigned argument |
+ | stack-assigned receiver |
+ +------------------------------+ ↓ lower addresses
+
+To perform a call, the caller reserves space starting at the lowest
+address in its stack frame for the call stack frame, stores arguments
+in the registers and argument stack fields determined by the above
+algorithm, and performs the call.
+At the time of a call, spill space, result stack fields, and result
+registers are left uninitialized.
+Upon return, the callee must have stored results to all result
+registers and result stack fields determined by the above algorithm.
+
+There are no callee-save registers, so a call may overwrite any
+register that doesn’t have a fixed meaning, including argument
+registers.
+
+### Example
+
+Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
+struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
+architecture with hypothetical integer registers R0–R9.
+
+On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1` and the
+stack frame is laid out in the following sequence:
+
+ a2 [2]uintptr
+ r1.x uintptr
+ r1.y [2]uintptr
+ a1Spill uint8
+ a3Spill uint8
+ _ [6]uint8 // alignment padding
+
+In the stack frame, only the `a2` field is initialized on entry; the
+rest of the frame is left uninitialized.
+
+On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
+and `r1.x` and `r1.y` are initialized in the stack frame.
+
+There are several things to note in this example.
+First, `a2` and `r1` are stack-assigned because they contain arrays.
+The other arguments and results are register-assigned.
+Result `r2` is decomposed into its components, which are individually
+register-assigned.
+On the stack, the stack-assigned arguments appear at lower addresses
+than the stack-assigned results, which appear at lower addresses than
+the argument spill area.
+Only arguments, not results, are assigned a spill area on the stack.
+
+### Rationale
+
+Each base value is assigned to its own register to optimize
+construction and access.
+An alternative would be to pack multiple sub-word values into
+registers, or to simply map an argument's in-memory layout to
+registers (this is common in C ABIs), but this typically adds cost to
+pack and unpack these values.
+Modern architectures have more than enough registers to pass all
+arguments and results this way for nearly all functions (see the
+appendix), so there’s little downside to spreading base values across
+registers.
+
+Arguments that can’t be fully assigned to registers are passed
+entirely on the stack in case the callee takes the address of that
+argument.
+If an argument could be split across the stack and registers and the
+callee took its address, it would need to be reconstructed in memory,
+a process that would be proportional to the size of the argument.
+
+Non-trivial arrays are always passed on the stack because indexing
+into an array typically requires a computed offset, which generally
+isn’t possible with registers.
+Arrays in general are rare in function signatures (only 0.7% of
+functions in the Go 1.15 standard library and 0.2% in kubelet).
+We considered allowing array fields to be passed on the stack while
+the rest of an argument’s fields are passed in registers, but this
+creates the same problems as other large structs if the callee takes
+the address of an argument, and would benefit <0.1% of functions in
+kubelet (and even these very little).
+
+We make exceptions for 0 and 1-element arrays because these don’t
+require computed offsets, and 1-element arrays are already decomposed
+in the compiler’s SSA representation.
+
+The ABI assignment algorithm above is equivalent to Go’s stack-based
+ABI0 calling convention if there are zero architecture registers.
+This is intended to ease the transition to the register-based internal
+ABI and make it easy for the compiler to generate either calling
+convention.
+An architecture may still define register meanings that aren’t
+compatible with ABI0, but these differences should be easy to account
+for in the compiler.
+
+The assignment algorithm assigns zero-sized values to the stack
+(assignment step 2) in order to support ABI0-equivalence.
+While these values take no space themselves, they do result in
+alignment padding on the stack in ABI0.
+Without this step, the internal ABI would register-assign zero-sized
+values even on architectures that provide no argument registers
+because they don't consume any registers, and hence not add alignment
+padding to the stack.
+
+The algorithm reserves spill space for arguments in the caller’s frame
+so that the compiler can generate a stack growth path that spills into
+this reserved space.
+If the callee has to grow the stack, it may not be able to reserve
+enough additional stack space in its own frame to spill these, which
+is why it’s important that the caller do so.
+These slots also act as the home location if these arguments need to
+be spilled for any other reason, which simplifies traceback printing.
+
+There are several options for how to lay out the argument spill space.
+We chose to lay out each argument according to its type's usual memory
+layout but to separate the spill space from the regular argument
+space.
+Using the usual memory layout simplifies the compiler because it
+already understands this layout.
+Also, if a function takes the address of a register-assigned argument,
+the compiler must spill that argument to memory in its usual memory
+layout and it's more convenient to use the argument spill space for
+this purpose.
+
+Alternatively, the spill space could be structured around argument
+registers.
+In this approach, the stack growth spill path would spill each
+argument register to a register-sized stack word.
+However, if the function takes the address of a register-assigned
+argument, the compiler would have to reconstruct it in memory layout
+elsewhere on the stack.
+
+The spill space could also be interleaved with the stack-assigned
+arguments so the arguments appear in order whether they are register-
+or stack-assigned.
+This would be close to ABI0, except that register-assigned arguments
+would be uninitialized on the stack and there's no need to reserve
+stack space for register-assigned results.
+We expect separating the spill space to perform better because of
+memory locality.
+Separating the space is also potentially simpler for `reflect` calls
+because this allows `reflect` to summarize the spill space as a single
+number.
+Finally, the long-term intent is to remove reserved spill slots
+entirely – allowing most functions to be called without any stack
+setup and easing the introduction of callee-save registers – and
+separating the spill space makes that transition easier.
+
+## Closures
+
+A func value (e.g., `var x func()`) is a pointer to a closure object.
+A closure object begins with a pointer-sized program counter
+representing the entry point of the function, followed by zero or more
+bytes containing the closed-over environment.
+
+Closure calls follow the same conventions as static function and
+method calls, with one addition. Each architecture specifies a
+*closure context pointer* register and calls to closures store the
+address of the closure object in the closure context pointer register
+prior to the call.
+
+## Software floating-point mode
+
+In "softfloat" mode, the ABI simply treats the hardware as having zero
+floating-point registers.
+As a result, any arguments containing floating-point values will be
+passed on the stack.
+
+*Rationale*: Softfloat mode is about compatibility over performance
+and is not commonly used.
+Hence, we keep the ABI as simple as possible in this case, rather than
+adding additional rules for passing floating-point values in integer
+registers.
+
+## Architecture specifics
+
+This section describes per-architecture register mappings, as well as
+other per-architecture special cases.
+
+### amd64 architecture
+
+The amd64 architecture uses the following sequence of 9 registers for
+integer arguments and results:
+
+ RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
+
+It uses X0 – X14 for floating-point arguments and results.
+
+*Rationale*: These sequences are chosen from the available registers
+to be relatively easy to remember.
+
+Registers R12 and R13 are permanent scratch registers.
+R15 is a scratch register except in dynamically linked binaries.
+
+*Rationale*: Some operations such as stack growth and reflection calls
+need dedicated scratch registers in order to manipulate call frames
+without corrupting arguments or results.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| RBP | Frame pointer | Same | Same |
+| RDX | Closure context pointer | Scratch | Scratch |
+| R12 | Scratch | Scratch | Scratch |
+| R13 | Scratch | Scratch | Scratch |
+| R14 | Current goroutine | Same | Same |
+| R15 | GOT reference temporary if dynlink | Same | Same |
+| X15 | Zero value (*) | Same | Scratch |
+
+(*) Except on Plan 9, where X15 is a scratch register because SSE
+registers cannot be used in note handlers (so the compiler avoids
+using them except when absolutely necessary).
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention except for R14 and X15, which will have
+to be restored on transitions from ABI0 code to ABIInternal code.
+In ABI0, these are undefined, so transitions from ABIInternal to ABI0
+can ignore these registers.
+
+*Rationale*: For the current goroutine pointer, we chose a register
+that requires an additional REX byte.
+While this adds one byte to every function prologue, it is hardly ever
+accessed outside the function prologue and we expect making more
+single-byte registers available to be a net win.
+
+*Rationale*: We could allow R14 (the current goroutine pointer) to be
+a scratch register in function bodies because it can always be
+restored from TLS on amd64.
+However, we designate it as a fixed register for simplicity and for
+consistency with other architectures that may not have a copy of the
+current goroutine pointer in TLS.
+
+*Rationale*: We designate X15 as a fixed zero register because
+functions often have to bulk zero their stack frames, and this is more
+efficient with a designated zero register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 8 bytes.
+
+The amd64 architecture does not use a link register.
+
+A function's stack frame is laid out as follows:
+
+ +------------------------------+
+ | return PC |
+ | RBP on entry |
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is pushed as part of the standard amd64 `CALL`
+operation.
+On entry, a function subtracts from RSP to open its stack frame and
+saves the value of RBP directly below the return PC.
+A leaf function that does not require any stack space may omit the
+saved RBP.
+
+The Go ABI's use of RBP as a frame pointer register is compatible with
+amd64 platform conventions so that Go can inter-operate with platform
+debuggers and profilers.
+
+#### Flags
+
+The direction flag (D) is always cleared (set to the “forward”
+direction) at a call.
+The arithmetic status flags are treated like scratch registers and not
+preserved across calls.
+All other bits in RFLAGS are system flags.
+
+At function calls and returns, the CPU is in x87 mode (not MMX
+technology mode).
+
+*Rationale*: Go on amd64 does not use either the x87 registers or MMX
+registers. Hence, we follow the SysV platform conventions in order to
+simplify transitions to and from the C ABI.
+
+At calls, the MXCSR control bits are always set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| FZ | 15 | 0 | Do not flush to zero |
+| RC | 14/13 | 0 (RN) | Round to nearest |
+| PM | 12 | 1 | Precision masked |
+| UM | 11 | 1 | Underflow masked |
+| OM | 10 | 1 | Overflow masked |
+| ZM | 9 | 1 | Divide-by-zero masked |
+| DM | 8 | 1 | Denormal operations masked |
+| IM | 7 | 1 | Invalid operations masked |
+| DAZ | 6 | 0 | Do not zero de-normals |
+
+The MXCSR status bits are callee-save.
+
+*Rationale*: Having a fixed MXCSR control configuration allows Go
+functions to use SSE operations without modifying or saving the MXCSR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+The above fixed configuration matches the process initialization
+control bits specified by the ELF AMD64 ABI.
+
+The x87 floating-point control word is not used by Go on amd64.
+
+### arm64 architecture
+
+The arm64 architecture uses R0 – R15 for integer arguments and results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
+*Rationale*: 16 integer registers and 16 floating-point registers are
+more than enough for passing arguments and results for practically all
+functions (see Appendix). While there are more registers available,
+using more registers provides little benefit. Additionally, it will add
+overhead on code paths where the number of arguments are not statically
+known (e.g. reflect call), and will consume more stack space when there
+is only limited stack space available to fit in the nosplit limit.
+
+Registers R16 and R17 are permanent scratch registers. They are also
+used as scratch registers by the linker (Go linker and external
+linker) in trampolines.
+
+Register R18 is reserved and never used. It is reserved for the OS
+on some platforms (e.g. macOS).
+
+Registers R19 – R25 are permanent scratch registers. In addition,
+R27 is a permanent scratch register used by the assembler when
+expanding instructions.
+
+Floating-point registers F16 – F31 are also permanent scratch
+registers.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| R30 | Link register | Same | Scratch (non-leaf functions) |
+| R29 | Frame pointer | Same | Same |
+| R28 | Current goroutine | Same | Same |
+| R27 | Scratch | Scratch | Scratch |
+| R26 | Closure context pointer | Scratch | Scratch |
+| R18 | Reserved (not used) | Same | Same |
+| ZR | Zero value | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+*Rationale*: The link register, R30, holds the function return
+address at the function entry. For functions that have frames
+(including most non-leaf functions), R30 is saved to stack in the
+function prologue and restored in the epilogue. Within the function
+body, R30 can be used as a scratch register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 16 bytes.
+
+*Rationale*: The arm64 architecture requires the stack pointer to be
+16-byte aligned.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← RSP points to
+ | frame pointer on entry |
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, R30, as part of the
+arm64 `CALL` operation.
+
+On entry, a function subtracts from RSP to open its stack frame, and
+saves the values of R30 and R29 at the bottom of the frame.
+Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
+after RSP is updated.
+
+A leaf function that does not require any stack space may omit the
+saved R30 and R29.
+
+The Go ABI's use of R29 as a frame pointer register is compatible with
+arm64 architecture requirement so that Go can inter-operate with platform
+debuggers and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The arithmetic status flags (NZCV) are treated like scratch registers
+and not preserved across calls.
+All other bits in PSTATE are system flags and are not modified by Go.
+
+The floating-point status register (FPSR) is treated like scratch
+registers and not preserved across calls.
+
+At calls, the floating-point control register (FPCR) bits are always
+set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| DN | 25 | 0 | Propagate NaN operands |
+| FZ | 24 | 0 | Do not flush to zero |
+| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
+| IDE | 15 | 0 | Denormal operations trap disabled |
+| IXE | 12 | 0 | Inexact trap disabled |
+| UFE | 11 | 0 | Underflow trap disabled |
+| OFE | 10 | 0 | Overflow trap disabled |
+| DZE | 9 | 0 | Divide-by-zero trap disabled |
+| IOE | 8 | 0 | Invalid operations trap disabled |
+| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
+| AH | 1 | 0 | No alternate handling of de-normal inputs |
+| FIZ | 0 | 0 | Do not zero de-normals |
+
+*Rationale*: Having a fixed FPCR control configuration allows Go
+functions to use floating-point and vector (SIMD) operations without
+modifying or saving the FPCR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+
+### loong64 architecture
+
+The loong64 architecture uses R4 – R19 for integer arguments and integer results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
+Registers R20 - R21, R23 – R28, R30 - R31, F16 – F31 are permanent scratch registers.
+
+Register R2 is reserved and never used.
+
+Register R20, R21 is Used by runtime.duffcopy, runtime.duffzero.
+
+Special-purpose registers used within Go generated code and Go assembly code
+are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| R0 | Zero value | Same | Same |
+| R1 | Link register | Link register | Scratch |
+| R3 | Stack pointer | Same | Same |
+| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero |
+| R22 | Current goroutine | Same | Same |
+| R29 | Closure context pointer | Same | Same |
+| R30, R31 | used by the assembler | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s stack-based
+calling convention.
+
+#### Stack layout
+
+The stack pointer, R3, grows down and is aligned to 8 bytes.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← R3 points to
+ +------------------------------+ ↓ lower addresses
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+The "return PC" is loaded to the link register, R1, as part of the
+loong64 `JAL` operation.
+
+#### Flags
+All bits in CSR are system flags and are not modified by Go.
+
+### ppc64 architecture
+
+The ppc64 architecture uses R3 – R10 and R14 – R17 for integer arguments
+and results.
+
+It uses F1 – F12 for floating-point arguments and results.
+
+Register R31 is a permanent scratch register in Go.
+
+Special-purpose registers used within Go generated code and Go
+assembly code are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| R0 | Zero value | Same | Same |
+| R1 | Stack pointer | Same | Same |
+| R2 | TOC register | Same | Same |
+| R11 | Closure context pointer | Scratch | Scratch |
+| R12 | Function address on indirect calls | Scratch | Scratch |
+| R13 | TLS pointer | Same | Same |
+| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero |
+| R30 | Current goroutine | Same | Same |
+| R31 | Scratch | Scratch | Scratch |
+| LR | Link register | Link register | Scratch |
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+The link register, LR, holds the function return
+address at the function entry and is set to the correct return
+address before exiting the function. It is also used
+in some cases as the function address when doing an indirect call.
+
+The register R2 contains the address of the TOC (table of contents) which
+contains data or code addresses used when generating position independent
+code. Non-Go code generated when using cgo contains TOC-relative addresses
+which depend on R2 holding a valid TOC. Go code compiled with -shared or
+-dynlink initializes and maintains R2 and uses it in some cases for
+function calls; Go code compiled without these options does not modify R2.
+
+When making a function call R12 contains the function address for use by the
+code to generate R2 at the beginning of the function. R12 can be used for
+other purposes within the body of the function, such as trampoline generation.
+
+R20 and R21 are used in duffcopy and duffzero which could be generated
+before arguments are saved so should not be used for register arguments.
+
+The Count register CTR can be used as the call target for some branch instructions.
+It holds the return address when preemption has occurred.
+
+On PPC64 when a float32 is loaded it becomes a float64 in the register, which is
+different from other platforms and that needs to be recognized by the internal
+implementation of reflection so that float32 arguments are passed correctly.
+
+Registers R18 - R29 and F13 - F31 are considered scratch registers.
+
+#### Stack layout
+
+The stack pointer, R1, grows down and is aligned to 8 bytes in Go, but changed
+to 16 bytes when calling cgo.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | 24 TOC register R2 save | When compiled with -shared/-dynlink
+ | 16 Unused in Go | Not used in Go
+ | 8 CR save | nonvolatile CR fields
+ | 0 return PC | ← R1 points to
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, LR, as part of the
+ppc64 `BL` operations.
+
+On entry to a non-leaf function, the stack frame size is subtracted from R1 to
+create its stack frame, and saves the value of LR at the bottom of the frame.
+
+A leaf function that does not require any stack space does not modify R1 and
+does not save LR.
+
+*NOTE*: We might need to save the frame pointer on the stack as
+in the PPC64 ELF v2 ABI so Go can inter-operate with platform debuggers
+and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The condition register consists of 8 condition code register fields
+CR0-CR7. Go generated code only sets and uses CR0, commonly set by
+compare functions and use to determine the target of a conditional
+branch. The generated code does not set or use CR1-CR7.
+
+The floating point status and control register (FPSCR) is initialized
+to 0 by the kernel at startup of the Go program and not changed by
+the Go generated code.
+
+### riscv64 architecture
+
+The riscv64 architecture uses X10 – X17, X8, X9, X18 – X23 for integer arguments
+and results.
+
+It uses F10 – F17, F8, F9, F18 – F23 for floating-point arguments and results.
+
+Special-purpose registers used within Go generated code and Go
+assembly code are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| X0 | Zero value | Same | Same |
+| X1 | Link register | Link register | Scratch |
+| X2 | Stack pointer | Same | Same |
+| X3 | Global pointer | Same | Used by dynamic linker |
+| X4 | TLS (thread pointer) | TLS | Scratch |
+| X24,X25 | Scratch | Scratch | Used by duffcopy, duffzero |
+| X26 | Closure context pointer | Scratch | Scratch |
+| X27 | Current goroutine | Same | Same |
+| X31 | Scratch | Scratch | Scratch |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention. Context register X20 will change to X26,
+duffcopy, duffzero register will change to X24, X25 before this register ABI been adopted.
+X10 – X17, X8, X9, X18 – X23, is the same order as A0 – A7, S0 – S7 in platform ABI.
+F10 – F17, F8, F9, F18 – F23, is the same order as FA0 – FA7, FS0 – FS7 in platform ABI.
+X8 – X23, F8 – F15 are used for compressed instruction (RVC) which will benefit code size in the future.
+
+#### Stack layout
+
+The stack pointer, X2, grows down and is aligned to 8 bytes.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+ +------------------------------+
+ | ... locals ... |
+ | ... outgoing arguments ... |
+ | return PC | ← X2 points to
+ +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, X1, as part of the
+riscv64 `CALL` operation.
+
+#### Flags
+
+The riscv64 has Zicsr extension for control and status register (CSR) and
+treated as scratch register.
+All bits in CSR are system flags and are not modified by Go.
+
+## Future directions
+
+### Spill path improvements
+
+The ABI currently reserves spill space for argument registers so the
+compiler can statically generate an argument spill path before calling
+into `runtime.morestack` to grow the stack.
+This ensures there will be sufficient spill space even when the stack
+is nearly exhausted and keeps stack growth and stack scanning
+essentially unchanged from ABI0.
+
+However, this wastes stack space (the median wastage is 16 bytes per
+call), resulting in larger stacks and increased cache footprint.
+A better approach would be to reserve stack space only when spilling.
+One way to ensure enough space is available to spill would be for
+every function to ensure there is enough space for the function's own
+frame *as well as* the spill space of all functions it calls.
+For most functions, this would change the threshold for the prologue
+stack growth check.
+For `nosplit` functions, this would change the threshold used in the
+linker's static stack size check.
+
+Allocating spill space in the callee rather than the caller may also
+allow for faster reflection calls in the common case where a function
+takes only register arguments, since it would allow reflection to make
+these calls directly without allocating any frame.
+
+The statically-generated spill path also increases code size.
+It is possible to instead have a generic spill path in the runtime, as
+part of `morestack`.
+However, this complicates reserving the spill space, since spilling
+all possible register arguments would, in most cases, take
+significantly more space than spilling only those used by a particular
+function.
+Some options are to spill to a temporary space and copy back only the
+registers used by the function, or to grow the stack if necessary
+before spilling to it (using a temporary space if necessary), or to
+use a heap-allocated space if insufficient stack space is available.
+These options all add enough complexity that we will have to make this
+decision based on the actual code size growth caused by the static
+spill paths.
+
+### Clobber sets
+
+As defined, the ABI does not use callee-save registers.
+This significantly simplifies the garbage collector and the compiler's
+register allocator, but at some performance cost.
+A potentially better balance for Go code would be to use *clobber
+sets*: for each function, the compiler records the set of registers it
+clobbers (including those clobbered by functions it calls) and any
+register not clobbered by function F can remain live across calls to
+F.
+
+This is generally a good fit for Go because Go's package DAG allows
+function metadata like the clobber set to flow up the call graph, even
+across package boundaries.
+Clobber sets would require relatively little change to the garbage
+collector, unlike general callee-save registers.
+One disadvantage of clobber sets over callee-save registers is that
+they don't help with indirect function calls or interface method
+calls, since static information isn't available in these cases.
+
+### Large aggregates
+
+Go encourages passing composite values by value, and this simplifies
+reasoning about mutation and races.
+However, this comes at a performance cost for large composite values.
+It may be possible to instead transparently pass large composite
+values by reference and delay copying until it is actually necessary.
+
+## Appendix: Register usage analysis
+
+In order to understand the impacts of the above design on register
+usage, we
+[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
+impact of the above ABI on a large code base: cmd/kubelet from
+[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.
+
+The following table shows the impact of different numbers of available
+integer and floating-point registers on argument assignment:
+
+```
+| | | | stack args | spills | stack total |
+| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
+| 0 | 0 | 6.3% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
+| 0 | 8 | 6.4% | 32 | 152 | 256 | 0 | 0 | 0 | 32 | 152 | 256 |
+| 1 | 8 | 21.3% | 24 | 144 | 248 | 8 | 8 | 8 | 32 | 152 | 256 |
+| 2 | 8 | 38.9% | 16 | 128 | 224 | 8 | 16 | 16 | 24 | 136 | 240 |
+| 3 | 8 | 57.0% | 0 | 120 | 224 | 16 | 24 | 24 | 24 | 136 | 240 |
+| 4 | 8 | 73.0% | 0 | 120 | 216 | 16 | 32 | 32 | 24 | 136 | 232 |
+| 5 | 8 | 83.3% | 0 | 112 | 216 | 16 | 40 | 40 | 24 | 136 | 232 |
+| 6 | 8 | 87.5% | 0 | 112 | 208 | 16 | 48 | 48 | 24 | 136 | 232 |
+| 7 | 8 | 89.8% | 0 | 112 | 208 | 16 | 48 | 56 | 24 | 136 | 232 |
+| 8 | 8 | 91.3% | 0 | 112 | 200 | 16 | 56 | 64 | 24 | 136 | 232 |
+| 9 | 8 | 92.1% | 0 | 112 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
+| 10 | 8 | 92.6% | 0 | 104 | 192 | 16 | 56 | 72 | 24 | 136 | 232 |
+| 11 | 8 | 93.1% | 0 | 104 | 184 | 16 | 56 | 80 | 24 | 128 | 232 |
+| 12 | 8 | 93.4% | 0 | 104 | 176 | 16 | 56 | 88 | 24 | 128 | 232 |
+| 13 | 8 | 94.0% | 0 | 88 | 176 | 16 | 56 | 96 | 24 | 128 | 232 |
+| 14 | 8 | 94.4% | 0 | 80 | 152 | 16 | 64 | 104 | 24 | 128 | 232 |
+| 15 | 8 | 94.6% | 0 | 80 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
+| 16 | 8 | 94.9% | 0 | 16 | 152 | 16 | 64 | 112 | 24 | 128 | 232 |
+| ∞ | 8 | 99.8% | 0 | 0 | 0 | 24 | 112 | 216 | 24 | 120 | 216 |
+```
+
+The first two columns show the number of available integer and
+floating-point registers.
+The first row shows the results for 0 integer and 0 floating-point
+registers, which is equivalent to ABI0.
+We found that any reasonable number of floating-point registers has
+the same effect, so we fixed it at 8 for all other rows.
+
+The “% fit” column gives the fraction of functions where all arguments
+and results are register-assigned and no arguments are passed on the
+stack.
+The three “stack args” columns give the median, 95th and 99th
+percentile number of bytes of stack arguments.
+The “spills” columns likewise summarize the number of bytes in
+on-stack spill space.
+And “stack total” summarizes the sum of stack arguments and on-stack
+spill slots.
+Note that these are three different distributions; for example,
+there’s no single function that takes 0 stack argument bytes, 16 spill
+bytes, and 24 total stack bytes.
+
+From this, we can see that the fraction of functions that fit entirely
+in registers grows very slowly once it reaches about 90%, though
+curiously there is a small minority of functions that could benefit
+from a huge number of registers.
+Making 9 integer registers available on amd64 puts it in this realm.
+We also see that the stack space required for most functions is fairly
+small.
+While the increasing space required for spills largely balances out
+the decreasing space required for stack arguments as the number of
+available registers increases, there is a general reduction in the
+total stack space required with more available registers.
+This does, however, suggest that eliminating spill slots in the future
+would noticeably reduce stack requirements.
diff --git a/src/cmd/compile/default.pgo b/src/cmd/compile/default.pgo
new file mode 100644
index 0000000..0f925ec
--- /dev/null
+++ b/src/cmd/compile/default.pgo
Binary files differ
diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go
new file mode 100644
index 0000000..507899e
--- /dev/null
+++ b/src/cmd/compile/doc.go
@@ -0,0 +1,321 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Compile, typically invoked as ``go tool compile,'' compiles a single Go package
+comprising the files named on the command line. It then writes a single
+object file named for the basename of the first source file with a .o suffix.
+The object file can then be combined with other objects into a package archive
+or passed directly to the linker (``go tool link''). If invoked with -pack, the compiler
+writes an archive directly, bypassing the intermediate object file.
+
+The generated files contain type information about the symbols exported by
+the package and about types used by symbols imported by the package from
+other packages. It is therefore not necessary when compiling client C of
+package P to read the files of P's dependencies, only the compiled output of P.
+
+Command Line
+
+Usage:
+
+ go tool compile [flags] file...
+
+The specified files must be Go source files and all part of the same package.
+The same compiler is used for all target operating systems and architectures.
+The GOOS and GOARCH environment variables set the desired target.
+
+Flags:
+
+ -D path
+ Set relative path for local imports.
+ -I dir1 -I dir2
+ Search for imported packages in dir1, dir2, etc,
+ after consulting $GOROOT/pkg/$GOOS_$GOARCH.
+ -L
+ Show complete file path in error messages.
+ -N
+ Disable optimizations.
+ -S
+ Print assembly listing to standard output (code only).
+ -S -S
+ Print assembly listing to standard output (code and data).
+ -V
+ Print compiler version and exit.
+ -asmhdr file
+ Write assembly header to file.
+ -asan
+ Insert calls to C/C++ address sanitizer.
+ -buildid id
+ Record id as the build id in the export metadata.
+ -blockprofile file
+ Write block profile for the compilation to file.
+ -c int
+ Concurrency during compilation. Set 1 for no concurrency (default is 1).
+ -complete
+ Assume package has no non-Go components.
+ -cpuprofile file
+ Write a CPU profile for the compilation to file.
+ -dynlink
+ Allow references to Go symbols in shared libraries (experimental).
+ -e
+ Remove the limit on the number of errors reported (default limit is 10).
+ -goversion string
+ Specify required go tool version of the runtime.
+ Exits when the runtime go version does not match goversion.
+ -h
+ Halt with a stack trace at the first error detected.
+ -importcfg file
+ Read import configuration from file.
+ In the file, set importmap, packagefile to specify import resolution.
+ -installsuffix suffix
+ Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix
+ instead of $GOROOT/pkg/$GOOS_$GOARCH.
+ -l
+ Disable inlining.
+ -lang version
+ Set language version to compile, as in -lang=go1.12.
+ Default is current version.
+ -linkobj file
+ Write linker-specific object to file and compiler-specific
+ object to usual output file (as specified by -o).
+ Without this flag, the -o output is a combination of both
+ linker and compiler input.
+ -m
+ Print optimization decisions. Higher values or repetition
+ produce more detail.
+ -memprofile file
+ Write memory profile for the compilation to file.
+ -memprofilerate rate
+ Set runtime.MemProfileRate for the compilation to rate.
+ -msan
+ Insert calls to C/C++ memory sanitizer.
+ -mutexprofile file
+ Write mutex profile for the compilation to file.
+ -nolocalimports
+ Disallow local (relative) imports.
+ -o file
+ Write object to file (default file.o or, with -pack, file.a).
+ -p path
+ Set expected package import path for the code being compiled,
+ and diagnose imports that would cause a circular dependency.
+ -pack
+ Write a package (archive) file rather than an object file
+ -race
+ Compile with race detector enabled.
+ -s
+ Warn about composite literals that can be simplified.
+ -shared
+ Generate code that can be linked into a shared library.
+ -spectre list
+ Enable spectre mitigations in list (all, index, ret).
+ -traceprofile file
+ Write an execution trace to file.
+ -trimpath prefix
+ Remove prefix from recorded source file paths.
+
+Flags related to debugging information:
+
+ -dwarf
+ Generate DWARF symbols.
+ -dwarflocationlists
+ Add location lists to DWARF in optimized mode.
+ -gendwarfinl int
+ Generate DWARF inline info records (default 2).
+
+Flags to debug the compiler itself:
+
+ -E
+ Debug symbol export.
+ -K
+ Debug missing line numbers.
+ -d list
+ Print debug information about items in list. Try -d help for further information.
+ -live
+ Debug liveness analysis.
+ -v
+ Increase debug verbosity.
+ -%
+ Debug non-static initializers.
+ -W
+ Debug parse tree after type checking.
+ -f
+ Debug stack frames.
+ -i
+ Debug line number stack.
+ -j
+ Debug runtime-initialized variables.
+ -r
+ Debug generated wrappers.
+ -w
+ Debug type checking.
+
+Compiler Directives
+
+The compiler accepts directives in the form of comments.
+To distinguish them from non-directive comments, directives
+require no space between the comment opening and the name of the directive. However, since
+they are comments, tools unaware of the directive convention or of a particular
+directive can skip over a directive like any other comment.
+*/
+// Line directives come in several forms:
+//
+// //line :line
+// //line :line:col
+// //line filename:line
+// //line filename:line:col
+// /*line :line*/
+// /*line :line:col*/
+// /*line filename:line*/
+// /*line filename:line:col*/
+//
+// In order to be recognized as a line directive, the comment must start with
+// //line or /*line followed by a space, and must contain at least one colon.
+// The //line form must start at the beginning of a line.
+// A line directive specifies the source position for the character immediately following
+// the comment as having come from the specified file, line and column:
+// For a //line comment, this is the first character of the next line, and
+// for a /*line comment this is the character position immediately following the closing */.
+// If no filename is given, the recorded filename is empty if there is also no column number;
+// otherwise it is the most recently recorded filename (actual filename or filename specified
+// by previous line directive).
+// If a line directive doesn't specify a column number, the column is "unknown" until
+// the next directive and the compiler does not report column numbers for that range.
+// The line directive text is interpreted from the back: First the trailing :ddd is peeled
+// off from the directive text if ddd is a valid number > 0. Then the second :ddd
+// is peeled off the same way if it is valid. Anything before that is considered the filename
+// (possibly including blanks and colons). Invalid line or column values are reported as errors.
+//
+// Examples:
+//
+// //line foo.go:10 the filename is foo.go, and the line number is 10 for the next line
+// //line C:foo.go:10 colons are permitted in filenames, here the filename is C:foo.go, and the line is 10
+// //line a:100 :10 blanks are permitted in filenames, here the filename is " a:100 " (excluding quotes)
+// /*line :10:20*/x the position of x is in the current file with line number 10 and column number 20
+// /*line foo: 10 */ this comment is recognized as invalid line directive (extra blanks around line number)
+//
+// Line directives typically appear in machine-generated code, so that compilers and debuggers
+// will report positions in the original input to the generator.
+/*
+The line directive is a historical special case; all other directives are of the form
+//go:name, indicating that they are defined by the Go toolchain.
+Each directive must be placed its own line, with only leading spaces and tabs
+allowed before the comment.
+Each directive applies to the Go code that immediately follows it,
+which typically must be a declaration.
+
+ //go:noescape
+
+The //go:noescape directive must be followed by a function declaration without
+a body (meaning that the function has an implementation not written in Go).
+It specifies that the function does not allow any of the pointers passed as
+arguments to escape into the heap or into the values returned from the function.
+This information can be used during the compiler's escape analysis of Go code
+calling the function.
+
+ //go:uintptrescapes
+
+The //go:uintptrescapes directive must be followed by a function declaration.
+It specifies that the function's uintptr arguments may be pointer values that
+have been converted to uintptr and must be on the heap and kept alive for the
+duration of the call, even though from the types alone it would appear that the
+object is no longer needed during the call. The conversion from pointer to
+uintptr must appear in the argument list of any call to this function. This
+directive is necessary for some low-level system call implementations and
+should be avoided otherwise.
+
+ //go:noinline
+
+The //go:noinline directive must be followed by a function declaration.
+It specifies that calls to the function should not be inlined, overriding
+the compiler's usual optimization rules. This is typically only needed
+for special runtime functions or when debugging the compiler.
+
+ //go:norace
+
+The //go:norace directive must be followed by a function declaration.
+It specifies that the function's memory accesses must be ignored by the
+race detector. This is most commonly used in low-level code invoked
+at times when it is unsafe to call into the race detector runtime.
+
+ //go:nosplit
+
+The //go:nosplit directive must be followed by a function declaration.
+It specifies that the function must omit its usual stack overflow check.
+This is most commonly used by low-level runtime code invoked
+at times when it is unsafe for the calling goroutine to be preempted.
+
+ //go:linkname localname [importpath.name]
+
+The //go:linkname directive conventionally precedes the var or func
+declaration named by ``localname``, though its position does not
+change its effect.
+This directive determines the object-file symbol used for a Go var or
+func declaration, allowing two Go symbols to alias the same
+object-file symbol, thereby enabling one package to access a symbol in
+another package even when this would violate the usual encapsulation
+of unexported declarations, or even type safety.
+For that reason, it is only enabled in files that have imported "unsafe".
+
+It may be used in two scenarios. Let's assume that package upper
+imports package lower, perhaps indirectly. In the first scenario,
+package lower defines a symbol whose object file name belongs to
+package upper. Both packages contain a linkname directive: package
+lower uses the two-argument form and package upper uses the
+one-argument form. In the example below, lower.f is an alias for the
+function upper.g:
+
+ package upper
+ import _ "unsafe"
+ //go:linkname g
+ func g()
+
+ package lower
+ import _ "unsafe"
+ //go:linkname f upper.g
+ func f() { ... }
+
+The linkname directive in package upper suppresses the usual error for
+a function that lacks a body. (That check may alternatively be
+suppressed by including a .s file, even an empty one, in the package.)
+
+In the second scenario, package upper unilaterally creates an alias
+for a symbol in package lower. In the example below, upper.g is an alias
+for the function lower.f.
+
+ package upper
+ import _ "unsafe"
+ //go:linkname g lower.f
+ func g()
+
+ package lower
+ func f() { ... }
+
+The declaration of lower.f may also have a linkname directive with a
+single argument, f. This is optional, but helps alert the reader that
+the function is accessed from outside the package.
+
+ //go:wasmimport importmodule importname
+
+The //go:wasmimport directive is wasm-only and must be followed by a
+function declaration.
+It specifies that the function is provided by a wasm module identified
+by ``importmodule`` and ``importname``.
+
+ //go:wasmimport a_module f
+ func g()
+
+The types of parameters and return values to the Go function are translated to
+Wasm according to the following table:
+
+ Go types Wasm types
+ int32, uint32 i32
+ int64, uint64 i64
+ float32 f32
+ float64 f64
+ unsafe.Pointer i32
+
+Any other parameter types are disallowed by the compiler.
+
+*/
+package main
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
new file mode 100644
index 0000000..607d462
--- /dev/null
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -0,0 +1,683 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "math"
+ "sync"
+)
+
+//......................................................................
+//
+// Public/exported bits of the ABI utilities.
+//
+
+// ABIParamResultInfo stores the results of processing a given
+// function type to compute stack layout and register assignments. For
+// each input and output parameter we capture whether the param was
+// register-assigned (and to which register(s)) or the stack offset
+// for the param if is not going to be passed in registers according
+// to the rules in the Go internal ABI specification (1.17).
+type ABIParamResultInfo struct {
+ inparams []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
+ outparams []ABIParamAssignment
+ offsetToSpillArea int64
+ spillAreaSize int64
+ inRegistersUsed int
+ outRegistersUsed int
+ config *ABIConfig // to enable String() method
+}
+
+func (a *ABIParamResultInfo) Config() *ABIConfig {
+ return a.config
+}
+
+func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
+ return a.inparams
+}
+
+func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
+ return a.outparams
+}
+
+func (a *ABIParamResultInfo) InRegistersUsed() int {
+ return a.inRegistersUsed
+}
+
+func (a *ABIParamResultInfo) OutRegistersUsed() int {
+ return a.outRegistersUsed
+}
+
+func (a *ABIParamResultInfo) InParam(i int) *ABIParamAssignment {
+ return &a.inparams[i]
+}
+
+func (a *ABIParamResultInfo) OutParam(i int) *ABIParamAssignment {
+ return &a.outparams[i]
+}
+
+func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
+ return a.offsetToSpillArea
+}
+
+func (a *ABIParamResultInfo) SpillAreaSize() int64 {
+ return a.spillAreaSize
+}
+
+// ArgWidth returns the amount of stack needed for all the inputs
+// and outputs of a function or method, including ABI-defined parameter
+// slots and ABI-defined spill slots for register-resident parameters.
+// The name is inherited from (*Type).ArgWidth(), which it replaces.
+func (a *ABIParamResultInfo) ArgWidth() int64 {
+ return a.spillAreaSize + a.offsetToSpillArea - a.config.LocalsOffset()
+}
+
+// RegIndex stores the index into the set of machine registers used by
+// the ABI on a specific architecture for parameter passing. RegIndex
+// values 0 through N-1 (where N is the number of integer registers
+// used for param passing according to the ABI rules) describe integer
+// registers; values N through M (where M is the number of floating
+// point registers used). Thus if the ABI says there are 5 integer
+// registers and 7 floating point registers, then RegIndex value of 4
+// indicates the 5th integer register, and a RegIndex value of 11
+// indicates the 7th floating point register.
+type RegIndex uint8
+
+// ABIParamAssignment holds information about how a specific param or
+// result will be passed: in registers (in which case 'Registers' is
+// populated) or on the stack (in which case 'Offset' is set to a
+// non-negative stack offset). The values in 'Registers' are indices
+// (as described above), not architected registers.
+type ABIParamAssignment struct {
+ Type *types.Type
+ Name *ir.Name
+ Registers []RegIndex
+ offset int32
+}
+
+// Offset returns the stack offset for addressing the parameter that "a" describes.
+// This will panic if "a" describes a register-allocated parameter.
+func (a *ABIParamAssignment) Offset() int32 {
+ if len(a.Registers) > 0 {
+ base.Fatalf("register allocated parameters have no offset")
+ }
+ return a.offset
+}
+
+// RegisterTypes returns a slice of the types of the registers
+// corresponding to a slice of parameters. The returned slice
+// has capacity for one more, likely a memory type.
+func RegisterTypes(apa []ABIParamAssignment) []*types.Type {
+ rcount := 0
+ for _, pa := range apa {
+ rcount += len(pa.Registers)
+ }
+ if rcount == 0 {
+ // Note that this catches top-level struct{} and [0]Foo, which are stack allocated.
+ return make([]*types.Type, 0, 1)
+ }
+ rts := make([]*types.Type, 0, rcount+1)
+ for _, pa := range apa {
+ if len(pa.Registers) == 0 {
+ continue
+ }
+ rts = appendParamTypes(rts, pa.Type)
+ }
+ return rts
+}
+
+func (pa *ABIParamAssignment) RegisterTypesAndOffsets() ([]*types.Type, []int64) {
+ l := len(pa.Registers)
+ if l == 0 {
+ return nil, nil
+ }
+ typs := make([]*types.Type, 0, l)
+ offs := make([]int64, 0, l)
+ offs, _ = appendParamOffsets(offs, 0, pa.Type)
+ return appendParamTypes(typs, pa.Type), offs
+}
+
+func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type {
+ w := t.Size()
+ if w == 0 {
+ return rts
+ }
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() {
+ c := types.FloatForComplex(t)
+ return append(rts, c, c)
+ } else {
+ if int(t.Size()) <= types.RegSize {
+ return append(rts, t)
+ }
+ // assume 64bit int on 32-bit machine
+ // TODO endianness? Should high-order (sign bits) word come first?
+ if t.IsSigned() {
+ rts = append(rts, types.Types[types.TINT32])
+ } else {
+ rts = append(rts, types.Types[types.TUINT32])
+ }
+ return append(rts, types.Types[types.TUINT32])
+ }
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ { // 0 gets no registers, plus future-proofing.
+ rts = appendParamTypes(rts, t.Elem())
+ }
+ case types.TSTRUCT:
+ for _, f := range t.Fields() {
+ if f.Type.Size() > 0 { // embedded zero-width types receive no registers
+ rts = appendParamTypes(rts, f.Type)
+ }
+ }
+ case types.TSLICE:
+ return appendParamTypes(rts, synthSlice)
+ case types.TSTRING:
+ return appendParamTypes(rts, synthString)
+ case types.TINTER:
+ return appendParamTypes(rts, synthIface)
+ }
+ }
+ return rts
+}
+
+// appendParamOffsets appends the offset(s) of type t, starting from "at",
+// to input offsets, and returns the longer slice and the next unused offset.
+func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int64) {
+ at = align(at, t)
+ w := t.Size()
+ if w == 0 {
+ return offsets, at
+ }
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() || int(t.Size()) > types.RegSize { // complex and *int64 on 32-bit
+ s := w / 2
+ return append(offsets, at, at+s), at + w
+ } else {
+ return append(offsets, at), at + w
+ }
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ {
+ offsets, at = appendParamOffsets(offsets, at, t.Elem())
+ }
+ case types.TSTRUCT:
+ for i, f := range t.Fields() {
+ offsets, at = appendParamOffsets(offsets, at, f.Type)
+ if f.Type.Size() == 0 && i == t.NumFields()-1 {
+ at++ // last field has zero width
+ }
+ }
+ at = align(at, t) // type size is rounded up to its alignment
+ case types.TSLICE:
+ return appendParamOffsets(offsets, at, synthSlice)
+ case types.TSTRING:
+ return appendParamOffsets(offsets, at, synthString)
+ case types.TINTER:
+ return appendParamOffsets(offsets, at, synthIface)
+ }
+ }
+ return offsets, at
+}
+
+// FrameOffset returns the frame-pointer-relative location that a function
+// would spill its input or output parameter to, if such a spill slot exists.
+// If there is none defined (e.g., register-allocated outputs) it panics.
+// For register-allocated inputs that is their spill offset reserved for morestack;
+// for stack-allocated inputs and outputs, that is their location on the stack.
+// (In a future version of the ABI, register-resident inputs may lose their defined
+// spill area to help reduce stack sizes.)
+func (a *ABIParamAssignment) FrameOffset(i *ABIParamResultInfo) int64 {
+ if a.offset == -1 {
+ base.Fatalf("function parameter has no ABI-defined frame-pointer offset")
+ }
+ if len(a.Registers) == 0 { // passed on stack
+ return int64(a.offset) - i.config.LocalsOffset()
+ }
+ // spill area for registers
+ return int64(a.offset) + i.SpillAreaOffset() - i.config.LocalsOffset()
+}
+
+// RegAmounts holds a specified number of integer/float registers.
+type RegAmounts struct {
+ intRegs int
+ floatRegs int
+}
+
+// ABIConfig captures the number of registers made available
+// by the ABI rules for parameter passing and result returning.
+type ABIConfig struct {
+ // Do we need anything more than this?
+ offsetForLocals int64 // e.g., obj.(*Link).Arch.FixedFrameSize -- extra linkage information on some architectures.
+ regAmounts RegAmounts
+ which obj.ABI
+}
+
+// NewABIConfig returns a new ABI configuration for an architecture with
+// iRegsCount integer/pointer registers and fRegsCount floating point registers.
+func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64, which uint8) *ABIConfig {
+ return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, which: obj.ABI(which)}
+}
+
+// Copy returns config.
+//
+// TODO(mdempsky): Remove.
+func (config *ABIConfig) Copy() *ABIConfig {
+ return config
+}
+
+// Which returns the ABI number
+func (config *ABIConfig) Which() obj.ABI {
+ return config.which
+}
+
+// LocalsOffset returns the architecture-dependent offset from SP for args and results.
+// In theory this is only used for debugging; it ought to already be incorporated into
+// results from the ABI-related methods
+func (config *ABIConfig) LocalsOffset() int64 {
+ return config.offsetForLocals
+}
+
+// FloatIndexFor translates r into an index in the floating point parameter
+// registers. If the result is negative, the input index was actually for the
+// integer parameter registers.
+func (config *ABIConfig) FloatIndexFor(r RegIndex) int64 {
+ return int64(r) - int64(config.regAmounts.intRegs)
+}
+
+// NumParamRegs returns the total number of registers used to
+// represent a parameter of the given type, which must be register
+// assignable.
+func (config *ABIConfig) NumParamRegs(typ *types.Type) int {
+ intRegs, floatRegs := typ.Registers()
+ if intRegs == math.MaxUint8 && floatRegs == math.MaxUint8 {
+ base.Fatalf("cannot represent parameters of type %v in registers", typ)
+ }
+ return int(intRegs) + int(floatRegs)
+}
+
+// ABIAnalyzeTypes takes slices of parameter and result types, and returns an ABIParamResultInfo,
+// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the
+// corresponding method/function type, except that all the embedded parameter names are nil.
+// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type.
+func (config *ABIConfig) ABIAnalyzeTypes(params, results []*types.Type) *ABIParamResultInfo {
+ setup()
+ s := assignState{
+ stackOffset: config.offsetForLocals,
+ rTotal: config.regAmounts,
+ }
+
+ assignParams := func(params []*types.Type, isResult bool) []ABIParamAssignment {
+ res := make([]ABIParamAssignment, len(params))
+ for i, param := range params {
+ res[i] = s.assignParam(param, nil, isResult)
+ }
+ return res
+ }
+
+ info := &ABIParamResultInfo{config: config}
+
+ // Inputs
+ info.inparams = assignParams(params, false)
+ s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize))
+ info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+ // Outputs
+ s.rUsed = RegAmounts{}
+ info.outparams = assignParams(results, true)
+ // The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+ // TODO in theory could align offset only to minimum required by spilled data types.
+ info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+ info.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+ info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+ return info
+}
+
+// ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description
+// 'config' and analyzes the function to determine how its parameters
+// and results will be passed (in registers or on the stack), returning
+// an ABIParamResultInfo object that holds the results of the analysis.
+func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Type) *ABIParamResultInfo {
+ setup()
+ s := assignState{
+ stackOffset: config.offsetForLocals,
+ rTotal: config.regAmounts,
+ }
+
+ assignParams := func(params []*types.Field, isResult bool) []ABIParamAssignment {
+ res := make([]ABIParamAssignment, len(params))
+ for i, param := range params {
+ var name *ir.Name
+ if param.Nname != nil {
+ name = param.Nname.(*ir.Name)
+ }
+ res[i] = s.assignParam(param.Type, name, isResult)
+ }
+ return res
+ }
+
+ info := &ABIParamResultInfo{config: config}
+
+ // Inputs
+ info.inparams = assignParams(ft.RecvParams(), false)
+ s.stackOffset = types.RoundUp(s.stackOffset, int64(types.RegSize))
+ info.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+ // Outputs
+ s.rUsed = RegAmounts{}
+ info.outparams = assignParams(ft.Results(), true)
+ // The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+ // TODO in theory could align offset only to minimum required by spilled data types.
+ info.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+ info.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+ info.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+ return info
+}
+
+// ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also
+// updates the offsets of all the receiver, input, and output fields.
+// If setNname is true, it also sets the FrameOffset of the Nname for
+// the field(s); this is for use when compiling a function and figuring out
+// spill locations. Doing this for callers can cause races for register
+// outputs because their frame location transitions from BOGUS_FUNARG_OFFSET
+// to zero to an as-if-AUTO offset that has no use for callers.
+func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResultInfo {
+ result := config.ABIAnalyzeFuncType(t)
+
+ // Fill in the frame offsets for receiver, inputs, results
+ for i, f := range t.RecvParams() {
+ config.updateOffset(result, f, result.inparams[i], false, setNname)
+ }
+ for i, f := range t.Results() {
+ config.updateOffset(result, f, result.outparams[i], true, setNname)
+ }
+ return result
+}
+
+func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isResult, setNname bool) {
+ if f.Offset != types.BADWIDTH {
+ base.Fatalf("field offset for %s at %s has been set to %d", f.Sym, base.FmtPos(f.Pos), f.Offset)
+ }
+
+ // Everything except return values in registers has either a frame home (if not in a register) or a frame spill location.
+ if !isResult || len(a.Registers) == 0 {
+ // The type frame offset DOES NOT show effects of minimum frame size.
+ // Getting this wrong breaks stackmaps, see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set
+ off := a.FrameOffset(result)
+ if setNname && f.Nname != nil {
+ f.Nname.(*ir.Name).SetFrameOffset(off)
+ f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
+ }
+ } else {
+ if setNname && f.Nname != nil {
+ fname := f.Nname.(*ir.Name)
+ fname.SetIsOutputParamInRegisters(true)
+ fname.SetFrameOffset(0)
+ }
+ }
+}
+
+//......................................................................
+//
+// Non-public portions.
+
+// regString produces a human-readable version of a RegIndex.
+func (c *RegAmounts) regString(r RegIndex) string {
+ if int(r) < c.intRegs {
+ return fmt.Sprintf("I%d", int(r))
+ } else if int(r) < c.intRegs+c.floatRegs {
+ return fmt.Sprintf("F%d", int(r)-c.intRegs)
+ }
+ return fmt.Sprintf("<?>%d", r)
+}
+
+// ToString method renders an ABIParamAssignment in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamAssignment) ToString(config *ABIConfig, extra bool) string {
+ regs := "R{"
+ offname := "spilloffset" // offset is for spill for register(s)
+ if len(ri.Registers) == 0 {
+ offname = "offset" // offset is for memory arg
+ }
+ for _, r := range ri.Registers {
+ regs += " " + config.regAmounts.regString(r)
+ if extra {
+ regs += fmt.Sprintf("(%d)", r)
+ }
+ }
+ if extra {
+ regs += fmt.Sprintf(" | #I=%d, #F=%d", config.regAmounts.intRegs, config.regAmounts.floatRegs)
+ }
+ return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
+}
+
+// String method renders an ABIParamResultInfo in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamResultInfo) String() string {
+ res := ""
+ for k, p := range ri.inparams {
+ res += fmt.Sprintf("IN %d: %s\n", k, p.ToString(ri.config, false))
+ }
+ for k, r := range ri.outparams {
+ res += fmt.Sprintf("OUT %d: %s\n", k, r.ToString(ri.config, false))
+ }
+ res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
+ ri.offsetToSpillArea, ri.spillAreaSize)
+ return res
+}
+
+// assignState holds intermediate state during the register assigning process
+// for a given function signature.
+type assignState struct {
+ rTotal RegAmounts // total reg amounts from ABI rules
+ rUsed RegAmounts // regs used by params completely assigned so far
+ stackOffset int64 // current stack offset
+ spillOffset int64 // current spill offset
+}
+
+// align returns a rounded up to t's alignment.
+func align(a int64, t *types.Type) int64 {
+ return alignTo(a, int(uint8(t.Alignment())))
+}
+
+// alignTo returns a rounded up to t, where t must be 0 or a power of 2.
+func alignTo(a int64, t int) int64 {
+ if t == 0 {
+ return a
+ }
+ return types.RoundUp(a, int64(t))
+}
+
+// nextSlot allocates the next available slot for typ.
+func nextSlot(offsetp *int64, typ *types.Type) int64 {
+ offset := align(*offsetp, typ)
+ *offsetp = offset + typ.Size()
+ return offset
+}
+
+// allocateRegs returns an ordered list of register indices for a parameter or result
+// that we've just determined to be register-assignable. The number of registers
+// needed is assumed to be stored in state.pUsed.
+func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegIndex {
+ if t.Size() == 0 {
+ return regs
+ }
+ ri := state.rUsed.intRegs
+ rf := state.rUsed.floatRegs
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() {
+ regs = append(regs, RegIndex(rf+state.rTotal.intRegs), RegIndex(rf+1+state.rTotal.intRegs))
+ rf += 2
+ } else if t.IsFloat() {
+ regs = append(regs, RegIndex(rf+state.rTotal.intRegs))
+ rf += 1
+ } else {
+ n := (int(t.Size()) + types.RegSize - 1) / types.RegSize
+ for i := 0; i < n; i++ { // looking ahead to really big integers
+ regs = append(regs, RegIndex(ri))
+ ri += 1
+ }
+ }
+ state.rUsed.intRegs = ri
+ state.rUsed.floatRegs = rf
+ return regs
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ {
+ regs = state.allocateRegs(regs, t.Elem())
+ }
+ return regs
+ case types.TSTRUCT:
+ for _, f := range t.Fields() {
+ regs = state.allocateRegs(regs, f.Type)
+ }
+ return regs
+ case types.TSLICE:
+ return state.allocateRegs(regs, synthSlice)
+ case types.TSTRING:
+ return state.allocateRegs(regs, synthString)
+ case types.TINTER:
+ return state.allocateRegs(regs, synthIface)
+ }
+ }
+ base.Fatalf("was not expecting type %s", t)
+ panic("unreachable")
+}
+
+// synthOnce ensures that we only create the synth* fake types once.
+var synthOnce sync.Once
+
+// synthSlice, synthString, and syncIface are synthesized struct types
+// meant to capture the underlying implementations of string/slice/interface.
+var synthSlice *types.Type
+var synthString *types.Type
+var synthIface *types.Type
+
+// setup performs setup for the register assignment utilities, manufacturing
+// a small set of synthesized types that we'll need along the way.
+func setup() {
+ synthOnce.Do(func() {
+ fname := types.BuiltinPkg.Lookup
+ nxp := src.NoXPos
+ bp := types.NewPtr(types.Types[types.TUINT8])
+ it := types.Types[types.TINT]
+ synthSlice = types.NewStruct([]*types.Field{
+ types.NewField(nxp, fname("ptr"), bp),
+ types.NewField(nxp, fname("len"), it),
+ types.NewField(nxp, fname("cap"), it),
+ })
+ types.CalcStructSize(synthSlice)
+ synthString = types.NewStruct([]*types.Field{
+ types.NewField(nxp, fname("data"), bp),
+ types.NewField(nxp, fname("len"), it),
+ })
+ types.CalcStructSize(synthString)
+ unsp := types.Types[types.TUNSAFEPTR]
+ synthIface = types.NewStruct([]*types.Field{
+ types.NewField(nxp, fname("f1"), unsp),
+ types.NewField(nxp, fname("f2"), unsp),
+ })
+ types.CalcStructSize(synthIface)
+ })
+}
+
+// assignParam processes a given receiver, param, or result
+// of field f to determine whether it can be register assigned.
+// The result of the analysis is recorded in the result
+// ABIParamResultInfo held in 'state'.
+func (state *assignState) assignParam(typ *types.Type, name *ir.Name, isResult bool) ABIParamAssignment {
+ registers := state.tryAllocRegs(typ)
+
+ var offset int64 = -1
+ if registers == nil { // stack allocated; needs stack slot
+ offset = nextSlot(&state.stackOffset, typ)
+ } else if !isResult { // register-allocated param; needs spill slot
+ offset = nextSlot(&state.spillOffset, typ)
+ }
+
+ return ABIParamAssignment{
+ Type: typ,
+ Name: name,
+ Registers: registers,
+ offset: int32(offset),
+ }
+}
+
+// tryAllocRegs attempts to allocate registers to represent a
+// parameter of the given type. If unsuccessful, it returns nil.
+func (state *assignState) tryAllocRegs(typ *types.Type) []RegIndex {
+ if typ.Size() == 0 {
+ return nil // zero-size parameters are defined as being stack allocated
+ }
+
+ intRegs, floatRegs := typ.Registers()
+ if int(intRegs) > state.rTotal.intRegs-state.rUsed.intRegs || int(floatRegs) > state.rTotal.floatRegs-state.rUsed.floatRegs {
+ return nil // too few available registers
+ }
+
+ regs := make([]RegIndex, 0, int(intRegs)+int(floatRegs))
+ return state.allocateRegs(regs, typ)
+}
+
+// ComputePadding returns a list of "post element" padding values in
+// the case where we have a structure being passed in registers. Given
+// a param assignment corresponding to a struct, it returns a list
+// containing padding values for each field, e.g. the Kth element in
+// the list is the amount of padding between field K and the following
+// field. For things that are not structs (or structs without padding)
+// it returns a list of zeros. Example:
+//
+// type small struct {
+// x uint16
+// y uint8
+// z int32
+// w int32
+// }
+//
+// For this struct we would return a list [0, 1, 0, 0], meaning that
+// we have one byte of padding after the second field, and no bytes of
+// padding after any of the other fields. Input parameter "storage" is
+// a slice with enough capacity to accommodate padding elements for
+// the architected register set in question.
+func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 {
+ nr := len(pa.Registers)
+ padding := storage[:nr]
+ for i := 0; i < nr; i++ {
+ padding[i] = 0
+ }
+ if pa.Type.Kind() != types.TSTRUCT || nr == 0 {
+ return padding
+ }
+ types := make([]*types.Type, 0, nr)
+ types = appendParamTypes(types, pa.Type)
+ if len(types) != nr {
+ panic("internal error")
+ }
+ off := int64(0)
+ for idx, t := range types {
+ ts := t.Size()
+ off += int64(ts)
+ if idx < len(types)-1 {
+ noff := align(off, types[idx+1])
+ if noff != off {
+ padding[idx] = uint64(noff - off)
+ }
+ }
+ }
+ return padding
+}
diff --git a/src/cmd/compile/internal/abt/avlint32.go b/src/cmd/compile/internal/abt/avlint32.go
new file mode 100644
index 0000000..28c1642
--- /dev/null
+++ b/src/cmd/compile/internal/abt/avlint32.go
@@ -0,0 +1,832 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abt
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const (
+ LEAF_HEIGHT = 1
+ ZERO_HEIGHT = 0
+ NOT_KEY32 = int32(-0x80000000)
+)
+
+// T is the exported applicative balanced tree data type.
+// A T can be used as a value; updates to one copy of the value
+// do not change other copies.
+type T struct {
+ root *node32
+ size int
+}
+
+// node32 is the internal tree node data type
+type node32 struct {
+ // Standard conventions hold for left = smaller, right = larger
+ left, right *node32
+ data interface{}
+ key int32
+ height_ int8
+}
+
+func makeNode(key int32) *node32 {
+ return &node32{key: key, height_: LEAF_HEIGHT}
+}
+
+// IsEmpty returns true iff t is empty.
+func (t *T) IsEmpty() bool {
+ return t.root == nil
+}
+
+// IsSingle returns true iff t is a singleton (leaf).
+func (t *T) IsSingle() bool {
+ return t.root != nil && t.root.isLeaf()
+}
+
+// VisitInOrder applies f to the key and data pairs in t,
+// with keys ordered from smallest to largest.
+func (t *T) VisitInOrder(f func(int32, interface{})) {
+ if t.root == nil {
+ return
+ }
+ t.root.visitInOrder(f)
+}
+
+func (n *node32) nilOrData() interface{} {
+ if n == nil {
+ return nil
+ }
+ return n.data
+}
+
+func (n *node32) nilOrKeyAndData() (k int32, d interface{}) {
+ if n == nil {
+ k = NOT_KEY32
+ d = nil
+ } else {
+ k = n.key
+ d = n.data
+ }
+ return
+}
+
+func (n *node32) height() int8 {
+ if n == nil {
+ return 0
+ }
+ return n.height_
+}
+
+// Find returns the data associated with x in the tree, or
+// nil if x is not in the tree.
+func (t *T) Find(x int32) interface{} {
+ return t.root.find(x).nilOrData()
+}
+
+// Insert either adds x to the tree if x was not previously
+// a key in the tree, or updates the data for x in the tree if
+// x was already a key in the tree. The previous data associated
+// with x is returned, and is nil if x was not previously a
+// key in the tree.
+func (t *T) Insert(x int32, data interface{}) interface{} {
+ if x == NOT_KEY32 {
+ panic("Cannot use sentinel value -0x80000000 as key")
+ }
+ n := t.root
+ var newroot *node32
+ var o *node32
+ if n == nil {
+ n = makeNode(x)
+ newroot = n
+ } else {
+ newroot, n, o = n.aInsert(x)
+ }
+ var r interface{}
+ if o != nil {
+ r = o.data
+ } else {
+ t.size++
+ }
+ n.data = data
+ t.root = newroot
+ return r
+}
+
+func (t *T) Copy() *T {
+ u := *t
+ return &u
+}
+
+func (t *T) Delete(x int32) interface{} {
+ n := t.root
+ if n == nil {
+ return nil
+ }
+ d, s := n.aDelete(x)
+ if d == nil {
+ return nil
+ }
+ t.root = s
+ t.size--
+ return d.data
+}
+
+func (t *T) DeleteMin() (int32, interface{}) {
+ n := t.root
+ if n == nil {
+ return NOT_KEY32, nil
+ }
+ d, s := n.aDeleteMin()
+ if d == nil {
+ return NOT_KEY32, nil
+ }
+ t.root = s
+ t.size--
+ return d.key, d.data
+}
+
+func (t *T) DeleteMax() (int32, interface{}) {
+ n := t.root
+ if n == nil {
+ return NOT_KEY32, nil
+ }
+ d, s := n.aDeleteMax()
+ if d == nil {
+ return NOT_KEY32, nil
+ }
+ t.root = s
+ t.size--
+ return d.key, d.data
+}
+
+func (t *T) Size() int {
+ return t.size
+}
+
+// Intersection returns the intersection of t and u, where the result
+// data for any common keys is given by f(t's data, u's data) -- f need
+// not be symmetric. If f returns nil, then the key and data are not
+// added to the result. If f itself is nil, then whatever value was
+// already present in the smaller set is used.
+func (t *T) Intersection(u *T, f func(x, y interface{}) interface{}) *T {
+ if t.Size() == 0 || u.Size() == 0 {
+ return &T{}
+ }
+
+ // For faster execution and less allocation, prefer t smaller, iterate over t.
+ if t.Size() <= u.Size() {
+ v := t.Copy()
+ for it := t.Iterator(); !it.Done(); {
+ k, d := it.Next()
+ e := u.Find(k)
+ if e == nil {
+ v.Delete(k)
+ continue
+ }
+ if f == nil {
+ continue
+ }
+ if c := f(d, e); c != d {
+ if c == nil {
+ v.Delete(k)
+ } else {
+ v.Insert(k, c)
+ }
+ }
+ }
+ return v
+ }
+ v := u.Copy()
+ for it := u.Iterator(); !it.Done(); {
+ k, e := it.Next()
+ d := t.Find(k)
+ if d == nil {
+ v.Delete(k)
+ continue
+ }
+ if f == nil {
+ continue
+ }
+ if c := f(d, e); c != d {
+ if c == nil {
+ v.Delete(k)
+ } else {
+ v.Insert(k, c)
+ }
+ }
+ }
+
+ return v
+}
+
+// Union returns the union of t and u, where the result data for any common keys
+// is given by f(t's data, u's data) -- f need not be symmetric. If f returns nil,
+// then the key and data are not added to the result. If f itself is nil, then
+// whatever value was already present in the larger set is used.
+func (t *T) Union(u *T, f func(x, y interface{}) interface{}) *T {
+ if t.Size() == 0 {
+ return u
+ }
+ if u.Size() == 0 {
+ return t
+ }
+
+ if t.Size() >= u.Size() {
+ v := t.Copy()
+ for it := u.Iterator(); !it.Done(); {
+ k, e := it.Next()
+ d := t.Find(k)
+ if d == nil {
+ v.Insert(k, e)
+ continue
+ }
+ if f == nil {
+ continue
+ }
+ if c := f(d, e); c != d {
+ if c == nil {
+ v.Delete(k)
+ } else {
+ v.Insert(k, c)
+ }
+ }
+ }
+ return v
+ }
+
+ v := u.Copy()
+ for it := t.Iterator(); !it.Done(); {
+ k, d := it.Next()
+ e := u.Find(k)
+ if e == nil {
+ v.Insert(k, d)
+ continue
+ }
+ if f == nil {
+ continue
+ }
+ if c := f(d, e); c != d {
+ if c == nil {
+ v.Delete(k)
+ } else {
+ v.Insert(k, c)
+ }
+ }
+ }
+ return v
+}
+
+// Difference returns the difference of t and u, subject to the result
+// of f applied to data corresponding to equal keys. If f returns nil
+// (or if f is nil) then the key+data are excluded, as usual. If f
+// returns not-nil, then that key+data pair is inserted. instead.
+func (t *T) Difference(u *T, f func(x, y interface{}) interface{}) *T {
+ if t.Size() == 0 {
+ return &T{}
+ }
+ if u.Size() == 0 {
+ return t
+ }
+ v := t.Copy()
+ for it := t.Iterator(); !it.Done(); {
+ k, d := it.Next()
+ e := u.Find(k)
+ if e != nil {
+ if f == nil {
+ v.Delete(k)
+ continue
+ }
+ c := f(d, e)
+ if c == nil {
+ v.Delete(k)
+ continue
+ }
+ if c != d {
+ v.Insert(k, c)
+ }
+ }
+ }
+ return v
+}
+
+func (t *T) Iterator() Iterator {
+ return Iterator{it: t.root.iterator()}
+}
+
+func (t *T) Equals(u *T) bool {
+ if t == u {
+ return true
+ }
+ if t.Size() != u.Size() {
+ return false
+ }
+ return t.root.equals(u.root)
+}
+
+func (t *T) String() string {
+ var b strings.Builder
+ first := true
+ for it := t.Iterator(); !it.Done(); {
+ k, v := it.Next()
+ if first {
+ first = false
+ } else {
+ b.WriteString("; ")
+ }
+ b.WriteString(strconv.FormatInt(int64(k), 10))
+ b.WriteString(":")
+ fmt.Fprint(&b, v)
+ }
+ return b.String()
+}
+
+func (t *node32) equals(u *node32) bool {
+ if t == u {
+ return true
+ }
+ it, iu := t.iterator(), u.iterator()
+ for !it.done() && !iu.done() {
+ nt := it.next()
+ nu := iu.next()
+ if nt == nu {
+ continue
+ }
+ if nt.key != nu.key {
+ return false
+ }
+ if nt.data != nu.data {
+ return false
+ }
+ }
+ return it.done() == iu.done()
+}
+
+func (t *T) Equiv(u *T, eqv func(x, y interface{}) bool) bool {
+ if t == u {
+ return true
+ }
+ if t.Size() != u.Size() {
+ return false
+ }
+ return t.root.equiv(u.root, eqv)
+}
+
+func (t *node32) equiv(u *node32, eqv func(x, y interface{}) bool) bool {
+ if t == u {
+ return true
+ }
+ it, iu := t.iterator(), u.iterator()
+ for !it.done() && !iu.done() {
+ nt := it.next()
+ nu := iu.next()
+ if nt == nu {
+ continue
+ }
+ if nt.key != nu.key {
+ return false
+ }
+ if !eqv(nt.data, nu.data) {
+ return false
+ }
+ }
+ return it.done() == iu.done()
+}
+
+type iterator struct {
+ parents []*node32
+}
+
+type Iterator struct {
+ it iterator
+}
+
+func (it *Iterator) Next() (int32, interface{}) {
+ x := it.it.next()
+ if x == nil {
+ return NOT_KEY32, nil
+ }
+ return x.key, x.data
+}
+
+func (it *Iterator) Done() bool {
+ return len(it.it.parents) == 0
+}
+
+func (t *node32) iterator() iterator {
+ if t == nil {
+ return iterator{}
+ }
+ it := iterator{parents: make([]*node32, 0, int(t.height()))}
+ it.leftmost(t)
+ return it
+}
+
+func (it *iterator) leftmost(t *node32) {
+ for t != nil {
+ it.parents = append(it.parents, t)
+ t = t.left
+ }
+}
+
+func (it *iterator) done() bool {
+ return len(it.parents) == 0
+}
+
+func (it *iterator) next() *node32 {
+ l := len(it.parents)
+ if l == 0 {
+ return nil
+ }
+ x := it.parents[l-1] // return value
+ if x.right != nil {
+ it.leftmost(x.right)
+ return x
+ }
+ // discard visited top of parents
+ l--
+ it.parents = it.parents[:l]
+ y := x // y is known visited/returned
+ for l > 0 && y == it.parents[l-1].right {
+ y = it.parents[l-1]
+ l--
+ it.parents = it.parents[:l]
+ }
+
+ return x
+}
+
+// Min returns the minimum element of t.
+// If t is empty, then (NOT_KEY32, nil) is returned.
+func (t *T) Min() (k int32, d interface{}) {
+ return t.root.min().nilOrKeyAndData()
+}
+
+// Max returns the maximum element of t.
+// If t is empty, then (NOT_KEY32, nil) is returned.
+func (t *T) Max() (k int32, d interface{}) {
+ return t.root.max().nilOrKeyAndData()
+}
+
+// Glb returns the greatest-lower-bound-exclusive of x and the associated
+// data. If x has no glb in the tree, then (NOT_KEY32, nil) is returned.
+func (t *T) Glb(x int32) (k int32, d interface{}) {
+ return t.root.glb(x, false).nilOrKeyAndData()
+}
+
+// GlbEq returns the greatest-lower-bound-inclusive of x and the associated
+// data. If x has no glbEQ in the tree, then (NOT_KEY32, nil) is returned.
+func (t *T) GlbEq(x int32) (k int32, d interface{}) {
+ return t.root.glb(x, true).nilOrKeyAndData()
+}
+
+// Lub returns the least-upper-bound-exclusive of x and the associated
+// data. If x has no lub in the tree, then (NOT_KEY32, nil) is returned.
+func (t *T) Lub(x int32) (k int32, d interface{}) {
+ return t.root.lub(x, false).nilOrKeyAndData()
+}
+
+// LubEq returns the least-upper-bound-inclusive of x and the associated
+// data. If x has no lubEq in the tree, then (NOT_KEY32, nil) is returned.
+func (t *T) LubEq(x int32) (k int32, d interface{}) {
+ return t.root.lub(x, true).nilOrKeyAndData()
+}
+
+func (t *node32) isLeaf() bool {
+ return t.left == nil && t.right == nil && t.height_ == LEAF_HEIGHT
+}
+
+func (t *node32) visitInOrder(f func(int32, interface{})) {
+ if t.left != nil {
+ t.left.visitInOrder(f)
+ }
+ f(t.key, t.data)
+ if t.right != nil {
+ t.right.visitInOrder(f)
+ }
+}
+
+func (t *node32) find(key int32) *node32 {
+ for t != nil {
+ if key < t.key {
+ t = t.left
+ } else if key > t.key {
+ t = t.right
+ } else {
+ return t
+ }
+ }
+ return nil
+}
+
+func (t *node32) min() *node32 {
+ if t == nil {
+ return t
+ }
+ for t.left != nil {
+ t = t.left
+ }
+ return t
+}
+
+func (t *node32) max() *node32 {
+ if t == nil {
+ return t
+ }
+ for t.right != nil {
+ t = t.right
+ }
+ return t
+}
+
+func (t *node32) glb(key int32, allow_eq bool) *node32 {
+ var best *node32 = nil
+ for t != nil {
+ if key <= t.key {
+ if allow_eq && key == t.key {
+ return t
+ }
+ // t is too big, glb is to left.
+ t = t.left
+ } else {
+ // t is a lower bound, record it and seek a better one.
+ best = t
+ t = t.right
+ }
+ }
+ return best
+}
+
+func (t *node32) lub(key int32, allow_eq bool) *node32 {
+ var best *node32 = nil
+ for t != nil {
+ if key >= t.key {
+ if allow_eq && key == t.key {
+ return t
+ }
+ // t is too small, lub is to right.
+ t = t.right
+ } else {
+ // t is an upper bound, record it and seek a better one.
+ best = t
+ t = t.left
+ }
+ }
+ return best
+}
+
+func (t *node32) aInsert(x int32) (newroot, newnode, oldnode *node32) {
+ // oldnode default of nil is good, others should be assigned.
+ if x == t.key {
+ oldnode = t
+ newt := *t
+ newnode = &newt
+ newroot = newnode
+ return
+ }
+ if x < t.key {
+ if t.left == nil {
+ t = t.copy()
+ n := makeNode(x)
+ t.left = n
+ newnode = n
+ newroot = t
+ t.height_ = 2 // was balanced w/ 0, sibling is height 0 or 1
+ return
+ }
+ var new_l *node32
+ new_l, newnode, oldnode = t.left.aInsert(x)
+ t = t.copy()
+ t.left = new_l
+ if new_l.height() > 1+t.right.height() {
+ newroot = t.aLeftIsHigh(newnode)
+ } else {
+ t.height_ = 1 + max(t.left.height(), t.right.height())
+ newroot = t
+ }
+ } else { // x > t.key
+ if t.right == nil {
+ t = t.copy()
+ n := makeNode(x)
+ t.right = n
+ newnode = n
+ newroot = t
+ t.height_ = 2 // was balanced w/ 0, sibling is height 0 or 1
+ return
+ }
+ var new_r *node32
+ new_r, newnode, oldnode = t.right.aInsert(x)
+ t = t.copy()
+ t.right = new_r
+ if new_r.height() > 1+t.left.height() {
+ newroot = t.aRightIsHigh(newnode)
+ } else {
+ t.height_ = 1 + max(t.left.height(), t.right.height())
+ newroot = t
+ }
+ }
+ return
+}
+
+func (t *node32) aDelete(key int32) (deleted, newSubTree *node32) {
+ if t == nil {
+ return nil, nil
+ }
+
+ if key < t.key {
+ oh := t.left.height()
+ d, tleft := t.left.aDelete(key)
+ if tleft == t.left {
+ return d, t
+ }
+ return d, t.copy().aRebalanceAfterLeftDeletion(oh, tleft)
+ } else if key > t.key {
+ oh := t.right.height()
+ d, tright := t.right.aDelete(key)
+ if tright == t.right {
+ return d, t
+ }
+ return d, t.copy().aRebalanceAfterRightDeletion(oh, tright)
+ }
+
+ if t.height() == LEAF_HEIGHT {
+ return t, nil
+ }
+
+ // Interior delete by removing left.Max or right.Min,
+ // then swapping contents
+ if t.left.height() > t.right.height() {
+ oh := t.left.height()
+ d, tleft := t.left.aDeleteMax()
+ r := t
+ t = t.copy()
+ t.data, t.key = d.data, d.key
+ return r, t.aRebalanceAfterLeftDeletion(oh, tleft)
+ }
+
+ oh := t.right.height()
+ d, tright := t.right.aDeleteMin()
+ r := t
+ t = t.copy()
+ t.data, t.key = d.data, d.key
+ return r, t.aRebalanceAfterRightDeletion(oh, tright)
+}
+
+func (t *node32) aDeleteMin() (deleted, newSubTree *node32) {
+ if t == nil {
+ return nil, nil
+ }
+ if t.left == nil { // leaf or left-most
+ return t, t.right
+ }
+ oh := t.left.height()
+ d, tleft := t.left.aDeleteMin()
+ if tleft == t.left {
+ return d, t
+ }
+ return d, t.copy().aRebalanceAfterLeftDeletion(oh, tleft)
+}
+
+func (t *node32) aDeleteMax() (deleted, newSubTree *node32) {
+ if t == nil {
+ return nil, nil
+ }
+
+ if t.right == nil { // leaf or right-most
+ return t, t.left
+ }
+
+ oh := t.right.height()
+ d, tright := t.right.aDeleteMax()
+ if tright == t.right {
+ return d, t
+ }
+ return d, t.copy().aRebalanceAfterRightDeletion(oh, tright)
+}
+
+func (t *node32) aRebalanceAfterLeftDeletion(oldLeftHeight int8, tleft *node32) *node32 {
+ t.left = tleft
+
+ if oldLeftHeight == tleft.height() || oldLeftHeight == t.right.height() {
+ // this node is still balanced and its height is unchanged
+ return t
+ }
+
+ if oldLeftHeight > t.right.height() {
+ // left was larger
+ t.height_--
+ return t
+ }
+
+ // left height fell by 1 and it was already less than right height
+ t.right = t.right.copy()
+ return t.aRightIsHigh(nil)
+}
+
+func (t *node32) aRebalanceAfterRightDeletion(oldRightHeight int8, tright *node32) *node32 {
+ t.right = tright
+
+ if oldRightHeight == tright.height() || oldRightHeight == t.left.height() {
+ // this node is still balanced and its height is unchanged
+ return t
+ }
+
+ if oldRightHeight > t.left.height() {
+ // left was larger
+ t.height_--
+ return t
+ }
+
+ // right height fell by 1 and it was already less than left height
+ t.left = t.left.copy()
+ return t.aLeftIsHigh(nil)
+}
+
+// aRightIsHigh does rotations necessary to fix a high right child
+// assume that t and t.right are already fresh copies.
+func (t *node32) aRightIsHigh(newnode *node32) *node32 {
+ right := t.right
+ if right.right.height() < right.left.height() {
+ // double rotation
+ if newnode != right.left {
+ right.left = right.left.copy()
+ }
+ t.right = right.leftToRoot()
+ }
+ t = t.rightToRoot()
+ return t
+}
+
+// aLeftIsHigh does rotations necessary to fix a high left child
+// assume that t and t.left are already fresh copies.
+func (t *node32) aLeftIsHigh(newnode *node32) *node32 {
+ left := t.left
+ if left.left.height() < left.right.height() {
+ // double rotation
+ if newnode != left.right {
+ left.right = left.right.copy()
+ }
+ t.left = left.rightToRoot()
+ }
+ t = t.leftToRoot()
+ return t
+}
+
+// rightToRoot does that rotation, modifying t and t.right in the process.
+func (t *node32) rightToRoot() *node32 {
+ // this
+ // left right
+ // rl rr
+ //
+ // becomes
+ //
+ // right
+ // this rr
+ // left rl
+ //
+ right := t.right
+ rl := right.left
+ right.left = t
+ // parent's child ptr fixed in caller
+ t.right = rl
+ t.height_ = 1 + max(rl.height(), t.left.height())
+ right.height_ = 1 + max(t.height(), right.right.height())
+ return right
+}
+
+// leftToRoot does that rotation, modifying t and t.left in the process.
+func (t *node32) leftToRoot() *node32 {
+ // this
+ // left right
+ // ll lr
+ //
+ // becomes
+ //
+ // left
+ // ll this
+ // lr right
+ //
+ left := t.left
+ lr := left.right
+ left.right = t
+ // parent's child ptr fixed in caller
+ t.left = lr
+ t.height_ = 1 + max(lr.height(), t.right.height())
+ left.height_ = 1 + max(t.height(), left.left.height())
+ return left
+}
+
+func max(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func (t *node32) copy() *node32 {
+ u := *t
+ return &u
+}
diff --git a/src/cmd/compile/internal/abt/avlint32_test.go b/src/cmd/compile/internal/abt/avlint32_test.go
new file mode 100644
index 0000000..7fa9ed4
--- /dev/null
+++ b/src/cmd/compile/internal/abt/avlint32_test.go
@@ -0,0 +1,700 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abt
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func makeTree(te *testing.T, x []int32, check bool) (t *T, k int, min, max int32) {
+ t = &T{}
+ k = 0
+ min = int32(0x7fffffff)
+ max = int32(-0x80000000)
+ history := []*T{}
+
+ for _, d := range x {
+ d = d + d // double everything for Glb/Lub testing.
+
+ if check {
+ history = append(history, t.Copy())
+ }
+
+ t.Insert(d, stringer(fmt.Sprintf("%v", d)))
+
+ k++
+ if d < min {
+ min = d
+ }
+ if d > max {
+ max = d
+ }
+
+ if !check {
+ continue
+ }
+
+ for j, old := range history {
+ s, i := old.wellFormed()
+ if s != "" {
+ te.Errorf("Old tree consistency problem %v at k=%d, j=%d, old=\n%v, t=\n%v", s, k, j, old.DebugString(), t.DebugString())
+ return
+ }
+ if i != j {
+ te.Errorf("Wrong tree size %v, expected %v for old %v", i, j, old.DebugString())
+ }
+ }
+ s, i := t.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem at %v", s)
+ return
+ }
+ if i != k {
+ te.Errorf("Wrong tree size %v, expected %v for %v", i, k, t.DebugString())
+ return
+ }
+ if t.Size() != k {
+ te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), k, t.DebugString())
+ return
+ }
+ }
+ return
+}
+
+func applicInsert(te *testing.T, x []int32) {
+ makeTree(te, x, true)
+}
+
+func applicFind(te *testing.T, x []int32) {
+ t, _, _, _ := makeTree(te, x, false)
+
+ for _, d := range x {
+ d = d + d // double everything for Glb/Lub testing.
+ s := fmt.Sprintf("%v", d)
+ f := t.Find(d)
+
+ // data
+ if s != fmt.Sprint(f) {
+ te.Errorf("s(%v) != f(%v)", s, f)
+ }
+ }
+}
+
+func applicBounds(te *testing.T, x []int32) {
+ t, _, min, max := makeTree(te, x, false)
+ for _, d := range x {
+ d = d + d // double everything for Glb/Lub testing.
+ s := fmt.Sprintf("%v", d)
+
+ kg, g := t.Glb(d + 1)
+ kge, ge := t.GlbEq(d)
+ kl, l := t.Lub(d - 1)
+ kle, le := t.LubEq(d)
+
+ // keys
+ if d != kg {
+ te.Errorf("d(%v) != kg(%v)", d, kg)
+ }
+ if d != kl {
+ te.Errorf("d(%v) != kl(%v)", d, kl)
+ }
+ if d != kge {
+ te.Errorf("d(%v) != kge(%v)", d, kge)
+ }
+ if d != kle {
+ te.Errorf("d(%v) != kle(%v)", d, kle)
+ }
+ // data
+ if s != fmt.Sprint(g) {
+ te.Errorf("s(%v) != g(%v)", s, g)
+ }
+ if s != fmt.Sprint(l) {
+ te.Errorf("s(%v) != l(%v)", s, l)
+ }
+ if s != fmt.Sprint(ge) {
+ te.Errorf("s(%v) != ge(%v)", s, ge)
+ }
+ if s != fmt.Sprint(le) {
+ te.Errorf("s(%v) != le(%v)", s, le)
+ }
+ }
+
+ for _, d := range x {
+ d = d + d // double everything for Glb/Lub testing.
+ s := fmt.Sprintf("%v", d)
+ kge, ge := t.GlbEq(d + 1)
+ kle, le := t.LubEq(d - 1)
+ if d != kge {
+ te.Errorf("d(%v) != kge(%v)", d, kge)
+ }
+ if d != kle {
+ te.Errorf("d(%v) != kle(%v)", d, kle)
+ }
+ if s != fmt.Sprint(ge) {
+ te.Errorf("s(%v) != ge(%v)", s, ge)
+ }
+ if s != fmt.Sprint(le) {
+ te.Errorf("s(%v) != le(%v)", s, le)
+ }
+ }
+
+ kg, g := t.Glb(min)
+ kge, ge := t.GlbEq(min - 1)
+ kl, l := t.Lub(max)
+ kle, le := t.LubEq(max + 1)
+ fmin := t.Find(min - 1)
+ fmax := t.Find(max + 1)
+
+ if kg != NOT_KEY32 || kge != NOT_KEY32 || kl != NOT_KEY32 || kle != NOT_KEY32 {
+ te.Errorf("Got non-error-key for missing query")
+ }
+
+ if g != nil || ge != nil || l != nil || le != nil || fmin != nil || fmax != nil {
+ te.Errorf("Got non-error-data for missing query")
+ }
+}
+
+func applicDeleteMin(te *testing.T, x []int32) {
+ t, _, _, _ := makeTree(te, x, false)
+ _, size := t.wellFormed()
+ history := []*T{}
+ for !t.IsEmpty() {
+ k, _ := t.Min()
+ history = append(history, t.Copy())
+ kd, _ := t.DeleteMin()
+ if kd != k {
+ te.Errorf("Deleted minimum key %v not equal to minimum %v", kd, k)
+ }
+ for j, old := range history {
+ s, i := old.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem %s at old after DeleteMin, old=\n%stree=\n%v", s, old.DebugString(), t.DebugString())
+ return
+ }
+ if i != len(x)-j {
+ te.Errorf("Wrong old tree size %v, expected %v after DeleteMin, old=\n%vtree\n%v", i, len(x)-j, old.DebugString(), t.DebugString())
+ return
+ }
+ }
+ size--
+ s, i := t.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem at %v after DeleteMin, tree=\n%v", s, t.DebugString())
+ return
+ }
+ if i != size {
+ te.Errorf("Wrong tree size %v, expected %v after DeleteMin", i, size)
+ return
+ }
+ if t.Size() != size {
+ te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), i, t.DebugString())
+ return
+ }
+ }
+}
+
+func applicDeleteMax(te *testing.T, x []int32) {
+ t, _, _, _ := makeTree(te, x, false)
+ _, size := t.wellFormed()
+ history := []*T{}
+
+ for !t.IsEmpty() {
+ k, _ := t.Max()
+ history = append(history, t.Copy())
+ kd, _ := t.DeleteMax()
+ if kd != k {
+ te.Errorf("Deleted maximum key %v not equal to maximum %v", kd, k)
+ }
+
+ for j, old := range history {
+ s, i := old.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem %s at old after DeleteMin, old=\n%stree=\n%v", s, old.DebugString(), t.DebugString())
+ return
+ }
+ if i != len(x)-j {
+ te.Errorf("Wrong old tree size %v, expected %v after DeleteMin, old=\n%vtree\n%v", i, len(x)-j, old.DebugString(), t.DebugString())
+ return
+ }
+ }
+
+ size--
+ s, i := t.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem at %v after DeleteMax, tree=\n%v", s, t.DebugString())
+ return
+ }
+ if i != size {
+ te.Errorf("Wrong tree size %v, expected %v after DeleteMax", i, size)
+ return
+ }
+ if t.Size() != size {
+ te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), i, t.DebugString())
+ return
+ }
+ }
+}
+
+func applicDelete(te *testing.T, x []int32) {
+ t, _, _, _ := makeTree(te, x, false)
+ _, size := t.wellFormed()
+ history := []*T{}
+
+ missing := t.Delete(11)
+ if missing != nil {
+ te.Errorf("Returned a value when there should have been none, %v", missing)
+ return
+ }
+
+ s, i := t.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem at %v after delete of missing value, tree=\n%v", s, t.DebugString())
+ return
+ }
+ if size != i {
+ te.Errorf("Delete of missing data should not change tree size, expected %d, got %d", size, i)
+ return
+ }
+
+ for _, d := range x {
+ d += d // double
+ vWant := fmt.Sprintf("%v", d)
+ history = append(history, t.Copy())
+ v := t.Delete(d)
+
+ for j, old := range history {
+ s, i := old.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem %s at old after DeleteMin, old=\n%stree=\n%v", s, old.DebugString(), t.DebugString())
+ return
+ }
+ if i != len(x)-j {
+ te.Errorf("Wrong old tree size %v, expected %v after DeleteMin, old=\n%vtree\n%v", i, len(x)-j, old.DebugString(), t.DebugString())
+ return
+ }
+ }
+
+ if v.(*sstring).s != vWant {
+ te.Errorf("Deleted %v expected %v but got %v", d, vWant, v)
+ return
+ }
+ size--
+ s, i := t.wellFormed()
+ if s != "" {
+ te.Errorf("Tree consistency problem at %v after Delete %d, tree=\n%v", s, d, t.DebugString())
+ return
+ }
+ if i != size {
+ te.Errorf("Wrong tree size %v, expected %v after Delete", i, size)
+ return
+ }
+ if t.Size() != size {
+ te.Errorf("Wrong t.Size() %v, expected %v for %v", t.Size(), i, t.DebugString())
+ return
+ }
+ }
+
+}
+
+func applicIterator(te *testing.T, x []int32) {
+ t, _, _, _ := makeTree(te, x, false)
+ it := t.Iterator()
+ for !it.Done() {
+ k0, d0 := it.Next()
+ k1, d1 := t.DeleteMin()
+ if k0 != k1 || d0 != d1 {
+ te.Errorf("Iterator and deleteMin mismatch, k0, k1, d0, d1 = %v, %v, %v, %v", k0, k1, d0, d1)
+ return
+ }
+ }
+ if t.Size() != 0 {
+ te.Errorf("Iterator ended early, remaining tree = \n%s", t.DebugString())
+ return
+ }
+}
+
+func equiv(a, b interface{}) bool {
+ sa, sb := a.(*sstring), b.(*sstring)
+ return *sa == *sb
+}
+
+func applicEquals(te *testing.T, x, y []int32) {
+ t, _, _, _ := makeTree(te, x, false)
+ u, _, _, _ := makeTree(te, y, false)
+ if !t.Equiv(t, equiv) {
+ te.Errorf("Equiv failure, t == t, =\n%v", t.DebugString())
+ return
+ }
+ if !t.Equiv(t.Copy(), equiv) {
+ te.Errorf("Equiv failure, t == t.Copy(), =\n%v", t.DebugString())
+ return
+ }
+ if !t.Equiv(u, equiv) {
+ te.Errorf("Equiv failure, t == u, =\n%v", t.DebugString())
+ return
+ }
+ v := t.Copy()
+
+ v.DeleteMax()
+ if t.Equiv(v, equiv) {
+ te.Errorf("!Equiv failure, t != v, =\n%v\nand%v\n", t.DebugString(), v.DebugString())
+ return
+ }
+
+ if v.Equiv(u, equiv) {
+ te.Errorf("!Equiv failure, v != u, =\n%v\nand%v\n", v.DebugString(), u.DebugString())
+ return
+ }
+
+}
+
+func tree(x []int32) *T {
+ t := &T{}
+ for _, d := range x {
+ t.Insert(d, stringer(fmt.Sprintf("%v", d)))
+ }
+ return t
+}
+
+func treePlus1(x []int32) *T {
+ t := &T{}
+ for _, d := range x {
+ t.Insert(d, stringer(fmt.Sprintf("%v", d+1)))
+ }
+ return t
+}
+func TestApplicInsert(t *testing.T) {
+ applicInsert(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicInsert(t, []int32{1, 2, 3, 4})
+ applicInsert(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicInsert(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicInsert(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicInsert(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicInsert(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicInsert(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+
+func TestApplicFind(t *testing.T) {
+ applicFind(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicFind(t, []int32{1, 2, 3, 4})
+ applicFind(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicFind(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicFind(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicFind(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicFind(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicFind(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+
+func TestBounds(t *testing.T) {
+ applicBounds(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicBounds(t, []int32{1, 2, 3, 4})
+ applicBounds(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicBounds(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicBounds(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicBounds(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicBounds(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicBounds(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+func TestDeleteMin(t *testing.T) {
+ applicDeleteMin(t, []int32{1, 2, 3, 4})
+ applicDeleteMin(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicDeleteMin(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicDeleteMin(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicDeleteMin(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicDeleteMin(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicDeleteMin(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicDeleteMin(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+func TestDeleteMax(t *testing.T) {
+ applicDeleteMax(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicDeleteMax(t, []int32{1, 2, 3, 4})
+ applicDeleteMax(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicDeleteMax(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicDeleteMax(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicDeleteMax(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicDeleteMax(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicDeleteMax(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+func TestDelete(t *testing.T) {
+ applicDelete(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicDelete(t, []int32{1, 2, 3, 4})
+ applicDelete(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicDelete(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicDelete(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicDelete(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicDelete(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicDelete(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+func TestIterator(t *testing.T) {
+ applicIterator(t, []int32{1, 2, 3, 4})
+ applicIterator(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ applicIterator(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25})
+ applicIterator(t, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicIterator(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicIterator(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicIterator(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24})
+ applicIterator(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+func TestEquals(t *testing.T) {
+ applicEquals(t, []int32{1, 2, 3, 4}, []int32{4, 3, 2, 1})
+
+ applicEquals(t, []int32{24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25},
+ []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25})
+ applicEquals(t, []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
+ []int32{25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1})
+ applicEquals(t, []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24},
+ []int32{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2})
+}
+
+func first(x, y interface{}) interface{} {
+ return x
+}
+func second(x, y interface{}) interface{} {
+ return y
+}
+func alwaysNil(x, y interface{}) interface{} {
+ return nil
+}
+func smaller(x, y interface{}) interface{} {
+ xi, _ := strconv.Atoi(fmt.Sprint(x))
+ yi, _ := strconv.Atoi(fmt.Sprint(y))
+ if xi < yi {
+ return x
+ }
+ return y
+}
+func assert(t *testing.T, expected, got *T, what string) {
+ s, _ := got.wellFormed()
+ if s != "" {
+ t.Errorf("Tree consistency problem %v for 'got' in assert for %s, tree=\n%v", s, what, got.DebugString())
+ return
+ }
+
+ if !expected.Equiv(got, equiv) {
+ t.Errorf("%s fail, expected\n%vgot\n%v\n", what, expected.DebugString(), got.DebugString())
+ }
+}
+
+func TestSetOps(t *testing.T) {
+ A := tree([]int32{1, 2, 3, 4})
+ B := tree([]int32{3, 4, 5, 6, 7})
+
+ AIB := tree([]int32{3, 4})
+ ADB := tree([]int32{1, 2})
+ BDA := tree([]int32{5, 6, 7})
+ AUB := tree([]int32{1, 2, 3, 4, 5, 6, 7})
+ AXB := tree([]int32{1, 2, 5, 6, 7})
+
+ aib1 := A.Intersection(B, first)
+ assert(t, AIB, aib1, "aib1")
+ if A.Find(3) != aib1.Find(3) {
+ t.Errorf("Failed aliasing/reuse check, A/aib1")
+ }
+ aib2 := A.Intersection(B, second)
+ assert(t, AIB, aib2, "aib2")
+ if B.Find(3) != aib2.Find(3) {
+ t.Errorf("Failed aliasing/reuse check, B/aib2")
+ }
+ aib3 := B.Intersection(A, first)
+ assert(t, AIB, aib3, "aib3")
+ if A.Find(3) != aib3.Find(3) {
+ // A is smaller, intersection favors reuse from smaller when function is "first"
+ t.Errorf("Failed aliasing/reuse check, A/aib3")
+ }
+ aib4 := B.Intersection(A, second)
+ assert(t, AIB, aib4, "aib4")
+ if A.Find(3) != aib4.Find(3) {
+ t.Errorf("Failed aliasing/reuse check, A/aib4")
+ }
+
+ aub1 := A.Union(B, first)
+ assert(t, AUB, aub1, "aub1")
+ if B.Find(3) != aub1.Find(3) {
+ // B is larger, union favors reuse from larger when function is "first"
+ t.Errorf("Failed aliasing/reuse check, A/aub1")
+ }
+ aub2 := A.Union(B, second)
+ assert(t, AUB, aub2, "aub2")
+ if B.Find(3) != aub2.Find(3) {
+ t.Errorf("Failed aliasing/reuse check, B/aub2")
+ }
+ aub3 := B.Union(A, first)
+ assert(t, AUB, aub3, "aub3")
+ if B.Find(3) != aub3.Find(3) {
+ t.Errorf("Failed aliasing/reuse check, B/aub3")
+ }
+ aub4 := B.Union(A, second)
+ assert(t, AUB, aub4, "aub4")
+ if A.Find(3) != aub4.Find(3) {
+ t.Errorf("Failed aliasing/reuse check, A/aub4")
+ }
+
+ axb1 := A.Union(B, alwaysNil)
+ assert(t, AXB, axb1, "axb1")
+ axb2 := B.Union(A, alwaysNil)
+ assert(t, AXB, axb2, "axb2")
+
+ adb := A.Difference(B, alwaysNil)
+ assert(t, ADB, adb, "adb")
+ bda := B.Difference(A, nil)
+ assert(t, BDA, bda, "bda")
+
+ Ap1 := treePlus1([]int32{1, 2, 3, 4})
+
+ ada1_1 := A.Difference(Ap1, smaller)
+ assert(t, A, ada1_1, "ada1_1")
+ ada1_2 := Ap1.Difference(A, smaller)
+ assert(t, A, ada1_2, "ada1_2")
+
+}
+
+type sstring struct {
+ s string
+}
+
+func (s *sstring) String() string {
+ return s.s
+}
+
+func stringer(s string) interface{} {
+ return &sstring{s}
+}
+
+// wellFormed ensures that a red-black tree meets
+// all of its invariants and returns a string identifying
+// the first problem encountered. If there is no problem
+// then the returned string is empty. The size is also
+// returned to allow comparison of calculated tree size
+// with expected.
+func (t *T) wellFormed() (s string, i int) {
+ if t.root == nil {
+ s = ""
+ i = 0
+ return
+ }
+ return t.root.wellFormedSubtree(nil, -0x80000000, 0x7fffffff)
+}
+
+// wellFormedSubtree ensures that a red-black subtree meets
+// all of its invariants and returns a string identifying
+// the first problem encountered. If there is no problem
+// then the returned string is empty. The size is also
+// returned to allow comparison of calculated tree size
+// with expected.
+func (t *node32) wellFormedSubtree(parent *node32, keyMin, keyMax int32) (s string, i int) {
+ i = -1 // initialize to a failing value
+ s = "" // s is the reason for failure; empty means okay.
+
+ if keyMin >= t.key {
+ s = " min >= t.key"
+ return
+ }
+
+ if keyMax <= t.key {
+ s = " max <= t.key"
+ return
+ }
+
+ l := t.left
+ r := t.right
+
+ lh := l.height()
+ rh := r.height()
+ mh := max(lh, rh)
+ th := t.height()
+ dh := lh - rh
+ if dh < 0 {
+ dh = -dh
+ }
+ if dh > 1 {
+ s = fmt.Sprintf(" dh > 1, t=%d", t.key)
+ return
+ }
+
+ if l == nil && r == nil {
+ if th != LEAF_HEIGHT {
+ s = " leaf height wrong"
+ return
+ }
+ }
+
+ if th != mh+1 {
+ s = " th != mh + 1"
+ return
+ }
+
+ if l != nil {
+ if th <= lh {
+ s = " t.height <= l.height"
+ } else if th > 2+lh {
+ s = " t.height > 2+l.height"
+ } else if t.key <= l.key {
+ s = " t.key <= l.key"
+ }
+ if s != "" {
+ return
+ }
+
+ }
+
+ if r != nil {
+ if th <= rh {
+ s = " t.height <= r.height"
+ } else if th > 2+rh {
+ s = " t.height > 2+r.height"
+ } else if t.key >= r.key {
+ s = " t.key >= r.key"
+ }
+ if s != "" {
+ return
+ }
+ }
+
+ ii := 1
+ if l != nil {
+ res, il := l.wellFormedSubtree(t, keyMin, t.key)
+ if res != "" {
+ s = ".L" + res
+ return
+ }
+ ii += il
+ }
+ if r != nil {
+ res, ir := r.wellFormedSubtree(t, t.key, keyMax)
+ if res != "" {
+ s = ".R" + res
+ return
+ }
+ ii += ir
+ }
+ i = ii
+ return
+}
+
+func (t *T) DebugString() string {
+ if t.root == nil {
+ return ""
+ }
+ return t.root.DebugString(0)
+}
+
+// DebugString prints the tree with nested information
+// to allow an eyeball check on the tree balance.
+func (t *node32) DebugString(indent int) string {
+ s := ""
+ if t.left != nil {
+ s = s + t.left.DebugString(indent+1)
+ }
+ for i := 0; i < indent; i++ {
+ s = s + " "
+ }
+ s = s + fmt.Sprintf("%v=%v:%d\n", t.key, t.data, t.height_)
+ if t.right != nil {
+ s = s + t.right.DebugString(indent+1)
+ }
+ return s
+}
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
new file mode 100644
index 0000000..ca44263
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -0,0 +1,27 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/x86"
+)
+
+var leaptr = x86.ALEAQ
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &x86.Linkamd64
+ arch.REGSP = x86.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
new file mode 100644
index 0000000..db98a22
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -0,0 +1,135 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "internal/buildcfg"
+)
+
+// no floating point in note handlers on Plan 9
+var isPlan9 = buildcfg.GOOS == "plan9"
+
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
+// See runtime/mkduff.go.
+const (
+ dzBlocks = 16 // number of MOV/ADD blocks
+ dzBlockLen = 4 // number of clears per block
+ dzBlockSize = 23 // size of instructions in a single block
+ dzMovSize = 5 // size of single MOV instruction w/ offset
+ dzLeaqSize = 4 // size of single LEAQ instruction
+ dzClearStep = 16 // number of bytes cleared by each MOV instruction
+
+ dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
+ dzSize = dzBlocks * dzBlockSize
+)
+
+// dzOff returns the offset for a jump into DUFFZERO.
+// b is the number of bytes to zero.
+func dzOff(b int64) int64 {
+ off := int64(dzSize)
+ off -= b / dzClearLen * dzBlockSize
+ tailLen := b % dzClearLen
+ if tailLen >= dzClearStep {
+ off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep)
+ }
+ return off
+}
+
+// duffzeroDI returns the pre-adjustment to DI for a call to DUFFZERO.
+// b is the number of bytes to zero.
+func dzDI(b int64) int64 {
+ tailLen := b % dzClearLen
+ if tailLen < dzClearStep {
+ return 0
+ }
+ tailSteps := tailLen / dzClearStep
+ return -dzClearStep * (dzBlockLen - tailSteps)
+}
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+ const (
+ r13 = 1 << iota // if R13 is already zeroed.
+ )
+
+ if cnt == 0 {
+ return p
+ }
+
+ if cnt == 8 {
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ } else if !isPlan9 && cnt <= int64(8*types.RegSize) {
+ for i := int64(0); i < cnt/16; i++ {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+ }
+
+ if cnt%16 != 0 {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+ }
+ } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
+ // Save DI to r12. With the amd64 Go register abi, DI can contain
+ // an incoming parameter, whereas R12 is always scratch.
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
+ // Emit duffzero call
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+ p.To.Sym = ir.Syms.Duffzero
+ if cnt%16 != 0 {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+ }
+ // Restore DI from r12
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0)
+
+ } else {
+ // When the register ABI is in effect, at this point in the
+ // prolog we may have live values in all of RAX,RDI,RCX. Save
+ // them off to registers before the REPSTOSQ below, then
+ // restore. Note that R12 and R13 are always available as
+ // scratch regs; here we also use R15 (this is safe to do
+ // since there won't be any globals accessed in the prolog).
+ // See rewriteToUseGot() in obj6.go for more on r15 use.
+
+ // Save rax/rdi/rcx
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_R13, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_CX, 0, obj.TYPE_REG, x86.REG_R15, 0)
+
+ // Set up the REPSTOSQ and kick it off.
+ p = pp.Append(p, x86.AXORL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+
+ // Restore rax/rdi/rcx
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R15, 0, obj.TYPE_REG, x86.REG_CX, 0)
+
+ // Record the fact that r13 is no longer zero.
+ *state &= ^uint32(r13)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // This is a hardware nop (1-byte 0x90) instruction,
+ // even though we describe it as an explicit XCHGL here.
+ // Particularly, this does not zero the high 32 bits
+ // like typical *L opcodes.
+ // (gas assembles "xchg %eax,%eax" to 0x87 0xc0, which
+ // does zero the high 32 bits.)
+ p := pp.Prog(x86.AXCHGL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ return p
+}
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
new file mode 100644
index 0000000..ab762c2
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -0,0 +1,1444 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = ssa.AuxMark
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() {
+ switch t.Size() {
+ case 1:
+ return x86.AMOVBLZX
+ case 2:
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ case 16:
+ return x86.AMOVUPS
+ }
+ }
+ panic(fmt.Sprintf("bad store type %v", t))
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ // Moving the whole sse2 register is faster
+ // than moving just the correct low portion of it.
+ // There is no xmm->xmm move with 1 byte opcode,
+ // so use movups, which has 2 byte opcode.
+ return x86.AMOVUPS
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ case 16:
+ return x86.AMOVUPS // int128s are in SSE registers
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+//
+// dest := dest(To) op src(From)
+//
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+// memIdx fills out a as an indexed memory reference for v.
+// It assumes that the base register and the index register
+// are v.Args[0].Reg() and v.Args[1].Reg(), respectively.
+// The caller must still use gc.AddAux/gc.AddAux2 to handle v.Aux as necessary.
+func memIdx(a *obj.Addr, v *ssa.Value) {
+ r, i := v.Args[0].Reg(), v.Args[1].Reg()
+ a.Type = obj.TYPE_MEM
+ a.Scale = v.Op.Scale()
+ if a.Scale == 1 && i == x86.REG_SP {
+ r, i = i, r
+ }
+ a.Reg = r
+ a.Index = i
+}
+
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ,
+// See runtime/mkduff.go.
+func duffStart(size int64) int64 {
+ x, _ := duff(size)
+ return x
+}
+func duffAdj(size int64) int64 {
+ _, x := duff(size)
+ return x
+}
+
+// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
+// required to use the duffzero mechanism for a block of the given size.
+func duff(size int64) (int64, int64) {
+ if size < 32 || size > 1024 || size%dzClearStep != 0 {
+ panic("bad duffzero size")
+ }
+ steps := size / dzClearStep
+ blocks := steps / dzBlockLen
+ steps %= dzBlockLen
+ off := dzBlockSize * (dzBlocks - blocks)
+ var adj int64
+ if steps != 0 {
+ off -= dzLeaqSize
+ off -= dzMovSize * steps
+ adj -= dzClearStep * (dzBlockLen - steps)
+ }
+ return off, adj
+}
+
+func getgFromTLS(s *ssagen.State, r int16) {
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(base.Ctxt) {
+ // MOVQ (TLS), r
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVQ TLS, r
+ // MOVQ (r)(TLS*1), r
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := s.Prog(x86.AMOVQ)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpAMD64VFMADD231SD:
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.AddRestSourceReg(v.Args[1].Reg())
+ case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ switch {
+ case r == r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQ {
+ asm = x86.ALEAQ
+ } else {
+ asm = x86.ALEAL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ // 2-address opcode arithmetic
+ case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL,
+ ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
+ ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
+ ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
+ ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
+ ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
+ ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
+ ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB,
+ ssa.OpAMD64ROLQ, ssa.OpAMD64ROLL, ssa.OpAMD64ROLW, ssa.OpAMD64ROLB,
+ ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB,
+ ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
+ ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
+ ssa.OpAMD64MINSS, ssa.OpAMD64MINSD,
+ ssa.OpAMD64POR, ssa.OpAMD64PXOR,
+ ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
+ ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
+ ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+
+ case ssa.OpAMD64SHRDQ, ssa.OpAMD64SHLDQ:
+ p := s.Prog(v.Op.Asm())
+ lo, hi, bits := v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = bits
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = lo
+ p.AddRestSourceReg(hi)
+
+ case ssa.OpAMD64BLSIQ, ssa.OpAMD64BLSIL,
+ ssa.OpAMD64BLSMSKQ, ssa.OpAMD64BLSMSKL,
+ ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ switch v.Op {
+ case ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL:
+ p.To.Reg = v.Reg0()
+ default:
+ p.To.Reg = v.Reg()
+ }
+
+ case ssa.OpAMD64ANDNQ, ssa.OpAMD64ANDNL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.AddRestSourceReg(v.Args[1].Reg())
+
+ case ssa.OpAMD64SARXL, ssa.OpAMD64SARXQ,
+ ssa.OpAMD64SHLXL, ssa.OpAMD64SHLXQ,
+ ssa.OpAMD64SHRXL, ssa.OpAMD64SHRXQ:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ p.AddRestSourceReg(v.Args[0].Reg())
+
+ case ssa.OpAMD64SHLXLload, ssa.OpAMD64SHLXQload,
+ ssa.OpAMD64SHRXLload, ssa.OpAMD64SHRXQload,
+ ssa.OpAMD64SARXLload, ssa.OpAMD64SARXQload:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ m := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ ssagen.AddAux(&m, v)
+ p.AddRestSource(m)
+
+ case ssa.OpAMD64SHLXLloadidx1, ssa.OpAMD64SHLXLloadidx4, ssa.OpAMD64SHLXLloadidx8,
+ ssa.OpAMD64SHRXLloadidx1, ssa.OpAMD64SHRXLloadidx4, ssa.OpAMD64SHRXLloadidx8,
+ ssa.OpAMD64SARXLloadidx1, ssa.OpAMD64SARXLloadidx4, ssa.OpAMD64SARXLloadidx8,
+ ssa.OpAMD64SHLXQloadidx1, ssa.OpAMD64SHLXQloadidx8,
+ ssa.OpAMD64SHRXQloadidx1, ssa.OpAMD64SHRXQloadidx8,
+ ssa.OpAMD64SARXQloadidx1, ssa.OpAMD64SARXQloadidx8:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[2].Reg())
+ m := obj.Addr{Type: obj.TYPE_MEM}
+ memIdx(&m, v)
+ ssagen.AddAux(&m, v)
+ p.AddRestSource(m)
+
+ case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := v.Args[1].Reg()
+
+ // Zero extend dividend.
+ opregreg(s, x86.AXORL, x86.REG_DX, x86.REG_DX)
+
+ // Issue divide.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+
+ case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := v.Args[1].Reg()
+
+ var opCMP, opNEG, opSXD obj.As
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ opCMP, opNEG, opSXD = x86.ACMPQ, x86.ANEGQ, x86.ACQO
+ case ssa.OpAMD64DIVL:
+ opCMP, opNEG, opSXD = x86.ACMPL, x86.ANEGL, x86.ACDQ
+ case ssa.OpAMD64DIVW:
+ opCMP, opNEG, opSXD = x86.ACMPW, x86.ANEGW, x86.ACWD
+ }
+
+ // CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1. Handle divide by -1 as a special case.
+ var j1, j2 *obj.Prog
+ if ssa.DivisionNeedsFixUp(v) {
+ c := s.Prog(opCMP)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = r
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ // Divisor is not -1, proceed with normal division.
+ j1 = s.Prog(x86.AJNE)
+ j1.To.Type = obj.TYPE_BRANCH
+
+ // Divisor is -1, manually compute quotient and remainder via fixup code.
+ // n / -1 = -n
+ n1 := s.Prog(opNEG)
+ n1.To.Type = obj.TYPE_REG
+ n1.To.Reg = x86.REG_AX
+
+ // n % -1 == 0
+ opregreg(s, x86.AXORL, x86.REG_DX, x86.REG_DX)
+
+ // TODO(khr): issue only the -1 fixup code we need.
+ // For instance, if only the quotient is used, no point in zeroing the remainder.
+
+ // Skip over normal division.
+ j2 = s.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+ }
+
+ // Sign extend dividend and perform division.
+ p := s.Prog(opSXD)
+ if j1 != nil {
+ j1.To.SetTarget(p)
+ }
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+
+ if j2 != nil {
+ j2.To.SetTarget(s.Pc())
+ }
+
+ case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := s.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpAMD64MULQU2:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results hi in DX, lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpAMD64DIVQU2:
+ // Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow
+ // results q in AX, r in DX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+
+ case ssa.OpAMD64AVGQU:
+ // compute (x+y)/2 unsigned.
+ // Do a 64-bit add, the overflow goes into the carry.
+ // Shift right once and pull the carry back into the 63rd bit.
+ p := s.Prog(x86.AADDQ)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.From.Reg = v.Args[1].Reg()
+ p = s.Prog(x86.ARCRQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ:
+ r := v.Reg0()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ switch r {
+ case r0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ v.Fatalf("output not in same register as an input %s", v.LongString())
+ }
+
+ case ssa.OpAMD64SUBQborrow, ssa.OpAMD64SBBQ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64ADDQconstcarry, ssa.OpAMD64ADCQconst, ssa.OpAMD64SUBQconstborrow, ssa.OpAMD64SBBQconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
+ r := v.Reg()
+ a := v.Args[0].Reg()
+ if r == a {
+ switch v.AuxInt {
+ case 1:
+ var asm obj.As
+ // Software optimization manual recommends add $1,reg.
+ // But inc/dec is 1 byte smaller. ICC always uses inc
+ // Clang/GCC choose depending on flags, but prefer add.
+ // Experiments show that inc/dec is both a little faster
+ // and make a binary a little smaller.
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.AINCL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ case -1:
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ADECQ
+ } else {
+ asm = x86.ADECL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ case 0x80:
+ // 'SUBQ $-0x80, r' is shorter to encode than
+ // and functionally equivalent to 'ADDQ $0x80, r'.
+ asm := x86.ASUBL
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ASUBQ
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -0x80
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ALEAQ
+ } else {
+ asm = x86.ALEAL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ, ssa.OpAMD64CMOVWEQ,
+ ssa.OpAMD64CMOVQLT, ssa.OpAMD64CMOVLLT, ssa.OpAMD64CMOVWLT,
+ ssa.OpAMD64CMOVQNE, ssa.OpAMD64CMOVLNE, ssa.OpAMD64CMOVWNE,
+ ssa.OpAMD64CMOVQGT, ssa.OpAMD64CMOVLGT, ssa.OpAMD64CMOVWGT,
+ ssa.OpAMD64CMOVQLE, ssa.OpAMD64CMOVLLE, ssa.OpAMD64CMOVWLE,
+ ssa.OpAMD64CMOVQGE, ssa.OpAMD64CMOVLGE, ssa.OpAMD64CMOVWGE,
+ ssa.OpAMD64CMOVQHI, ssa.OpAMD64CMOVLHI, ssa.OpAMD64CMOVWHI,
+ ssa.OpAMD64CMOVQLS, ssa.OpAMD64CMOVLLS, ssa.OpAMD64CMOVWLS,
+ ssa.OpAMD64CMOVQCC, ssa.OpAMD64CMOVLCC, ssa.OpAMD64CMOVWCC,
+ ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS,
+ ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF,
+ ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF:
+ // Flag condition: ^ZERO || PARITY
+ // Generate:
+ // CMOV*NE SRC,DST
+ // CMOV*PS SRC,DST
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQNEF {
+ q = s.Prog(x86.ACMOVQPS)
+ } else if v.Op == ssa.OpAMD64CMOVLNEF {
+ q = s.Prog(x86.ACMOVLPS)
+ } else {
+ q = s.Prog(x86.ACMOVWPS)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = v.Args[1].Reg()
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = v.Reg()
+
+ case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF:
+ // Flag condition: ZERO && !PARITY
+ // Generate:
+ // MOV SRC,TMP
+ // CMOV*NE DST,TMP
+ // CMOV*PC TMP,DST
+ //
+ // TODO(rasky): we could generate:
+ // CMOV*NE DST,SRC
+ // CMOV*PC SRC,DST
+ // But this requires a way for regalloc to know that SRC might be
+ // clobbered by this instruction.
+ t := v.RegTmp()
+ opregreg(s, moveByType(v.Type), t, v.Args[1].Reg())
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = t
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQEQF {
+ q = s.Prog(x86.ACMOVQPC)
+ } else if v.Op == ssa.OpAMD64CMOVLEQF {
+ q = s.Prog(x86.ACMOVLPC)
+ } else {
+ q = s.Prog(x86.ACMOVWPC)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = t
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = v.Reg()
+
+ case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.AddRestSourceReg(v.Args[0].Reg())
+
+ case ssa.OpAMD64ANDQconst:
+ asm := v.Op.Asm()
+ // If the constant is positive and fits into 32 bits, use ANDL.
+ // This saves a few bytes of encoding.
+ if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
+ asm = x86.AANDL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
+ ssa.OpAMD64ANDLconst,
+ ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
+ ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
+ ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
+ ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
+ ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
+ ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8,
+ ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8,
+ ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ o := v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = o
+ if v.AuxInt != 0 && v.Aux == nil {
+ // Emit an additional LEA to add the displacement instead of creating a slow 3 operand LEA.
+ switch v.Op {
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
+ p = s.Prog(x86.ALEAQ)
+ case ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8:
+ p = s.Prog(x86.ALEAL)
+ case ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
+ p = s.Prog(x86.ALEAW)
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = o
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = o
+ }
+ ssagen.AddAux(&p.From, v)
+ case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
+ ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
+ ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
+ // Go assembler has swapped operands for UCOMISx relative to CMP,
+ // must account for that right here.
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
+ ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
+ ssa.OpAMD64BTSQconst,
+ ssa.OpAMD64BTCQconst,
+ ssa.OpAMD64BTRQconst:
+ op := v.Op
+ if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
+ // Emit 32-bit version because it's shorter
+ op = ssa.OpAMD64BTLconst
+ }
+ p := s.Prog(op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+ case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.From, v, sc.Off64())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val64()
+ case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[2].Reg()
+ case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ ssagen.AddAux2(&p.From, v, sc.Off64())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val64()
+ case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
+ x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ opregreg(s, x86.AXORL, x, x)
+ break
+ }
+
+ asm := v.Op.Asm()
+ // Use MOVL to move a small constant into a register
+ // when the constant is positive and fits into 32 bits.
+ if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
+ // The upper 32bit are zeroed automatically when using MOVL.
+ asm = x86.AMOVL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload,
+ ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
+ ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
+ ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2,
+ ssa.OpAMD64MOVBELloadidx1, ssa.OpAMD64MOVBELloadidx4, ssa.OpAMD64MOVBELloadidx8, ssa.OpAMD64MOVBEQloadidx1, ssa.OpAMD64MOVBEQloadidx8:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
+ ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
+ ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify,
+ ssa.OpAMD64MOVBEQstore, ssa.OpAMD64MOVBELstore, ssa.OpAMD64MOVBEWstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
+ ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
+ ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
+ ssa.OpAMD64SUBLmodifyidx1, ssa.OpAMD64SUBLmodifyidx4, ssa.OpAMD64SUBLmodifyidx8, ssa.OpAMD64SUBQmodifyidx1, ssa.OpAMD64SUBQmodifyidx8,
+ ssa.OpAMD64ANDLmodifyidx1, ssa.OpAMD64ANDLmodifyidx4, ssa.OpAMD64ANDLmodifyidx8, ssa.OpAMD64ANDQmodifyidx1, ssa.OpAMD64ANDQmodifyidx8,
+ ssa.OpAMD64ORLmodifyidx1, ssa.OpAMD64ORLmodifyidx4, ssa.OpAMD64ORLmodifyidx8, ssa.OpAMD64ORQmodifyidx1, ssa.OpAMD64ORQmodifyidx8,
+ ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8,
+ ssa.OpAMD64MOVBEWstoreidx1, ssa.OpAMD64MOVBEWstoreidx2, ssa.OpAMD64MOVBELstoreidx1, ssa.OpAMD64MOVBELstoreidx4, ssa.OpAMD64MOVBELstoreidx8, ssa.OpAMD64MOVBEQstoreidx1, ssa.OpAMD64MOVBEQstoreidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ memIdx(&p.To, v)
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off64()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconstmodify {
+ if val == 1 {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.ADECQ
+ }
+ } else {
+ if val == 1 {
+ asm = x86.AINCL
+ } else {
+ asm = x86.ADECL
+ }
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
+ ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify,
+ ssa.OpAMD64BTSQconstmodify, ssa.OpAMD64BTRQconstmodify, ssa.OpAMD64BTCQconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off64()
+ val := sc.Val64()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+
+ case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpAMD64MOVOstoreconst:
+ sc := v.AuxValAndOff()
+ if sc.Val() != 0 {
+ v.Fatalf("MOVO for non zero constants not implemented: %s", v.LongString())
+ }
+
+ if s.ABI != obj.ABIInternal {
+ // zero X15 manually
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_X15
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+
+ case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
+ ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
+ ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
+ ssa.OpAMD64ORLconstmodifyidx1, ssa.OpAMD64ORLconstmodifyidx4, ssa.OpAMD64ORLconstmodifyidx8, ssa.OpAMD64ORQconstmodifyidx1, ssa.OpAMD64ORQconstmodifyidx8,
+ ssa.OpAMD64XORLconstmodifyidx1, ssa.OpAMD64XORLconstmodifyidx4, ssa.OpAMD64XORLconstmodifyidx8, ssa.OpAMD64XORQconstmodifyidx1, ssa.OpAMD64XORQconstmodifyidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ switch {
+ case p.As == x86.AADDQ && p.From.Offset == 1:
+ p.As = x86.AINCQ
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDQ && p.From.Offset == -1:
+ p.As = x86.ADECQ
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDL && p.From.Offset == 1:
+ p.As = x86.AINCL
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDL && p.From.Offset == -1:
+ p.As = x86.ADECL
+ p.From.Type = obj.TYPE_NONE
+ }
+ memIdx(&p.To, v)
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
+ ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
+ ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
+ r := v.Reg()
+ // Break false dependency on destination register.
+ opregreg(s, x86.AXORPS, r, r)
+ opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ var p *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i:
+ p = s.Prog(x86.AMOVQ)
+ case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ p = s.Prog(x86.AMOVL)
+ }
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64ADDQload, ssa.OpAMD64ADDLload, ssa.OpAMD64SUBQload, ssa.OpAMD64SUBLload,
+ ssa.OpAMD64ANDQload, ssa.OpAMD64ANDLload, ssa.OpAMD64ORQload, ssa.OpAMD64ORLload,
+ ssa.OpAMD64XORQload, ssa.OpAMD64XORLload, ssa.OpAMD64ADDSDload, ssa.OpAMD64ADDSSload,
+ ssa.OpAMD64SUBSDload, ssa.OpAMD64SUBSSload, ssa.OpAMD64MULSDload, ssa.OpAMD64MULSSload,
+ ssa.OpAMD64DIVSDload, ssa.OpAMD64DIVSSload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8,
+ ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8,
+ ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8,
+ ssa.OpAMD64ORLloadidx1, ssa.OpAMD64ORLloadidx4, ssa.OpAMD64ORLloadidx8, ssa.OpAMD64ORQloadidx1, ssa.OpAMD64ORQloadidx8,
+ ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8,
+ ssa.OpAMD64ADDSSloadidx1, ssa.OpAMD64ADDSSloadidx4, ssa.OpAMD64ADDSDloadidx1, ssa.OpAMD64ADDSDloadidx8,
+ ssa.OpAMD64SUBSSloadidx1, ssa.OpAMD64SUBSSloadidx4, ssa.OpAMD64SUBSDloadidx1, ssa.OpAMD64SUBSDloadidx8,
+ ssa.OpAMD64MULSSloadidx1, ssa.OpAMD64MULSSloadidx4, ssa.OpAMD64MULSDloadidx1, ssa.OpAMD64MULSDloadidx8,
+ ssa.OpAMD64DIVSSloadidx1, ssa.OpAMD64DIVSSloadidx4, ssa.OpAMD64DIVSDloadidx1, ssa.OpAMD64DIVSDloadidx8:
+ p := s.Prog(v.Op.Asm())
+
+ r, i := v.Args[1].Reg(), v.Args[2].Reg()
+ p.From.Type = obj.TYPE_MEM
+ p.From.Scale = v.Op.Scale()
+ if p.From.Scale == 1 && i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Reg = r
+ p.From.Index = i
+
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64DUFFZERO:
+ if s.ABI != obj.ABIInternal {
+ // zero X15 manually
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ off := duffStart(v.AuxInt)
+ adj := duffAdj(v.AuxInt)
+ var p *obj.Prog
+ if adj != 0 {
+ p = s.Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = adj
+ p.From.Reg = x86.REG_DI
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_DI
+ }
+ p = s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = off
+ case ssa.OpAMD64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffcopy
+ if v.AuxInt%16 != 0 {
+ v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
+ }
+ p.To.Offset = 14 * (64 - v.AuxInt/16)
+ // 14 and 64 are magic constants. 14 is the number of bytes to encode:
+ // MOVUPS (SI), X0
+ // ADDQ $16, SI
+ // MOVUPS X0, (DI)
+ // ADDQ $16, DI
+ // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
+
+ case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpAMD64LoweredHasCPUFeature:
+ p := s.Prog(x86.AMOVBLZX)
+ p.From.Type = obj.TYPE_MEM
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, ap := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack.
+ addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
+ case ssa.OpAMD64LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpAMD64LoweredGetG:
+ if s.ABI == obj.ABIInternal {
+ v.Fatalf("LoweredGetG should not appear in ABIInternal")
+ }
+ r := v.Reg()
+ getgFromTLS(s, r)
+ case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail:
+ if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+ // zeroing X15 when entering ABIInternal from ABI0
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ // set G register from TLS
+ getgFromTLS(s, x86.REG_R14)
+ }
+ if v.Op == ssa.OpAMD64CALLtail {
+ s.TailCall(v)
+ break
+ }
+ s.Call(v)
+ if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+ // zeroing X15 when entering ABIInternal from ABI0
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ // set G register from TLS
+ getgFromTLS(s, x86.REG_R14)
+ }
+ case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+ s.Call(v)
+
+ case ssa.OpAMD64LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -8 // PC is stored 8 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ mov := x86.AMOVQ
+ if types.PtrSize == 4 {
+ mov = x86.AMOVL
+ }
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize // 0 on amd64, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+
+ case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
+
+ case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
+ ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
+ ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64NEGLflags:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ switch v.Op {
+ case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ:
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
+ p.To.Reg = v.Reg()
+ }
+ case ssa.OpAMD64ROUNDSD:
+ p := s.Prog(v.Op.Asm())
+ val := v.AuxInt
+ // 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
+ if val < 0 || val > 3 {
+ v.Fatalf("Invalid rounding mode")
+ }
+ p.From.Offset = val
+ p.From.Type = obj.TYPE_CONST
+ p.AddRestSourceReg(v.Args[0].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL,
+ ssa.OpAMD64TZCNTQ, ssa.OpAMD64TZCNTL,
+ ssa.OpAMD64LZCNTQ, ssa.OpAMD64LZCNTL:
+ if v.Args[0].Reg() != v.Reg() {
+ // POPCNT/TZCNT/LZCNT have a false dependency on the destination register on Intel cpus.
+ // TZCNT/LZCNT problem affects pre-Skylake models. See discussion at https://gcc.gnu.org/bugzilla/show_bug.cgi?id=62011#c7.
+ // Xor register with itself to break the dependency.
+ opregreg(s, x86.AXORL, v.Reg(), v.Reg())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
+ ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
+ ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
+ ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
+ ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
+ ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
+ ssa.OpAMD64SETA, ssa.OpAMD64SETAE,
+ ssa.OpAMD64SETO:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SETEQstore, ssa.OpAMD64SETNEstore,
+ ssa.OpAMD64SETLstore, ssa.OpAMD64SETLEstore,
+ ssa.OpAMD64SETGstore, ssa.OpAMD64SETGEstore,
+ ssa.OpAMD64SETBstore, ssa.OpAMD64SETBEstore,
+ ssa.OpAMD64SETAstore, ssa.OpAMD64SETAEstore:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpAMD64SETEQstoreidx1, ssa.OpAMD64SETNEstoreidx1,
+ ssa.OpAMD64SETLstoreidx1, ssa.OpAMD64SETLEstoreidx1,
+ ssa.OpAMD64SETGstoreidx1, ssa.OpAMD64SETGEstoreidx1,
+ ssa.OpAMD64SETBstoreidx1, ssa.OpAMD64SETBEstoreidx1,
+ ssa.OpAMD64SETAstoreidx1, ssa.OpAMD64SETAEstoreidx1:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.To, v)
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpAMD64SETNEF:
+ t := v.RegTmp()
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = t
+ // ORL avoids partial register write and is smaller than ORQ, used by old compiler
+ opregreg(s, x86.AORL, v.Reg(), t)
+
+ case ssa.OpAMD64SETEQF:
+ t := v.RegTmp()
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = t
+ // ANDL avoids partial register write and is smaller than ANDQ, used by old compiler
+ opregreg(s, x86.AANDL, v.Reg(), t)
+
+ case ssa.OpAMD64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
+ v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64REPSTOSQ:
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSQ)
+ case ssa.OpAMD64REPMOVSQ:
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSQ)
+ case ssa.OpAMD64LoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // but it doesn't have false dependency on AX.
+ // Or maybe allocate an output register and use MOVL (reg),reg2 ?
+ // That trades clobbering flags for clobbering a register.
+ p := s.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg0()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg0()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
+ if v.Args[1].Reg() != x86.REG_AX {
+ v.Fatalf("input[1] not in AX %s", v.LongString())
+ }
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(x86.ASETEQ)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock:
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64PrefetchT0, ssa.OpAMD64PrefetchNTA:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpClobber:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ ssagen.AddAux(&p.To, v)
+ p.To.Offset += 4
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
+ ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
+ ssa.BlockAMD64LT: {x86.AJLT, x86.AJGE},
+ ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT},
+ ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT},
+ ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE},
+ ssa.BlockAMD64OS: {x86.AJOS, x86.AJOC},
+ ssa.BlockAMD64OC: {x86.AJOC, x86.AJOS},
+ ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
+ ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
+ ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
+ ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
+ ssa.BlockAMD64ORD: {x86.AJPC, x86.AJPS},
+ ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
+}
+
+var eqfJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in rax:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockAMD64EQF:
+ s.CombJump(b, next, &eqfJumps)
+
+ case ssa.BlockAMD64NEF:
+ s.CombJump(b, next, &nefJumps)
+
+ case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
+ ssa.BlockAMD64LT, ssa.BlockAMD64GE,
+ ssa.BlockAMD64LE, ssa.BlockAMD64GT,
+ ssa.BlockAMD64OS, ssa.BlockAMD64OC,
+ ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
+ ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ case ssa.BlockAMD64JUMPTABLE:
+ // JMP *(TABLE)(INDEX*8)
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = b.Controls[1].Reg()
+ p.To.Index = b.Controls[0].Reg()
+ p.To.Scale = 8
+ // Save jump tables for later resolution of the target blocks.
+ s.JumpTables = append(s.JumpTables, b)
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/amd64/versions_test.go b/src/cmd/compile/internal/amd64/versions_test.go
new file mode 100644
index 0000000..fc0046a
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/versions_test.go
@@ -0,0 +1,433 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// When using GOEXPERIMENT=boringcrypto, the test program links in the boringcrypto syso,
+// which does not respect GOAMD64, so we skip the test if boringcrypto is enabled.
+//go:build !boringcrypto
+
+package amd64_test
+
+import (
+ "bufio"
+ "debug/elf"
+ "debug/macho"
+ "errors"
+ "fmt"
+ "go/build"
+ "internal/testenv"
+ "io"
+ "math"
+ "math/bits"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// Test to make sure that when building for GOAMD64=v1, we don't
+// use any >v1 instructions.
+func TestGoAMD64v1(t *testing.T) {
+ if runtime.GOARCH != "amd64" {
+ t.Skip("amd64-only test")
+ }
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
+ t.Skip("test only works on elf or macho platforms")
+ }
+ for _, tag := range build.Default.ToolTags {
+ if tag == "amd64.v2" {
+ t.Skip("compiling for GOAMD64=v2 or higher")
+ }
+ }
+ if os.Getenv("TESTGOAMD64V1") != "" {
+ t.Skip("recursive call")
+ }
+
+ // Make a binary which will be a modified version of the
+ // currently running binary.
+ dst, err := os.CreateTemp("", "TestGoAMD64v1")
+ if err != nil {
+ t.Fatalf("failed to create temp file: %v", err)
+ }
+ defer os.Remove(dst.Name())
+ dst.Chmod(0500) // make executable
+
+ // Clobber all the non-v1 opcodes.
+ opcodes := map[string]bool{}
+ var features []string
+ for feature, opcodeList := range featureToOpcodes {
+ if runtimeFeatures[feature] {
+ features = append(features, fmt.Sprintf("cpu.%s=off", feature))
+ }
+ for _, op := range opcodeList {
+ opcodes[op] = true
+ }
+ }
+ clobber(t, os.Args[0], dst, opcodes)
+ if err = dst.Close(); err != nil {
+ t.Fatalf("can't close binary: %v", err)
+ }
+
+ // Run the resulting binary.
+ cmd := testenv.Command(t, dst.Name())
+ testenv.CleanCmdEnv(cmd)
+ cmd.Env = append(cmd.Env, "TESTGOAMD64V1=yes")
+ cmd.Env = append(cmd.Env, fmt.Sprintf("GODEBUG=%s", strings.Join(features, ",")))
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("couldn't execute test: %s", err)
+ }
+ // Expect to see output of the form "PASS\n", unless the test binary
+ // was compiled for coverage (in which case there will be an extra line).
+ success := false
+ lines := strings.Split(string(out), "\n")
+ if len(lines) == 2 {
+ success = lines[0] == "PASS" && lines[1] == ""
+ } else if len(lines) == 3 {
+ success = lines[0] == "PASS" &&
+ strings.HasPrefix(lines[1], "coverage") && lines[2] == ""
+ }
+ if !success {
+ t.Fatalf("test reported error: %s lines=%+v", string(out), lines)
+ }
+}
+
+// Clobber copies the binary src to dst, replacing all the instructions in opcodes with
+// faulting instructions.
+func clobber(t *testing.T, src string, dst *os.File, opcodes map[string]bool) {
+ // Run objdump to get disassembly.
+ var re *regexp.Regexp
+ var disasm io.Reader
+ if false {
+ // TODO: go tool objdump doesn't disassemble the bmi1 instructions
+ // in question correctly. See issue 48584.
+ cmd := testenv.Command(t, "go", "tool", "objdump", src)
+ var err error
+ disasm, err = cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+ t.Cleanup(func() {
+ if err := cmd.Wait(); err != nil {
+ t.Error(err)
+ }
+ })
+ re = regexp.MustCompile(`^[^:]*:[-\d]+\s+0x([\da-f]+)\s+([\da-f]+)\s+([A-Z]+)`)
+ } else {
+ // TODO: we're depending on platform-native objdump here. Hence the Skipf
+ // below if it doesn't run for some reason.
+ cmd := testenv.Command(t, "objdump", "-d", src)
+ var err error
+ disasm, err = cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cmd.Start(); err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ t.Skipf("can't run test due to missing objdump: %s", err)
+ }
+ t.Fatal(err)
+ }
+ t.Cleanup(func() {
+ if err := cmd.Wait(); err != nil {
+ t.Error(err)
+ }
+ })
+ re = regexp.MustCompile(`^\s*([\da-f]+):\s*((?:[\da-f][\da-f] )+)\s*([a-z\d]+)`)
+ }
+
+ // Find all the instruction addresses we need to edit.
+ virtualEdits := map[uint64]bool{}
+ scanner := bufio.NewScanner(disasm)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := re.FindStringSubmatch(line)
+ if len(parts) == 0 {
+ continue
+ }
+ addr, err := strconv.ParseUint(parts[1], 16, 64)
+ if err != nil {
+ continue // not a hex address
+ }
+ opcode := strings.ToLower(parts[3])
+ if !opcodes[opcode] {
+ continue
+ }
+ t.Logf("clobbering instruction %s", line)
+ n := (len(parts[2]) - strings.Count(parts[2], " ")) / 2 // number of bytes in instruction encoding
+ for i := 0; i < n; i++ {
+ // Only really need to make the first byte faulting, but might
+ // as well make all the bytes faulting.
+ virtualEdits[addr+uint64(i)] = true
+ }
+ }
+
+ // Figure out where in the binary the edits must be done.
+ physicalEdits := map[uint64]bool{}
+ if e, err := elf.Open(src); err == nil {
+ for _, sec := range e.Sections {
+ vaddr := sec.Addr
+ paddr := sec.Offset
+ size := sec.Size
+ for a := range virtualEdits {
+ if a >= vaddr && a < vaddr+size {
+ physicalEdits[paddr+(a-vaddr)] = true
+ }
+ }
+ }
+ } else if m, err2 := macho.Open(src); err2 == nil {
+ for _, sec := range m.Sections {
+ vaddr := sec.Addr
+ paddr := uint64(sec.Offset)
+ size := sec.Size
+ for a := range virtualEdits {
+ if a >= vaddr && a < vaddr+size {
+ physicalEdits[paddr+(a-vaddr)] = true
+ }
+ }
+ }
+ } else {
+ t.Log(err)
+ t.Log(err2)
+ t.Fatal("executable format not elf or macho")
+ }
+ if len(virtualEdits) != len(physicalEdits) {
+ t.Fatal("couldn't find an instruction in text sections")
+ }
+
+ // Copy source to destination, making edits along the way.
+ f, err := os.Open(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ r := bufio.NewReader(f)
+ w := bufio.NewWriter(dst)
+ a := uint64(0)
+ done := 0
+ for {
+ b, err := r.ReadByte()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal("can't read")
+ }
+ if physicalEdits[a] {
+ b = 0xcc // INT3 opcode
+ done++
+ }
+ err = w.WriteByte(b)
+ if err != nil {
+ t.Fatal("can't write")
+ }
+ a++
+ }
+ if done != len(physicalEdits) {
+ t.Fatal("physical edits remaining")
+ }
+ w.Flush()
+ f.Close()
+}
+
+func setOf(keys ...string) map[string]bool {
+ m := make(map[string]bool, len(keys))
+ for _, key := range keys {
+ m[key] = true
+ }
+ return m
+}
+
+var runtimeFeatures = setOf(
+ "adx", "aes", "avx", "avx2", "bmi1", "bmi2", "erms", "fma",
+ "pclmulqdq", "popcnt", "rdtscp", "sse3", "sse41", "sse42", "ssse3",
+)
+
+var featureToOpcodes = map[string][]string{
+ // Note: we include *q, *l, and plain opcodes here.
+ // go tool objdump doesn't include a [QL] on popcnt instructions, until CL 351889
+ // native objdump doesn't include [QL] on linux.
+ "popcnt": {"popcntq", "popcntl", "popcnt"},
+ "bmi1": {
+ "andnq", "andnl", "andn",
+ "blsiq", "blsil", "blsi",
+ "blsmskq", "blsmskl", "blsmsk",
+ "blsrq", "blsrl", "blsr",
+ "tzcntq", "tzcntl", "tzcnt",
+ },
+ "bmi2": {
+ "sarxq", "sarxl", "sarx",
+ "shlxq", "shlxl", "shlx",
+ "shrxq", "shrxl", "shrx",
+ },
+ "sse41": {
+ "roundsd",
+ "pinsrq", "pinsrl", "pinsrd", "pinsrb", "pinsr",
+ "pextrq", "pextrl", "pextrd", "pextrb", "pextr",
+ "pminsb", "pminsd", "pminuw", "pminud", // Note: ub and sw are ok.
+ "pmaxsb", "pmaxsd", "pmaxuw", "pmaxud",
+ "pmovzxbw", "pmovzxbd", "pmovzxbq", "pmovzxwd", "pmovzxwq", "pmovzxdq",
+ "pmovsxbw", "pmovsxbd", "pmovsxbq", "pmovsxwd", "pmovsxwq", "pmovsxdq",
+ "pblendvb",
+ },
+ "fma": {"vfmadd231sd"},
+ "movbe": {"movbeqq", "movbeq", "movbell", "movbel", "movbe"},
+ "lzcnt": {"lzcntq", "lzcntl", "lzcnt"},
+}
+
+// Test to use POPCNT instruction, if available
+func TestPopCnt(t *testing.T) {
+ for _, tt := range []struct {
+ x uint64
+ want int
+ }{
+ {0b00001111, 4},
+ {0b00001110, 3},
+ {0b00001100, 2},
+ {0b00000000, 0},
+ } {
+ if got := bits.OnesCount64(tt.x); got != tt.want {
+ t.Errorf("OnesCount64(%#x) = %d, want %d", tt.x, got, tt.want)
+ }
+ if got := bits.OnesCount32(uint32(tt.x)); got != tt.want {
+ t.Errorf("OnesCount32(%#x) = %d, want %d", tt.x, got, tt.want)
+ }
+ }
+}
+
+// Test to use ANDN, if available
+func TestAndNot(t *testing.T) {
+ for _, tt := range []struct {
+ x, y, want uint64
+ }{
+ {0b00001111, 0b00000011, 0b1100},
+ {0b00001111, 0b00001100, 0b0011},
+ {0b00000000, 0b00000000, 0b0000},
+ } {
+ if got := tt.x &^ tt.y; got != tt.want {
+ t.Errorf("%#x &^ %#x = %#x, want %#x", tt.x, tt.y, got, tt.want)
+ }
+ if got := uint32(tt.x) &^ uint32(tt.y); got != uint32(tt.want) {
+ t.Errorf("%#x &^ %#x = %#x, want %#x", tt.x, tt.y, got, tt.want)
+ }
+ }
+}
+
+// Test to use BLSI, if available
+func TestBLSI(t *testing.T) {
+ for _, tt := range []struct {
+ x, want uint64
+ }{
+ {0b00001111, 0b001},
+ {0b00001110, 0b010},
+ {0b00001100, 0b100},
+ {0b11000110, 0b010},
+ {0b00000000, 0b000},
+ } {
+ if got := tt.x & -tt.x; got != tt.want {
+ t.Errorf("%#x & (-%#x) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ if got := uint32(tt.x) & -uint32(tt.x); got != uint32(tt.want) {
+ t.Errorf("%#x & (-%#x) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ }
+}
+
+// Test to use BLSMSK, if available
+func TestBLSMSK(t *testing.T) {
+ for _, tt := range []struct {
+ x, want uint64
+ }{
+ {0b00001111, 0b001},
+ {0b00001110, 0b011},
+ {0b00001100, 0b111},
+ {0b11000110, 0b011},
+ {0b00000000, 1<<64 - 1},
+ } {
+ if got := tt.x ^ (tt.x - 1); got != tt.want {
+ t.Errorf("%#x ^ (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ if got := uint32(tt.x) ^ (uint32(tt.x) - 1); got != uint32(tt.want) {
+ t.Errorf("%#x ^ (%#x-1) = %#x, want %#x", tt.x, tt.x, got, uint32(tt.want))
+ }
+ }
+}
+
+// Test to use BLSR, if available
+func TestBLSR(t *testing.T) {
+ for _, tt := range []struct {
+ x, want uint64
+ }{
+ {0b00001111, 0b00001110},
+ {0b00001110, 0b00001100},
+ {0b00001100, 0b00001000},
+ {0b11000110, 0b11000100},
+ {0b00000000, 0b00000000},
+ } {
+ if got := tt.x & (tt.x - 1); got != tt.want {
+ t.Errorf("%#x & (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ if got := uint32(tt.x) & (uint32(tt.x) - 1); got != uint32(tt.want) {
+ t.Errorf("%#x & (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ }
+}
+
+func TestTrailingZeros(t *testing.T) {
+ for _, tt := range []struct {
+ x uint64
+ want int
+ }{
+ {0b00001111, 0},
+ {0b00001110, 1},
+ {0b00001100, 2},
+ {0b00001000, 3},
+ {0b00000000, 64},
+ } {
+ if got := bits.TrailingZeros64(tt.x); got != tt.want {
+ t.Errorf("TrailingZeros64(%#x) = %d, want %d", tt.x, got, tt.want)
+ }
+ want := tt.want
+ if want == 64 {
+ want = 32
+ }
+ if got := bits.TrailingZeros32(uint32(tt.x)); got != want {
+ t.Errorf("TrailingZeros64(%#x) = %d, want %d", tt.x, got, want)
+ }
+ }
+}
+
+func TestRound(t *testing.T) {
+ for _, tt := range []struct {
+ x, want float64
+ }{
+ {1.4, 1},
+ {1.5, 2},
+ {1.6, 2},
+ {2.4, 2},
+ {2.5, 2},
+ {2.6, 3},
+ } {
+ if got := math.RoundToEven(tt.x); got != tt.want {
+ t.Errorf("RoundToEven(%f) = %f, want %f", tt.x, got, tt.want)
+ }
+ }
+}
+
+func TestFMA(t *testing.T) {
+ for _, tt := range []struct {
+ x, y, z, want float64
+ }{
+ {2, 3, 4, 10},
+ {3, 4, 5, 17},
+ } {
+ if got := math.FMA(tt.x, tt.y, tt.z); got != tt.want {
+ t.Errorf("FMA(%f,%f,%f) = %f, want %f", tt.x, tt.y, tt.z, got, tt.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
new file mode 100644
index 0000000..43d8118
--- /dev/null
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/arm"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &arm.Linkarm
+ arch.REGSP = arm.REGSP
+ arch.MAXWIDTH = (1 << 32) - 1
+ arch.SoftFloat = buildcfg.GOARM.SoftFloat
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
new file mode 100644
index 0000000..f2c6763
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -0,0 +1,60 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if *r0 == 0 {
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ *r0 = 1
+ }
+
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
+ } else {
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+ p.Reg = arm.REG_R1
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p1 := p
+ p.Scond |= arm.C_PBIT
+ p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm.REG_R2
+ p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(arm.AAND)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0
+ p.Scond = arm.C_SCOND_EQ
+ return p
+}
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
new file mode 100644
index 0000000..638ed3e
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -0,0 +1,981 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "math"
+ "math/bits"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm.AMOVB
+ } else {
+ return arm.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm.AMOVH
+ } else {
+ return arm.AMOVHU
+ }
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm.AMOVB
+ case 2:
+ return arm.AMOVH
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
+type shift int64
+
+// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
+func (v shift) String() string {
+ op := "<<>>->@>"[((v>>5)&3)<<1:]
+ if v&(1<<4) != 0 {
+ // register shift
+ return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ // constant shift
+ return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+}
+
+// makeshift encodes a register shifted by a constant.
+func makeshift(v *ssa.Value, reg int16, typ int64, s int64) shift {
+ if s < 0 || s >= 32 {
+ v.Fatalf("shift out of range: %d", s)
+ }
+ return shift(int64(reg&0xf) | typ | (s&31)<<7)
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by n).
+func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeshift(v, r1, typ, n))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// makeregshift encodes a register shifted by a register.
+func makeregshift(r1 int16, typ int64, r2 int16) shift {
+ return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
+}
+
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2).
+func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeregshift(r1, typ, r2))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// find a (lsb, width) pair for BFC
+// lsb must be in [0, 31], width must be in [1, 32 - lsb]
+// return (0xffffffff, 0) if v is not a binary like 0...01...10...0
+func getBFC(v uint32) (uint32, uint32) {
+ var m, l uint32
+ // BFC is not applicable with zero
+ if v == 0 {
+ return 0xffffffff, 0
+ }
+ // find the lowest set bit, for example l=2 for 0x3ffffffc
+ l = uint32(bits.TrailingZeros32(v))
+ // m-1 represents the highest set bit index, for example m=30 for 0x3ffffffc
+ m = 32 - uint32(bits.LeadingZeros32(v))
+ // check if v is a binary like 0...01...10...0
+ if (1<<m)-(1<<l) == v {
+ // it must be m > l for non-zero v
+ return l, m - l
+ }
+ // invalid
+ return 0xffffffff, 0
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpARMMOVWreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := arm.AMOVW
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm.AMOVF
+ case 8:
+ as = arm.AMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARMMOVWnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpARMADD,
+ ssa.OpARMADC,
+ ssa.OpARMSUB,
+ ssa.OpARMSBC,
+ ssa.OpARMRSB,
+ ssa.OpARMAND,
+ ssa.OpARMOR,
+ ssa.OpARMXOR,
+ ssa.OpARMBIC,
+ ssa.OpARMMUL,
+ ssa.OpARMADDF,
+ ssa.OpARMADDD,
+ ssa.OpARMSUBF,
+ ssa.OpARMSUBD,
+ ssa.OpARMSLL,
+ ssa.OpARMSRL,
+ ssa.OpARMSRA,
+ ssa.OpARMMULF,
+ ssa.OpARMMULD,
+ ssa.OpARMNMULF,
+ ssa.OpARMNMULD,
+ ssa.OpARMDIVF,
+ ssa.OpARMDIVD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRR:
+ genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR)
+ case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD:
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ if r != r0 {
+ v.Fatalf("result and addend are not in the same register: %v", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMADDS,
+ ssa.OpARMSUBS:
+ r := v.Reg0()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRAcond:
+ // ARM shift instructions uses only the low-order byte of the shift amount
+ // generate conditional instructions to deal with large shifts
+ // flag is already set
+ // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
+ // SRA.LO Rarg1, Rarg0, Rdst
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 31
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p = s.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_LO
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMBFX, ssa.OpARMBFXU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.AddRestSourceConst(v.AuxInt & 0xff)
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMANDconst, ssa.OpARMBICconst:
+ // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
+ // BFC is only available on ARMv7, and its result and source are in the same register
+ if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() {
+ var val uint32
+ if v.Op == ssa.OpARMANDconst {
+ val = ^uint32(v.AuxInt)
+ } else { // BICconst
+ val = uint32(v.AuxInt)
+ }
+ lsb, width := getBFC(val)
+ // omit BFC for ARM's imm12
+ if 8 < width && width < 24 {
+ p := s.Prog(arm.ABFC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(width)
+ p.AddRestSourceConst(int64(lsb))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ break
+ }
+ }
+ // fall back to ordinary form
+ fallthrough
+ case ssa.OpARMADDconst,
+ ssa.OpARMADCconst,
+ ssa.OpARMSUBconst,
+ ssa.OpARMSBCconst,
+ ssa.OpARMRSBconst,
+ ssa.OpARMRSCconst,
+ ssa.OpARMORconst,
+ ssa.OpARMXORconst,
+ ssa.OpARMSLLconst,
+ ssa.OpARMSRLconst,
+ ssa.OpARMSRAconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMADDSconst,
+ ssa.OpARMSUBSconst,
+ ssa.OpARMRSBSconst:
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARMSRRconst:
+ genshift(s, v, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMADDshiftLL,
+ ssa.OpARMADCshiftLL,
+ ssa.OpARMSUBshiftLL,
+ ssa.OpARMSBCshiftLL,
+ ssa.OpARMRSBshiftLL,
+ ssa.OpARMRSCshiftLL,
+ ssa.OpARMANDshiftLL,
+ ssa.OpARMORshiftLL,
+ ssa.OpARMXORshiftLL,
+ ssa.OpARMBICshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMADDSshiftLL,
+ ssa.OpARMSUBSshiftLL,
+ ssa.OpARMRSBSshiftLL:
+ p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRL,
+ ssa.OpARMADCshiftRL,
+ ssa.OpARMSUBshiftRL,
+ ssa.OpARMSBCshiftRL,
+ ssa.OpARMRSBshiftRL,
+ ssa.OpARMRSCshiftRL,
+ ssa.OpARMANDshiftRL,
+ ssa.OpARMORshiftRL,
+ ssa.OpARMXORshiftRL,
+ ssa.OpARMBICshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMADDSshiftRL,
+ ssa.OpARMSUBSshiftRL,
+ ssa.OpARMRSBSshiftRL:
+ p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRA,
+ ssa.OpARMADCshiftRA,
+ ssa.OpARMSUBshiftRA,
+ ssa.OpARMSBCshiftRA,
+ ssa.OpARMRSBshiftRA,
+ ssa.OpARMRSCshiftRA,
+ ssa.OpARMANDshiftRA,
+ ssa.OpARMORshiftRA,
+ ssa.OpARMXORshiftRA,
+ ssa.OpARMBICshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMADDSshiftRA,
+ ssa.OpARMSUBSshiftRA,
+ ssa.OpARMRSBSshiftRA:
+ p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMXORshiftRR:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMMVNshiftLL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMMVNshiftRL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMMVNshiftRA:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMMVNshiftLLreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
+ case ssa.OpARMMVNshiftRLreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
+ case ssa.OpARMMVNshiftRAreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
+ case ssa.OpARMADDshiftLLreg,
+ ssa.OpARMADCshiftLLreg,
+ ssa.OpARMSUBshiftLLreg,
+ ssa.OpARMSBCshiftLLreg,
+ ssa.OpARMRSBshiftLLreg,
+ ssa.OpARMRSCshiftLLreg,
+ ssa.OpARMANDshiftLLreg,
+ ssa.OpARMORshiftLLreg,
+ ssa.OpARMXORshiftLLreg,
+ ssa.OpARMBICshiftLLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
+ case ssa.OpARMADDSshiftLLreg,
+ ssa.OpARMSUBSshiftLLreg,
+ ssa.OpARMRSBSshiftLLreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRLreg,
+ ssa.OpARMADCshiftRLreg,
+ ssa.OpARMSUBshiftRLreg,
+ ssa.OpARMSBCshiftRLreg,
+ ssa.OpARMRSBshiftRLreg,
+ ssa.OpARMRSCshiftRLreg,
+ ssa.OpARMANDshiftRLreg,
+ ssa.OpARMORshiftRLreg,
+ ssa.OpARMXORshiftRLreg,
+ ssa.OpARMBICshiftRLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
+ case ssa.OpARMADDSshiftRLreg,
+ ssa.OpARMSUBSshiftRLreg,
+ ssa.OpARMRSBSshiftRLreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRAreg,
+ ssa.OpARMADCshiftRAreg,
+ ssa.OpARMSUBshiftRAreg,
+ ssa.OpARMSBCshiftRAreg,
+ ssa.OpARMRSBshiftRAreg,
+ ssa.OpARMRSCshiftRAreg,
+ ssa.OpARMANDshiftRAreg,
+ ssa.OpARMORshiftRAreg,
+ ssa.OpARMXORshiftRAreg,
+ ssa.OpARMBICshiftRAreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
+ case ssa.OpARMADDSshiftRAreg,
+ ssa.OpARMSUBSshiftRAreg,
+ ssa.OpARMRSBSshiftRAreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMHMUL,
+ ssa.OpARMHMULU:
+ // 32-bit high multiplication
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg()
+ p.To.Offset = arm.REGTMP // throw away low 32-bit into tmp register
+ case ssa.OpARMMULLU:
+ // 32-bit multiplication, results 64-bit, high 32-bit in out0, low 32-bit in out1
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg0() // high 32-bit
+ p.To.Offset = int64(v.Reg1()) // low 32-bit
+ case ssa.OpARMMULA, ssa.OpARMMULS:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG2
+ p.To.Reg = v.Reg() // result
+ p.To.Offset = int64(v.Args[2].Reg()) // addend
+ case ssa.OpARMMOVWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVFconst,
+ ssa.OpARMMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMP,
+ ssa.OpARMCMN,
+ ssa.OpARMTST,
+ ssa.OpARMTEQ,
+ ssa.OpARMCMPF,
+ ssa.OpARMCMPD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ // Special layout in ARM assembly
+ // Comparing to x86, the operands of ARM's CMP are reversed.
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPconst,
+ ssa.OpARMCMNconst,
+ ssa.OpARMTSTconst,
+ ssa.OpARMTEQconst:
+ // Special layout in ARM assembly
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPF0,
+ ssa.OpARMCMPD0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
+ case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
+ case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
+ case ssa.OpARMMOVWaddr:
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R13)
+ // when constant is large, tmp register (R11) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+
+ case ssa.OpARMMOVBload,
+ ssa.OpARMMOVBUload,
+ ssa.OpARMMOVHload,
+ ssa.OpARMMOVHUload,
+ ssa.OpARMMOVWload,
+ ssa.OpARMMOVFload,
+ ssa.OpARMMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVBstore,
+ ssa.OpARMMOVHstore,
+ ssa.OpARMMOVWstore,
+ ssa.OpARMMOVFstore,
+ ssa.OpARMMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
+ // this is just shift 0 bits
+ fallthrough
+ case ssa.OpARMMOVWloadshiftLL:
+ p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWloadshiftRL:
+ p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWloadshiftRA:
+ p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
+ // this is just shift 0 bits
+ fallthrough
+ case ssa.OpARMMOVWstoreshiftLL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRA:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
+ case ssa.OpARMMOVBreg,
+ ssa.OpARMMOVBUreg,
+ ssa.OpARMMOVHreg,
+ ssa.OpARMMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ if buildcfg.GOARM.Version >= 6 {
+ // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
+ return
+ }
+ fallthrough
+ case ssa.OpARMMVN,
+ ssa.OpARMCLZ,
+ ssa.OpARMREV,
+ ssa.OpARMREV16,
+ ssa.OpARMRBIT,
+ ssa.OpARMSQRTF,
+ ssa.OpARMSQRTD,
+ ssa.OpARMNEGF,
+ ssa.OpARMNEGD,
+ ssa.OpARMABSD,
+ ssa.OpARMMOVWF,
+ ssa.OpARMMOVWD,
+ ssa.OpARMMOVFW,
+ ssa.OpARMMOVDW,
+ ssa.OpARMMOVFD,
+ ssa.OpARMMOVDF:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVWUF,
+ ssa.OpARMMOVWUD,
+ ssa.OpARMMOVFWU,
+ ssa.OpARMMOVDWU:
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_UBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMOVWHSconst:
+ p := s.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMOVWLSconst:
+ p := s.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_LS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
+ s.Call(v)
+ case ssa.OpARMCALLtail:
+ s.TailCall(v)
+ case ssa.OpARMCALLudiv:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Udiv
+ case ssa.OpARMLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+ case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+ case ssa.OpARMDUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMDUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMLoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(arm.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpARMLoweredZero:
+ // MOVW.P Rarg2, 4(R1)
+ // CMP Rarg1, R1
+ // BLE -2(PC)
+ // arg1 is the address of the last element to zero
+ // arg2 is known to be zero
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := s.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REG_R1
+ p.To.Offset = sz
+ p2 := s.Prog(arm.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = arm.REG_R1
+ p3 := s.Prog(arm.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARMLoweredMove:
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP Rarg2, R1
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := s.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm.REG_R1
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ p2 := s.Prog(mov)
+ p2.Scond = arm.C_PBIT
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm.REG_R2
+ p2.To.Offset = sz
+ p3 := s.Prog(arm.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.Reg = arm.REG_R1
+ p4 := s.Prog(arm.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ case ssa.OpARMEqual,
+ ssa.OpARMNotEqual,
+ ssa.OpARMLessThan,
+ ssa.OpARMLessEqual,
+ ssa.OpARMGreaterThan,
+ ssa.OpARMGreaterEqual,
+ ssa.OpARMLessThanU,
+ ssa.OpARMLessEqualU,
+ ssa.OpARMGreaterThanU,
+ ssa.OpARMGreaterEqualU:
+ // generate boolean values
+ // use conditional move
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p = s.Prog(arm.AMOVW)
+ p.Scond = condBits[v.Op]
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMLoweredGetClosurePtr:
+ // Closure pointer is R7 (arm.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARMLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMFlagConstant:
+ v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
+ case ssa.OpARMInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var condBits = map[ssa.Op]uint8{
+ ssa.OpARMEqual: arm.C_SCOND_EQ,
+ ssa.OpARMNotEqual: arm.C_SCOND_NE,
+ ssa.OpARMLessThan: arm.C_SCOND_LT,
+ ssa.OpARMLessThanU: arm.C_SCOND_LO,
+ ssa.OpARMLessEqual: arm.C_SCOND_LE,
+ ssa.OpARMLessEqualU: arm.C_SCOND_LS,
+ ssa.OpARMGreaterThan: arm.C_SCOND_GT,
+ ssa.OpARMGreaterThanU: arm.C_SCOND_HI,
+ ssa.OpARMGreaterEqual: arm.C_SCOND_GE,
+ ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE},
+ ssa.BlockARMNE: {arm.ABNE, arm.ABEQ},
+ ssa.BlockARMLT: {arm.ABLT, arm.ABGE},
+ ssa.BlockARMGE: {arm.ABGE, arm.ABLT},
+ ssa.BlockARMLE: {arm.ABLE, arm.ABGT},
+ ssa.BlockARMGT: {arm.ABGT, arm.ABLE},
+ ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
+ ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
+ ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
+ ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
+ ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL},
+ ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
+}
+
+// To model a 'LEnoov' ('<=' without overflow checking) branching.
+var leJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
+}
+
+// To model a 'GTnoov' ('>' without overflow checking) branching.
+var gtJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(arm.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm.REG_R0
+ p = s.Prog(arm.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit, ssa.BlockRetJmp:
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockARMEQ, ssa.BlockARMNE,
+ ssa.BlockARMLT, ssa.BlockARMGE,
+ ssa.BlockARMLE, ssa.BlockARMGT,
+ ssa.BlockARMULT, ssa.BlockARMUGT,
+ ssa.BlockARMULE, ssa.BlockARMUGE,
+ ssa.BlockARMLTnoov, ssa.BlockARMGEnoov:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ case ssa.BlockARMLEnoov:
+ s.CombJump(b, next, &leJumps)
+
+ case ssa.BlockARMGTnoov:
+ s.CombJump(b, next, &gtJumps)
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
new file mode 100644
index 0000000..3ebd860
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -0,0 +1,27 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/arm64"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &arm64.Linkarm64
+ arch.REGSP = arm64.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.PadFrame = padframe
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
new file mode 100644
index 0000000..a681adc
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -0,0 +1,73 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
+func padframe(frame int64) int64 {
+ // arm64 requires that the frame size (not counting saved FP&LR)
+ // be 16 bytes aligned. If not, pad it.
+ if frame%16 != 0 {
+ frame += 16 - (frame % 16)
+ }
+ return frame
+}
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ if cnt%(2*int64(types.PtrSize)) != 0 {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+ off += int64(types.PtrSize)
+ cnt -= int64(types.PtrSize)
+ }
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+ p.Reg = arm64.REG_R20
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
+ } else {
+ // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
+ // We are at the function entry, where no register is live, so it is okay to clobber
+ // other registers
+ const rtmp = arm64.REG_R20
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p.Reg = arm64.REGRT1
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+ p.Reg = arm64.REGRT1
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
+ p.Scond = arm64.C_XPRE
+ p1 := p
+ p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm64.REGRT2
+ p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(arm64.AHINT)
+ p.From.Type = obj.TYPE_CONST
+ return p
+}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
new file mode 100644
index 0000000..27b4e88
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -0,0 +1,1371 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm64.AMOVB
+ } else {
+ return arm64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm64.AMOVH
+ } else {
+ return arm64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return arm64.AMOVW
+ } else {
+ return arm64.AMOVWU
+ }
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm64.AMOVB
+ case 2:
+ return arm64.AMOVH
+ case 4:
+ return arm64.AMOVW
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// makeshift encodes a register shifted by a constant, used as an Offset in Prog.
+func makeshift(v *ssa.Value, reg int16, typ int64, s int64) int64 {
+ if s < 0 || s >= 64 {
+ v.Fatalf("shift out of range: %d", s)
+ }
+ return int64(reg&31)<<16 | typ | (s&63)<<10
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by n).
+func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = makeshift(v, r1, typ, n)
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// generate the memory operand for the indexed load/store instructions.
+// base and idx are registers.
+func genIndexedOperand(op ssa.Op, base, idx int16) obj.Addr {
+ // Reg: base register, Index: (shifted) index register
+ mop := obj.Addr{Type: obj.TYPE_MEM, Reg: base}
+ switch op {
+ case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8,
+ ssa.OpARM64FMOVDloadidx8, ssa.OpARM64FMOVDstoreidx8:
+ mop.Index = arm64.REG_LSL | 3<<5 | idx&31
+ case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4,
+ ssa.OpARM64FMOVSloadidx4, ssa.OpARM64FMOVSstoreidx4:
+ mop.Index = arm64.REG_LSL | 2<<5 | idx&31
+ case ssa.OpARM64MOVHloadidx2, ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVHstorezeroidx2:
+ mop.Index = arm64.REG_LSL | 1<<5 | idx&31
+ default: // not shifted
+ mop.Index = idx
+ }
+ return mop
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpARM64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := arm64.AMOVD
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm64.AFMOVS
+ case 8:
+ as = arm64.AFMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARM64MOVDnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.Arch.FixedFrameSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
+ case ssa.OpARM64ADD,
+ ssa.OpARM64SUB,
+ ssa.OpARM64AND,
+ ssa.OpARM64OR,
+ ssa.OpARM64XOR,
+ ssa.OpARM64BIC,
+ ssa.OpARM64EON,
+ ssa.OpARM64ORN,
+ ssa.OpARM64MUL,
+ ssa.OpARM64MULW,
+ ssa.OpARM64MNEG,
+ ssa.OpARM64MNEGW,
+ ssa.OpARM64MULH,
+ ssa.OpARM64UMULH,
+ ssa.OpARM64MULL,
+ ssa.OpARM64UMULL,
+ ssa.OpARM64DIV,
+ ssa.OpARM64UDIV,
+ ssa.OpARM64DIVW,
+ ssa.OpARM64UDIVW,
+ ssa.OpARM64MOD,
+ ssa.OpARM64UMOD,
+ ssa.OpARM64MODW,
+ ssa.OpARM64UMODW,
+ ssa.OpARM64SLL,
+ ssa.OpARM64SRL,
+ ssa.OpARM64SRA,
+ ssa.OpARM64FADDS,
+ ssa.OpARM64FADDD,
+ ssa.OpARM64FSUBS,
+ ssa.OpARM64FSUBD,
+ ssa.OpARM64FMULS,
+ ssa.OpARM64FMULD,
+ ssa.OpARM64FNMULS,
+ ssa.OpARM64FNMULD,
+ ssa.OpARM64FDIVS,
+ ssa.OpARM64FDIVD,
+ ssa.OpARM64FMINS,
+ ssa.OpARM64FMIND,
+ ssa.OpARM64FMAXS,
+ ssa.OpARM64FMAXD,
+ ssa.OpARM64ROR,
+ ssa.OpARM64RORW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64FMADDS,
+ ssa.OpARM64FMADDD,
+ ssa.OpARM64FNMADDS,
+ ssa.OpARM64FNMADDD,
+ ssa.OpARM64FMSUBS,
+ ssa.OpARM64FMSUBD,
+ ssa.OpARM64FNMSUBS,
+ ssa.OpARM64FNMSUBD,
+ ssa.OpARM64MADD,
+ ssa.OpARM64MADDW,
+ ssa.OpARM64MSUB,
+ ssa.OpARM64MSUBW:
+ rt := v.Reg()
+ ra := v.Args[0].Reg()
+ rm := v.Args[1].Reg()
+ rn := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.Reg = ra
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rm
+ p.AddRestSourceReg(rn)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rt
+ case ssa.OpARM64ADDconst,
+ ssa.OpARM64SUBconst,
+ ssa.OpARM64ANDconst,
+ ssa.OpARM64ORconst,
+ ssa.OpARM64XORconst,
+ ssa.OpARM64SLLconst,
+ ssa.OpARM64SRLconst,
+ ssa.OpARM64SRAconst,
+ ssa.OpARM64RORconst,
+ ssa.OpARM64RORWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64ADDSconstflags:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64ADCzerocarry:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64ADCSflags,
+ ssa.OpARM64ADDSflags,
+ ssa.OpARM64SBCSflags,
+ ssa.OpARM64SUBSflags:
+ r := v.Reg0()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64NEGSflags:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64NGCzerocarry:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64EXTRconst,
+ ssa.OpARM64EXTRWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.AddRestSourceReg(v.Args[0].Reg())
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64MVNshiftLL, ssa.OpARM64NEGshiftLL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64MVNshiftRL, ssa.OpARM64NEGshiftRL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRO:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
+ case ssa.OpARM64ADDshiftLL,
+ ssa.OpARM64SUBshiftLL,
+ ssa.OpARM64ANDshiftLL,
+ ssa.OpARM64ORshiftLL,
+ ssa.OpARM64XORshiftLL,
+ ssa.OpARM64EONshiftLL,
+ ssa.OpARM64ORNshiftLL,
+ ssa.OpARM64BICshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64ADDshiftRL,
+ ssa.OpARM64SUBshiftRL,
+ ssa.OpARM64ANDshiftRL,
+ ssa.OpARM64ORshiftRL,
+ ssa.OpARM64XORshiftRL,
+ ssa.OpARM64EONshiftRL,
+ ssa.OpARM64ORNshiftRL,
+ ssa.OpARM64BICshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64ADDshiftRA,
+ ssa.OpARM64SUBshiftRA,
+ ssa.OpARM64ANDshiftRA,
+ ssa.OpARM64ORshiftRA,
+ ssa.OpARM64XORshiftRA,
+ ssa.OpARM64EONshiftRA,
+ ssa.OpARM64ORNshiftRA,
+ ssa.OpARM64BICshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64ANDshiftRO,
+ ssa.OpARM64ORshiftRO,
+ ssa.OpARM64XORshiftRO,
+ ssa.OpARM64EONshiftRO,
+ ssa.OpARM64ORNshiftRO,
+ ssa.OpARM64BICshiftRO:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
+ case ssa.OpARM64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FMOVSconst,
+ ssa.OpARM64FMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FCMPS0,
+ ssa.OpARM64FCMPD0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(0)
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMP,
+ ssa.OpARM64CMPW,
+ ssa.OpARM64CMN,
+ ssa.OpARM64CMNW,
+ ssa.OpARM64TST,
+ ssa.OpARM64TSTW,
+ ssa.OpARM64FCMPS,
+ ssa.OpARM64FCMPD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMPconst,
+ ssa.OpARM64CMPWconst,
+ ssa.OpARM64CMNconst,
+ ssa.OpARM64CMNWconst,
+ ssa.OpARM64TSTconst,
+ ssa.OpARM64TSTWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMPshiftLL, ssa.OpARM64CMNshiftLL, ssa.OpARM64TSTshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64CMPshiftRL, ssa.OpARM64CMNshiftRL, ssa.OpARM64TSTshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64TSTshiftRO:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_ROR, v.AuxInt)
+ case ssa.OpARM64MOVDaddr:
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVD $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R13)
+ // when constant is large, tmp register (R11) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpARM64MOVBload,
+ ssa.OpARM64MOVBUload,
+ ssa.OpARM64MOVHload,
+ ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload,
+ ssa.OpARM64MOVWUload,
+ ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload,
+ ssa.OpARM64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LDP:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg0()
+ p.To.Offset = int64(v.Reg1())
+ case ssa.OpARM64MOVBloadidx,
+ ssa.OpARM64MOVBUloadidx,
+ ssa.OpARM64MOVHloadidx,
+ ssa.OpARM64MOVHUloadidx,
+ ssa.OpARM64MOVWloadidx,
+ ssa.OpARM64MOVWUloadidx,
+ ssa.OpARM64MOVDloadidx,
+ ssa.OpARM64FMOVSloadidx,
+ ssa.OpARM64FMOVDloadidx,
+ ssa.OpARM64MOVHloadidx2,
+ ssa.OpARM64MOVHUloadidx2,
+ ssa.OpARM64MOVWloadidx4,
+ ssa.OpARM64MOVWUloadidx4,
+ ssa.OpARM64MOVDloadidx8,
+ ssa.OpARM64FMOVDloadidx8,
+ ssa.OpARM64FMOVSloadidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From = genIndexedOperand(v.Op, v.Args[0].Reg(), v.Args[1].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LDAR,
+ ssa.OpARM64LDARB,
+ ssa.OpARM64LDARW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64MOVBstore,
+ ssa.OpARM64MOVHstore,
+ ssa.OpARM64MOVWstore,
+ ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore,
+ ssa.OpARM64FMOVDstore,
+ ssa.OpARM64STLRB,
+ ssa.OpARM64STLR,
+ ssa.OpARM64STLRW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstoreidx,
+ ssa.OpARM64MOVHstoreidx,
+ ssa.OpARM64MOVWstoreidx,
+ ssa.OpARM64MOVDstoreidx,
+ ssa.OpARM64FMOVSstoreidx,
+ ssa.OpARM64FMOVDstoreidx,
+ ssa.OpARM64MOVHstoreidx2,
+ ssa.OpARM64MOVWstoreidx4,
+ ssa.OpARM64FMOVSstoreidx4,
+ ssa.OpARM64MOVDstoreidx8,
+ ssa.OpARM64FMOVDstoreidx8:
+ p := s.Prog(v.Op.Asm())
+ p.To = genIndexedOperand(v.Op, v.Args[0].Reg(), v.Args[1].Reg())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ case ssa.OpARM64STP:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Offset = int64(v.Args[2].Reg())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezero,
+ ssa.OpARM64MOVHstorezero,
+ ssa.OpARM64MOVWstorezero,
+ ssa.OpARM64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezeroidx,
+ ssa.OpARM64MOVHstorezeroidx,
+ ssa.OpARM64MOVWstorezeroidx,
+ ssa.OpARM64MOVDstorezeroidx,
+ ssa.OpARM64MOVHstorezeroidx2,
+ ssa.OpARM64MOVWstorezeroidx4,
+ ssa.OpARM64MOVDstorezeroidx8:
+ p := s.Prog(v.Op.Asm())
+ p.To = genIndexedOperand(v.Op, v.Args[0].Reg(), v.Args[1].Reg())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ case ssa.OpARM64MOVQstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64BFI,
+ ssa.OpARM64BFXIL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.AddRestSourceConst(v.AuxInt & 0xff)
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64SBFIZ,
+ ssa.OpARM64SBFX,
+ ssa.OpARM64UBFIZ,
+ ssa.OpARM64UBFX:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.AddRestSourceConst(v.AuxInt & 0xff)
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredAtomicExchange64,
+ ssa.OpARM64LoweredAtomicExchange32:
+ // LDAXR (Rarg0), Rout
+ // STLXR Rarg1, (Rarg0), Rtmp
+ // CBNZ Rtmp, -2(PC)
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ if v.Op == ssa.OpARM64LoweredAtomicExchange32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(st)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = arm64.REGTMP
+ p2 := s.Prog(arm64.ACBNZ)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm64.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ p2.To.SetTarget(p)
+ case ssa.OpARM64LoweredAtomicExchange64Variant,
+ ssa.OpARM64LoweredAtomicExchange32Variant:
+ swap := arm64.ASWPALD
+ if v.Op == ssa.OpARM64LoweredAtomicExchange32Variant {
+ swap = arm64.ASWPALW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // SWPALD Rarg1, (Rarg0), Rout
+ p := s.Prog(swap)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+
+ case ssa.OpARM64LoweredAtomicAdd64,
+ ssa.OpARM64LoweredAtomicAdd32:
+ // LDAXR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ if v.Op == ssa.OpARM64LoweredAtomicAdd32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(arm64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = out
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = arm64.REGTMP
+ p3 := s.Prog(arm64.ACBNZ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARM64LoweredAtomicAdd64Variant,
+ ssa.OpARM64LoweredAtomicAdd32Variant:
+ // LDADDAL Rarg1, (Rarg0), Rout
+ // ADD Rarg1, Rout
+ op := arm64.ALDADDALD
+ if v.Op == ssa.OpARM64LoweredAtomicAdd32Variant {
+ op = arm64.ALDADDALW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+ p1 := s.Prog(arm64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ case ssa.OpARM64LoweredAtomicCas64,
+ ssa.OpARM64LoweredAtomicCas32:
+ // LDAXR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STLXR Rarg2, (Rarg0), Rtmp
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ cmp := arm64.ACMP
+ if v.Op == ssa.OpARM64LoweredAtomicCas32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ cmp = arm64.ACMPW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = arm64.REGTMP
+ p2 := s.Prog(arm64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ p3.RegTo2 = arm64.REGTMP
+ p4 := s.Prog(arm64.ACBNZ)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = arm64.REGTMP
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ p5 := s.Prog(arm64.ACSET)
+ p5.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+ p5.From.Offset = int64(arm64.SPOP_EQ)
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ p2.To.SetTarget(p5)
+ case ssa.OpARM64LoweredAtomicCas64Variant,
+ ssa.OpARM64LoweredAtomicCas32Variant:
+ // Rarg0: ptr
+ // Rarg1: old
+ // Rarg2: new
+ // MOV Rarg1, Rtmp
+ // CASAL Rtmp, (Rarg0), Rarg2
+ // CMP Rarg1, Rtmp
+ // CSET EQ, Rout
+ cas := arm64.ACASALD
+ cmp := arm64.ACMP
+ mov := arm64.AMOVD
+ if v.Op == ssa.OpARM64LoweredAtomicCas32Variant {
+ cas = arm64.ACASALW
+ cmp = arm64.ACMPW
+ mov = arm64.AMOVW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+
+ // MOV Rarg1, Rtmp
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+
+ // CASAL Rtmp, (Rarg0), Rarg2
+ p1 := s.Prog(cas)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = arm64.REGTMP
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = r2
+
+ // CMP Rarg1, Rtmp
+ p2 := s.Prog(cmp)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.Reg = arm64.REGTMP
+
+ // CSET EQ, Rout
+ p3 := s.Prog(arm64.ACSET)
+ p3.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+ p3.From.Offset = int64(arm64.SPOP_EQ)
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = out
+
+ case ssa.OpARM64LoweredAtomicAnd8,
+ ssa.OpARM64LoweredAtomicAnd32,
+ ssa.OpARM64LoweredAtomicOr8,
+ ssa.OpARM64LoweredAtomicOr32:
+ // LDAXRB/LDAXRW (Rarg0), Rout
+ // AND/OR Rarg1, Rout
+ // STLXRB/STLXRB Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXRB
+ st := arm64.ASTLXRB
+ if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = out
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = arm64.REGTMP
+ p3 := s.Prog(arm64.ACBNZ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARM64LoweredAtomicAnd8Variant,
+ ssa.OpARM64LoweredAtomicAnd32Variant:
+ atomic_clear := arm64.ALDCLRALW
+ if v.Op == ssa.OpARM64LoweredAtomicAnd8Variant {
+ atomic_clear = arm64.ALDCLRALB
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // MNV Rarg1 Rtemp
+ p := s.Prog(arm64.AMVN)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+
+ // LDCLRALW Rtemp, (Rarg0), Rout
+ p1 := s.Prog(atomic_clear)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = arm64.REGTMP
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = out
+
+ // AND Rarg1, Rout
+ p2 := s.Prog(arm64.AAND)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = out
+
+ case ssa.OpARM64LoweredAtomicOr8Variant,
+ ssa.OpARM64LoweredAtomicOr32Variant:
+ atomic_or := arm64.ALDORALW
+ if v.Op == ssa.OpARM64LoweredAtomicOr8Variant {
+ atomic_or = arm64.ALDORALB
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // LDORALW Rarg1, (Rarg0), Rout
+ p := s.Prog(atomic_or)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+
+ // ORR Rarg1, Rout
+ p2 := s.Prog(arm64.AORR)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = out
+
+ case ssa.OpARM64MOVBreg,
+ ssa.OpARM64MOVBUreg,
+ ssa.OpARM64MOVHreg,
+ ssa.OpARM64MOVHUreg,
+ ssa.OpARM64MOVWreg,
+ ssa.OpARM64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARM64MVN,
+ ssa.OpARM64NEG,
+ ssa.OpARM64FABSD,
+ ssa.OpARM64FMOVDfpgp,
+ ssa.OpARM64FMOVDgpfp,
+ ssa.OpARM64FMOVSfpgp,
+ ssa.OpARM64FMOVSgpfp,
+ ssa.OpARM64FNEGS,
+ ssa.OpARM64FNEGD,
+ ssa.OpARM64FSQRTS,
+ ssa.OpARM64FSQRTD,
+ ssa.OpARM64FCVTZSSW,
+ ssa.OpARM64FCVTZSDW,
+ ssa.OpARM64FCVTZUSW,
+ ssa.OpARM64FCVTZUDW,
+ ssa.OpARM64FCVTZSS,
+ ssa.OpARM64FCVTZSD,
+ ssa.OpARM64FCVTZUS,
+ ssa.OpARM64FCVTZUD,
+ ssa.OpARM64SCVTFWS,
+ ssa.OpARM64SCVTFWD,
+ ssa.OpARM64SCVTFS,
+ ssa.OpARM64SCVTFD,
+ ssa.OpARM64UCVTFWS,
+ ssa.OpARM64UCVTFWD,
+ ssa.OpARM64UCVTFS,
+ ssa.OpARM64UCVTFD,
+ ssa.OpARM64FCVTSD,
+ ssa.OpARM64FCVTDS,
+ ssa.OpARM64REV,
+ ssa.OpARM64REVW,
+ ssa.OpARM64REV16,
+ ssa.OpARM64REV16W,
+ ssa.OpARM64RBIT,
+ ssa.OpARM64RBITW,
+ ssa.OpARM64CLZ,
+ ssa.OpARM64CLZW,
+ ssa.OpARM64FRINTAD,
+ ssa.OpARM64FRINTMD,
+ ssa.OpARM64FRINTND,
+ ssa.OpARM64FRINTPD,
+ ssa.OpARM64FRINTZD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredRound32F, ssa.OpARM64LoweredRound64F:
+ // input is already rounded
+ case ssa.OpARM64VCNT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = (v.Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ case ssa.OpARM64VUADDLV:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg() - arm64.REG_F0 + arm64.REG_V0
+ case ssa.OpARM64CSEL, ssa.OpARM64CSEL0:
+ r1 := int16(arm64.REGZERO)
+ if v.Op != ssa.OpARM64CSEL0 {
+ r1 = v.Args[1].Reg()
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+ condCode := condBits[ssa.Op(v.AuxInt)]
+ p.From.Offset = int64(condCode)
+ p.Reg = v.Args[0].Reg()
+ p.AddRestSourceReg(r1)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64CSINC, ssa.OpARM64CSINV, ssa.OpARM64CSNEG:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+ condCode := condBits[ssa.Op(v.AuxInt)]
+ p.From.Offset = int64(condCode)
+ p.Reg = v.Args[0].Reg()
+ p.AddRestSourceReg(v.Args[1].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64CSETM:
+ p := s.Prog(arm64.ACSETM)
+ p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+ condCode := condBits[ssa.Op(v.AuxInt)]
+ p.From.Offset = int64(condCode)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64DUFFZERO:
+ // runtime.duffzero expects start address in R20
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredZero:
+ // STP.P (ZR,ZR), 16(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // arg1 is the address of the last 16-byte unit to zero
+ p := s.Prog(arm64.ASTP)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REG_R16
+ p.To.Offset = 16
+ p2 := s.Prog(arm64.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = arm64.REG_R16
+ p3 := s.Prog(arm64.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARM64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredMove:
+ // LDP.P 16(R16), (R25, Rtmp)
+ // STP.P (R25, Rtmp), 16(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ p := s.Prog(arm64.ALDP)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm64.REG_R16
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = arm64.REG_R25
+ p.To.Offset = int64(arm64.REGTMP)
+ p2 := s.Prog(arm64.ASTP)
+ p2.Scond = arm64.C_XPOST
+ p2.From.Type = obj.TYPE_REGREG
+ p2.From.Reg = arm64.REG_R25
+ p2.From.Offset = int64(arm64.REGTMP)
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm64.REG_R17
+ p2.To.Offset = 16
+ p3 := s.Prog(arm64.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.Reg = arm64.REG_R16
+ p4 := s.Prog(arm64.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
+ s.Call(v)
+ case ssa.OpARM64CALLtail:
+ s.TailCall(v)
+ case ssa.OpARM64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+
+ case ssa.OpARM64LoweredPanicBoundsA, ssa.OpARM64LoweredPanicBoundsB, ssa.OpARM64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpARM64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(arm64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpARM64Equal,
+ ssa.OpARM64NotEqual,
+ ssa.OpARM64LessThan,
+ ssa.OpARM64LessEqual,
+ ssa.OpARM64GreaterThan,
+ ssa.OpARM64GreaterEqual,
+ ssa.OpARM64LessThanU,
+ ssa.OpARM64LessEqualU,
+ ssa.OpARM64GreaterThanU,
+ ssa.OpARM64GreaterEqualU,
+ ssa.OpARM64LessThanF,
+ ssa.OpARM64LessEqualF,
+ ssa.OpARM64GreaterThanF,
+ ssa.OpARM64GreaterEqualF,
+ ssa.OpARM64NotLessThanF,
+ ssa.OpARM64NotLessEqualF,
+ ssa.OpARM64NotGreaterThanF,
+ ssa.OpARM64NotGreaterEqualF,
+ ssa.OpARM64LessThanNoov,
+ ssa.OpARM64GreaterEqualNoov:
+ // generate boolean values using CSET
+ p := s.Prog(arm64.ACSET)
+ p.From.Type = obj.TYPE_SPECIAL // assembler encodes conditional bits in Offset
+ condCode := condBits[v.Op]
+ p.From.Offset = int64(condCode)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64PRFM:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredGetClosurePtr:
+ // Closure pointer is R26 (arm64.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64DMB:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ case ssa.OpARM64FlagConstant:
+ v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
+ case ssa.OpARM64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber:
+ // MOVW $0xdeaddead, REGTMP
+ // MOVW REGTMP, (slot)
+ // MOVW REGTMP, 4(slot)
+ p := s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux2(&p.To, v, v.AuxInt+4)
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var condBits = map[ssa.Op]arm64.SpecialOperand{
+ ssa.OpARM64Equal: arm64.SPOP_EQ,
+ ssa.OpARM64NotEqual: arm64.SPOP_NE,
+ ssa.OpARM64LessThan: arm64.SPOP_LT,
+ ssa.OpARM64LessThanU: arm64.SPOP_LO,
+ ssa.OpARM64LessEqual: arm64.SPOP_LE,
+ ssa.OpARM64LessEqualU: arm64.SPOP_LS,
+ ssa.OpARM64GreaterThan: arm64.SPOP_GT,
+ ssa.OpARM64GreaterThanU: arm64.SPOP_HI,
+ ssa.OpARM64GreaterEqual: arm64.SPOP_GE,
+ ssa.OpARM64GreaterEqualU: arm64.SPOP_HS,
+ ssa.OpARM64LessThanF: arm64.SPOP_MI, // Less than
+ ssa.OpARM64LessEqualF: arm64.SPOP_LS, // Less than or equal to
+ ssa.OpARM64GreaterThanF: arm64.SPOP_GT, // Greater than
+ ssa.OpARM64GreaterEqualF: arm64.SPOP_GE, // Greater than or equal to
+
+ // The following condition codes have unordered to handle comparisons related to NaN.
+ ssa.OpARM64NotLessThanF: arm64.SPOP_PL, // Greater than, equal to, or unordered
+ ssa.OpARM64NotLessEqualF: arm64.SPOP_HI, // Greater than or unordered
+ ssa.OpARM64NotGreaterThanF: arm64.SPOP_LE, // Less than, equal to or unordered
+ ssa.OpARM64NotGreaterEqualF: arm64.SPOP_LT, // Less than or unordered
+
+ ssa.OpARM64LessThanNoov: arm64.SPOP_MI, // Less than but without honoring overflow
+ ssa.OpARM64GreaterEqualNoov: arm64.SPOP_PL, // Greater than or equal to but without honoring overflow
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ},
+ ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ},
+ ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW},
+ ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW},
+ ssa.BlockARM64TBZ: {arm64.ATBZ, arm64.ATBNZ},
+ ssa.BlockARM64TBNZ: {arm64.ATBNZ, arm64.ATBZ},
+ ssa.BlockARM64FLT: {arm64.ABMI, arm64.ABPL},
+ ssa.BlockARM64FGE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64FLE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64FGT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64LTnoov: {arm64.ABMI, arm64.ABPL},
+ ssa.BlockARM64GEnoov: {arm64.ABPL, arm64.ABMI},
+}
+
+// To model a 'LEnoov' ('<=' without overflow checking) branching.
+var leJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
+}
+
+// To model a 'GTnoov' ('>' without overflow checking) branching.
+var gtJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(arm64.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm64.REG_R0
+ p = s.Prog(arm64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit, ssa.BlockRetJmp:
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockARM64EQ, ssa.BlockARM64NE,
+ ssa.BlockARM64LT, ssa.BlockARM64GE,
+ ssa.BlockARM64LE, ssa.BlockARM64GT,
+ ssa.BlockARM64ULT, ssa.BlockARM64UGT,
+ ssa.BlockARM64ULE, ssa.BlockARM64UGE,
+ ssa.BlockARM64Z, ssa.BlockARM64NZ,
+ ssa.BlockARM64ZW, ssa.BlockARM64NZW,
+ ssa.BlockARM64FLT, ssa.BlockARM64FGE,
+ ssa.BlockARM64FLE, ssa.BlockARM64FGT,
+ ssa.BlockARM64LTnoov, ssa.BlockARM64GEnoov:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ case ssa.BlockARM64TBZ, ssa.BlockARM64TBNZ:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ p.From.Offset = b.AuxInt
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = b.Controls[0].Reg()
+
+ case ssa.BlockARM64LEnoov:
+ s.CombJump(b, next, &leJumps)
+ case ssa.BlockARM64GTnoov:
+ s.CombJump(b, next, &gtJumps)
+
+ case ssa.BlockARM64JUMPTABLE:
+ // MOVD (TABLE)(IDX<<3), Rtmp
+ // JMP (Rtmp)
+ p := s.Prog(arm64.AMOVD)
+ p.From = genIndexedOperand(ssa.OpARM64MOVDloadidx8, b.Controls[1].Reg(), b.Controls[0].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p = s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGTMP
+ // Save jump tables for later resolution of the target blocks.
+ s.JumpTables = append(s.JumpTables, b)
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
new file mode 100644
index 0000000..ee3772c
--- /dev/null
+++ b/src/cmd/compile/internal/base/base.go
@@ -0,0 +1,221 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "runtime/debug"
+ "runtime/metrics"
+)
+
+var atExitFuncs []func()
+
+func AtExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
+
+// To enable tracing support (-t flag), set EnableTrace to true.
+const EnableTrace = false
+
+// forEachGC calls fn each GC cycle until it returns false.
+func forEachGC(fn func() bool) {
+ type T [32]byte // large enough to avoid runtime's tiny object allocator
+
+ var finalizer func(*T)
+ finalizer = func(p *T) {
+ if fn() {
+ runtime.SetFinalizer(p, finalizer)
+ }
+ }
+
+ finalizer(new(T))
+}
+
+// AdjustStartingHeap modifies GOGC so that GC should not occur until the heap
+// grows to the requested size. This is intended but not promised, though it
+// is true-mostly, depending on when the adjustment occurs and on the
+// compiler's input and behavior. Once this size is approximately reached
+// GOGC is reset to 100; subsequent GCs may reduce the heap below the requested
+// size, but this function does not affect that.
+//
+// -d=gcadjust=1 enables logging of GOGC adjustment events.
+//
+// NOTE: If you think this code would help startup time in your own
+// application and you decide to use it, please benchmark first to see if it
+// actually works for you (it may not: the Go compiler is not typical), and
+// whatever the outcome, please leave a comment on bug #56546. This code
+// uses supported interfaces, but depends more than we like on
+// current+observed behavior of the garbage collector, so if many people need
+// this feature, we should consider/propose a better way to accomplish it.
+func AdjustStartingHeap(requestedHeapGoal uint64) {
+ logHeapTweaks := Debug.GCAdjust == 1
+ mp := runtime.GOMAXPROCS(0)
+ gcConcurrency := Flag.LowerC
+
+ const (
+ goal = "/gc/heap/goal:bytes"
+ count = "/gc/cycles/total:gc-cycles"
+ allocs = "/gc/heap/allocs:bytes"
+ frees = "/gc/heap/frees:bytes"
+ )
+
+ sample := []metrics.Sample{{Name: goal}, {Name: count}, {Name: allocs}, {Name: frees}}
+ const (
+ GOAL = 0
+ COUNT = 1
+ ALLOCS = 2
+ FREES = 3
+ )
+
+ // Assumptions and observations of Go's garbage collector, as of Go 1.17-1.20:
+
+ // - the initial heap goal is 4M, by fiat. It is possible for Go to start
+ // with a heap as small as 512k, so this may change in the future.
+
+ // - except for the first heap goal, heap goal is a function of
+ // observed-live at the previous GC and current GOGC. After the first
+ // GC, adjusting GOGC immediately updates GOGC; before the first GC,
+ // adjusting GOGC does not modify goal (but the change takes effect after
+ // the first GC).
+
+ // - the before/after first GC behavior is not guaranteed anywhere, it's
+ // just behavior, and it's a bad idea to rely on it.
+
+ // - we don't know exactly when GC will run, even after we adjust GOGC; the
+ // first GC may not have happened yet, may have already happened, or may
+ // be currently in progress, and GCs can start for several reasons.
+
+ // - forEachGC above will run the provided function at some delay after each
+ // GC's mark phase terminates; finalizers are run after marking as the
+ // spans containing finalizable objects are swept, driven by GC
+ // background activity and allocation demand.
+
+ // - "live at last GC" is not available through the current metrics
+ // interface. Instead, live is estimated by knowing the adjusted value of
+ // GOGC and the new heap goal following a GC (this requires knowing that
+ // at least one GC has occurred):
+ // estLive = 100 * newGoal / (100 + currentGogc)
+ // this new value of GOGC
+ // newGogc = 100*requestedHeapGoal/estLive - 100
+ // will result in the desired goal. The logging code checks that the
+ // resulting goal is correct.
+
+ // There's a small risk that the finalizer will be slow to run after a GC
+ // that expands the goal to a huge value, and that this will lead to
+ // out-of-memory. This doesn't seem to happen; in experiments on a variety
+ // of machines with a variety of extra loads to disrupt scheduling, the
+ // worst overshoot observed was 50% past requestedHeapGoal.
+
+ metrics.Read(sample)
+ for _, s := range sample {
+ if s.Value.Kind() == metrics.KindBad {
+ // Just return, a slightly slower compilation is a tolerable outcome.
+ if logHeapTweaks {
+ fmt.Fprintf(os.Stderr, "GCAdjust: Regret unexpected KindBad for metric %s\n", s.Name)
+ }
+ return
+ }
+ }
+
+ // Tinker with GOGC to make the heap grow rapidly at first.
+ currentGoal := sample[GOAL].Value.Uint64() // Believe this will be 4MByte or less, perhaps 512k
+ myGogc := 100 * requestedHeapGoal / currentGoal
+ if myGogc <= 150 {
+ return
+ }
+
+ if logHeapTweaks {
+ sample := append([]metrics.Sample(nil), sample...) // avoid races with GC callback
+ AtExit(func() {
+ metrics.Read(sample)
+ goal := sample[GOAL].Value.Uint64()
+ count := sample[COUNT].Value.Uint64()
+ oldGogc := debug.SetGCPercent(100)
+ if oldGogc == 100 {
+ fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %d gogc %d count %d maxprocs %d gcConcurrency %d\n",
+ goal, oldGogc, count, mp, gcConcurrency)
+ } else {
+ inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64()
+ overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal)
+ fmt.Fprintf(os.Stderr, "GCAdjust: AtExit goal %d gogc %d count %d maxprocs %d gcConcurrency %d overPct %d\n",
+ goal, oldGogc, count, mp, gcConcurrency, overPct)
+
+ }
+ })
+ }
+
+ debug.SetGCPercent(int(myGogc))
+
+ adjustFunc := func() bool {
+
+ metrics.Read(sample)
+ goal := sample[GOAL].Value.Uint64()
+ count := sample[COUNT].Value.Uint64()
+
+ if goal <= requestedHeapGoal { // Stay the course
+ if logHeapTweaks {
+ fmt.Fprintf(os.Stderr, "GCAdjust: Reuse GOGC adjust, current goal %d, count is %d, current gogc %d\n",
+ goal, count, myGogc)
+ }
+ return true
+ }
+
+ // Believe goal has been adjusted upwards, else it would be less-than-or-equal than requestedHeapGoal
+ calcLive := 100 * goal / (100 + myGogc)
+
+ if 2*calcLive < requestedHeapGoal { // calcLive can exceed requestedHeapGoal!
+ myGogc = 100*requestedHeapGoal/calcLive - 100
+
+ if myGogc > 125 {
+ // Not done growing the heap.
+ oldGogc := debug.SetGCPercent(int(myGogc))
+
+ if logHeapTweaks {
+ // Check that the new goal looks right
+ inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64()
+ metrics.Read(sample)
+ newGoal := sample[GOAL].Value.Uint64()
+ pctOff := 100 * (int64(newGoal) - int64(requestedHeapGoal)) / int64(requestedHeapGoal)
+ // Check that the new goal is close to requested. 3% of make.bash fails this test. Why, TBD.
+ if pctOff < 2 {
+ fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %d, count is %d, gogc was %d, is now %d, calcLive %d pctOff %d\n",
+ goal, count, oldGogc, myGogc, calcLive, pctOff)
+ } else {
+ // The GC is being annoying and not giving us the goal that we requested, say more to help understand when/why.
+ fmt.Fprintf(os.Stderr, "GCAdjust: Retry GOGC adjust, current goal %d, count is %d, gogc was %d, is now %d, calcLive %d pctOff %d inUse %d\n",
+ goal, count, oldGogc, myGogc, calcLive, pctOff, inUse)
+ }
+ }
+ return true
+ }
+ }
+
+ // In this case we're done boosting GOGC, set it to 100 and don't set a new finalizer.
+ oldGogc := debug.SetGCPercent(100)
+ // inUse helps estimate how late the finalizer ran; at the instant the previous GC ended,
+ // it was (in theory) equal to the previous GC's heap goal. In a growing heap it is
+ // expected to grow to the new heap goal.
+ inUse := sample[ALLOCS].Value.Uint64() - sample[FREES].Value.Uint64()
+ overPct := 100 * (int(inUse) - int(requestedHeapGoal)) / int(requestedHeapGoal)
+ if logHeapTweaks {
+ fmt.Fprintf(os.Stderr, "GCAdjust: Reset GOGC adjust, old goal %d, count is %d, gogc was %d, calcLive %d inUse %d overPct %d\n",
+ goal, count, oldGogc, calcLive, inUse, overPct)
+ }
+ return false
+ }
+
+ forEachGC(adjustFunc)
+}
diff --git a/src/cmd/compile/internal/base/bootstrap_false.go b/src/cmd/compile/internal/base/bootstrap_false.go
new file mode 100644
index 0000000..ea6da43
--- /dev/null
+++ b/src/cmd/compile/internal/base/bootstrap_false.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = false
diff --git a/src/cmd/compile/internal/base/bootstrap_true.go b/src/cmd/compile/internal/base/bootstrap_true.go
new file mode 100644
index 0000000..d0c6c88
--- /dev/null
+++ b/src/cmd/compile/internal/base/bootstrap_true.go
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = true
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
new file mode 100644
index 0000000..420ad13
--- /dev/null
+++ b/src/cmd/compile/internal/base/debug.go
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Debug arguments, set by -d flag.
+
+package base
+
+// Debug holds the parsed debugging configuration values.
+var Debug DebugFlags
+
+// DebugFlags defines the debugging configuration values (see var Debug).
+// Each struct field is a different value, named for the lower-case of the field name.
+// Each field must be an int or string and must have a `help` struct tag.
+//
+// The -d option takes a comma-separated list of settings.
+// Each setting is name=value; for ints, name is short for name=1.
+type DebugFlags struct {
+ Append int `help:"print information about append compilation"`
+ Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation" concurrent:"ok"`
+ Closure int `help:"print information about closure compilation"`
+ Defer int `help:"print information about defer compilation"`
+ DisableNil int `help:"disable nil checks" concurrent:"ok"`
+ DumpInlFuncProps string `help:"dump function properties from inl heuristics to specified file"`
+ DumpInlCallSiteScores int `help:"dump scored callsites during inlining"`
+ InlScoreAdj string `help:"set inliner score adjustments (ex: -d=inlscoreadj=panicPathAdj:10/passConstToNestedIfAdj:-90)"`
+ InlBudgetSlack int `help:"amount to expand the initial inline budget when new inliner enabled. Defaults to 80 if option not set." concurrent:"ok"`
+ DumpPtrs int `help:"show Node pointers values in dump output"`
+ DwarfInl int `help:"print information about DWARF inlined function creation"`
+ EscapeMutationsCalls int `help:"print extra escape analysis diagnostics about mutations and calls" concurrent:"ok"`
+ Export int `help:"print export data"`
+ Fmahash string `help:"hash value for use in debugging platform-dependent multiply-add use" concurrent:"ok"`
+ GCAdjust int `help:"log adjustments to GOGC" concurrent:"ok"`
+ GCCheck int `help:"check heap/gc use by compiler" concurrent:"ok"`
+ GCProg int `help:"print dump of GC programs"`
+ Gossahash string `help:"hash value for use in debugging the compiler"`
+ InlFuncsWithClosures int `help:"allow functions with closures to be inlined" concurrent:"ok"`
+ InlStaticInit int `help:"allow static initialization of inlined calls" concurrent:"ok"`
+ Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
+ LoopVar int `help:"shared (0, default), 1 (private loop variables), 2, private + log"`
+ LoopVarHash string `help:"for debugging changes in loop behavior. Overrides experiment and loopvar flag."`
+ LocationLists int `help:"print information about DWARF location list creation"`
+ MaxShapeLen int `help:"hash shape names longer than this threshold (default 500)" concurrent:"ok"`
+ Nil int `help:"print information about nil checks"`
+ NoOpenDefer int `help:"disable open-coded defers" concurrent:"ok"`
+ NoRefName int `help:"do not include referenced symbol names in object file" concurrent:"ok"`
+ PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"`
+ Panic int `help:"show all compiler panics"`
+ Reshape int `help:"print information about expression reshaping"`
+ Shapify int `help:"print information about shaping recursive types"`
+ Slice int `help:"print information about slice compilation"`
+ SoftFloat int `help:"force compiler to emit soft-float code" concurrent:"ok"`
+ StaticCopy int `help:"print information about missed static copies" concurrent:"ok"`
+ SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
+ TypeAssert int `help:"print information about type assertion inlining"`
+ WB int `help:"print information about write barriers"`
+ ABIWrap int `help:"print information about ABI wrapper generation"`
+ MayMoreStack string `help:"call named function before all stack growth checks" concurrent:"ok"`
+ PGODebug int `help:"debug profile-guided optimizations"`
+ PGOHash string `help:"hash value for debugging profile-guided optimizations" concurrent:"ok"`
+ PGOInline int `help:"enable profile-guided inlining" concurrent:"ok"`
+ PGOInlineCDFThreshold string `help:"cumulative threshold percentage for determining call sites as hot candidates for inlining" concurrent:"ok"`
+ PGOInlineBudget int `help:"inline budget for hot functions" concurrent:"ok"`
+ PGODevirtualize int `help:"enable profile-guided devirtualization; 0 to disable, 1 to enable interface devirtualization, 2 to enable function devirtualization" concurrent:"ok"`
+ RangeFuncCheck int `help:"insert code to check behavior of range iterator functions" concurrent:"ok"`
+ WrapGlobalMapDbg int `help:"debug trace output for global map init wrapping"`
+ WrapGlobalMapCtl int `help:"global map init wrap control (0 => default, 1 => off, 2 => stress mode, no size cutoff)"`
+ ZeroCopy int `help:"enable zero-copy string->[]byte conversions" concurrent:"ok"`
+
+ ConcurrentOk bool // true if only concurrentOk flags seen
+}
+
+// DebugSSA is called to set a -d ssa/... option.
+// If nil, those options are reported as invalid options.
+// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
+var DebugSSA func(phase, flag string, val int, valString string) string
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
new file mode 100644
index 0000000..a3144f8
--- /dev/null
+++ b/src/cmd/compile/internal/base/flag.go
@@ -0,0 +1,575 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "cmd/internal/cov/covcmd"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "internal/platform"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+ objabi.Flagprint(os.Stderr)
+ Exit(2)
+}
+
+// Flag holds the parsed command-line flags.
+// See ParseFlag for non-zero defaults.
+var Flag CmdFlags
+
+// A CountFlag is a counting integer flag.
+// It accepts -name=value to set the value directly,
+// but it also accepts -name with no =value to increment the count.
+type CountFlag int
+
+// CmdFlags defines the command-line flags (see var Flag).
+// Each struct field is a different flag, by default named for the lower-case of the field name.
+// If the flag name is a single letter, the default flag name is left upper-case.
+// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
+//
+// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
+// but this should be done only in exceptional circumstances: it helps everyone if the flag name
+// is obvious from the field name when the flag is used elsewhere in the compiler sources.
+// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
+//
+// Each field must have a `help` struct tag giving the flag help message.
+//
+// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
+// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
+type CmdFlags struct {
+ // Single letters
+ B CountFlag "help:\"disable bounds checking\""
+ C CountFlag "help:\"disable printing of columns in error messages\""
+ D string "help:\"set relative `path` for local imports\""
+ E CountFlag "help:\"debug symbol export\""
+ I func(string) "help:\"add `directory` to import search path\""
+ K CountFlag "help:\"debug missing line numbers\""
+ L CountFlag "help:\"also show actual source file names in error messages for positions affected by //line directives\""
+ N CountFlag "help:\"disable optimizations\""
+ S CountFlag "help:\"print assembly listing\""
+ // V is added by objabi.AddVersionFlag
+ W CountFlag "help:\"debug parse tree after type checking\""
+
+ LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
+ LowerD flag.Value "help:\"enable debugging settings; try -d help\""
+ LowerE CountFlag "help:\"no limit on number of errors reported\""
+ LowerH CountFlag "help:\"halt on error\""
+ LowerJ CountFlag "help:\"debug runtime-initialized variables\""
+ LowerL CountFlag "help:\"disable inlining\""
+ LowerM CountFlag "help:\"print optimization decisions\""
+ LowerO string "help:\"write output to `file`\""
+ LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
+ LowerR CountFlag "help:\"debug generated wrappers\""
+ LowerT bool "help:\"enable tracing for debugging the compiler\""
+ LowerW CountFlag "help:\"debug type checking\""
+ LowerV *bool "help:\"increase debug verbosity\""
+
+ // Special characters
+ Percent CountFlag "flag:\"%\" help:\"debug non-static initializers\""
+ CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
+
+ // Longer names
+ AsmHdr string "help:\"write assembly header to `file`\""
+ ASan bool "help:\"build code compatible with C/C++ address sanitizer\""
+ Bench string "help:\"append benchmark times to `file`\""
+ BlockProfile string "help:\"write block profile to `file`\""
+ BuildID string "help:\"record `id` as the build id in the export metadata\""
+ CPUProfile string "help:\"write cpu profile to `file`\""
+ Complete bool "help:\"compiling complete package (no C or assembly)\""
+ ClobberDead bool "help:\"clobber dead stack slots (for debugging)\""
+ ClobberDeadReg bool "help:\"clobber dead registers (for debugging)\""
+ Dwarf bool "help:\"generate DWARF symbols\""
+ DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
+ DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
+ Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
+ EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
+ Env func(string) "help:\"add `definition` of the form key=value to environment\""
+ GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
+ GoVersion string "help:\"required version of the runtime\""
+ ImportCfg func(string) "help:\"read import configuration from `file`\""
+ InstallSuffix string "help:\"set pkg directory `suffix`\""
+ JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
+ Lang string "help:\"Go language version source code expects\""
+ LinkObj string "help:\"write linker-specific object to `file`\""
+ LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
+ Live CountFlag "help:\"debug liveness analysis\""
+ MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
+ MemProfile string "help:\"write memory profile to `file`\""
+ MemProfileRate int "help:\"set runtime.MemProfileRate to `rate`\""
+ MutexProfile string "help:\"write mutex profile to `file`\""
+ NoLocalImports bool "help:\"reject local (relative) imports\""
+ CoverageCfg func(string) "help:\"read coverage configuration from `file`\""
+ Pack bool "help:\"write to file.a instead of file.o\""
+ Race bool "help:\"enable race detector\""
+ Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
+ SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
+ Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
+ Std bool "help:\"compiling standard library\""
+ SymABIs string "help:\"read symbol ABIs from `file`\""
+ TraceProfile string "help:\"write an execution trace to `file`\""
+ TrimPath string "help:\"remove `prefix` from recorded source file paths\""
+ WB bool "help:\"enable write barrier\"" // TODO: remove
+ PgoProfile string "help:\"read profile from `file`\""
+ ErrorURL bool "help:\"print explanatory URL with error message if applicable\""
+
+ // Configuration derived from flags; not a flag itself.
+ Cfg struct {
+ Embed struct { // set by -embedcfg
+ Patterns map[string][]string
+ Files map[string]string
+ }
+ ImportDirs []string // appended to by -I
+ ImportMap map[string]string // set by -importcfg
+ PackageFile map[string]string // set by -importcfg; nil means not in use
+ CoverageInfo *covcmd.CoverFixupConfig // set by -coveragecfg
+ SpectreIndex bool // set by -spectre=index or -spectre=all
+ // Whether we are adding any sort of code instrumentation, such as
+ // when the race detector is enabled.
+ Instrumenting bool
+ }
+}
+
+func addEnv(s string) {
+ i := strings.Index(s, "=")
+ if i < 0 {
+ log.Fatal("-env argument must be of the form key=value")
+ }
+ os.Setenv(s[:i], s[i+1:])
+}
+
+// ParseFlags parses the command-line flags into Flag.
+func ParseFlags() {
+ Flag.I = addImportDir
+
+ Flag.LowerC = runtime.GOMAXPROCS(0)
+ Flag.LowerD = objabi.NewDebugFlag(&Debug, DebugSSA)
+ Flag.LowerP = &Ctxt.Pkgpath
+ Flag.LowerV = &Ctxt.Debugvlog
+
+ Flag.Dwarf = buildcfg.GOARCH != "wasm"
+ Flag.DwarfBASEntries = &Ctxt.UseBASEntries
+ Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
+ *Flag.DwarfLocationLists = true
+ Flag.Dynlink = &Ctxt.Flag_dynlink
+ Flag.EmbedCfg = readEmbedCfg
+ Flag.Env = addEnv
+ Flag.GenDwarfInl = 2
+ Flag.ImportCfg = readImportCfg
+ Flag.CoverageCfg = readCoverageCfg
+ Flag.LinkShared = &Ctxt.Flag_linkshared
+ Flag.Shared = &Ctxt.Flag_shared
+ Flag.WB = true
+
+ Debug.ConcurrentOk = true
+ Debug.MaxShapeLen = 500
+ Debug.InlFuncsWithClosures = 1
+ Debug.InlStaticInit = 1
+ Debug.PGOInline = 1
+ Debug.PGODevirtualize = 2
+ Debug.SyncFrames = -1 // disable sync markers by default
+ Debug.ZeroCopy = 1
+ Debug.RangeFuncCheck = 1
+
+ Debug.Checkptr = -1 // so we can tell whether it is set explicitly
+
+ Flag.Cfg.ImportMap = make(map[string]string)
+
+ objabi.AddVersionFlag() // -V
+ registerFlags()
+ objabi.Flagparse(usage)
+
+ if gcd := os.Getenv("GOCOMPILEDEBUG"); gcd != "" {
+ // This will only override the flags set in gcd;
+ // any others set on the command line remain set.
+ Flag.LowerD.Set(gcd)
+ }
+
+ if Debug.Gossahash != "" {
+ hashDebug = NewHashDebug("gossahash", Debug.Gossahash, nil)
+ }
+
+ // Compute whether we're compiling the runtime from the package path. Test
+ // code can also use the flag to set this explicitly.
+ if Flag.Std && objabi.LookupPkgSpecial(Ctxt.Pkgpath).Runtime {
+ Flag.CompilingRuntime = true
+ }
+
+ // Three inputs govern loop iteration variable rewriting, hash, experiment, flag.
+ // The loop variable rewriting is:
+ // IF non-empty hash, then hash determines behavior (function+line match) (*)
+ // ELSE IF experiment and flag==0, then experiment (set flag=1)
+ // ELSE flag (note that build sets flag per-package), with behaviors:
+ // -1 => no change to behavior.
+ // 0 => no change to behavior (unless non-empty hash, see above)
+ // 1 => apply change to likely-iteration-variable-escaping loops
+ // 2 => apply change, log results
+ // 11 => apply change EVERYWHERE, do not log results (for debugging/benchmarking)
+ // 12 => apply change EVERYWHERE, log results (for debugging/benchmarking)
+ //
+ // The expected uses of the these inputs are, in believed most-likely to least likely:
+ // GOEXPERIMENT=loopvar -- apply change to entire application
+ // -gcflags=some_package=-d=loopvar=1 -- apply change to some_package (**)
+ // -gcflags=some_package=-d=loopvar=2 -- apply change to some_package, log it
+ // GOEXPERIMENT=loopvar -gcflags=some_package=-d=loopvar=-1 -- apply change to all but one package
+ // GOCOMPILEDEBUG=loopvarhash=... -- search for failure cause
+ //
+ // (*) For debugging purposes, providing loopvar flag >= 11 will expand the hash-eligible set of loops to all.
+ // (**) Loop semantics, changed or not, follow code from a package when it is inlined; that is, the behavior
+ // of an application compiled with partially modified loop semantics does not depend on inlining.
+
+ if Debug.LoopVarHash != "" {
+ // This first little bit controls the inputs for debug-hash-matching.
+ mostInlineOnly := true
+ if strings.HasPrefix(Debug.LoopVarHash, "IL") {
+ // When hash-searching on a position that is an inline site, default is to use the
+ // most-inlined position only. This makes the hash faster, plus there's no point
+ // reporting a problem with all the inlining; there's only one copy of the source.
+ // However, if for some reason you wanted it per-site, you can get this. (The default
+ // hash-search behavior for compiler debugging is at an inline site.)
+ Debug.LoopVarHash = Debug.LoopVarHash[2:]
+ mostInlineOnly = false
+ }
+ // end of testing trickiness
+ LoopVarHash = NewHashDebug("loopvarhash", Debug.LoopVarHash, nil)
+ if Debug.LoopVar < 11 { // >= 11 means all loops are rewrite-eligible
+ Debug.LoopVar = 1 // 1 means those loops that syntactically escape their dcl vars are eligible.
+ }
+ LoopVarHash.SetInlineSuffixOnly(mostInlineOnly)
+ } else if buildcfg.Experiment.LoopVar && Debug.LoopVar == 0 {
+ Debug.LoopVar = 1
+ }
+
+ if Debug.Fmahash != "" {
+ FmaHash = NewHashDebug("fmahash", Debug.Fmahash, nil)
+ }
+ if Debug.PGOHash != "" {
+ PGOHash = NewHashDebug("pgohash", Debug.PGOHash, nil)
+ }
+
+ if Flag.MSan && !platform.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
+ log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ if Flag.ASan && !platform.ASanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
+ log.Fatalf("%s/%s does not support -asan", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ if Flag.Race && !platform.RaceDetectorSupported(buildcfg.GOOS, buildcfg.GOARCH) {
+ log.Fatalf("%s/%s does not support -race", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X) {
+ log.Fatalf("%s/%s does not support -shared", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ parseSpectre(Flag.Spectre) // left as string for RecordFlags
+
+ Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
+ Ctxt.Flag_optimize = Flag.N == 0
+ Ctxt.Debugasm = int(Flag.S)
+ Ctxt.Flag_maymorestack = Debug.MayMoreStack
+ Ctxt.Flag_noRefName = Debug.NoRefName != 0
+
+ if flag.NArg() < 1 {
+ usage()
+ }
+
+ if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
+ fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
+ Exit(2)
+ }
+
+ if *Flag.LowerP == "" {
+ *Flag.LowerP = obj.UnlinkablePkg
+ }
+
+ if Flag.LowerO == "" {
+ p := flag.Arg(0)
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if runtime.GOOS == "windows" {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ if i := strings.LastIndex(p, "."); i >= 0 {
+ p = p[:i]
+ }
+ suffix := ".o"
+ if Flag.Pack {
+ suffix = ".a"
+ }
+ Flag.LowerO = p + suffix
+ }
+ switch {
+ case Flag.Race && Flag.MSan:
+ log.Fatal("cannot use both -race and -msan")
+ case Flag.Race && Flag.ASan:
+ log.Fatal("cannot use both -race and -asan")
+ case Flag.MSan && Flag.ASan:
+ log.Fatal("cannot use both -msan and -asan")
+ }
+ if Flag.Race || Flag.MSan || Flag.ASan {
+ // -race, -msan and -asan imply -d=checkptr for now.
+ if Debug.Checkptr == -1 { // if not set explicitly
+ Debug.Checkptr = 1
+ }
+ }
+
+ if Flag.LowerC < 1 {
+ log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
+ }
+ if !concurrentBackendAllowed() {
+ Flag.LowerC = 1
+ }
+
+ if Flag.CompilingRuntime {
+ // It is not possible to build the runtime with no optimizations,
+ // because the compiler cannot eliminate enough write barriers.
+ Flag.N = 0
+ Ctxt.Flag_optimize = true
+
+ // Runtime can't use -d=checkptr, at least not yet.
+ Debug.Checkptr = 0
+
+ // Fuzzing the runtime isn't interesting either.
+ Debug.Libfuzzer = 0
+ }
+
+ if Debug.Checkptr == -1 { // if not set explicitly
+ Debug.Checkptr = 0
+ }
+
+ // set via a -d flag
+ Ctxt.Debugpcln = Debug.PCTab
+}
+
+// registerFlags adds flag registrations for all the fields in Flag.
+// See the comment on type CmdFlags for the rules.
+func registerFlags() {
+ var (
+ boolType = reflect.TypeOf(bool(false))
+ intType = reflect.TypeOf(int(0))
+ stringType = reflect.TypeOf(string(""))
+ ptrBoolType = reflect.TypeOf(new(bool))
+ ptrIntType = reflect.TypeOf(new(int))
+ ptrStringType = reflect.TypeOf(new(string))
+ countType = reflect.TypeOf(CountFlag(0))
+ funcType = reflect.TypeOf((func(string))(nil))
+ )
+
+ v := reflect.ValueOf(&Flag).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Name == "Cfg" {
+ continue
+ }
+
+ var name string
+ if len(f.Name) == 1 {
+ name = f.Name
+ } else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
+ name = string(rune(f.Name[5] + 'a' - 'A'))
+ } else {
+ name = strings.ToLower(f.Name)
+ }
+ if tag := f.Tag.Get("flag"); tag != "" {
+ name = tag
+ }
+
+ help := f.Tag.Get("help")
+ if help == "" {
+ panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
+ }
+
+ if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
+ panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
+ }
+
+ switch f.Type {
+ case boolType:
+ p := v.Field(i).Addr().Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case intType:
+ p := v.Field(i).Addr().Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case stringType:
+ p := v.Field(i).Addr().Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case ptrBoolType:
+ p := v.Field(i).Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case ptrIntType:
+ p := v.Field(i).Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case ptrStringType:
+ p := v.Field(i).Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case countType:
+ p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
+ objabi.Flagcount(name, help, p)
+ case funcType:
+ f := v.Field(i).Interface().(func(string))
+ objabi.Flagfn1(name, help, f)
+ default:
+ if val, ok := v.Field(i).Interface().(flag.Value); ok {
+ flag.Var(val, name, help)
+ } else {
+ panic(fmt.Sprintf("base.Flag.%s has unexpected type %s", f.Name, f.Type))
+ }
+ }
+ }
+}
+
+// concurrentFlagOk reports whether the current compiler flags
+// are compatible with concurrent compilation.
+func concurrentFlagOk() bool {
+ // TODO(rsc): Many of these are fine. Remove them.
+ return Flag.Percent == 0 &&
+ Flag.E == 0 &&
+ Flag.K == 0 &&
+ Flag.L == 0 &&
+ Flag.LowerH == 0 &&
+ Flag.LowerJ == 0 &&
+ Flag.LowerM == 0 &&
+ Flag.LowerR == 0
+}
+
+func concurrentBackendAllowed() bool {
+ if !concurrentFlagOk() {
+ return false
+ }
+
+ // Debug.S by itself is ok, because all printing occurs
+ // while writing the object file, and that is non-concurrent.
+ // Adding Debug_vlog, however, causes Debug.S to also print
+ // while flushing the plist, which happens concurrently.
+ if Ctxt.Debugvlog || !Debug.ConcurrentOk || Flag.Live > 0 {
+ return false
+ }
+ // TODO: Test and delete this condition.
+ if buildcfg.Experiment.FieldTrack {
+ return false
+ }
+ // TODO: fix races and enable the following flags
+ if Ctxt.Flag_dynlink || Flag.Race {
+ return false
+ }
+ return true
+}
+
+func addImportDir(dir string) {
+ if dir != "" {
+ Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
+ }
+}
+
+func readImportCfg(file string) {
+ if Flag.Cfg.ImportMap == nil {
+ Flag.Cfg.ImportMap = make(map[string]string)
+ }
+ Flag.Cfg.PackageFile = map[string]string{}
+ data, err := os.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-importcfg: %v", err)
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ verb, args, found := strings.Cut(line, " ")
+ if found {
+ args = strings.TrimSpace(args)
+ }
+ before, after, hasEq := strings.Cut(args, "=")
+
+ switch verb {
+ default:
+ log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+ case "importmap":
+ if !hasEq || before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
+ }
+ Flag.Cfg.ImportMap[before] = after
+ case "packagefile":
+ if !hasEq || before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+ }
+ Flag.Cfg.PackageFile[before] = after
+ }
+ }
+}
+
+func readCoverageCfg(file string) {
+ var cfg covcmd.CoverFixupConfig
+ data, err := os.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-coveragecfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &cfg); err != nil {
+ log.Fatalf("error reading -coveragecfg file %q: %v", file, err)
+ }
+ Flag.Cfg.CoverageInfo = &cfg
+}
+
+func readEmbedCfg(file string) {
+ data, err := os.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if Flag.Cfg.Embed.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if Flag.Cfg.Embed.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+// parseSpectre parses the spectre configuration from the string s.
+func parseSpectre(s string) {
+ for _, f := range strings.Split(s, ",") {
+ f = strings.TrimSpace(f)
+ switch f {
+ default:
+ log.Fatalf("unknown setting -spectre=%s", f)
+ case "":
+ // nothing
+ case "all":
+ Flag.Cfg.SpectreIndex = true
+ Ctxt.Retpoline = true
+ case "index":
+ Flag.Cfg.SpectreIndex = true
+ case "ret":
+ Ctxt.Retpoline = true
+ }
+ }
+
+ if Flag.Cfg.SpectreIndex {
+ switch buildcfg.GOARCH {
+ case "amd64":
+ // ok
+ default:
+ log.Fatalf("GOARCH=%s does not support -spectre=index", buildcfg.GOARCH)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go
new file mode 100644
index 0000000..8342a5b
--- /dev/null
+++ b/src/cmd/compile/internal/base/hashdebug.go
@@ -0,0 +1,417 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "internal/bisect"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+type hashAndMask struct {
+ // a hash h matches if (h^hash)&mask == 0
+ hash uint64
+ mask uint64
+ name string // base name, or base name + "0", "1", etc.
+}
+
+type HashDebug struct {
+ mu sync.Mutex // for logfile, posTmp, bytesTmp
+ name string // base name of the flag/variable.
+ // what file (if any) receives the yes/no logging?
+ // default is os.Stdout
+ logfile io.Writer
+ posTmp []src.Pos
+ bytesTmp bytes.Buffer
+ matches []hashAndMask // A hash matches if one of these matches.
+ excludes []hashAndMask // explicitly excluded hash suffixes
+ bisect *bisect.Matcher
+ fileSuffixOnly bool // for Pos hashes, remove the directory prefix.
+ inlineSuffixOnly bool // for Pos hashes, remove all but the most inline position.
+}
+
+// SetInlineSuffixOnly controls whether hashing and reporting use the entire
+// inline position, or just the most-inline suffix. Compiler debugging tends
+// to want the whole inlining, debugging user problems (loopvarhash, e.g.)
+// typically does not need to see the entire inline tree, there is just one
+// copy of the source code.
+func (d *HashDebug) SetInlineSuffixOnly(b bool) *HashDebug {
+ d.inlineSuffixOnly = b
+ return d
+}
+
+// The default compiler-debugging HashDebug, for "-d=gossahash=..."
+var hashDebug *HashDebug
+
+var FmaHash *HashDebug // for debugging fused-multiply-add floating point changes
+var LoopVarHash *HashDebug // for debugging shared/private loop variable changes
+var PGOHash *HashDebug // for debugging PGO optimization decisions
+
+// DebugHashMatchPkgFunc reports whether debug variable Gossahash
+//
+// 1. is empty (returns true; this is a special more-quickly implemented case of 4 below)
+//
+// 2. is "y" or "Y" (returns true)
+//
+// 3. is "n" or "N" (returns false)
+//
+// 4. does not explicitly exclude the sha1 hash of pkgAndName (see step 6)
+//
+// 5. is a suffix of the sha1 hash of pkgAndName (returns true)
+//
+// 6. OR
+// if the (non-empty) value is in the regular language
+// "(-[01]+/)+?([01]+(/[01]+)+?"
+// (exclude..)(....include...)
+// test the [01]+ exclude substrings, if any suffix-match, return false (4 above)
+// test the [01]+ include substrings, if any suffix-match, return true
+// The include substrings AFTER the first slash are numbered 0,1, etc and
+// are named fmt.Sprintf("%s%d", varname, number)
+// As an extra-special case for multiple failure search,
+// an excludes-only string ending in a slash (terminated, not separated)
+// implicitly specifies the include string "0/1", that is, match everything.
+// (Exclude strings are used for automated search for multiple failures.)
+// Clause 6 is not really intended for human use and only
+// matters for failures that require multiple triggers.
+//
+// Otherwise it returns false.
+//
+// Unless Flags.Gossahash is empty, when DebugHashMatchPkgFunc returns true the message
+//
+// "%s triggered %s\n", varname, pkgAndName
+//
+// is printed on the file named in environment variable GSHS_LOGFILE,
+// or standard out if that is empty. "Varname" is either the name of
+// the variable or the name of the substring, depending on which matched.
+//
+// Typical use:
+//
+// 1. you make a change to the compiler, say, adding a new phase
+//
+// 2. it is broken in some mystifying way, for example, make.bash builds a broken
+// compiler that almost works, but crashes compiling a test in run.bash.
+//
+// 3. add this guard to the code, which by default leaves it broken, but does not
+// run the broken new code if Flags.Gossahash is non-empty and non-matching:
+//
+// if !base.DebugHashMatch(ir.PkgFuncName(fn)) {
+// return nil // early exit, do nothing
+// }
+//
+// 4. rebuild w/o the bad code,
+// GOCOMPILEDEBUG=gossahash=n ./all.bash
+// to verify that you put the guard in the right place with the right sense of the test.
+//
+// 5. use github.com/dr2chase/gossahash to search for the error:
+//
+// go install github.com/dr2chase/gossahash@latest
+//
+// gossahash -- <the thing that fails>
+//
+// for example: GOMAXPROCS=1 gossahash -- ./all.bash
+//
+// 6. gossahash should return a single function whose miscompilation
+// causes the problem, and you can focus on that.
+func DebugHashMatchPkgFunc(pkg, fn string) bool {
+ return hashDebug.MatchPkgFunc(pkg, fn, nil)
+}
+
+func DebugHashMatchPos(pos src.XPos) bool {
+ return hashDebug.MatchPos(pos, nil)
+}
+
+// HasDebugHash returns true if Flags.Gossahash is non-empty, which
+// results in hashDebug being not-nil. I.e., if !HasDebugHash(),
+// there is no need to create the string for hashing and testing.
+func HasDebugHash() bool {
+ return hashDebug != nil
+}
+
+// TODO: Delete when we switch to bisect-only.
+func toHashAndMask(s, varname string) hashAndMask {
+ l := len(s)
+ if l > 64 {
+ s = s[l-64:]
+ l = 64
+ }
+ m := ^(^uint64(0) << l)
+ h, err := strconv.ParseUint(s, 2, 64)
+ if err != nil {
+ Fatalf("Could not parse %s (=%s) as a binary number", varname, s)
+ }
+
+ return hashAndMask{name: varname, hash: h, mask: m}
+}
+
+// NewHashDebug returns a new hash-debug tester for the
+// environment variable ev. If ev is not set, it returns
+// nil, allowing a lightweight check for normal-case behavior.
+func NewHashDebug(ev, s string, file io.Writer) *HashDebug {
+ if s == "" {
+ return nil
+ }
+
+ hd := &HashDebug{name: ev, logfile: file}
+ if !strings.Contains(s, "/") {
+ m, err := bisect.New(s)
+ if err != nil {
+ Fatalf("%s: %v", ev, err)
+ }
+ hd.bisect = m
+ return hd
+ }
+
+ // TODO: Delete remainder of function when we switch to bisect-only.
+ ss := strings.Split(s, "/")
+ // first remove any leading exclusions; these are preceded with "-"
+ i := 0
+ for len(ss) > 0 {
+ s := ss[0]
+ if len(s) == 0 || len(s) > 0 && s[0] != '-' {
+ break
+ }
+ ss = ss[1:]
+ hd.excludes = append(hd.excludes, toHashAndMask(s[1:], fmt.Sprintf("%s%d", "HASH_EXCLUDE", i)))
+ i++
+ }
+ // hash searches may use additional EVs with 0, 1, 2, ... suffixes.
+ i = 0
+ for _, s := range ss {
+ if s == "" {
+ if i != 0 || len(ss) > 1 && ss[1] != "" || len(ss) > 2 {
+ Fatalf("Empty hash match string for %s should be first (and only) one", ev)
+ }
+ // Special case of should match everything.
+ hd.matches = append(hd.matches, toHashAndMask("0", fmt.Sprintf("%s0", ev)))
+ hd.matches = append(hd.matches, toHashAndMask("1", fmt.Sprintf("%s1", ev)))
+ break
+ }
+ if i == 0 {
+ hd.matches = append(hd.matches, toHashAndMask(s, fmt.Sprintf("%s", ev)))
+ } else {
+ hd.matches = append(hd.matches, toHashAndMask(s, fmt.Sprintf("%s%d", ev, i-1)))
+ }
+ i++
+ }
+ return hd
+}
+
+// TODO: Delete when we switch to bisect-only.
+func (d *HashDebug) excluded(hash uint64) bool {
+ for _, m := range d.excludes {
+ if (m.hash^hash)&m.mask == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// TODO: Delete when we switch to bisect-only.
+func hashString(hash uint64) string {
+ hstr := ""
+ if hash == 0 {
+ hstr = "0"
+ } else {
+ for ; hash != 0; hash = hash >> 1 {
+ hstr = string('0'+byte(hash&1)) + hstr
+ }
+ }
+ if len(hstr) > 24 {
+ hstr = hstr[len(hstr)-24:]
+ }
+ return hstr
+}
+
+// TODO: Delete when we switch to bisect-only.
+func (d *HashDebug) match(hash uint64) *hashAndMask {
+ for i, m := range d.matches {
+ if (m.hash^hash)&m.mask == 0 {
+ return &d.matches[i]
+ }
+ }
+ return nil
+}
+
+// MatchPkgFunc returns true if either the variable used to create d is
+// unset, or if its value is y, or if it is a suffix of the base-two
+// representation of the hash of pkg and fn. If the variable is not nil,
+// then a true result is accompanied by stylized output to d.logfile, which
+// is used for automated bug search.
+func (d *HashDebug) MatchPkgFunc(pkg, fn string, note func() string) bool {
+ if d == nil {
+ return true
+ }
+ // Written this way to make inlining likely.
+ return d.matchPkgFunc(pkg, fn, note)
+}
+
+func (d *HashDebug) matchPkgFunc(pkg, fn string, note func() string) bool {
+ hash := bisect.Hash(pkg, fn)
+ return d.matchAndLog(hash, func() string { return pkg + "." + fn }, note)
+}
+
+// MatchPos is similar to MatchPkgFunc, but for hash computation
+// it uses the source position including all inlining information instead of
+// package name and path.
+// Note that the default answer for no environment variable (d == nil)
+// is "yes", do the thing.
+func (d *HashDebug) MatchPos(pos src.XPos, desc func() string) bool {
+ if d == nil {
+ return true
+ }
+ // Written this way to make inlining likely.
+ return d.matchPos(Ctxt, pos, desc)
+}
+
+func (d *HashDebug) matchPos(ctxt *obj.Link, pos src.XPos, note func() string) bool {
+ return d.matchPosWithInfo(ctxt, pos, nil, note)
+}
+
+func (d *HashDebug) matchPosWithInfo(ctxt *obj.Link, pos src.XPos, info any, note func() string) bool {
+ hash := d.hashPos(ctxt, pos)
+ if info != nil {
+ hash = bisect.Hash(hash, info)
+ }
+ return d.matchAndLog(hash,
+ func() string {
+ r := d.fmtPos(ctxt, pos)
+ if info != nil {
+ r += fmt.Sprintf(" (%v)", info)
+ }
+ return r
+ },
+ note)
+}
+
+// MatchPosWithInfo is similar to MatchPos, but with additional information
+// that is included for hash computation, so it can distinguish multiple
+// matches on the same source location.
+// Note that the default answer for no environment variable (d == nil)
+// is "yes", do the thing.
+func (d *HashDebug) MatchPosWithInfo(pos src.XPos, info any, desc func() string) bool {
+ if d == nil {
+ return true
+ }
+ // Written this way to make inlining likely.
+ return d.matchPosWithInfo(Ctxt, pos, info, desc)
+}
+
+// matchAndLog is the core matcher. It reports whether the hash matches the pattern.
+// If a report needs to be printed, match prints that report to the log file.
+// The text func must be non-nil and should return a user-readable
+// representation of what was hashed. The note func may be nil; if non-nil,
+// it should return additional information to display to the user when this
+// change is selected.
+func (d *HashDebug) matchAndLog(hash uint64, text, note func() string) bool {
+ if d.bisect != nil {
+ enabled := d.bisect.ShouldEnable(hash)
+ if d.bisect.ShouldPrint(hash) {
+ disabled := ""
+ if !enabled {
+ disabled = " [DISABLED]"
+ }
+ var t string
+ if !d.bisect.MarkerOnly() {
+ t = text()
+ if note != nil {
+ if n := note(); n != "" {
+ t += ": " + n + disabled
+ disabled = ""
+ }
+ }
+ }
+ d.log(d.name, hash, strings.TrimSpace(t+disabled))
+ }
+ return enabled
+ }
+
+ // TODO: Delete rest of function body when we switch to bisect-only.
+ if d.excluded(hash) {
+ return false
+ }
+ if m := d.match(hash); m != nil {
+ d.log(m.name, hash, text())
+ return true
+ }
+ return false
+}
+
+// short returns the form of file name to use for d.
+// The default is the full path, but fileSuffixOnly selects
+// just the final path element.
+func (d *HashDebug) short(name string) string {
+ if d.fileSuffixOnly {
+ return filepath.Base(name)
+ }
+ return name
+}
+
+// hashPos returns a hash of the position pos, including its entire inline stack.
+// If d.inlineSuffixOnly is true, hashPos only considers the innermost (leaf) position on the inline stack.
+func (d *HashDebug) hashPos(ctxt *obj.Link, pos src.XPos) uint64 {
+ if d.inlineSuffixOnly {
+ p := ctxt.InnermostPos(pos)
+ return bisect.Hash(d.short(p.Filename()), p.Line(), p.Col())
+ }
+ h := bisect.Hash()
+ ctxt.AllPos(pos, func(p src.Pos) {
+ h = bisect.Hash(h, d.short(p.Filename()), p.Line(), p.Col())
+ })
+ return h
+}
+
+// fmtPos returns a textual formatting of the position pos, including its entire inline stack.
+// If d.inlineSuffixOnly is true, fmtPos only considers the innermost (leaf) position on the inline stack.
+func (d *HashDebug) fmtPos(ctxt *obj.Link, pos src.XPos) string {
+ format := func(p src.Pos) string {
+ return fmt.Sprintf("%s:%d:%d", d.short(p.Filename()), p.Line(), p.Col())
+ }
+ if d.inlineSuffixOnly {
+ return format(ctxt.InnermostPos(pos))
+ }
+ var stk []string
+ ctxt.AllPos(pos, func(p src.Pos) {
+ stk = append(stk, format(p))
+ })
+ return strings.Join(stk, "; ")
+}
+
+// log prints a match with the given hash and textual formatting.
+// TODO: Delete varname parameter when we switch to bisect-only.
+func (d *HashDebug) log(varname string, hash uint64, text string) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ file := d.logfile
+ if file == nil {
+ if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" {
+ var err error
+ file, err = os.OpenFile(tmpfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ Fatalf("could not open hash-testing logfile %s", tmpfile)
+ return
+ }
+ }
+ if file == nil {
+ file = os.Stdout
+ }
+ d.logfile = file
+ }
+
+ // Bisect output.
+ fmt.Fprintf(file, "%s %s\n", text, bisect.Marker(hash))
+
+ // Gossahash output.
+ // TODO: Delete rest of function when we switch to bisect-only.
+ fmt.Fprintf(file, "%s triggered %s %s\n", varname, text, hashString(hash))
+}
diff --git a/src/cmd/compile/internal/base/hashdebug_test.go b/src/cmd/compile/internal/base/hashdebug_test.go
new file mode 100644
index 0000000..62ef2ed
--- /dev/null
+++ b/src/cmd/compile/internal/base/hashdebug_test.go
@@ -0,0 +1,140 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "bytes"
+ "internal/bisect"
+ "strings"
+ "testing"
+)
+
+func TestHashDebugGossahashY(t *testing.T) {
+ hd := NewHashDebug("GOSSAHASH", "y", new(bytes.Buffer))
+ if hd == nil {
+ t.Errorf("NewHashDebug should not return nil for GOSSASHASH=y")
+ }
+ if !hd.MatchPkgFunc("anything", "anyfunc", nil) {
+ t.Errorf("NewHashDebug should return yes for everything for GOSSASHASH=y")
+ }
+}
+
+func TestHashDebugGossahashN(t *testing.T) {
+ hd := NewHashDebug("GOSSAHASH", "n", new(bytes.Buffer))
+ if hd == nil {
+ t.Errorf("NewHashDebug should not return nil for GOSSASHASH=n")
+ }
+ if hd.MatchPkgFunc("anything", "anyfunc", nil) {
+ t.Errorf("NewHashDebug should return no for everything for GOSSASHASH=n")
+ }
+}
+
+func TestHashDebugGossahashEmpty(t *testing.T) {
+ hd := NewHashDebug("GOSSAHASH", "", nil)
+ if hd != nil {
+ t.Errorf("NewHashDebug should return nil for GOSSASHASH=\"\"")
+ }
+}
+
+func TestHashDebugMagic(t *testing.T) {
+ hd := NewHashDebug("FOOXYZZY", "y", nil)
+ hd0 := NewHashDebug("FOOXYZZY0", "n", nil)
+ if hd == nil {
+ t.Errorf("NewHashDebug should have succeeded for FOOXYZZY")
+ }
+ if hd0 == nil {
+ t.Errorf("NewHashDebug should have succeeded for FOOXYZZY0")
+ }
+}
+
+func TestHash(t *testing.T) {
+ h0 := bisect.Hash("bar", "0")
+ h1 := bisect.Hash("bar", "1")
+ t.Logf(`These values are used in other tests: Hash("bar", "0")=%#64b, Hash("bar", "1")=%#64b`, h0, h1)
+ if h0 == h1 {
+ t.Errorf("Hashes 0x%x and 0x%x should differ", h0, h1)
+ }
+}
+
+func TestHashMatch(t *testing.T) {
+ b := new(bytes.Buffer)
+ hd := NewHashDebug("GOSSAHASH", "v1110", b)
+ check := hd.MatchPkgFunc("bar", "0", func() string { return "note" })
+ msg := b.String()
+ t.Logf("message was '%s'", msg)
+ if !check {
+ t.Errorf("GOSSAHASH=1110 should have matched for 'bar', '0'")
+ }
+ wantPrefix(t, msg, "bar.0: note [bisect-match ")
+ wantContains(t, msg, "\nGOSSAHASH triggered bar.0: note ")
+}
+
+func TestYMatch(t *testing.T) {
+ b := new(bytes.Buffer)
+ hd := NewHashDebug("GOSSAHASH", "vy", b)
+ check := hd.MatchPkgFunc("bar", "0", nil)
+ msg := b.String()
+ t.Logf("message was '%s'", msg)
+ if !check {
+ t.Errorf("GOSSAHASH=y should have matched for 'bar', '0'")
+ }
+ wantPrefix(t, msg, "bar.0 [bisect-match ")
+ wantContains(t, msg, "\nGOSSAHASH triggered bar.0 010100100011100101011110")
+}
+
+func TestNMatch(t *testing.T) {
+ b := new(bytes.Buffer)
+ hd := NewHashDebug("GOSSAHASH", "vn", b)
+ check := hd.MatchPkgFunc("bar", "0", nil)
+ msg := b.String()
+ t.Logf("message was '%s'", msg)
+ if check {
+ t.Errorf("GOSSAHASH=n should NOT have matched for 'bar', '0'")
+ }
+ wantPrefix(t, msg, "bar.0 [DISABLED] [bisect-match ")
+ wantContains(t, msg, "\nGOSSAHASH triggered bar.0 [DISABLED] 010100100011100101011110")
+}
+
+func TestHashNoMatch(t *testing.T) {
+ b := new(bytes.Buffer)
+ hd := NewHashDebug("GOSSAHASH", "01110", b)
+ check := hd.MatchPkgFunc("bar", "0", nil)
+ msg := b.String()
+ t.Logf("message was '%s'", msg)
+ if check {
+ t.Errorf("GOSSAHASH=001100 should NOT have matched for 'bar', '0'")
+ }
+ if msg != "" {
+ t.Errorf("Message should have been empty, instead %s", msg)
+ }
+
+}
+
+func TestHashSecondMatch(t *testing.T) {
+ b := new(bytes.Buffer)
+ hd := NewHashDebug("GOSSAHASH", "01110/11110", b)
+
+ check := hd.MatchPkgFunc("bar", "0", nil)
+ msg := b.String()
+ t.Logf("message was '%s'", msg)
+ if !check {
+ t.Errorf("GOSSAHASH=001100, GOSSAHASH0=0011 should have matched for 'bar', '0'")
+ }
+ wantContains(t, msg, "\nGOSSAHASH0 triggered bar")
+}
+
+func wantPrefix(t *testing.T, got, want string) {
+ t.Helper()
+ if !strings.HasPrefix(got, want) {
+ t.Errorf("want prefix %q, got:\n%s", want, got)
+ }
+}
+
+func wantContains(t *testing.T, got, want string) {
+ t.Helper()
+ if !strings.Contains(got, want) {
+ t.Errorf("want contains %q, got:\n%s", want, got)
+ }
+}
diff --git a/src/cmd/compile/internal/base/link.go b/src/cmd/compile/internal/base/link.go
new file mode 100644
index 0000000..d8aa5a7
--- /dev/null
+++ b/src/cmd/compile/internal/base/link.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "cmd/internal/obj"
+)
+
+// ReservedImports are import paths used internally for generated
+// symbols by the compiler.
+//
+// The linker uses the magic symbol prefixes "go:" and "type:".
+// Avoid potential confusion between import paths and symbols
+// by rejecting these reserved imports for now. Also, people
+// "can do weird things in GOPATH and we'd prefer they didn't
+// do _that_ weird thing" (per rsc). See also #4257.
+var ReservedImports = map[string]bool{
+ "go": true,
+ "type": true,
+}
+
+var Ctxt *obj.Link
+
+// TODO(mdempsky): These should probably be obj.Link methods.
+
+// PkgLinksym returns the linker symbol for name within the given
+// package prefix. For user packages, prefix should be the package
+// path encoded with objabi.PathToPrefix.
+func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
+ if name == "_" {
+ // TODO(mdempsky): Cleanup callers and Fatalf instead.
+ return linksym(prefix, "_", abi)
+ }
+ sep := "."
+ if ReservedImports[prefix] {
+ sep = ":"
+ }
+ return linksym(prefix, prefix+sep+name, abi)
+}
+
+// Linkname returns the linker symbol for the given name as it might
+// appear within a //go:linkname directive.
+func Linkname(name string, abi obj.ABI) *obj.LSym {
+ return linksym("_", name, abi)
+}
+
+// linksym is an internal helper function for implementing the above
+// exported APIs.
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
+ return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
+}
diff --git a/src/cmd/compile/internal/base/mapfile_mmap.go b/src/cmd/compile/internal/base/mapfile_mmap.go
new file mode 100644
index 0000000..b66c9eb
--- /dev/null
+++ b/src/cmd/compile/internal/base/mapfile_mmap.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
+
+package base
+
+import (
+ "internal/unsafeheader"
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+// TODO(mdempsky): Is there a higher-level abstraction that still
+// works well for iimport?
+
+// MapFile returns length bytes from the file starting at the
+// specified offset as a string.
+func MapFile(f *os.File, offset, length int64) (string, error) {
+ // POSIX mmap: "The implementation may require that off is a
+ // multiple of the page size."
+ x := offset & int64(os.Getpagesize()-1)
+ offset -= x
+ length += x
+
+ buf, err := syscall.Mmap(int(f.Fd()), offset, int(length), syscall.PROT_READ, syscall.MAP_SHARED)
+ runtime.KeepAlive(f)
+ if err != nil {
+ return "", err
+ }
+
+ buf = buf[x:]
+ pSlice := (*unsafeheader.Slice)(unsafe.Pointer(&buf))
+
+ var res string
+ pString := (*unsafeheader.String)(unsafe.Pointer(&res))
+
+ pString.Data = pSlice.Data
+ pString.Len = pSlice.Len
+
+ return res, nil
+}
diff --git a/src/cmd/compile/internal/base/mapfile_read.go b/src/cmd/compile/internal/base/mapfile_read.go
new file mode 100644
index 0000000..783f8c4
--- /dev/null
+++ b/src/cmd/compile/internal/base/mapfile_read.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris
+
+package base
+
+import (
+ "io"
+ "os"
+)
+
+func MapFile(f *os.File, offset, length int64) (string, error) {
+ buf := make([]byte, length)
+ _, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
new file mode 100644
index 0000000..cc36ace
--- /dev/null
+++ b/src/cmd/compile/internal/base/print.go
@@ -0,0 +1,283 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "internal/types/errors"
+ "os"
+ "runtime/debug"
+ "sort"
+ "strings"
+
+ "cmd/internal/src"
+)
+
+// An errorMsg is a queued error message, waiting to be printed.
+type errorMsg struct {
+ pos src.XPos
+ msg string
+ code errors.Code
+}
+
+// Pos is the current source position being processed,
+// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
+var Pos src.XPos
+
+var (
+ errorMsgs []errorMsg
+ numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
+ numSyntaxErrors int
+)
+
+// Errors returns the number of errors reported.
+func Errors() int {
+ return numErrors
+}
+
+// SyntaxErrors returns the number of syntax errors reported.
+func SyntaxErrors() int {
+ return numSyntaxErrors
+}
+
+// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
+func addErrorMsg(pos src.XPos, code errors.Code, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ // Only add the position if know the position.
+ // See issue golang.org/issue/11361.
+ if pos.IsKnown() {
+ msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
+ }
+ errorMsgs = append(errorMsgs, errorMsg{
+ pos: pos,
+ msg: msg + "\n",
+ code: code,
+ })
+}
+
+// FmtPos formats pos as a file:line string.
+func FmtPos(pos src.XPos) string {
+ if Ctxt == nil {
+ return "???"
+ }
+ return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
+}
+
+// byPos sorts errors by source position.
+type byPos []errorMsg
+
+func (x byPos) Len() int { return len(x) }
+func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
+func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// FlushErrors sorts errors seen so far by line number, prints them to stdout,
+// and empties the errors array.
+func FlushErrors() {
+ if Ctxt != nil && Ctxt.Bso != nil {
+ Ctxt.Bso.Flush()
+ }
+ if len(errorMsgs) == 0 {
+ return
+ }
+ sort.Stable(byPos(errorMsgs))
+ for i, err := range errorMsgs {
+ if i == 0 || err.msg != errorMsgs[i-1].msg {
+ fmt.Print(err.msg)
+ }
+ }
+ errorMsgs = errorMsgs[:0]
+}
+
+// lasterror keeps track of the most recently issued error,
+// to avoid printing multiple error messages on the same line.
+var lasterror struct {
+ syntax src.XPos // source position of last syntax error
+ other src.XPos // source position of last non-syntax error
+ msg string // error message of last non-syntax error
+}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+ p := Ctxt.PosTable.Pos(a)
+ q := Ctxt.PosTable.Pos(b)
+ return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
+// Errorf reports a formatted error at the current line.
+func Errorf(format string, args ...interface{}) {
+ ErrorfAt(Pos, 0, format, args...)
+}
+
+// ErrorfAt reports a formatted error message at pos.
+func ErrorfAt(pos src.XPos, code errors.Code, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+
+ if strings.HasPrefix(msg, "syntax error") {
+ numSyntaxErrors++
+ // only one syntax error per line, no matter what error
+ if sameline(lasterror.syntax, pos) {
+ return
+ }
+ lasterror.syntax = pos
+ } else {
+ // only one of multiple equal non-syntax errors per line
+ // (FlushErrors shows only one of them, so we filter them
+ // here as best as we can (they may not appear in order)
+ // so that we don't count them here and exit early, and
+ // then have nothing to show for.)
+ if sameline(lasterror.other, pos) && lasterror.msg == msg {
+ return
+ }
+ lasterror.other = pos
+ lasterror.msg = msg
+ }
+
+ addErrorMsg(pos, code, "%s", msg)
+ numErrors++
+
+ hcrash()
+ if numErrors >= 10 && Flag.LowerE == 0 {
+ FlushErrors()
+ fmt.Printf("%v: too many errors\n", FmtPos(pos))
+ ErrorExit()
+ }
+}
+
+// UpdateErrorDot is a clumsy hack that rewrites the last error,
+// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
+// It is used to give better error messages for dot (selector) expressions.
+func UpdateErrorDot(line string, name, expr string) {
+ if len(errorMsgs) == 0 {
+ return
+ }
+ e := &errorMsgs[len(errorMsgs)-1]
+ if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
+ e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
+ }
+}
+
+// Warn reports a formatted warning at the current line.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func Warn(format string, args ...interface{}) {
+ WarnfAt(Pos, format, args...)
+}
+
+// WarnfAt reports a formatted warning at pos.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func WarnfAt(pos src.XPos, format string, args ...interface{}) {
+ addErrorMsg(pos, 0, format, args...)
+ if Flag.LowerM != 0 {
+ FlushErrors()
+ }
+}
+
+// Fatalf reports a fatal error - an internal problem - at the current line and exits.
+// If other errors have already been printed, then Fatalf just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// Fatalf prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, Fatalf panics to force the usual runtime info dump.
+func Fatalf(format string, args ...interface{}) {
+ FatalfAt(Pos, format, args...)
+}
+
+// FatalfAt reports a fatal error - an internal problem - at pos and exits.
+// If other errors have already been printed, then FatalfAt just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// FatalfAt prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
+func FatalfAt(pos src.XPos, format string, args ...interface{}) {
+ FlushErrors()
+
+ if Debug.Panic != 0 || numErrors == 0 {
+ fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
+ fmt.Printf(format, args...)
+ fmt.Printf("\n")
+
+ // If this is a released compiler version, ask for a bug report.
+ if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") {
+ fmt.Printf("\n")
+ fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+ fmt.Printf("https://go.dev/issue/new\n")
+ } else {
+ // Not a release; dump a stack trace, too.
+ fmt.Println()
+ os.Stdout.Write(debug.Stack())
+ fmt.Println()
+ }
+ }
+
+ hcrash()
+ ErrorExit()
+}
+
+// Assert reports "assertion failed" with Fatalf, unless b is true.
+func Assert(b bool) {
+ if !b {
+ Fatalf("assertion failed")
+ }
+}
+
+// Assertf reports a fatal error with Fatalf, unless b is true.
+func Assertf(b bool, format string, args ...interface{}) {
+ if !b {
+ Fatalf(format, args...)
+ }
+}
+
+// AssertfAt reports a fatal error with FatalfAt, unless b is true.
+func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
+ if !b {
+ FatalfAt(pos, format, args...)
+ }
+}
+
+// hcrash crashes the compiler when -h is set, to find out where a message is generated.
+func hcrash() {
+ if Flag.LowerH != 0 {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ panic("-h")
+ }
+}
+
+// ErrorExit handles an error-status exit.
+// It flushes any pending errors, removes the output file, and exits.
+func ErrorExit() {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ os.Exit(2)
+}
+
+// ExitIfErrors calls ErrorExit if any errors have been reported.
+func ExitIfErrors() {
+ if Errors() > 0 {
+ ErrorExit()
+ }
+}
+
+var AutogeneratedPos src.XPos
diff --git a/src/cmd/compile/internal/base/timings.go b/src/cmd/compile/internal/base/timings.go
new file mode 100644
index 0000000..f48ac93
--- /dev/null
+++ b/src/cmd/compile/internal/base/timings.go
@@ -0,0 +1,237 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+var Timer Timings
+
+// Timings collects the execution times of labeled phases
+// which are added through a sequence of Start/Stop calls.
+// Events may be associated with each phase via AddEvent.
+type Timings struct {
+ list []timestamp
+ events map[int][]*event // lazily allocated
+}
+
+type timestamp struct {
+ time time.Time
+ label string
+ start bool
+}
+
+type event struct {
+ size int64 // count or amount of data processed (allocations, data size, lines, funcs, ...)
+ unit string // unit of size measure (count, MB, lines, funcs, ...)
+}
+
+func (t *Timings) append(labels []string, start bool) {
+ t.list = append(t.list, timestamp{time.Now(), strings.Join(labels, ":"), start})
+}
+
+// Start marks the beginning of a new phase and implicitly stops the previous phase.
+// The phase name is the colon-separated concatenation of the labels.
+func (t *Timings) Start(labels ...string) {
+ t.append(labels, true)
+}
+
+// Stop marks the end of a phase and implicitly starts a new phase.
+// The labels are added to the labels of the ended phase.
+func (t *Timings) Stop(labels ...string) {
+ t.append(labels, false)
+}
+
+// AddEvent associates an event, i.e., a count, or an amount of data,
+// with the most recently started or stopped phase; or the very first
+// phase if Start or Stop hasn't been called yet. The unit specifies
+// the unit of measurement (e.g., MB, lines, no. of funcs, etc.).
+func (t *Timings) AddEvent(size int64, unit string) {
+ m := t.events
+ if m == nil {
+ m = make(map[int][]*event)
+ t.events = m
+ }
+ i := len(t.list)
+ if i > 0 {
+ i--
+ }
+ m[i] = append(m[i], &event{size, unit})
+}
+
+// Write prints the phase times to w.
+// The prefix is printed at the start of each line.
+func (t *Timings) Write(w io.Writer, prefix string) {
+ if len(t.list) > 0 {
+ var lines lines
+
+ // group of phases with shared non-empty label prefix
+ var group struct {
+ label string // label prefix
+ tot time.Duration // accumulated phase time
+ size int // number of phases collected in group
+ }
+
+ // accumulated time between Stop/Start timestamps
+ var unaccounted time.Duration
+
+ // process Start/Stop timestamps
+ pt := &t.list[0] // previous timestamp
+ tot := t.list[len(t.list)-1].time.Sub(pt.time)
+ for i := 1; i < len(t.list); i++ {
+ qt := &t.list[i] // current timestamp
+ dt := qt.time.Sub(pt.time)
+
+ var label string
+ var events []*event
+ if pt.start {
+ // previous phase started
+ label = pt.label
+ events = t.events[i-1]
+ if qt.start {
+ // start implicitly ended previous phase; nothing to do
+ } else {
+ // stop ended previous phase; append stop labels, if any
+ if qt.label != "" {
+ label += ":" + qt.label
+ }
+ // events associated with stop replace prior events
+ if e := t.events[i]; e != nil {
+ events = e
+ }
+ }
+ } else {
+ // previous phase stopped
+ if qt.start {
+ // between a stopped and started phase; unaccounted time
+ unaccounted += dt
+ } else {
+ // previous stop implicitly started current phase
+ label = qt.label
+ events = t.events[i]
+ }
+ }
+ if label != "" {
+ // add phase to existing group, or start a new group
+ l := commonPrefix(group.label, label)
+ if group.size == 1 && l != "" || group.size > 1 && l == group.label {
+ // add to existing group
+ group.label = l
+ group.tot += dt
+ group.size++
+ } else {
+ // start a new group
+ if group.size > 1 {
+ lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
+ }
+ group.label = label
+ group.tot = dt
+ group.size = 1
+ }
+
+ // write phase
+ lines.add(prefix+label, 1, dt, tot, events)
+ }
+
+ pt = qt
+ }
+
+ if group.size > 1 {
+ lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
+ }
+
+ if unaccounted != 0 {
+ lines.add(prefix+"unaccounted", 1, unaccounted, tot, nil)
+ }
+
+ lines.add(prefix+"total", 1, tot, tot, nil)
+
+ lines.write(w)
+ }
+}
+
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return a[:i]
+}
+
+type lines [][]string
+
+func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) {
+ var line []string
+ add := func(format string, args ...interface{}) {
+ line = append(line, fmt.Sprintf(format, args...))
+ }
+
+ add("%s", label)
+ add(" %d", n)
+ add(" %d ns/op", dt)
+ add(" %.2f %%", float64(dt)/float64(tot)*100)
+
+ for _, e := range events {
+ add(" %d", e.size)
+ add(" %s", e.unit)
+ add(" %d", int64(float64(e.size)/dt.Seconds()+0.5))
+ add(" %s/s", e.unit)
+ }
+
+ *lines = append(*lines, line)
+}
+
+func (lines lines) write(w io.Writer) {
+ // determine column widths and contents
+ var widths []int
+ var number []bool
+ for _, line := range lines {
+ for i, col := range line {
+ if i < len(widths) {
+ if len(col) > widths[i] {
+ widths[i] = len(col)
+ }
+ } else {
+ widths = append(widths, len(col))
+ number = append(number, isnumber(col)) // first line determines column contents
+ }
+ }
+ }
+
+ // make column widths a multiple of align for more stable output
+ const align = 1 // set to a value > 1 to enable
+ if align > 1 {
+ for i, w := range widths {
+ w += align - 1
+ widths[i] = w - w%align
+ }
+ }
+
+ // print lines taking column widths and contents into account
+ for _, line := range lines {
+ for i, col := range line {
+ format := "%-*s"
+ if number[i] {
+ format = "%*s" // numbers are right-aligned
+ }
+ fmt.Fprintf(w, format, widths[i], col)
+ }
+ fmt.Fprintln(w)
+ }
+}
+
+func isnumber(s string) bool {
+ for _, ch := range s {
+ if ch <= ' ' {
+ continue // ignore leading whitespace
+ }
+ return '0' <= ch && ch <= '9' || ch == '.' || ch == '-' || ch == '+'
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go
new file mode 100644
index 0000000..ad7ed0a
--- /dev/null
+++ b/src/cmd/compile/internal/bitvec/bv.go
@@ -0,0 +1,201 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bitvec
+
+import (
+ "math/bits"
+
+ "cmd/compile/internal/base"
+)
+
+const (
+ wordBits = 32
+ wordMask = wordBits - 1
+ wordShift = 5
+)
+
+// A BitVec is a bit vector.
+type BitVec struct {
+ N int32 // number of bits in vector
+ B []uint32 // words holding bits
+}
+
+func New(n int32) BitVec {
+ nword := (n + wordBits - 1) / wordBits
+ return BitVec{n, make([]uint32, nword)}
+}
+
+type Bulk struct {
+ words []uint32
+ nbit int32
+ nword int32
+}
+
+func NewBulk(nbit int32, count int32) Bulk {
+ nword := (nbit + wordBits - 1) / wordBits
+ size := int64(nword) * int64(count)
+ if int64(int32(size*4)) != size*4 {
+ base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ }
+ return Bulk{
+ words: make([]uint32, size),
+ nbit: nbit,
+ nword: nword,
+ }
+}
+
+func (b *Bulk) Next() BitVec {
+ out := BitVec{b.nbit, b.words[:b.nword]}
+ b.words = b.words[b.nword:]
+ return out
+}
+
+func (bv1 BitVec) Eq(bv2 BitVec) bool {
+ if bv1.N != bv2.N {
+ base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
+ }
+ for i, x := range bv1.B {
+ if x != bv2.B[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (dst BitVec) Copy(src BitVec) {
+ copy(dst.B, src.B)
+}
+
+func (bv BitVec) Get(i int32) bool {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ return bv.B[i>>wordShift]&mask != 0
+}
+
+func (bv BitVec) Set(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] |= mask
+}
+
+func (bv BitVec) Unset(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] &^= mask
+}
+
+// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
+// If there is no such index, bvnext returns -1.
+func (bv BitVec) Next(i int32) int32 {
+ if i >= bv.N {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
+ i &^= wordMask
+ i += wordBits
+ for i < bv.N && bv.B[i>>wordShift] == 0 {
+ i += wordBits
+ }
+ }
+
+ if i >= bv.N {
+ return -1
+ }
+
+ // Find 1 bit.
+ w := bv.B[i>>wordShift] >> uint(i&wordMask)
+ i += int32(bits.TrailingZeros32(w))
+
+ return i
+}
+
+func (bv BitVec) IsEmpty() bool {
+ for _, x := range bv.B {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (bv BitVec) Count() int {
+ n := 0
+ for _, x := range bv.B {
+ n += bits.OnesCount32(x)
+ }
+ return n
+}
+
+func (bv BitVec) Not() {
+ for i, x := range bv.B {
+ bv.B[i] = ^x
+ }
+ if bv.N%wordBits != 0 {
+ bv.B[len(bv.B)-1] &= 1<<uint(bv.N%wordBits) - 1 // clear bits past N in the last word
+ }
+}
+
+// union
+func (dst BitVec) Or(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x | src2.B[i]
+ }
+}
+
+// intersection
+func (dst BitVec) And(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x & src2.B[i]
+ }
+}
+
+// difference
+func (dst BitVec) AndNot(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x &^ src2.B[i]
+ }
+}
+
+func (bv BitVec) String() string {
+ s := make([]byte, 2+bv.N)
+ copy(s, "#*")
+ for i := int32(0); i < bv.N; i++ {
+ ch := byte('0')
+ if bv.Get(i) {
+ ch = '1'
+ }
+ s[2+i] = ch
+ }
+ return string(s)
+}
+
+func (bv BitVec) Clear() {
+ for i := range bv.B {
+ bv.B[i] = 0
+ }
+}
diff --git a/src/cmd/compile/internal/compare/compare.go b/src/cmd/compile/internal/compare/compare.go
new file mode 100644
index 0000000..e165cd6
--- /dev/null
+++ b/src/cmd/compile/internal/compare/compare.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package compare contains code for generating comparison
+// routines for structs, strings and interfaces.
+package compare
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "fmt"
+ "math/bits"
+ "sort"
+)
+
+// IsRegularMemory reports whether t can be compared/hashed as regular memory.
+func IsRegularMemory(t *types.Type) bool {
+ a, _ := types.AlgType(t)
+ return a == types.AMEM
+}
+
+// Memrun finds runs of struct fields for which memory-only algs are appropriate.
+// t is the parent struct type, and start is the field index at which to start the run.
+// size is the length in bytes of the memory included in the run.
+// next is the index just after the end of the memory run.
+func Memrun(t *types.Type, start int) (size int64, next int) {
+ next = start
+ for {
+ next++
+ if next == t.NumFields() {
+ break
+ }
+ // Stop run after a padded field.
+ if types.IsPaddedField(t, next-1) {
+ break
+ }
+ // Also, stop before a blank or non-memory field.
+ if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
+ break
+ }
+ // For issue 46283, don't combine fields if the resulting load would
+ // require a larger alignment than the component fields.
+ if base.Ctxt.Arch.Alignment > 1 {
+ align := t.Alignment()
+ if off := t.Field(start).Offset; off&(align-1) != 0 {
+ // Offset is less aligned than the containing type.
+ // Use offset to determine alignment.
+ align = 1 << uint(bits.TrailingZeros64(uint64(off)))
+ }
+ size := t.Field(next).End() - t.Field(start).Offset
+ if size > align {
+ break
+ }
+ }
+ }
+ return t.Field(next-1).End() - t.Field(start).Offset, next
+}
+
+// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// t must be comparable.
+func EqCanPanic(t *types.Type) bool {
+ switch t.Kind() {
+ default:
+ return false
+ case types.TINTER:
+ return true
+ case types.TARRAY:
+ return EqCanPanic(t.Elem())
+ case types.TSTRUCT:
+ for _, f := range t.Fields() {
+ if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// EqStructCost returns the cost of an equality comparison of two structs.
+//
+// The cost is determined using an algorithm which takes into consideration
+// the size of the registers in the current architecture and the size of the
+// memory-only fields in the struct.
+func EqStructCost(t *types.Type) int64 {
+ cost := int64(0)
+
+ for i, fields := 0, t.Fields(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank-named fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ n, _, next := eqStructFieldCost(t, i)
+
+ cost += n
+ i = next
+ }
+
+ return cost
+}
+
+// eqStructFieldCost returns the cost of an equality comparison of two struct fields.
+// t is the parent struct type, and i is the index of the field in the parent struct type.
+// eqStructFieldCost may compute the cost of several adjacent fields at once. It returns
+// the cost, the size of the set of fields it computed the cost for (in bytes), and the
+// index of the first field not part of the set of fields for which the cost
+// has already been calculated.
+func eqStructFieldCost(t *types.Type, i int) (int64, int64, int) {
+ var (
+ cost = int64(0)
+ regSize = int64(types.RegSize)
+
+ size int64
+ next int
+ )
+
+ if base.Ctxt.Arch.CanMergeLoads {
+ // If we can merge adjacent loads then we can calculate the cost of the
+ // comparison using the size of the memory run and the size of the registers.
+ size, next = Memrun(t, i)
+ cost = size / regSize
+ if size%regSize != 0 {
+ cost++
+ }
+ return cost, size, next
+ }
+
+ // If we cannot merge adjacent loads then we have to use the size of the
+ // field and take into account the type to determine how many loads and compares
+ // are needed.
+ ft := t.Field(i).Type
+ size = ft.Size()
+ next = i + 1
+
+ return calculateCostForType(ft), size, next
+}
+
+func calculateCostForType(t *types.Type) int64 {
+ var cost int64
+ switch t.Kind() {
+ case types.TSTRUCT:
+ return EqStructCost(t)
+ case types.TSLICE:
+ // Slices are not comparable.
+ base.Fatalf("eqStructFieldCost: unexpected slice type")
+ case types.TARRAY:
+ elemCost := calculateCostForType(t.Elem())
+ cost = t.NumElem() * elemCost
+ case types.TSTRING, types.TINTER, types.TCOMPLEX64, types.TCOMPLEX128:
+ cost = 2
+ case types.TINT64, types.TUINT64:
+ cost = 8 / int64(types.RegSize)
+ default:
+ cost = 1
+ }
+ return cost
+}
+
+// EqStruct compares two structs np and nq for equality.
+// It works by building a list of boolean conditions to satisfy.
+// Conditions must be evaluated in the returned order and
+// properly short-circuited by the caller.
+// The first return value is the flattened list of conditions,
+// the second value is a boolean indicating whether any of the
+// comparisons could panic.
+func EqStruct(t *types.Type, np, nq ir.Node) ([]ir.Node, bool) {
+ // The conditions are a list-of-lists. Conditions are reorderable
+ // within each inner list. The outer lists must be evaluated in order.
+ var conds [][]ir.Node
+ conds = append(conds, []ir.Node{})
+ and := func(n ir.Node) {
+ i := len(conds) - 1
+ conds[i] = append(conds[i], n)
+ }
+
+ // Walk the struct using memequal for runs of AMEM
+ // and calling specific equality tests for the others.
+ for i, fields := 0, t.Fields(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank-named fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ typeCanPanic := EqCanPanic(f.Type)
+
+ // Compare non-memory fields with field equality.
+ if !IsRegularMemory(f.Type) {
+ if typeCanPanic {
+ // Enforce ordering by starting a new set of reorderable conditions.
+ conds = append(conds, []ir.Node{})
+ }
+ switch {
+ case f.Type.IsString():
+ p := typecheck.DotField(base.Pos, typecheck.Expr(np), i)
+ q := typecheck.DotField(base.Pos, typecheck.Expr(nq), i)
+ eqlen, eqmem := EqString(p, q)
+ and(eqlen)
+ and(eqmem)
+ default:
+ and(eqfield(np, nq, i))
+ }
+ if typeCanPanic {
+ // Also enforce ordering after something that can panic.
+ conds = append(conds, []ir.Node{})
+ }
+ i++
+ continue
+ }
+
+ cost, size, next := eqStructFieldCost(t, i)
+ if cost <= 4 {
+ // Cost of 4 or less: use plain field equality.
+ for j := i; j < next; j++ {
+ and(eqfield(np, nq, j))
+ }
+ } else {
+ // Higher cost: use memequal.
+ cc := eqmem(np, nq, i, size)
+ and(cc)
+ }
+ i = next
+ }
+
+ // Sort conditions to put runtime calls last.
+ // Preserve the rest of the ordering.
+ var flatConds []ir.Node
+ for _, c := range conds {
+ isCall := func(n ir.Node) bool {
+ return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
+ }
+ sort.SliceStable(c, func(i, j int) bool {
+ return !isCall(c[i]) && isCall(c[j])
+ })
+ flatConds = append(flatConds, c...)
+ }
+ return flatConds, len(conds) > 1
+}
+
+// EqString returns the nodes
+//
+// len(s) == len(t)
+//
+// and
+//
+// memequal(s.ptr, t.ptr, len(s))
+//
+// which can be used to construct string equality comparison.
+// eqlen must be evaluated before eqmem, and shortcircuiting is required.
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+ s = typecheck.Conv(s, types.Types[types.TSTRING])
+ t = typecheck.Conv(t, types.Types[types.TSTRING])
+ sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+ tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
+ slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
+ tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
+
+ // Pick the 3rd arg to memequal. Both slen and tlen are fine to use, because we short
+ // circuit the memequal call if they aren't the same. But if one is a constant some
+ // memequal optimizations are easier to apply.
+ probablyConstant := func(n ir.Node) bool {
+ if n.Op() == ir.OCONVNOP {
+ n = n.(*ir.ConvExpr).X
+ }
+ if n.Op() == ir.OLITERAL {
+ return true
+ }
+ if n.Op() != ir.ONAME {
+ return false
+ }
+ name := n.(*ir.Name)
+ if name.Class != ir.PAUTO {
+ return false
+ }
+ if def := name.Defn; def == nil {
+ // n starts out as the empty string
+ return true
+ } else if def.Op() == ir.OAS && (def.(*ir.AssignStmt).Y == nil || def.(*ir.AssignStmt).Y.Op() == ir.OLITERAL) {
+ // n starts out as a constant string
+ return true
+ }
+ return false
+ }
+ cmplen := slen
+ if probablyConstant(t) && !probablyConstant(s) {
+ cmplen = tlen
+ }
+
+ fn := typecheck.LookupRuntime("memequal", types.Types[types.TUINT8], types.Types[types.TUINT8])
+ call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(cmplen)}, false).(*ir.CallExpr)
+
+ cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
+ cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
+ cmp.SetType(types.Types[types.TBOOL])
+ return cmp, call
+}
+
+// EqInterface returns the nodes
+//
+// s.tab == t.tab (or s.typ == t.typ, as appropriate)
+//
+// and
+//
+// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+//
+// which can be used to construct interface equality comparison.
+// eqtab must be evaluated before eqdata, and shortcircuiting is required.
+func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+ if !types.Identical(s.Type(), t.Type()) {
+ base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
+ }
+ // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+ // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+ var fn ir.Node
+ if s.Type().IsEmptyInterface() {
+ fn = typecheck.LookupRuntime("efaceeq")
+ } else {
+ fn = typecheck.LookupRuntime("ifaceeq")
+ }
+
+ stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
+ ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
+ sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
+ tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
+ sdata.SetType(types.Types[types.TUNSAFEPTR])
+ tdata.SetType(types.Types[types.TUNSAFEPTR])
+ sdata.SetTypecheck(1)
+ tdata.SetTypecheck(1)
+
+ call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)
+
+ cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
+ cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
+ cmp.SetType(types.Types[types.TBOOL])
+ return cmp, call
+}
+
+// eqfield returns the node
+//
+// p.field == q.field
+func eqfield(p, q ir.Node, field int) ir.Node {
+ nx := typecheck.DotField(base.Pos, typecheck.Expr(p), field)
+ ny := typecheck.DotField(base.Pos, typecheck.Expr(q), field)
+ return typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny))
+}
+
+// eqmem returns the node
+//
+// memequal(&p.field, &q.field, size)
+func eqmem(p, q ir.Node, field int, size int64) ir.Node {
+ nx := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, p, field)))
+ ny := typecheck.Expr(typecheck.NodAddr(typecheck.DotField(base.Pos, q, field)))
+
+ fn, needsize := eqmemfunc(size, nx.Type().Elem())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args.Append(nx)
+ call.Args.Append(ny)
+ if needsize {
+ call.Args.Append(ir.NewInt(base.Pos, size))
+ }
+
+ return call
+}
+
+func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
+ switch size {
+ case 1, 2, 4, 8, 16:
+ buf := fmt.Sprintf("memequal%d", int(size)*8)
+ return typecheck.LookupRuntime(buf, t, t), false
+ }
+
+ return typecheck.LookupRuntime("memequal", t, t), true
+}
diff --git a/src/cmd/compile/internal/compare/compare_test.go b/src/cmd/compile/internal/compare/compare_test.go
new file mode 100644
index 0000000..2f76165
--- /dev/null
+++ b/src/cmd/compile/internal/compare/compare_test.go
@@ -0,0 +1,101 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package compare
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "testing"
+)
+
+type typefn func() *types.Type
+
+func init() {
+ // These are the few constants that need to be initialized in order to use
+ // the types package without using the typecheck package by calling
+ // typecheck.InitUniverse() (the normal way to initialize the types package).
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+ typecheck.InitUniverse()
+ base.Ctxt = &obj.Link{Arch: &obj.LinkArch{Arch: &sys.Arch{Alignment: 1, CanMergeLoads: true}}}
+}
+
+func TestEqStructCost(t *testing.T) {
+ repeat := func(n int, typ *types.Type) []*types.Type {
+ typs := make([]*types.Type, n)
+ for i := range typs {
+ typs[i] = typ
+ }
+ return typs
+ }
+
+ tt := []struct {
+ name string
+ cost int64
+ nonMergeLoadCost int64
+ fieldTypes []*types.Type
+ }{
+ {"struct without fields", 0, 0, nil},
+ {"struct with 1 byte field", 1, 1, repeat(1, types.ByteType)},
+ {"struct with 8 byte fields", 1, 8, repeat(8, types.ByteType)},
+ {"struct with 16 byte fields", 2, 16, repeat(16, types.ByteType)},
+ {"struct with 32 byte fields", 4, 32, repeat(32, types.ByteType)},
+ {"struct with 2 int32 fields", 1, 2, repeat(2, types.Types[types.TINT32])},
+ {"struct with 2 int32 fields and 1 int64", 2, 3,
+ []*types.Type{
+ types.Types[types.TINT32],
+ types.Types[types.TINT32],
+ types.Types[types.TINT64],
+ },
+ },
+ {"struct with 1 int field and 1 string", 3, 3,
+ []*types.Type{
+ types.Types[types.TINT64],
+ types.Types[types.TSTRING],
+ },
+ },
+ {"struct with 2 strings", 4, 4, repeat(2, types.Types[types.TSTRING])},
+ {"struct with 1 large byte array field", 26, 101,
+ []*types.Type{
+ types.NewArray(types.Types[types.TUINT16], 101),
+ },
+ },
+ {"struct with string array field", 4, 4,
+ []*types.Type{
+ types.NewArray(types.Types[types.TSTRING], 2),
+ },
+ },
+ }
+
+ for _, tc := range tt {
+ t.Run(tc.name, func(t *testing.T) {
+ fields := make([]*types.Field, len(tc.fieldTypes))
+ for i, ftyp := range tc.fieldTypes {
+ fields[i] = types.NewField(src.NoXPos, typecheck.LookupNum("f", i), ftyp)
+ }
+ typ := types.NewStruct(fields)
+ types.CalcSize(typ)
+
+ want := tc.cost
+ base.Ctxt.Arch.CanMergeLoads = true
+ actual := EqStructCost(typ)
+ if actual != want {
+ t.Errorf("CanMergeLoads=true EqStructCost(%v) = %d, want %d", typ, actual, want)
+ }
+
+ base.Ctxt.Arch.CanMergeLoads = false
+ want = tc.nonMergeLoadCost
+ actual = EqStructCost(typ)
+ if actual != want {
+ t.Errorf("CanMergeLoads=false EqStructCost(%v) = %d, want %d", typ, actual, want)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/coverage/cover.go b/src/cmd/compile/internal/coverage/cover.go
new file mode 100644
index 0000000..5320f00
--- /dev/null
+++ b/src/cmd/compile/internal/coverage/cover.go
@@ -0,0 +1,200 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package coverage
+
+// This package contains support routines for coverage "fixup" in the
+// compiler, which happens when compiling a package whose source code
+// has been run through "cmd/cover" to add instrumentation. The two
+// important entry points are FixupVars (called prior to package init
+// generation) and FixupInit (called following package init
+// generation).
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "internal/coverage"
+ "strconv"
+ "strings"
+)
+
+// names records state information collected in the first fixup
+// phase so that it can be passed to the second fixup phase.
+type names struct {
+ MetaVar *ir.Name
+ PkgIdVar *ir.Name
+ InitFn *ir.Func
+ CounterMode coverage.CounterMode
+ CounterGran coverage.CounterGranularity
+}
+
+// Fixup adds calls to the pkg init function as appropriate to
+// register coverage-related variables with the runtime.
+//
+// It also reclassifies selected variables (for example, tagging
+// coverage counter variables with flags so that they can be handled
+// properly downstream).
+func Fixup() {
+ if base.Flag.Cfg.CoverageInfo == nil {
+ return // not using coverage
+ }
+
+ metaVarName := base.Flag.Cfg.CoverageInfo.MetaVar
+ pkgIdVarName := base.Flag.Cfg.CoverageInfo.PkgIdVar
+ counterMode := base.Flag.Cfg.CoverageInfo.CounterMode
+ counterGran := base.Flag.Cfg.CoverageInfo.CounterGranularity
+ counterPrefix := base.Flag.Cfg.CoverageInfo.CounterPrefix
+ var metavar *ir.Name
+ var pkgidvar *ir.Name
+
+ ckTypSanity := func(nm *ir.Name, tag string) {
+ if nm.Type() == nil || nm.Type().HasPointers() {
+ base.Fatalf("unsuitable %s %q mentioned in coveragecfg, improper type '%v'", tag, nm.Sym().Name, nm.Type())
+ }
+ }
+
+ for _, nm := range typecheck.Target.Externs {
+ s := nm.Sym()
+ switch s.Name {
+ case metaVarName:
+ metavar = nm
+ ckTypSanity(nm, "metavar")
+ nm.MarkReadonly()
+ continue
+ case pkgIdVarName:
+ pkgidvar = nm
+ ckTypSanity(nm, "pkgidvar")
+ nm.SetCoverageAuxVar(true)
+ s := nm.Linksym()
+ s.Type = objabi.SCOVERAGE_AUXVAR
+ continue
+ }
+ if strings.HasPrefix(s.Name, counterPrefix) {
+ ckTypSanity(nm, "countervar")
+ nm.SetCoverageCounter(true)
+ s := nm.Linksym()
+ s.Type = objabi.SCOVERAGE_COUNTER
+ }
+ }
+ cm := coverage.ParseCounterMode(counterMode)
+ if cm == coverage.CtrModeInvalid {
+ base.Fatalf("bad setting %q for covermode in coveragecfg:",
+ counterMode)
+ }
+ var cg coverage.CounterGranularity
+ switch counterGran {
+ case "perblock":
+ cg = coverage.CtrGranularityPerBlock
+ case "perfunc":
+ cg = coverage.CtrGranularityPerFunc
+ default:
+ base.Fatalf("bad setting %q for covergranularity in coveragecfg:",
+ counterGran)
+ }
+
+ cnames := names{
+ MetaVar: metavar,
+ PkgIdVar: pkgidvar,
+ CounterMode: cm,
+ CounterGran: cg,
+ }
+
+ for _, fn := range typecheck.Target.Funcs {
+ if ir.FuncName(fn) == "init" {
+ cnames.InitFn = fn
+ break
+ }
+ }
+ if cnames.InitFn == nil {
+ panic("unexpected (no init func for -cover build)")
+ }
+
+ hashv, len := metaHashAndLen()
+ if cnames.CounterMode != coverage.CtrModeTestMain {
+ registerMeta(cnames, hashv, len)
+ }
+ if base.Ctxt.Pkgpath == "main" {
+ addInitHookCall(cnames.InitFn, cnames.CounterMode)
+ }
+}
+
+func metaHashAndLen() ([16]byte, int) {
+
+ // Read meta-data hash from config entry.
+ mhash := base.Flag.Cfg.CoverageInfo.MetaHash
+ if len(mhash) != 32 {
+ base.Fatalf("unexpected: got metahash length %d want 32", len(mhash))
+ }
+ var hv [16]byte
+ for i := 0; i < 16; i++ {
+ nib := string(mhash[i*2 : i*2+2])
+ x, err := strconv.ParseInt(nib, 16, 32)
+ if err != nil {
+ base.Fatalf("metahash bad byte %q", nib)
+ }
+ hv[i] = byte(x)
+ }
+
+ // Return hash and meta-data len
+ return hv, base.Flag.Cfg.CoverageInfo.MetaLen
+}
+
+func registerMeta(cnames names, hashv [16]byte, mdlen int) {
+ // Materialize expression for hash (an array literal)
+ pos := cnames.InitFn.Pos()
+ elist := make([]ir.Node, 0, 16)
+ for i := 0; i < 16; i++ {
+ elem := ir.NewInt(base.Pos, int64(hashv[i]))
+ elist = append(elist, elem)
+ }
+ ht := types.NewArray(types.Types[types.TUINT8], 16)
+ hashx := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ht, elist)
+
+ // Materalize expression corresponding to address of the meta-data symbol.
+ mdax := typecheck.NodAddr(cnames.MetaVar)
+ mdauspx := typecheck.ConvNop(mdax, types.Types[types.TUNSAFEPTR])
+
+ // Materialize expression for length.
+ lenx := ir.NewInt(base.Pos, int64(mdlen)) // untyped
+
+ // Generate a call to runtime.addCovMeta, e.g.
+ //
+ // pkgIdVar = runtime.addCovMeta(&sym, len, hash, pkgpath, pkid, cmode, cgran)
+ //
+ fn := typecheck.LookupRuntime("addCovMeta")
+ pkid := coverage.HardCodedPkgID(base.Ctxt.Pkgpath)
+ pkIdNode := ir.NewInt(base.Pos, int64(pkid))
+ cmodeNode := ir.NewInt(base.Pos, int64(cnames.CounterMode))
+ cgranNode := ir.NewInt(base.Pos, int64(cnames.CounterGran))
+ pkPathNode := ir.NewString(base.Pos, base.Ctxt.Pkgpath)
+ callx := typecheck.Call(pos, fn, []ir.Node{mdauspx, lenx, hashx,
+ pkPathNode, pkIdNode, cmodeNode, cgranNode}, false)
+ assign := callx
+ if pkid == coverage.NotHardCoded {
+ assign = typecheck.Stmt(ir.NewAssignStmt(pos, cnames.PkgIdVar, callx))
+ }
+
+ // Tack the call onto the start of our init function. We do this
+ // early in the init since it's possible that instrumented function
+ // bodies (with counter updates) might be inlined into init.
+ cnames.InitFn.Body.Prepend(assign)
+}
+
+// addInitHookCall generates a call to runtime/coverage.initHook() and
+// inserts it into the package main init function, which will kick off
+// the process for coverage data writing (emit meta data, and register
+// an exit hook to emit counter data).
+func addInitHookCall(initfn *ir.Func, cmode coverage.CounterMode) {
+ typecheck.InitCoverage()
+ pos := initfn.Pos()
+ istest := cmode == coverage.CtrModeTestMain
+ initf := typecheck.LookupCoverage("initHook")
+ istestNode := ir.NewBool(base.Pos, istest)
+ args := []ir.Node{istestNode}
+ callx := typecheck.Call(pos, initf, args, false)
+ initfn.Body.Append(callx)
+}
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
new file mode 100644
index 0000000..5d1b952
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -0,0 +1,140 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package devirtualize implements two "devirtualization" optimization passes:
+//
+// - "Static" devirtualization which replaces interface method calls with
+// direct concrete-type method calls where possible.
+// - "Profile-guided" devirtualization which replaces indirect calls with a
+// conditional direct call to the hottest concrete callee from a profile, as
+// well as a fallback using the original indirect call.
+package devirtualize
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// StaticCall devirtualizes the given call if possible when the concrete callee
+// is available statically.
+func StaticCall(call *ir.CallExpr) {
+ // For promoted methods (including value-receiver methods promoted
+ // to pointer-receivers), the interface method wrapper may contain
+ // expressions that can panic (e.g., ODEREF, ODOTPTR,
+ // ODOTINTER). Devirtualization involves inlining these expressions
+ // (and possible panics) to the call site. This normally isn't a
+ // problem, but for go/defer statements it can move the panic from
+ // when/where the call executes to the go/defer statement itself,
+ // which is a visible change in semantics (e.g., #52072). To prevent
+ // this, we skip devirtualizing calls within go/defer statements
+ // altogether.
+ if call.GoDefer {
+ return
+ }
+
+ if call.Op() != ir.OCALLINTER {
+ return
+ }
+
+ sel := call.Fun.(*ir.SelectorExpr)
+ r := ir.StaticValue(sel.X)
+ if r.Op() != ir.OCONVIFACE {
+ return
+ }
+ recv := r.(*ir.ConvExpr)
+
+ typ := recv.X.Type()
+ if typ.IsInterface() {
+ return
+ }
+
+ // If typ is a shape type, then it was a type argument originally
+ // and we'd need an indirect call through the dictionary anyway.
+ // We're unable to devirtualize this call.
+ if typ.IsShape() {
+ return
+ }
+
+ // If typ *has* a shape type, then it's a shaped, instantiated
+ // type like T[go.shape.int], and its methods (may) have an extra
+ // dictionary parameter. We could devirtualize this call if we
+ // could derive an appropriate dictionary argument.
+ //
+ // TODO(mdempsky): If typ has has a promoted non-generic method,
+ // then that method won't require a dictionary argument. We could
+ // still devirtualize those calls.
+ //
+ // TODO(mdempsky): We have the *runtime.itab in recv.TypeWord. It
+ // should be possible to compute the represented type's runtime
+ // dictionary from this (e.g., by adding a pointer from T[int]'s
+ // *runtime._type to .dict.T[int]; or by recognizing static
+ // references to go:itab.T[int],iface and constructing a direct
+ // reference to .dict.T[int]).
+ if typ.HasShape() {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "cannot devirtualize %v: shaped receiver %v", call, typ)
+ }
+ return
+ }
+
+ // Further, if sel.X's type has a shape type, then it's a shaped
+ // interface type. In this case, the (non-dynamic) TypeAssertExpr
+ // we construct below would attempt to create an itab
+ // corresponding to this shaped interface type; but the actual
+ // itab pointer in the interface value will correspond to the
+ // original (non-shaped) interface type instead. These are
+ // functionally equivalent, but they have distinct pointer
+ // identities, which leads to the type assertion failing.
+ //
+ // TODO(mdempsky): We know the type assertion here is safe, so we
+ // could instead set a flag so that walk skips the itab check. For
+ // now, punting is easy and safe.
+ if sel.X.Type().HasShape() {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "cannot devirtualize %v: shaped interface %v", call, sel.X.Type())
+ }
+ return
+ }
+
+ dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
+ dt.SetType(typ)
+ x := typecheck.XDotMethod(sel.Pos(), dt, sel.Sel, true)
+ switch x.Op() {
+ case ir.ODOTMETH:
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
+ }
+ call.SetOp(ir.OCALLMETH)
+ call.Fun = x
+ case ir.ODOTINTER:
+ // Promoted method from embedded interface-typed field (#42279).
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
+ }
+ call.SetOp(ir.OCALLINTER)
+ call.Fun = x
+ default:
+ base.FatalfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
+ }
+
+ // Duplicated logic from typecheck for function call return
+ // value types.
+ //
+ // Receiver parameter size may have changed; need to update
+ // call.Type to get correct stack offsets for result
+ // parameters.
+ types.CheckSize(x.Type())
+ switch ft := x.Type(); ft.NumResults() {
+ case 0:
+ case 1:
+ call.SetType(ft.Result(0).Type)
+ default:
+ call.SetType(ft.ResultsTuple())
+ }
+
+ // Desugar OCALLMETH, if we created one (#57309).
+ typecheck.FixMethodCall(call)
+}
diff --git a/src/cmd/compile/internal/devirtualize/pgo.go b/src/cmd/compile/internal/devirtualize/pgo.go
new file mode 100644
index 0000000..170bf74
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/pgo.go
@@ -0,0 +1,820 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package devirtualize
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "encoding/json"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// CallStat summarizes a single call site.
+//
+// This is used only for debug logging.
+type CallStat struct {
+ Pkg string // base.Ctxt.Pkgpath
+ Pos string // file:line:col of call.
+
+ Caller string // Linker symbol name of calling function.
+
+ // Direct or indirect call.
+ Direct bool
+
+ // For indirect calls, interface call or other indirect function call.
+ Interface bool
+
+ // Total edge weight from this call site.
+ Weight int64
+
+ // Hottest callee from this call site, regardless of type
+ // compatibility.
+ Hottest string
+ HottestWeight int64
+
+ // Devirtualized callee if != "".
+ //
+ // Note that this may be different than Hottest because we apply
+ // type-check restrictions, which helps distinguish multiple calls on
+ // the same line.
+ Devirtualized string
+ DevirtualizedWeight int64
+}
+
+// ProfileGuided performs call devirtualization of indirect calls based on
+// profile information.
+//
+// Specifically, it performs conditional devirtualization of interface calls or
+// function value calls for the hottest callee.
+//
+// That is, for interface calls it performs a transformation like:
+//
+// type Iface interface {
+// Foo()
+// }
+//
+// type Concrete struct{}
+//
+// func (Concrete) Foo() {}
+//
+// func foo(i Iface) {
+// i.Foo()
+// }
+//
+// to:
+//
+// func foo(i Iface) {
+// if c, ok := i.(Concrete); ok {
+// c.Foo()
+// } else {
+// i.Foo()
+// }
+// }
+//
+// For function value calls it performs a transformation like:
+//
+// func Concrete() {}
+//
+// func foo(fn func()) {
+// fn()
+// }
+//
+// to:
+//
+// func foo(fn func()) {
+// if internal/abi.FuncPCABIInternal(fn) == internal/abi.FuncPCABIInternal(Concrete) {
+// Concrete()
+// } else {
+// fn()
+// }
+// }
+//
+// The primary benefit of this transformation is enabling inlining of the
+// direct call.
+func ProfileGuided(fn *ir.Func, p *pgo.Profile) {
+ ir.CurFunc = fn
+
+ name := ir.LinkFuncName(fn)
+
+ var jsonW *json.Encoder
+ if base.Debug.PGODebug >= 3 {
+ jsonW = json.NewEncoder(os.Stdout)
+ }
+
+ var edit func(n ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ if n == nil {
+ return n
+ }
+
+ ir.EditChildren(n, edit)
+
+ call, ok := n.(*ir.CallExpr)
+ if !ok {
+ return n
+ }
+
+ var stat *CallStat
+ if base.Debug.PGODebug >= 3 {
+ // Statistics about every single call. Handy for external data analysis.
+ //
+ // TODO(prattmic): Log via logopt?
+ stat = constructCallStat(p, fn, name, call)
+ if stat != nil {
+ defer func() {
+ jsonW.Encode(&stat)
+ }()
+ }
+ }
+
+ op := call.Op()
+ if op != ir.OCALLFUNC && op != ir.OCALLINTER {
+ return n
+ }
+
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: PGO devirtualize considering call %v\n", ir.Line(call), call)
+ }
+
+ if call.GoDefer {
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: can't PGO devirtualize go/defer call %v\n", ir.Line(call), call)
+ }
+ return n
+ }
+
+ var newNode ir.Node
+ var callee *ir.Func
+ var weight int64
+ switch op {
+ case ir.OCALLFUNC:
+ newNode, callee, weight = maybeDevirtualizeFunctionCall(p, fn, call)
+ case ir.OCALLINTER:
+ newNode, callee, weight = maybeDevirtualizeInterfaceCall(p, fn, call)
+ default:
+ panic("unreachable")
+ }
+
+ if newNode == nil {
+ return n
+ }
+
+ if stat != nil {
+ stat.Devirtualized = ir.LinkFuncName(callee)
+ stat.DevirtualizedWeight = weight
+ }
+
+ return newNode
+ }
+
+ ir.EditChildren(fn, edit)
+}
+
+// Devirtualize interface call if possible and eligible. Returns the new
+// ir.Node if call was devirtualized, and if so also the callee and weight of
+// the devirtualized edge.
+func maybeDevirtualizeInterfaceCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) {
+ if base.Debug.PGODevirtualize < 1 {
+ return nil, nil, 0
+ }
+
+ // Bail if we do not have a hot callee.
+ callee, weight := findHotConcreteInterfaceCallee(p, fn, call)
+ if callee == nil {
+ return nil, nil, 0
+ }
+ // Bail if we do not have a Type node for the hot callee.
+ ctyp := methodRecvType(callee)
+ if ctyp == nil {
+ return nil, nil, 0
+ }
+ // Bail if we know for sure it won't inline.
+ if !shouldPGODevirt(callee) {
+ return nil, nil, 0
+ }
+ // Bail if de-selected by PGO Hash.
+ if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) {
+ return nil, nil, 0
+ }
+
+ return rewriteInterfaceCall(call, fn, callee, ctyp), callee, weight
+}
+
+// Devirtualize an indirect function call if possible and eligible. Returns the new
+// ir.Node if call was devirtualized, and if so also the callee and weight of
+// the devirtualized edge.
+func maybeDevirtualizeFunctionCall(p *pgo.Profile, fn *ir.Func, call *ir.CallExpr) (ir.Node, *ir.Func, int64) {
+ if base.Debug.PGODevirtualize < 2 {
+ return nil, nil, 0
+ }
+
+ // Bail if this is a direct call; no devirtualization necessary.
+ callee := pgo.DirectCallee(call.Fun)
+ if callee != nil {
+ return nil, nil, 0
+ }
+
+ // Bail if we do not have a hot callee.
+ callee, weight := findHotConcreteFunctionCallee(p, fn, call)
+ if callee == nil {
+ return nil, nil, 0
+ }
+
+ // TODO(go.dev/issue/61577): Closures need the closure context passed
+ // via the context register. That requires extra plumbing that we
+ // haven't done yet.
+ if callee.OClosure != nil {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("callee %s is a closure, skipping\n", ir.FuncName(callee))
+ }
+ return nil, nil, 0
+ }
+ // runtime.memhash_varlen does not look like a closure, but it uses
+ // runtime.getclosureptr to access data encoded by callers, which are
+ // are generated by cmd/compile/internal/reflectdata.genhash.
+ if callee.Sym().Pkg.Path == "runtime" && callee.Sym().Name == "memhash_varlen" {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("callee %s is a closure (runtime.memhash_varlen), skipping\n", ir.FuncName(callee))
+ }
+ return nil, nil, 0
+ }
+ // TODO(prattmic): We don't properly handle methods as callees in two
+ // different dimensions:
+ //
+ // 1. Method expressions. e.g.,
+ //
+ // var fn func(*os.File, []byte) (int, error) = (*os.File).Read
+ //
+ // In this case, typ will report *os.File as the receiver while
+ // ctyp reports it as the first argument. types.Identical ignores
+ // receiver parameters, so it treats these as different, even though
+ // they are still call compatible.
+ //
+ // 2. Method values. e.g.,
+ //
+ // var f *os.File
+ // var fn func([]byte) (int, error) = f.Read
+ //
+ // types.Identical will treat these as compatible (since receiver
+ // parameters are ignored). However, in this case, we do not call
+ // (*os.File).Read directly. Instead, f is stored in closure context
+ // and we call the wrapper (*os.File).Read-fm. However, runtime/pprof
+ // hides wrappers from profiles, making it appear that there is a call
+ // directly to the method. We could recognize this pattern return the
+ // wrapper rather than the method.
+ //
+ // N.B. perf profiles will report wrapper symbols directly, so
+ // ideally we should support direct wrapper references as well.
+ if callee.Type().Recv() != nil {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("callee %s is a method, skipping\n", ir.FuncName(callee))
+ }
+ return nil, nil, 0
+ }
+
+ // Bail if we know for sure it won't inline.
+ if !shouldPGODevirt(callee) {
+ return nil, nil, 0
+ }
+ // Bail if de-selected by PGO Hash.
+ if !base.PGOHash.MatchPosWithInfo(call.Pos(), "devirt", nil) {
+ return nil, nil, 0
+ }
+
+ return rewriteFunctionCall(call, fn, callee), callee, weight
+}
+
+// shouldPGODevirt checks if we should perform PGO devirtualization to the
+// target function.
+//
+// PGO devirtualization is most valuable when the callee is inlined, so if it
+// won't inline we can skip devirtualizing.
+func shouldPGODevirt(fn *ir.Func) bool {
+ var reason string
+ if base.Flag.LowerM > 1 || logopt.Enabled() {
+ defer func() {
+ if reason != "" {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: should not PGO devirtualize %v: %s\n", ir.Line(fn), ir.FuncName(fn), reason)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos(), ": should not PGO devirtualize function", "pgo-devirtualize", ir.FuncName(fn), reason)
+ }
+ }
+ }()
+ }
+
+ reason = inline.InlineImpossible(fn)
+ if reason != "" {
+ return false
+ }
+
+ // TODO(prattmic): checking only InlineImpossible is very conservative,
+ // primarily excluding only functions with pragmas. We probably want to
+ // move in either direction. Either:
+ //
+ // 1. Don't even bother to check InlineImpossible, as it affects so few
+ // functions.
+ //
+ // 2. Or consider the function body (notably cost) to better determine
+ // if the function will actually inline.
+
+ return true
+}
+
+// constructCallStat builds an initial CallStat describing this call, for
+// logging. If the call is devirtualized, the devirtualization fields should be
+// updated.
+func constructCallStat(p *pgo.Profile, fn *ir.Func, name string, call *ir.CallExpr) *CallStat {
+ switch call.Op() {
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ default:
+ // We don't care about logging builtin functions.
+ return nil
+ }
+
+ stat := CallStat{
+ Pkg: base.Ctxt.Pkgpath,
+ Pos: ir.Line(call),
+ Caller: name,
+ }
+
+ offset := pgo.NodeLineOffset(call, fn)
+
+ hotter := func(e *pgo.IREdge) bool {
+ if stat.Hottest == "" {
+ return true
+ }
+ if e.Weight != stat.HottestWeight {
+ return e.Weight > stat.HottestWeight
+ }
+ // If weight is the same, arbitrarily sort lexicographally, as
+ // findHotConcreteCallee does.
+ return e.Dst.Name() < stat.Hottest
+ }
+
+ // Sum of all edges from this callsite, regardless of callee.
+ // For direct calls, this should be the same as the single edge
+ // weight (except for multiple calls on one line, which we
+ // can't distinguish).
+ callerNode := p.WeightedCG.IRNodes[name]
+ for _, edge := range callerNode.OutEdges {
+ if edge.CallSiteOffset != offset {
+ continue
+ }
+ stat.Weight += edge.Weight
+ if hotter(edge) {
+ stat.HottestWeight = edge.Weight
+ stat.Hottest = edge.Dst.Name()
+ }
+ }
+
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ stat.Interface = false
+
+ callee := pgo.DirectCallee(call.Fun)
+ if callee != nil {
+ stat.Direct = true
+ if stat.Hottest == "" {
+ stat.Hottest = ir.LinkFuncName(callee)
+ }
+ } else {
+ stat.Direct = false
+ }
+ case ir.OCALLINTER:
+ stat.Direct = false
+ stat.Interface = true
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ return &stat
+}
+
+// copyInputs copies the inputs to a call: the receiver (for interface calls)
+// or function value (for function value calls) and the arguments. These
+// expressions are evaluated once and assigned to temporaries.
+//
+// The assignment statement is added to init and the copied receiver/fn
+// expression and copied arguments expressions are returned.
+func copyInputs(curfn *ir.Func, pos src.XPos, recvOrFn ir.Node, args []ir.Node, init *ir.Nodes) (ir.Node, []ir.Node) {
+ // Evaluate receiver/fn and argument expressions. The receiver/fn is
+ // used twice but we don't want to cause side effects twice. The
+ // arguments are used in two different calls and we can't trivially
+ // copy them.
+ //
+ // recvOrFn must be first in the assignment list as its side effects
+ // must be ordered before argument side effects.
+ var lhs, rhs []ir.Node
+ newRecvOrFn := typecheck.TempAt(pos, curfn, recvOrFn.Type())
+ lhs = append(lhs, newRecvOrFn)
+ rhs = append(rhs, recvOrFn)
+
+ for _, arg := range args {
+ argvar := typecheck.TempAt(pos, curfn, arg.Type())
+
+ lhs = append(lhs, argvar)
+ rhs = append(rhs, arg)
+ }
+
+ asList := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+ init.Append(typecheck.Stmt(asList))
+
+ return newRecvOrFn, lhs[1:]
+}
+
+// retTemps returns a slice of temporaries to be used for storing result values from call.
+func retTemps(curfn *ir.Func, pos src.XPos, call *ir.CallExpr) []ir.Node {
+ sig := call.Fun.Type()
+ var retvars []ir.Node
+ for _, ret := range sig.Results() {
+ retvars = append(retvars, typecheck.TempAt(pos, curfn, ret.Type))
+ }
+ return retvars
+}
+
+// condCall returns an ir.InlinedCallExpr that performs a call to thenCall if
+// cond is true and elseCall if cond is false. The return variables of the
+// InlinedCallExpr evaluate to the return values from the call.
+func condCall(curfn *ir.Func, pos src.XPos, cond ir.Node, thenCall, elseCall *ir.CallExpr, init ir.Nodes) *ir.InlinedCallExpr {
+ // Doesn't matter whether we use thenCall or elseCall, they must have
+ // the same return types.
+ retvars := retTemps(curfn, pos, thenCall)
+
+ var thenBlock, elseBlock ir.Nodes
+ if len(retvars) == 0 {
+ thenBlock.Append(thenCall)
+ elseBlock.Append(elseCall)
+ } else {
+ // Copy slice so edits in one location don't affect another.
+ thenRet := append([]ir.Node(nil), retvars...)
+ thenAsList := ir.NewAssignListStmt(pos, ir.OAS2, thenRet, []ir.Node{thenCall})
+ thenBlock.Append(typecheck.Stmt(thenAsList))
+
+ elseRet := append([]ir.Node(nil), retvars...)
+ elseAsList := ir.NewAssignListStmt(pos, ir.OAS2, elseRet, []ir.Node{elseCall})
+ elseBlock.Append(typecheck.Stmt(elseAsList))
+ }
+
+ nif := ir.NewIfStmt(pos, cond, thenBlock, elseBlock)
+ nif.SetInit(init)
+ nif.Likely = true
+
+ body := []ir.Node{typecheck.Stmt(nif)}
+
+ // This isn't really an inlined call of course, but InlinedCallExpr
+ // makes handling reassignment of return values easier.
+ res := ir.NewInlinedCallExpr(pos, body, retvars)
+ res.SetType(thenCall.Type())
+ res.SetTypecheck(1)
+ return res
+}
+
+// rewriteInterfaceCall devirtualizes the given interface call using a direct
+// method call to concretetyp.
+func rewriteInterfaceCall(call *ir.CallExpr, curfn, callee *ir.Func, concretetyp *types.Type) ir.Node {
+ if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: PGO devirtualizing interface call %v to %v\n", ir.Line(call), call.Fun, callee)
+ }
+
+ // We generate an OINCALL of:
+ //
+ // var recv Iface
+ //
+ // var arg1 A1
+ // var argN AN
+ //
+ // var ret1 R1
+ // var retN RN
+ //
+ // recv, arg1, argN = recv expr, arg1 expr, argN expr
+ //
+ // t, ok := recv.(Concrete)
+ // if ok {
+ // ret1, retN = t.Method(arg1, ... argN)
+ // } else {
+ // ret1, retN = recv.Method(arg1, ... argN)
+ // }
+ //
+ // OINCALL retvars: ret1, ... retN
+ //
+ // This isn't really an inlined call of course, but InlinedCallExpr
+ // makes handling reassignment of return values easier.
+ //
+ // TODO(prattmic): This increases the size of the AST in the caller,
+ // making it less like to inline. We may want to compensate for this
+ // somehow.
+
+ sel := call.Fun.(*ir.SelectorExpr)
+ method := sel.Sel
+ pos := call.Pos()
+ init := ir.TakeInit(call)
+
+ recv, args := copyInputs(curfn, pos, sel.X, call.Args.Take(), &init)
+
+ // Copy slice so edits in one location don't affect another.
+ argvars := append([]ir.Node(nil), args...)
+ call.Args = argvars
+
+ tmpnode := typecheck.TempAt(base.Pos, curfn, concretetyp)
+ tmpok := typecheck.TempAt(base.Pos, curfn, types.Types[types.TBOOL])
+
+ assert := ir.NewTypeAssertExpr(pos, recv, concretetyp)
+
+ assertAsList := ir.NewAssignListStmt(pos, ir.OAS2, []ir.Node{tmpnode, tmpok}, []ir.Node{typecheck.Expr(assert)})
+ init.Append(typecheck.Stmt(assertAsList))
+
+ concreteCallee := typecheck.XDotMethod(pos, tmpnode, method, true)
+ // Copy slice so edits in one location don't affect another.
+ argvars = append([]ir.Node(nil), argvars...)
+ concreteCall := typecheck.Call(pos, concreteCallee, argvars, call.IsDDD).(*ir.CallExpr)
+
+ res := condCall(curfn, pos, tmpok, concreteCall, call, init)
+
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("PGO devirtualizing interface call to %+v. After: %+v\n", concretetyp, res)
+ }
+
+ return res
+}
+
+// rewriteFunctionCall devirtualizes the given OCALLFUNC using a direct
+// function call to callee.
+func rewriteFunctionCall(call *ir.CallExpr, curfn, callee *ir.Func) ir.Node {
+ if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: PGO devirtualizing function call %v to %v\n", ir.Line(call), call.Fun, callee)
+ }
+
+ // We generate an OINCALL of:
+ //
+ // var fn FuncType
+ //
+ // var arg1 A1
+ // var argN AN
+ //
+ // var ret1 R1
+ // var retN RN
+ //
+ // fn, arg1, argN = fn expr, arg1 expr, argN expr
+ //
+ // fnPC := internal/abi.FuncPCABIInternal(fn)
+ // concretePC := internal/abi.FuncPCABIInternal(concrete)
+ //
+ // if fnPC == concretePC {
+ // ret1, retN = concrete(arg1, ... argN) // Same closure context passed (TODO)
+ // } else {
+ // ret1, retN = fn(arg1, ... argN)
+ // }
+ //
+ // OINCALL retvars: ret1, ... retN
+ //
+ // This isn't really an inlined call of course, but InlinedCallExpr
+ // makes handling reassignment of return values easier.
+
+ pos := call.Pos()
+ init := ir.TakeInit(call)
+
+ fn, args := copyInputs(curfn, pos, call.Fun, call.Args.Take(), &init)
+
+ // Copy slice so edits in one location don't affect another.
+ argvars := append([]ir.Node(nil), args...)
+ call.Args = argvars
+
+ // FuncPCABIInternal takes an interface{}, emulate that. This is needed
+ // for to ensure we get the MAKEFACE we need for SSA.
+ fnIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], fn))
+ calleeIface := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, types.Types[types.TINTER], callee.Nname))
+
+ fnPC := ir.FuncPC(pos, fnIface, obj.ABIInternal)
+ concretePC := ir.FuncPC(pos, calleeIface, obj.ABIInternal)
+
+ pcEq := typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.OEQ, fnPC, concretePC))
+
+ // TODO(go.dev/issue/61577): Handle callees that a closures and need a
+ // copy of the closure context from call. For now, we skip callees that
+ // are closures in maybeDevirtualizeFunctionCall.
+ if callee.OClosure != nil {
+ base.Fatalf("Callee is a closure: %+v", callee)
+ }
+
+ // Copy slice so edits in one location don't affect another.
+ argvars = append([]ir.Node(nil), argvars...)
+ concreteCall := typecheck.Call(pos, callee.Nname, argvars, call.IsDDD).(*ir.CallExpr)
+
+ res := condCall(curfn, pos, pcEq, concreteCall, call, init)
+
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("PGO devirtualizing function call to %+v. After: %+v\n", ir.FuncName(callee), res)
+ }
+
+ return res
+}
+
+// methodRecvType returns the type containing method fn. Returns nil if fn
+// is not a method.
+func methodRecvType(fn *ir.Func) *types.Type {
+ recv := fn.Nname.Type().Recv()
+ if recv == nil {
+ return nil
+ }
+ return recv.Type
+}
+
+// interfaceCallRecvTypeAndMethod returns the type and the method of the interface
+// used in an interface call.
+func interfaceCallRecvTypeAndMethod(call *ir.CallExpr) (*types.Type, *types.Sym) {
+ if call.Op() != ir.OCALLINTER {
+ base.Fatalf("Call isn't OCALLINTER: %+v", call)
+ }
+
+ sel, ok := call.Fun.(*ir.SelectorExpr)
+ if !ok {
+ base.Fatalf("OCALLINTER doesn't contain SelectorExpr: %+v", call)
+ }
+
+ return sel.X.Type(), sel.Sel
+}
+
+// findHotConcreteCallee returns the *ir.Func of the hottest callee of a call,
+// if available, and its edge weight. extraFn can perform additional
+// applicability checks on each candidate edge. If extraFn returns false,
+// candidate will not be considered a valid callee candidate.
+func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr, extraFn func(callerName string, callOffset int, candidate *pgo.IREdge) bool) (*ir.Func, int64) {
+ callerName := ir.LinkFuncName(caller)
+ callerNode := p.WeightedCG.IRNodes[callerName]
+ callOffset := pgo.NodeLineOffset(call, caller)
+
+ var hottest *pgo.IREdge
+
+ // Returns true if e is hotter than hottest.
+ //
+ // Naively this is just e.Weight > hottest.Weight, but because OutEdges
+ // has arbitrary iteration order, we need to apply additional sort
+ // criteria when e.Weight == hottest.Weight to ensure we have stable
+ // selection.
+ hotter := func(e *pgo.IREdge) bool {
+ if hottest == nil {
+ return true
+ }
+ if e.Weight != hottest.Weight {
+ return e.Weight > hottest.Weight
+ }
+
+ // Now e.Weight == hottest.Weight, we must select on other
+ // criteria.
+
+ // If only one edge has IR, prefer that one.
+ if (hottest.Dst.AST == nil) != (e.Dst.AST == nil) {
+ if e.Dst.AST != nil {
+ return true
+ }
+ return false
+ }
+
+ // Arbitrary, but the callee names will always differ. Select
+ // the lexicographically first callee.
+ return e.Dst.Name() < hottest.Dst.Name()
+ }
+
+ for _, e := range callerNode.OutEdges {
+ if e.CallSiteOffset != callOffset {
+ continue
+ }
+
+ if !hotter(e) {
+ // TODO(prattmic): consider total caller weight? i.e.,
+ // if the hottest callee is only 10% of the weight,
+ // maybe don't devirtualize? Similarly, if this is call
+ // is globally very cold, there is not much value in
+ // devirtualizing.
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): too cold (hottest %d)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, hottest.Weight)
+ }
+ continue
+ }
+
+ if e.Dst.AST == nil {
+ // Destination isn't visible from this package
+ // compilation.
+ //
+ // We must assume it implements the interface.
+ //
+ // We still record this as the hottest callee so far
+ // because we only want to return the #1 hottest
+ // callee. If we skip this then we'd return the #2
+ // hottest callee.
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d) (missing IR): hottest so far\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+ }
+ hottest = e
+ continue
+ }
+
+ if extraFn != nil && !extraFn(callerName, callOffset, e) {
+ continue
+ }
+
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): hottest so far\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+ }
+ hottest = e
+ }
+
+ if hottest == nil {
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: call %s:%d: no hot callee\n", ir.Line(call), callerName, callOffset)
+ }
+ return nil, 0
+ }
+
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v call %s:%d: hottest callee %s (weight %d)\n", ir.Line(call), callerName, callOffset, hottest.Dst.Name(), hottest.Weight)
+ }
+ return hottest.Dst.AST, hottest.Weight
+}
+
+// findHotConcreteInterfaceCallee returns the *ir.Func of the hottest callee of an
+// interface call, if available, and its edge weight.
+func findHotConcreteInterfaceCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+ inter, method := interfaceCallRecvTypeAndMethod(call)
+
+ return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool {
+ ctyp := methodRecvType(e.Dst.AST)
+ if ctyp == nil {
+ // Not a method.
+ // TODO(prattmic): Support non-interface indirect calls.
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee not a method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+ }
+ return false
+ }
+
+ // If ctyp doesn't implement inter it is most likely from a
+ // different call on the same line
+ if !typecheck.Implements(ctyp, inter) {
+ // TODO(prattmic): this is overly strict. Consider if
+ // ctyp is a partial implementation of an interface
+ // that gets embedded in types that complete the
+ // interface. It would still be OK to devirtualize a
+ // call to this method.
+ //
+ // What we'd need to do is check that the function
+ // pointer in the itab matches the method we want,
+ // rather than doing a full type assertion.
+ if base.Debug.PGODebug >= 2 {
+ why := typecheck.ImplementsExplain(ctyp, inter)
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't implement %v (%s)\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, inter, why)
+ }
+ return false
+ }
+
+ // If the method name is different it is most likely from a
+ // different call on the same line
+ if !strings.HasSuffix(e.Dst.Name(), "."+method.Name) {
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): callee is a different method\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight)
+ }
+ return false
+ }
+
+ return true
+ })
+}
+
+// findHotConcreteFunctionCallee returns the *ir.Func of the hottest callee of an
+// indirect function call, if available, and its edge weight.
+func findHotConcreteFunctionCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr) (*ir.Func, int64) {
+ typ := call.Fun.Type().Underlying()
+
+ return findHotConcreteCallee(p, caller, call, func(callerName string, callOffset int, e *pgo.IREdge) bool {
+ ctyp := e.Dst.AST.Type().Underlying()
+
+ // If ctyp doesn't match typ it is most likely from a different
+ // call on the same line.
+ //
+ // Note that we are comparing underlying types, as different
+ // defined types are OK. e.g., a call to a value of type
+ // net/http.HandlerFunc can be devirtualized to a function with
+ // the same underlying type.
+ if !types.Identical(typ, ctyp) {
+ if base.Debug.PGODebug >= 2 {
+ fmt.Printf("%v: edge %s:%d -> %s (weight %d): %v doesn't match %v\n", ir.Line(call), callerName, callOffset, e.Dst.Name(), e.Weight, ctyp, typ)
+ }
+ return false
+ }
+
+ return true
+ })
+}
diff --git a/src/cmd/compile/internal/devirtualize/pgo_test.go b/src/cmd/compile/internal/devirtualize/pgo_test.go
new file mode 100644
index 0000000..84c96df
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/pgo_test.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package devirtualize
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "testing"
+)
+
+func init() {
+ // These are the few constants that need to be initialized in order to use
+ // the types package without using the typecheck package by calling
+ // typecheck.InitUniverse() (the normal way to initialize the types package).
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+ typecheck.InitUniverse()
+ base.Ctxt = &obj.Link{}
+ base.Debug.PGODebug = 3
+}
+
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
+}
+
+type profileBuilder struct {
+ p *pgo.Profile
+}
+
+func newProfileBuilder() *profileBuilder {
+ // findHotConcreteCallee only uses pgo.Profile.WeightedCG, so we're
+ // going to take a shortcut and only construct that.
+ return &profileBuilder{
+ p: &pgo.Profile{
+ WeightedCG: &pgo.IRGraph{
+ IRNodes: make(map[string]*pgo.IRNode),
+ },
+ },
+ }
+}
+
+// Profile returns the constructed profile.
+func (p *profileBuilder) Profile() *pgo.Profile {
+ return p.p
+}
+
+// NewNode creates a new IRNode and adds it to the profile.
+//
+// fn may be nil, in which case the node will set LinkerSymbolName.
+func (p *profileBuilder) NewNode(name string, fn *ir.Func) *pgo.IRNode {
+ n := &pgo.IRNode{
+ OutEdges: make(map[pgo.NamedCallEdge]*pgo.IREdge),
+ }
+ if fn != nil {
+ n.AST = fn
+ } else {
+ n.LinkerSymbolName = name
+ }
+ p.p.WeightedCG.IRNodes[name] = n
+ return n
+}
+
+// Add a new call edge from caller to callee.
+func addEdge(caller, callee *pgo.IRNode, offset int, weight int64) {
+ namedEdge := pgo.NamedCallEdge{
+ CallerName: caller.Name(),
+ CalleeName: callee.Name(),
+ CallSiteOffset: offset,
+ }
+ irEdge := &pgo.IREdge{
+ Src: caller,
+ Dst: callee,
+ CallSiteOffset: offset,
+ Weight: weight,
+ }
+ caller.OutEdges[namedEdge] = irEdge
+}
+
+// Create a new struct type named structName with a method named methName and
+// return the method.
+func makeStructWithMethod(pkg *types.Pkg, structName, methName string) *ir.Func {
+ // type structName struct{}
+ structType := types.NewStruct(nil)
+
+ // func (structName) methodName()
+ recv := types.NewField(src.NoXPos, typecheck.Lookup(structName), structType)
+ sig := types.NewSignature(recv, nil, nil)
+ fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup(structName+"."+methName), sig)
+
+ // Add the method to the struct.
+ structType.SetMethods([]*types.Field{types.NewField(src.NoXPos, typecheck.Lookup(methName), sig)})
+
+ return fn
+}
+
+func TestFindHotConcreteInterfaceCallee(t *testing.T) {
+ p := newProfileBuilder()
+
+ pkgFoo := types.NewPkg("example.com/foo", "foo")
+ basePos := src.NewFileBase("foo.go", "/foo.go")
+
+ const (
+ // Caller start line.
+ callerStart = 42
+
+ // The line offset of the call we care about.
+ callOffset = 1
+
+ // The line offset of some other call we don't care about.
+ wrongCallOffset = 2
+ )
+
+ // type IFace interface {
+ // Foo()
+ // }
+ fooSig := types.NewSignature(types.FakeRecv(), nil, nil)
+ method := types.NewField(src.NoXPos, typecheck.Lookup("Foo"), fooSig)
+ iface := types.NewInterface([]*types.Field{method})
+
+ callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil))
+
+ hotCalleeFn := makeStructWithMethod(pkgFoo, "HotCallee", "Foo")
+ coldCalleeFn := makeStructWithMethod(pkgFoo, "ColdCallee", "Foo")
+ wrongLineCalleeFn := makeStructWithMethod(pkgFoo, "WrongLineCallee", "Foo")
+ wrongMethodCalleeFn := makeStructWithMethod(pkgFoo, "WrongMethodCallee", "Bar")
+
+ callerNode := p.NewNode("example.com/foo.Caller", callerFn)
+ hotCalleeNode := p.NewNode("example.com/foo.HotCallee.Foo", hotCalleeFn)
+ coldCalleeNode := p.NewNode("example.com/foo.ColdCallee.Foo", coldCalleeFn)
+ wrongLineCalleeNode := p.NewNode("example.com/foo.WrongCalleeLine.Foo", wrongLineCalleeFn)
+ wrongMethodCalleeNode := p.NewNode("example.com/foo.WrongCalleeMethod.Foo", wrongMethodCalleeFn)
+
+ hotMissingCalleeNode := p.NewNode("example.com/bar.HotMissingCallee.Foo", nil)
+
+ addEdge(callerNode, wrongLineCalleeNode, wrongCallOffset, 100) // Really hot, but wrong line.
+ addEdge(callerNode, wrongMethodCalleeNode, callOffset, 100) // Really hot, but wrong method type.
+ addEdge(callerNode, hotCalleeNode, callOffset, 10)
+ addEdge(callerNode, coldCalleeNode, callOffset, 1)
+
+ // Equal weight, but IR missing.
+ //
+ // N.B. example.com/bar sorts lexicographically before example.com/foo,
+ // so if the IR availability of hotCalleeNode doesn't get precedence,
+ // this would be mistakenly selected.
+ addEdge(callerNode, hotMissingCalleeNode, callOffset, 10)
+
+ // IFace.Foo()
+ sel := typecheck.NewMethodExpr(src.NoXPos, iface, typecheck.Lookup("Foo"))
+ call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALLINTER, sel, nil)
+
+ gotFn, gotWeight := findHotConcreteInterfaceCallee(p.Profile(), callerFn, call)
+ if gotFn != hotCalleeFn {
+ t.Errorf("findHotConcreteInterfaceCallee func got %v want %v", gotFn, hotCalleeFn)
+ }
+ if gotWeight != 10 {
+ t.Errorf("findHotConcreteInterfaceCallee weight got %v want 10", gotWeight)
+ }
+}
+
+func TestFindHotConcreteFunctionCallee(t *testing.T) {
+ // TestFindHotConcreteInterfaceCallee already covered basic weight
+ // comparisons, which is shared logic. Here we just test type signature
+ // disambiguation.
+
+ p := newProfileBuilder()
+
+ pkgFoo := types.NewPkg("example.com/foo", "foo")
+ basePos := src.NewFileBase("foo.go", "/foo.go")
+
+ const (
+ // Caller start line.
+ callerStart = 42
+
+ // The line offset of the call we care about.
+ callOffset = 1
+ )
+
+ callerFn := ir.NewFunc(makePos(basePos, callerStart, 1), src.NoXPos, pkgFoo.Lookup("Caller"), types.NewSignature(nil, nil, nil))
+
+ // func HotCallee()
+ hotCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("HotCallee"), types.NewSignature(nil, nil, nil))
+
+ // func WrongCallee() bool
+ wrongCalleeFn := ir.NewFunc(src.NoXPos, src.NoXPos, pkgFoo.Lookup("WrongCallee"), types.NewSignature(nil, nil,
+ []*types.Field{
+ types.NewField(src.NoXPos, nil, types.Types[types.TBOOL]),
+ },
+ ))
+
+ callerNode := p.NewNode("example.com/foo.Caller", callerFn)
+ hotCalleeNode := p.NewNode("example.com/foo.HotCallee", hotCalleeFn)
+ wrongCalleeNode := p.NewNode("example.com/foo.WrongCallee", wrongCalleeFn)
+
+ addEdge(callerNode, wrongCalleeNode, callOffset, 100) // Really hot, but wrong function type.
+ addEdge(callerNode, hotCalleeNode, callOffset, 10)
+
+ // var fn func()
+ name := ir.NewNameAt(src.NoXPos, typecheck.Lookup("fn"), types.NewSignature(nil, nil, nil))
+ // fn()
+ call := ir.NewCallExpr(makePos(basePos, callerStart+callOffset, 1), ir.OCALL, name, nil)
+
+ gotFn, gotWeight := findHotConcreteFunctionCallee(p.Profile(), callerFn, call)
+ if gotFn != hotCalleeFn {
+ t.Errorf("findHotConcreteFunctionCallee func got %v want %v", gotFn, hotCalleeFn)
+ }
+ if gotWeight != 10 {
+ t.Errorf("findHotConcreteFunctionCallee weight got %v want 10", gotWeight)
+ }
+}
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
new file mode 100644
index 0000000..e9553d1
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -0,0 +1,594 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (scopes []dwarf.Scope, inlcalls dwarf.InlCalls) {
+ fn := curfn.(*ir.Func)
+
+ if fn.Nname != nil {
+ expect := fn.Linksym()
+ if fnsym.ABI() == obj.ABI0 {
+ expect = fn.LinksymABI(obj.ABI0)
+ }
+ if fnsym != expect {
+ base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ }
+ }
+
+ // Back when there were two different *Funcs for a function, this code
+ // was not consistent about whether a particular *Node being processed
+ // was an ODCLFUNC or ONAME node. Partly this is because inlined function
+ // bodies have no ODCLFUNC node, which was it's own inconsistency.
+ // In any event, the handling of the two different nodes for DWARF purposes
+ // was subtly different, likely in unintended ways. CL 272253 merged the
+ // two nodes' Func fields, so that code sees the same *Func whether it is
+ // holding the ODCLFUNC or the ONAME. This resulted in changes in the
+ // DWARF output. To preserve the existing DWARF output and leave an
+ // intentional change for a future CL, this code does the following when
+ // fn.Op == ONAME:
+ //
+ // 1. Disallow use of createComplexVars in createDwarfVars.
+ // It was not possible to reach that code for an ONAME before,
+ // because the DebugInfo was set only on the ODCLFUNC Func.
+ // Calling into it in the ONAME case causes an index out of bounds panic.
+ //
+ // 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+ // not the ONAME Func. Populating apdecls for the ONAME case results
+ // in selected being populated after createSimpleVars is called in
+ // createDwarfVars, and then that causes the loop to skip all the entries
+ // in dcl, meaning that the RecordAutoType calls don't happen.
+ //
+ // These two adjustments keep toolstash -cmp working for now.
+ // Deciding the right answer is, as they say, future work.
+ //
+ // We can tell the difference between the old ODCLFUNC and ONAME
+ // cases by looking at the infosym.Name. If it's empty, DebugInfo is
+ // being called from (*obj.Link).populateDWARF, which used to use
+ // the ODCLFUNC. If it's non-empty (the name will end in $abstract),
+ // DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
+ // which used to use the ONAME form.
+ isODCLFUNC := infosym.Name == ""
+
+ var apdecls []*ir.Name
+ // Populate decls for fn.
+ if isODCLFUNC {
+ for _, n := range fn.Dcl {
+ if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class {
+ case ir.PAUTO:
+ if !n.Used() {
+ // Text == nil -> generating abstract function
+ if fnsym.Func().Text != nil {
+ base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ }
+ continue
+ }
+ case ir.PPARAM, ir.PPARAMOUT:
+ default:
+ continue
+ }
+ apdecls = append(apdecls, n)
+ if n.Type().Kind() == types.TSSA {
+ // Can happen for TypeInt128 types. This only happens for
+ // spill locations, so not a huge deal.
+ continue
+ }
+ fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+ }
+ }
+
+ decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
+
+ // For each type referenced by the functions auto vars but not
+ // already referenced by a dwarf var, attach an R_USETYPE relocation to
+ // the function symbol to insure that the type included in DWARF
+ // processing during linking.
+ typesyms := []*obj.LSym{}
+ for t := range fnsym.Func().Autot {
+ typesyms = append(typesyms, t)
+ }
+ sort.Sort(obj.BySymName(typesyms))
+ for _, sym := range typesyms {
+ r := obj.Addrel(infosym)
+ r.Sym = sym
+ r.Type = objabi.R_USETYPE
+ }
+ fnsym.Func().Autot = nil
+
+ var varScopes []ir.ScopeID
+ for _, decl := range decls {
+ pos := declPos(decl)
+ varScopes = append(varScopes, findScope(fn.Marks, pos))
+ }
+
+ scopes = assembleScopes(fnsym, fn, dwarfVars, varScopes)
+ if base.Flag.GenDwarfInl > 0 {
+ inlcalls = assembleInlines(fnsym, dwarfVars)
+ }
+ return scopes, inlcalls
+}
+
+func declPos(decl *ir.Name) src.XPos {
+ return decl.Canonical().Pos()
+}
+
+// createDwarfVars process fn, returning a list of DWARF variables and the
+// Nodes they represent.
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
+ // Collect a raw list of DWARF vars.
+ var vars []*dwarf.Var
+ var decls []*ir.Name
+ var selected ir.NameSet
+
+ if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
+ decls, vars, selected = createComplexVars(fnsym, fn)
+ } else if fn.ABI == obj.ABIInternal && base.Flag.N != 0 && complexOK {
+ decls, vars, selected = createABIVars(fnsym, fn, apDecls)
+ } else {
+ decls, vars, selected = createSimpleVars(fnsym, apDecls)
+ }
+ if fn.DebugInfo != nil {
+ // Recover zero sized variables eliminated by the stackframe pass
+ for _, n := range fn.DebugInfo.(*ssa.FuncDebug).OptDcl {
+ if n.Class != ir.PAUTO {
+ continue
+ }
+ types.CalcSize(n.Type())
+ if n.Type().Size() == 0 {
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ vars[len(vars)-1].StackOffset = 0
+ fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+ }
+ }
+ }
+
+ dcl := apDecls
+ if fnsym.WasInlined() {
+ dcl = preInliningDcls(fnsym)
+ } else {
+ // The backend's stackframe pass prunes away entries from the
+ // fn's Dcl list, including PARAMOUT nodes that correspond to
+ // output params passed in registers. Add back in these
+ // entries here so that we can process them properly during
+ // DWARF-gen. See issue 48573 for more details.
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+ for _, n := range debugInfo.RegOutputParams {
+ if n.Class != ir.PPARAMOUT || !n.IsOutputParamInRegisters() {
+ panic("invalid ir.Name on debugInfo.RegOutputParams list")
+ }
+ dcl = append(dcl, n)
+ }
+ }
+
+ // If optimization is enabled, the list above will typically be
+ // missing some of the original pre-optimization variables in the
+ // function (they may have been promoted to registers, folded into
+ // constants, dead-coded away, etc). Input arguments not eligible
+ // for SSA optimization are also missing. Here we add back in entries
+ // for selected missing vars. Note that the recipe below creates a
+ // conservative location. The idea here is that we want to
+ // communicate to the user that "yes, there is a variable named X
+ // in this function, but no, I don't have enough information to
+ // reliably report its contents."
+ // For non-SSA-able arguments, however, the correct information
+ // is known -- they have a single home on the stack.
+ for _, n := range dcl {
+ if selected.Has(n) {
+ continue
+ }
+ c := n.Sym().Name[0]
+ if c == '.' || n.Type().IsUntyped() {
+ continue
+ }
+ if n.Class == ir.PPARAM && !ssa.CanSSA(n.Type()) {
+ // SSA-able args get location lists, and may move in and
+ // out of registers, so those are handled elsewhere.
+ // Autos and named output params seem to get handled
+ // with VARDEF, which creates location lists.
+ // Args not of SSA-able type are treated here; they
+ // are homed on the stack in a single place for the
+ // entire call.
+ vars = append(vars, createSimpleVar(fnsym, n))
+ decls = append(decls, n)
+ continue
+ }
+ typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+ decls = append(decls, n)
+ abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
+ isReturnValue := (n.Class == ir.PPARAMOUT)
+ if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ if n.Esc() == ir.EscHeap {
+ // The variable in question has been promoted to the heap.
+ // Its address is in n.Heapaddr.
+ // TODO(thanm): generate a better location expression
+ }
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(n.Pos())
+ vars = append(vars, &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: isReturnValue,
+ Abbrev: abbrev,
+ StackOffset: int32(n.FrameOffset()),
+ Type: base.Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.RelCol(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ DictIndex: n.DictIndex,
+ })
+ // Record go type of to insure that it gets emitted by the linker.
+ fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+ }
+
+ // Sort decls and vars.
+ sortDeclsAndVars(fn, decls, vars)
+
+ return decls, vars
+}
+
+// sortDeclsAndVars sorts the decl and dwarf var lists according to
+// parameter declaration order, so as to insure that when a subprogram
+// DIE is emitted, its parameter children appear in declaration order.
+// Prior to the advent of the register ABI, sorting by frame offset
+// would achieve this; with the register we now need to go back to the
+// original function signature.
+func sortDeclsAndVars(fn *ir.Func, decls []*ir.Name, vars []*dwarf.Var) {
+ paramOrder := make(map[*ir.Name]int)
+ idx := 1
+ for _, f := range fn.Type().RecvParamsResults() {
+ if n, ok := f.Nname.(*ir.Name); ok {
+ paramOrder[n] = idx
+ idx++
+ }
+ }
+ sort.Stable(varsAndDecls{decls, vars, paramOrder})
+}
+
+type varsAndDecls struct {
+ decls []*ir.Name
+ vars []*dwarf.Var
+ paramOrder map[*ir.Name]int
+}
+
+func (v varsAndDecls) Len() int {
+ return len(v.decls)
+}
+
+func (v varsAndDecls) Less(i, j int) bool {
+ nameLT := func(ni, nj *ir.Name) bool {
+ oi, foundi := v.paramOrder[ni]
+ oj, foundj := v.paramOrder[nj]
+ if foundi {
+ if foundj {
+ return oi < oj
+ } else {
+ return true
+ }
+ }
+ return false
+ }
+ return nameLT(v.decls[i], v.decls[j])
+}
+
+func (v varsAndDecls) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.decls[i], v.decls[j] = v.decls[j], v.decls[i]
+}
+
+// Given a function that was inlined at some point during the
+// compilation, return a sorted list of nodes corresponding to the
+// autos/locals in that function prior to inlining. If this is a
+// function that is not local to the package being compiled, then the
+// names of the variables may have been "versioned" to avoid conflicts
+// with local vars; disregard this versioning when sorting.
+func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
+ var rdcl []*ir.Name
+ for _, n := range fn.Inl.Dcl {
+ c := n.Sym().Name[0]
+ // Avoid reporting "_" parameters, since if there are more than
+ // one, it can result in a collision later on, as in #23179.
+ if n.Sym().Name == "_" || c == '.' || n.Type().IsUntyped() {
+ continue
+ }
+ rdcl = append(rdcl, n)
+ }
+ return rdcl
+}
+
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+ var vars []*dwarf.Var
+ var decls []*ir.Name
+ var selected ir.NameSet
+ for _, n := range apDecls {
+ if ir.IsAutoTmp(n) {
+ continue
+ }
+
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ selected.Add(n)
+ }
+ return decls, vars, selected
+}
+
+func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
+ var abbrev int
+ var offs int64
+
+ localAutoOffset := func() int64 {
+ offs = n.FrameOffset()
+ if base.Ctxt.Arch.FixedFrameSize == 0 {
+ offs -= int64(types.PtrSize)
+ }
+ if buildcfg.FramePointerEnabled {
+ offs -= int64(types.PtrSize)
+ }
+ return offs
+ }
+
+ switch n.Class {
+ case ir.PAUTO:
+ offs = localAutoOffset()
+ abbrev = dwarf.DW_ABRV_AUTO
+ case ir.PPARAM, ir.PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM
+ if n.IsOutputParamInRegisters() {
+ offs = localAutoOffset()
+ } else {
+ offs = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize
+ }
+
+ default:
+ base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
+ }
+
+ typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+ delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(declPos(n))
+ return &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class == ir.PPARAMOUT,
+ IsInlFormal: n.InlFormal(),
+ Abbrev: abbrev,
+ StackOffset: int32(offs),
+ Type: base.Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.RelCol(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ DictIndex: n.DictIndex,
+ }
+}
+
+// createABIVars creates DWARF variables for functions in which the
+// register ABI is enabled but optimization is turned off. It uses a
+// hybrid approach in which register-resident input params are
+// captured with location lists, and all other vars use the "simple"
+// strategy.
+func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+
+ // Invoke createComplexVars to generate dwarf vars for input parameters
+ // that are register-allocated according to the ABI rules.
+ decls, vars, selected := createComplexVars(fnsym, fn)
+
+ // Now fill in the remainder of the variables: input parameters
+ // that are not register-resident, output parameters, and local
+ // variables.
+ for _, n := range apDecls {
+ if ir.IsAutoTmp(n) {
+ continue
+ }
+ if _, ok := selected[n]; ok {
+ // already handled
+ continue
+ }
+
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ selected.Add(n)
+ }
+
+ return decls, vars, selected
+}
+
+// createComplexVars creates recomposed DWARF vars with location lists,
+// suitable for describing optimized code.
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+
+ // Produce a DWARF variable entry for each user variable.
+ var decls []*ir.Name
+ var vars []*dwarf.Var
+ var ssaVars ir.NameSet
+
+ for varID, dvar := range debugInfo.Vars {
+ n := dvar
+ ssaVars.Add(n)
+ for _, slot := range debugInfo.VarSlots[varID] {
+ ssaVars.Add(debugInfo.Slots[slot].N)
+ }
+
+ if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
+ decls = append(decls, n)
+ vars = append(vars, dvar)
+ }
+ }
+
+ return decls, vars, ssaVars
+}
+
+// createComplexVar builds a single DWARF variable entry and location list.
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+ debug := fn.DebugInfo.(*ssa.FuncDebug)
+ n := debug.Vars[varID]
+
+ var abbrev int
+ switch n.Class {
+ case ir.PAUTO:
+ abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
+ case ir.PPARAM, ir.PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ default:
+ return nil
+ }
+
+ gotype := reflectdata.TypeLinksym(n.Type())
+ delete(fnsym.Func().Autot, gotype)
+ typename := dwarf.InfoPrefix + gotype.Name[len("type:"):]
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(n.Pos())
+ dvar := &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class == ir.PPARAMOUT,
+ IsInlFormal: n.InlFormal(),
+ Abbrev: abbrev,
+ Type: base.Ctxt.Lookup(typename),
+ // The stack offset is used as a sorting key, so for decomposed
+ // variables just give it the first one. It's not used otherwise.
+ // This won't work well if the first slot hasn't been assigned a stack
+ // location, but it's not obvious how to do better.
+ StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.RelCol(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ DictIndex: n.DictIndex,
+ }
+ list := debug.LocationLists[varID]
+ if len(list) != 0 {
+ dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
+ debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+ }
+ }
+ return dvar
+}
+
+// RecordFlags records the specified command-line flags to be placed
+// in the DWARF info.
+func RecordFlags(flags ...string) {
+ if base.Ctxt.Pkgpath == "" {
+ panic("missing pkgpath")
+ }
+
+ type BoolFlag interface {
+ IsBoolFlag() bool
+ }
+ type CountFlag interface {
+ IsCountFlag() bool
+ }
+ var cmd bytes.Buffer
+ for _, name := range flags {
+ f := flag.Lookup(name)
+ if f == nil {
+ continue
+ }
+ getter := f.Value.(flag.Getter)
+ if getter.String() == f.DefValue {
+ // Flag has default value, so omit it.
+ continue
+ }
+ if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+ val, ok := getter.Get().(bool)
+ if ok && val {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+ val, ok := getter.Get().(int)
+ if ok && val == 1 {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
+ }
+
+ // Adds flag to producer string signaling whether regabi is turned on or
+ // off.
+ // Once regabi is turned on across the board and the relative GOEXPERIMENT
+ // knobs no longer exist this code should be removed.
+ if buildcfg.Experiment.RegabiArgs {
+ cmd.Write([]byte(" regabi"))
+ }
+
+ if cmd.Len() == 0 {
+ return
+ }
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = cmd.Bytes()[1:]
+}
+
+// RecordPackageName records the name of the package being
+// compiled, so that the linker can save it in the compile unit's DIE.
+func RecordPackageName() {
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = []byte(types.LocalPkg.Name)
+}
diff --git a/src/cmd/compile/internal/dwarfgen/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go
new file mode 100644
index 0000000..655e7c6
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/dwinl.go
@@ -0,0 +1,441 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "fmt"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// To identify variables by original source position.
+type varPos struct {
+ DeclName string
+ DeclFile string
+ DeclLine uint
+ DeclCol uint
+}
+
+// This is the main entry point for collection of raw material to
+// drive generation of DWARF "inlined subroutine" DIEs. See proposal
+// 22080 for more details and background info.
+func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
+ var inlcalls dwarf.InlCalls
+
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+ }
+
+ // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
+ imap := make(map[int]int)
+
+ // Walk progs to build up the InlCalls data structure
+ var prevpos src.XPos
+ for p := fnsym.Func().Text; p != nil; p = p.Link {
+ if p.Pos == prevpos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii >= 0 {
+ insertInlCall(&inlcalls, ii, imap)
+ }
+ prevpos = p.Pos
+ }
+
+ // This is used to partition DWARF vars by inline index. Vars not
+ // produced by the inliner will wind up in the vmap[0] entry.
+ vmap := make(map[int32][]*dwarf.Var)
+
+ // Now walk the dwarf vars and partition them based on whether they
+ // were produced by the inliner (dwv.InlIndex > 0) or were original
+ // vars/params from the function (dwv.InlIndex == 0).
+ for _, dwv := range dwVars {
+
+ vmap[dwv.InlIndex] = append(vmap[dwv.InlIndex], dwv)
+
+ // Zero index => var was not produced by an inline
+ if dwv.InlIndex == 0 {
+ continue
+ }
+
+ // Look up index in our map, then tack the var in question
+ // onto the vars list for the correct inlined call.
+ ii := int(dwv.InlIndex) - 1
+ idx, ok := imap[ii]
+ if !ok {
+ // We can occasionally encounter a var produced by the
+ // inliner for which there is no remaining prog; add a new
+ // entry to the call list in this scenario.
+ idx = insertInlCall(&inlcalls, ii, imap)
+ }
+ inlcalls.Calls[idx].InlVars =
+ append(inlcalls.Calls[idx].InlVars, dwv)
+ }
+
+ // Post process the map above to assign child indices to vars.
+ //
+ // A given variable is treated differently depending on whether it
+ // is part of the top-level function (ii == 0) or if it was
+ // produced as a result of an inline (ii != 0).
+ //
+ // If a variable was not produced by an inline and its containing
+ // function was not inlined, then we just assign an ordering of
+ // based on variable name.
+ //
+ // If a variable was not produced by an inline and its containing
+ // function was inlined, then we need to assign a child index
+ // based on the order of vars in the abstract function (in
+ // addition, those vars that don't appear in the abstract
+ // function, such as "~r1", are flagged as such).
+ //
+ // If a variable was produced by an inline, then we locate it in
+ // the pre-inlining decls for the target function and assign child
+ // index accordingly.
+ for ii, sl := range vmap {
+ var m map[varPos]int
+ if ii == 0 {
+ if !fnsym.WasInlined() {
+ for j, v := range sl {
+ v.ChildIndex = int32(j)
+ }
+ continue
+ }
+ m = makePreinlineDclMap(fnsym)
+ } else {
+ ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ m = makePreinlineDclMap(ifnlsym)
+ }
+
+ // Here we assign child indices to variables based on
+ // pre-inlined decls, and set the "IsInAbstract" flag
+ // appropriately. In addition: parameter and local variable
+ // names are given "middle dot" version numbers as part of the
+ // writing them out to export data (see issue 4326). If DWARF
+ // inlined routine generation is turned on, we want to undo
+ // this versioning, since DWARF variables in question will be
+ // parented by the inlined routine and not the top-level
+ // caller.
+ synthCount := len(m)
+ for _, v := range sl {
+ vp := varPos{
+ DeclName: v.Name,
+ DeclFile: v.DeclFile,
+ DeclLine: v.DeclLine,
+ DeclCol: v.DeclCol,
+ }
+ synthesized := strings.HasPrefix(v.Name, "~") || v.Name == "_"
+ if idx, found := m[vp]; found {
+ v.ChildIndex = int32(idx)
+ v.IsInAbstract = !synthesized
+ } else {
+ // Variable can't be found in the pre-inline dcl list.
+ // In the top-level case (ii=0) this can happen
+ // because a composite variable was split into pieces,
+ // and we're looking at a piece. We can also see
+ // return temps (~r%d) that were created during
+ // lowering, or unnamed params ("_").
+ v.ChildIndex = int32(synthCount)
+ synthCount++
+ }
+ }
+ }
+
+ // Make a second pass through the progs to compute PC ranges for
+ // the various inlined calls.
+ start := int64(-1)
+ curii := -1
+ var prevp *obj.Prog
+ for p := fnsym.Func().Text; p != nil; prevp, p = p, p.Link {
+ if prevp != nil && p.Pos == prevp.Pos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii == curii {
+ continue
+ }
+ // Close out the current range
+ if start != -1 {
+ addRange(inlcalls.Calls, start, p.Pc, curii, imap)
+ }
+ // Begin new range
+ start = p.Pc
+ curii = ii
+ }
+ if start != -1 {
+ addRange(inlcalls.Calls, start, fnsym.Size, curii, imap)
+ }
+
+ // Issue 33188: if II foo is a child of II bar, then ensure that
+ // bar's ranges include the ranges of foo (the loop above will produce
+ // disjoint ranges).
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ unifyCallRanges(inlcalls, k)
+ }
+ }
+
+ // Debugging
+ if base.Debug.DwarfInl != 0 {
+ dumpInlCalls(inlcalls)
+ dumpInlVars(dwVars)
+ }
+
+ // Perform a consistency check on inlined routine PC ranges
+ // produced by unifyCallRanges above. In particular, complain in
+ // cases where you have A -> B -> C (e.g. C is inlined into B, and
+ // B is inlined into A) and the ranges for B are not enclosed
+ // within the ranges for A, or C within B.
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ checkInlCall(fnsym.Name, inlcalls, fnsym.Size, k, -1)
+ }
+ }
+
+ return inlcalls
+}
+
+// Secondary hook for DWARF inlined subroutine generation. This is called
+// late in the compilation when it is determined that we need an
+// abstract function DIE for an inlined routine imported from a
+// previously compiled package.
+func AbstractFunc(fn *obj.LSym) {
+ ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
+ if ifn == nil {
+ base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
+ return
+ }
+ _ = ifn.(*ir.Func)
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+ }
+ base.Ctxt.DwarfAbstractFunc(ifn, fn)
+}
+
+// Given a function that was inlined as part of the compilation, dig
+// up the pre-inlining DCL list for the function and create a map that
+// supports lookup of pre-inline dcl index, based on variable
+// position/name. NB: the recipe for computing variable pos/file/line
+// needs to be kept in sync with the similar code in gc.createSimpleVars
+// and related functions.
+func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
+ dcl := preInliningDcls(fnsym)
+ m := make(map[varPos]int)
+ for i, n := range dcl {
+ pos := base.Ctxt.InnermostPos(n.Pos())
+ vp := varPos{
+ DeclName: n.Sym().Name,
+ DeclFile: pos.RelFilename(),
+ DeclLine: pos.RelLine(),
+ DeclCol: pos.RelCol(),
+ }
+ if _, found := m[vp]; found {
+ // We can see collisions (variables with the same name/file/line/col) in obfuscated or machine-generated code -- see issue 44378 for an example. Skip duplicates in such cases, since it is unlikely that a human will be debugging such code.
+ continue
+ }
+ m[vp] = i
+ }
+ return m
+}
+
+func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
+ callIdx, found := imap[inlIdx]
+ if found {
+ return callIdx
+ }
+
+ // Haven't seen this inline yet. Visit parent of inline if there
+ // is one. We do this first so that parents appear before their
+ // children in the resulting table.
+ parCallIdx := -1
+ parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
+ if parInlIdx >= 0 {
+ parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
+ }
+
+ // Create new entry for this inline
+ inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
+ callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
+ callPos := base.Ctxt.InnermostPos(callXPos)
+ absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+ ic := dwarf.InlCall{
+ InlIndex: inlIdx,
+ CallPos: callPos,
+ AbsFunSym: absFnSym,
+ Root: parCallIdx == -1,
+ }
+ dwcalls.Calls = append(dwcalls.Calls, ic)
+ callIdx = len(dwcalls.Calls) - 1
+ imap[inlIdx] = callIdx
+
+ if parCallIdx != -1 {
+ // Add this inline to parent's child list
+ dwcalls.Calls[parCallIdx].Children = append(dwcalls.Calls[parCallIdx].Children, callIdx)
+ }
+
+ return callIdx
+}
+
+// Given a src.XPos, return its associated inlining index if it
+// corresponds to something created as a result of an inline, or -1 if
+// there is no inline info. Note that the index returned will refer to
+// the deepest call in the inlined stack, e.g. if you have "A calls B
+// calls C calls D" and all three callees are inlined (B, C, and D),
+// the index for a node from the inlined body of D will refer to the
+// call to D from C. Whew.
+func posInlIndex(xpos src.XPos) int {
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ if b := pos.Base(); b != nil {
+ ii := b.InliningIndex()
+ if ii >= 0 {
+ return ii
+ }
+ }
+ return -1
+}
+
+func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int) {
+ if start == -1 {
+ panic("bad range start")
+ }
+ if end == -1 {
+ panic("bad range end")
+ }
+ if ii == -1 {
+ return
+ }
+ if start == end {
+ return
+ }
+ // Append range to correct inlined call
+ callIdx, found := imap[ii]
+ if !found {
+ base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+ }
+ call := &calls[callIdx]
+ call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
+}
+
+func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
+ for i := 0; i < ilevel; i++ {
+ base.Ctxt.Logf(" ")
+ }
+ ic := inlcalls.Calls[idx]
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+ base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+ for _, f := range ic.InlVars {
+ base.Ctxt.Logf(" %v", f.Name)
+ }
+ base.Ctxt.Logf(" ) C: (")
+ for _, k := range ic.Children {
+ base.Ctxt.Logf(" %v", k)
+ }
+ base.Ctxt.Logf(" ) R:")
+ for _, r := range ic.Ranges {
+ base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+ }
+ base.Ctxt.Logf("\n")
+ for _, k := range ic.Children {
+ dumpInlCall(inlcalls, k, ilevel+1)
+ }
+
+}
+
+func dumpInlCalls(inlcalls dwarf.InlCalls) {
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ dumpInlCall(inlcalls, k, 0)
+ }
+ }
+}
+
+func dumpInlVars(dwvars []*dwarf.Var) {
+ for i, dwv := range dwvars {
+ typ := "local"
+ if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM {
+ typ = "param"
+ }
+ ia := 0
+ if dwv.IsInAbstract {
+ ia = 1
+ }
+ base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+ }
+}
+
+func rangesContains(par []dwarf.Range, rng dwarf.Range) (bool, string) {
+ for _, r := range par {
+ if rng.Start >= r.Start && rng.End <= r.End {
+ return true, ""
+ }
+ }
+ msg := fmt.Sprintf("range [%d,%d) not contained in {", rng.Start, rng.End)
+ for _, r := range par {
+ msg += fmt.Sprintf(" [%d,%d)", r.Start, r.End)
+ }
+ msg += " }"
+ return false, msg
+}
+
+func rangesContainsAll(parent, child []dwarf.Range) (bool, string) {
+ for _, r := range child {
+ c, m := rangesContains(parent, r)
+ if !c {
+ return false, m
+ }
+ }
+ return true, ""
+}
+
+// checkInlCall verifies that the PC ranges for inline info 'idx' are
+// enclosed/contained within the ranges of its parent inline (or if
+// this is a root/toplevel inline, checks that the ranges fall within
+// the extent of the top level function). A panic is issued if a
+// malformed range is found.
+func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx, parentIdx int) {
+
+ // Callee
+ ic := inlCalls.Calls[idx]
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+ calleeRanges := ic.Ranges
+
+ // Caller
+ caller := funcName
+ parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
+ if parentIdx != -1 {
+ pic := inlCalls.Calls[parentIdx]
+ caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+ parentRanges = pic.Ranges
+ }
+
+ // Callee ranges contained in caller ranges?
+ c, m := rangesContainsAll(parentRanges, calleeRanges)
+ if !c {
+ base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+ }
+
+ // Now visit kids
+ for _, k := range ic.Children {
+ checkInlCall(funcName, inlCalls, funcSize, k, idx)
+ }
+}
+
+// unifyCallRanges ensures that the ranges for a given inline
+// transitively include all of the ranges for its child inlines.
+func unifyCallRanges(inlcalls dwarf.InlCalls, idx int) {
+ ic := &inlcalls.Calls[idx]
+ for _, childIdx := range ic.Children {
+ // First make sure child ranges are unified.
+ unifyCallRanges(inlcalls, childIdx)
+
+ // Then merge child ranges into ranges for this inline.
+ cic := inlcalls.Calls[childIdx]
+ ic.Ranges = dwarf.MergeRanges(ic.Ranges, cic.Ranges)
+ }
+}
diff --git a/src/cmd/compile/internal/dwarfgen/marker.go b/src/cmd/compile/internal/dwarfgen/marker.go
new file mode 100644
index 0000000..ec6ce45
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/marker.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/src"
+)
+
+// A ScopeMarker tracks scope nesting and boundaries for later use
+// during DWARF generation.
+type ScopeMarker struct {
+ parents []ir.ScopeID
+ marks []ir.Mark
+}
+
+// checkPos validates the given position and returns the current scope.
+func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
+ if !pos.IsKnown() {
+ base.Fatalf("unknown scope position")
+ }
+
+ if len(m.marks) == 0 {
+ return 0
+ }
+
+ last := &m.marks[len(m.marks)-1]
+ if xposBefore(pos, last.Pos) {
+ base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
+ }
+ return last.Scope
+}
+
+// Push records a transition to a new child scope of the current scope.
+func (m *ScopeMarker) Push(pos src.XPos) {
+ current := m.checkPos(pos)
+
+ m.parents = append(m.parents, current)
+ child := ir.ScopeID(len(m.parents))
+
+ m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
+}
+
+// Pop records a transition back to the current scope's parent.
+func (m *ScopeMarker) Pop(pos src.XPos) {
+ current := m.checkPos(pos)
+
+ parent := m.parents[current-1]
+
+ m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
+}
+
+// Unpush removes the current scope, which must be empty.
+func (m *ScopeMarker) Unpush() {
+ i := len(m.marks) - 1
+ current := m.marks[i].Scope
+
+ if current != ir.ScopeID(len(m.parents)) {
+ base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
+ }
+
+ m.parents = m.parents[:current-1]
+ m.marks = m.marks[:i]
+}
+
+// WriteTo writes the recorded scope marks to the given function,
+// and resets the marker for reuse.
+func (m *ScopeMarker) WriteTo(fn *ir.Func) {
+ m.compactMarks()
+
+ fn.Parents = make([]ir.ScopeID, len(m.parents))
+ copy(fn.Parents, m.parents)
+ m.parents = m.parents[:0]
+
+ fn.Marks = make([]ir.Mark, len(m.marks))
+ copy(fn.Marks, m.marks)
+ m.marks = m.marks[:0]
+}
+
+func (m *ScopeMarker) compactMarks() {
+ n := 0
+ for _, next := range m.marks {
+ if n > 0 && next.Pos == m.marks[n-1].Pos {
+ m.marks[n-1].Scope = next.Scope
+ continue
+ }
+ m.marks[n] = next
+ n++
+ }
+ m.marks = m.marks[:n]
+}
diff --git a/src/cmd/compile/internal/dwarfgen/scope.go b/src/cmd/compile/internal/dwarfgen/scope.go
new file mode 100644
index 0000000..b4ae69e
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/scope.go
@@ -0,0 +1,136 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// See golang.org/issue/20390.
+func xposBefore(p, q src.XPos) bool {
+ return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
+}
+
+func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
+ i := sort.Search(len(marks), func(i int) bool {
+ return xposBefore(pos, marks[i].Pos)
+ })
+ if i == 0 {
+ return 0
+ }
+ return marks[i-1].Scope
+}
+
+func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
+ // Initialize the DWARF scope tree based on lexical scopes.
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
+ for i, parent := range fn.Parents {
+ dwarfScopes[i+1].Parent = int32(parent)
+ }
+
+ scopeVariables(dwarfVars, varScopes, dwarfScopes, fnsym.ABI() != obj.ABI0)
+ if fnsym.Func().Text != nil {
+ scopePCs(fnsym, fn.Marks, dwarfScopes)
+ }
+ return compactScopes(dwarfScopes)
+}
+
+// scopeVariables assigns DWARF variable records to their scopes.
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope, regabi bool) {
+ if regabi {
+ sort.Stable(varsByScope{dwarfVars, varScopes})
+ } else {
+ sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
+ }
+
+ i0 := 0
+ for i := range dwarfVars {
+ if varScopes[i] == varScopes[i0] {
+ continue
+ }
+ dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:i]
+ i0 = i
+ }
+ if i0 < len(dwarfVars) {
+ dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:]
+ }
+}
+
+// scopePCs assigns PC ranges to their scopes.
+func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
+ // If there aren't any child scopes (in particular, when scope
+ // tracking is disabled), we can skip a whole lot of work.
+ if len(marks) == 0 {
+ return
+ }
+ p0 := fnsym.Func().Text
+ scope := findScope(marks, p0.Pos)
+ for p := p0; p != nil; p = p.Link {
+ if p.Pos == p0.Pos {
+ continue
+ }
+ dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: p.Pc})
+ p0 = p
+ scope = findScope(marks, p0.Pos)
+ }
+ if p0.Pc < fnsym.Size {
+ dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: fnsym.Size})
+ }
+}
+
+func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope {
+ // Reverse pass to propagate PC ranges to parent scopes.
+ for i := len(dwarfScopes) - 1; i > 0; i-- {
+ s := &dwarfScopes[i]
+ dwarfScopes[s.Parent].UnifyRanges(s)
+ }
+
+ return dwarfScopes
+}
+
+type varsByScopeAndOffset struct {
+ vars []*dwarf.Var
+ scopes []ir.ScopeID
+}
+
+func (v varsByScopeAndOffset) Len() int {
+ return len(v.vars)
+}
+
+func (v varsByScopeAndOffset) Less(i, j int) bool {
+ if v.scopes[i] != v.scopes[j] {
+ return v.scopes[i] < v.scopes[j]
+ }
+ return v.vars[i].StackOffset < v.vars[j].StackOffset
+}
+
+func (v varsByScopeAndOffset) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i]
+}
+
+type varsByScope struct {
+ vars []*dwarf.Var
+ scopes []ir.ScopeID
+}
+
+func (v varsByScope) Len() int {
+ return len(v.vars)
+}
+
+func (v varsByScope) Less(i, j int) bool {
+ return v.scopes[i] < v.scopes[j]
+}
+
+func (v varsByScope) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i]
+}
diff --git a/src/cmd/compile/internal/dwarfgen/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go
new file mode 100644
index 0000000..ee4170e
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/scope_test.go
@@ -0,0 +1,527 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "debug/dwarf"
+ "fmt"
+ "internal/platform"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+
+ "cmd/internal/objfile"
+)
+
+type testline struct {
+ // line is one line of go source
+ line string
+
+ // scopes is a list of scope IDs of all the lexical scopes that this line
+ // of code belongs to.
+ // Scope IDs are assigned by traversing the tree of lexical blocks of a
+ // function in pre-order
+ // Scope IDs are function specific, i.e. scope 0 is always the root scope
+ // of the function that this line belongs to. Empty scopes are not assigned
+ // an ID (because they are not saved in debug_info).
+ // Scope 0 is always omitted from this list since all lines always belong
+ // to it.
+ scopes []int
+
+ // vars is the list of variables that belong in scopes[len(scopes)-1].
+ // Local variables are prefixed with "var ", formal parameters with "arg ".
+ // Must be ordered alphabetically.
+ // Set to nil to skip the check.
+ vars []string
+
+ // decl is the list of variables declared at this line.
+ decl []string
+
+ // declBefore is the list of variables declared at or before this line.
+ declBefore []string
+}
+
+var testfile = []testline{
+ {line: "package main"},
+ {line: "var sink any"},
+ {line: "func f1(x int) { }"},
+ {line: "func f2(x int) { }"},
+ {line: "func f3(x int) { }"},
+ {line: "func f4(x int) { }"},
+ {line: "func f5(x int) { }"},
+ {line: "func f6(x int) { }"},
+ {line: "func leak(x interface{}) { sink = x }"},
+ {line: "func gret1() int { return 2 }"},
+ {line: "func gretbool() bool { return true }"},
+ {line: "func gret3() (int, int, int) { return 0, 1, 2 }"},
+ {line: "var v = []int{ 0, 1, 2 }"},
+ {line: "var ch = make(chan int)"},
+ {line: "var floatch = make(chan float64)"},
+ {line: "var iface interface{}"},
+ {line: "func TestNestedFor() {", vars: []string{"var a int"}},
+ {line: " a := 0", decl: []string{"a"}},
+ {line: " f1(a)"},
+ {line: " for i := 0; i < 5; i++ {", scopes: []int{1}, vars: []string{"var i int"}, decl: []string{"i"}},
+ {line: " f2(i)", scopes: []int{1}},
+ {line: " for i := 0; i < 5; i++ {", scopes: []int{1, 2}, vars: []string{"var i int"}, decl: []string{"i"}},
+ {line: " f3(i)", scopes: []int{1, 2}},
+ {line: " }"},
+ {line: " f4(i)", scopes: []int{1}},
+ {line: " }"},
+ {line: " f5(a)"},
+ {line: "}"},
+ {line: "func TestOas2() {", vars: []string{}},
+ {line: " if a, b, c := gret3(); a != 1 {", scopes: []int{1}, vars: []string{"var a int", "var b int", "var c int"}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " f1(b)", scopes: []int{1}},
+ {line: " f1(c)", scopes: []int{1}},
+ {line: " }"},
+ {line: " for i, x := range v {", scopes: []int{2}, vars: []string{"var i int", "var x int"}},
+ {line: " f1(i)", scopes: []int{2}},
+ {line: " f1(x)", scopes: []int{2}},
+ {line: " }"},
+ {line: " if a, ok := <- ch; ok {", scopes: []int{3}, vars: []string{"var a int", "var ok bool"}},
+ {line: " f1(a)", scopes: []int{3}},
+ {line: " }"},
+ {line: " if a, ok := iface.(int); ok {", scopes: []int{4}, vars: []string{"var a int", "var ok bool"}},
+ {line: " f1(a)", scopes: []int{4}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestIfElse() {"},
+ {line: " if x := gret1(); x != 0 {", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " a := 0", scopes: []int{1, 2}, vars: []string{"var a int"}},
+ {line: " f1(a); f1(x)", scopes: []int{1, 2}},
+ {line: " } else {"},
+ {line: " b := 1", scopes: []int{1, 3}, vars: []string{"var b int"}},
+ {line: " f1(b); f1(x+1)", scopes: []int{1, 3}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestSwitch() {", vars: []string{}},
+ {line: " switch x := gret1(); x {", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " case 0:", scopes: []int{1, 2}},
+ {line: " i := x + 5", scopes: []int{1, 2}, vars: []string{"var i int"}},
+ {line: " f1(x); f1(i)", scopes: []int{1, 2}},
+ {line: " case 1:", scopes: []int{1, 3}},
+ {line: " j := x + 10", scopes: []int{1, 3}, vars: []string{"var j int"}},
+ {line: " f1(x); f1(j)", scopes: []int{1, 3}},
+ {line: " case 2:", scopes: []int{1, 4}},
+ {line: " k := x + 2", scopes: []int{1, 4}, vars: []string{"var k int"}},
+ {line: " f1(x); f1(k)", scopes: []int{1, 4}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestTypeSwitch() {", vars: []string{}},
+ {line: " switch x := iface.(type) {"},
+ {line: " case int:", scopes: []int{1}},
+ {line: " f1(x)", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " case uint8:", scopes: []int{2}},
+ {line: " f1(int(x))", scopes: []int{2}, vars: []string{"var x uint8"}},
+ {line: " case float64:", scopes: []int{3}},
+ {line: " f1(int(x)+1)", scopes: []int{3}, vars: []string{"var x float64"}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestSelectScope() {"},
+ {line: " select {"},
+ {line: " case i := <- ch:", scopes: []int{1}},
+ {line: " f1(i)", scopes: []int{1}, vars: []string{"var i int"}},
+ {line: " case f := <- floatch:", scopes: []int{2}},
+ {line: " f1(int(f))", scopes: []int{2}, vars: []string{"var f float64"}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestBlock() {", vars: []string{"var a int"}},
+ {line: " a := 1"},
+ {line: " {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int"}},
+ {line: " f1(b)", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestDiscontiguousRanges() {", vars: []string{"var a int"}},
+ {line: " a := 0"},
+ {line: " f1(a)"},
+ {line: " {"},
+ {line: " b := 0", scopes: []int{1}, vars: []string{"var b int"}},
+ {line: " f2(b)", scopes: []int{1}},
+ {line: " if gretbool() {", scopes: []int{1}},
+ {line: " c := 0", scopes: []int{1, 2}, vars: []string{"var c int"}},
+ {line: " f3(c)", scopes: []int{1, 2}},
+ {line: " } else {"},
+ {line: " c := 1.1", scopes: []int{1, 3}, vars: []string{"var c float64"}},
+ {line: " f4(int(c))", scopes: []int{1, 3}},
+ {line: " }"},
+ {line: " f5(b)", scopes: []int{1}},
+ {line: " }"},
+ {line: " f6(a)"},
+ {line: "}"},
+ {line: "func TestClosureScope() {", vars: []string{"var a int", "var b int", "var f func(int)"}},
+ {line: " a := 1; b := 1"},
+ {line: " f := func(c int) {", scopes: []int{0}, vars: []string{"arg c int", "var &b *int", "var a int", "var d int"}, declBefore: []string{"&b", "a"}},
+ {line: " d := 3"},
+ {line: " f1(c); f1(d)"},
+ {line: " if e := 3; e != 0 {", scopes: []int{1}, vars: []string{"var e int"}},
+ {line: " f1(e)", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " b = 2", scopes: []int{1}},
+ {line: " }"},
+ {line: " }"},
+ {line: " f(3); f1(b)"},
+ {line: "}"},
+ {line: "func TestEscape() {"},
+ {line: " a := 1", vars: []string{"var a int"}},
+ {line: " {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var &b *int", "var p *int"}},
+ {line: " p := &b", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " leak(p)", scopes: []int{1}},
+ {line: " }"},
+ {line: "}"},
+ {line: "var fglob func() int"},
+ {line: "func TestCaptureVar(flag bool) {"},
+ {line: " a := 1", vars: []string{"arg flag bool", "var a int"}}, // TODO(register args) restore "arg ~r1 func() int",
+ {line: " if flag {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}},
+ {line: " f := func() int {", scopes: []int{1, 0}},
+ {line: " return b + 1"},
+ {line: " }"},
+ {line: " fglob = f", scopes: []int{1}},
+ {line: " }"},
+ {line: " f1(a)"},
+ {line: "}"},
+ {line: "func main() {"},
+ {line: " TestNestedFor()"},
+ {line: " TestOas2()"},
+ {line: " TestIfElse()"},
+ {line: " TestSwitch()"},
+ {line: " TestTypeSwitch()"},
+ {line: " TestSelectScope()"},
+ {line: " TestBlock()"},
+ {line: " TestDiscontiguousRanges()"},
+ {line: " TestClosureScope()"},
+ {line: " TestEscape()"},
+ {line: " TestCaptureVar(true)"},
+ {line: "}"},
+}
+
+const detailOutput = false
+
+// Compiles testfile checks that the description of lexical blocks emitted
+// by the linker in debug_info, for each function in the main package,
+// corresponds to what we expect it to be.
+func TestScopeRanges(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
+ t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
+ }
+
+ src, f := gobuild(t, t.TempDir(), false, testfile)
+ defer f.Close()
+
+ // the compiler uses forward slashes for paths even on windows
+ src = strings.Replace(src, "\\", "/", -1)
+
+ pcln, err := f.PCLineTable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfData, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfReader := dwarfData.Reader()
+
+ lines := make(map[line][]*lexblock)
+
+ for {
+ entry, err := dwarfReader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ if entry.Tag != dwarf.TagSubprogram {
+ continue
+ }
+
+ name, ok := entry.Val(dwarf.AttrName).(string)
+ if !ok || !strings.HasPrefix(name, "main.Test") {
+ continue
+ }
+
+ var scope lexblock
+ ctxt := scopexplainContext{
+ dwarfData: dwarfData,
+ dwarfReader: dwarfReader,
+ scopegen: 1,
+ }
+
+ readScope(&ctxt, &scope, entry)
+
+ scope.markLines(pcln, lines)
+ }
+
+ anyerror := false
+ for i := range testfile {
+ tgt := testfile[i].scopes
+ out := lines[line{src, i + 1}]
+
+ if detailOutput {
+ t.Logf("%s // %v", testfile[i].line, out)
+ }
+
+ scopesok := checkScopes(tgt, out)
+ if !scopesok {
+ t.Logf("mismatch at line %d %q: expected: %v got: %v\n", i, testfile[i].line, tgt, scopesToString(out))
+ }
+
+ varsok := true
+ if testfile[i].vars != nil {
+ if len(out) > 0 {
+ varsok = checkVars(testfile[i].vars, out[len(out)-1].vars)
+ if !varsok {
+ t.Logf("variable mismatch at line %d %q for scope %d: expected: %v got: %v\n", i+1, testfile[i].line, out[len(out)-1].id, testfile[i].vars, out[len(out)-1].vars)
+ }
+ for j := range testfile[i].decl {
+ if line := declLineForVar(out[len(out)-1].vars, testfile[i].decl[j]); line != i+1 {
+ t.Errorf("wrong declaration line for variable %s, expected %d got: %d", testfile[i].decl[j], i+1, line)
+ }
+ }
+
+ for j := range testfile[i].declBefore {
+ if line := declLineForVar(out[len(out)-1].vars, testfile[i].declBefore[j]); line > i+1 {
+ t.Errorf("wrong declaration line for variable %s, expected %d (or less) got: %d", testfile[i].declBefore[j], i+1, line)
+ }
+ }
+ }
+ }
+
+ anyerror = anyerror || !scopesok || !varsok
+ }
+
+ if anyerror {
+ t.Fatalf("mismatched output")
+ }
+}
+
+func scopesToString(v []*lexblock) string {
+ r := make([]string, len(v))
+ for i, s := range v {
+ r[i] = strconv.Itoa(s.id)
+ }
+ return "[ " + strings.Join(r, ", ") + " ]"
+}
+
+func checkScopes(tgt []int, out []*lexblock) bool {
+ if len(out) > 0 {
+ // omit scope 0
+ out = out[1:]
+ }
+ if len(tgt) != len(out) {
+ return false
+ }
+ for i := range tgt {
+ if tgt[i] != out[i].id {
+ return false
+ }
+ }
+ return true
+}
+
+func checkVars(tgt []string, out []variable) bool {
+ if len(tgt) != len(out) {
+ return false
+ }
+ for i := range tgt {
+ if tgt[i] != out[i].expr {
+ return false
+ }
+ }
+ return true
+}
+
+func declLineForVar(scope []variable, name string) int {
+ for i := range scope {
+ if scope[i].name() == name {
+ return scope[i].declLine
+ }
+ }
+ return -1
+}
+
+type lexblock struct {
+ id int
+ ranges [][2]uint64
+ vars []variable
+ scopes []lexblock
+}
+
+type variable struct {
+ expr string
+ declLine int
+}
+
+func (v *variable) name() string {
+ return strings.Split(v.expr, " ")[1]
+}
+
+type line struct {
+ file string
+ lineno int
+}
+
+type scopexplainContext struct {
+ dwarfData *dwarf.Data
+ dwarfReader *dwarf.Reader
+ scopegen int
+}
+
+// readScope reads the DW_TAG_lexical_block or the DW_TAG_subprogram in
+// entry and writes a description in scope.
+// Nested DW_TAG_lexical_block entries are read recursively.
+func readScope(ctxt *scopexplainContext, scope *lexblock, entry *dwarf.Entry) {
+ var err error
+ scope.ranges, err = ctxt.dwarfData.Ranges(entry)
+ if err != nil {
+ panic(err)
+ }
+ for {
+ e, err := ctxt.dwarfReader.Next()
+ if err != nil {
+ panic(err)
+ }
+ switch e.Tag {
+ case 0:
+ sort.Slice(scope.vars, func(i, j int) bool {
+ return scope.vars[i].expr < scope.vars[j].expr
+ })
+ return
+ case dwarf.TagFormalParameter:
+ typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset))
+ if err != nil {
+ panic(err)
+ }
+ scope.vars = append(scope.vars, entryToVar(e, "arg", typ))
+ case dwarf.TagVariable:
+ typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset))
+ if err != nil {
+ panic(err)
+ }
+ scope.vars = append(scope.vars, entryToVar(e, "var", typ))
+ case dwarf.TagLexDwarfBlock:
+ scope.scopes = append(scope.scopes, lexblock{id: ctxt.scopegen})
+ ctxt.scopegen++
+ readScope(ctxt, &scope.scopes[len(scope.scopes)-1], e)
+ }
+ }
+}
+
+func entryToVar(e *dwarf.Entry, kind string, typ dwarf.Type) variable {
+ return variable{
+ fmt.Sprintf("%s %s %s", kind, e.Val(dwarf.AttrName).(string), typ.String()),
+ int(e.Val(dwarf.AttrDeclLine).(int64)),
+ }
+}
+
+// markLines marks all lines that belong to this scope with this scope
+// Recursively calls markLines for all children scopes.
+func (scope *lexblock) markLines(pcln objfile.Liner, lines map[line][]*lexblock) {
+ for _, r := range scope.ranges {
+ for pc := r[0]; pc < r[1]; pc++ {
+ file, lineno, _ := pcln.PCToLine(pc)
+ l := line{file, lineno}
+ if len(lines[l]) == 0 || lines[l][len(lines[l])-1] != scope {
+ lines[l] = append(lines[l], scope)
+ }
+ }
+ }
+
+ for i := range scope.scopes {
+ scope.scopes[i].markLines(pcln, lines)
+ }
+}
+
+func gobuild(t *testing.T, dir string, optimized bool, testfile []testline) (string, *objfile.File) {
+ src := filepath.Join(dir, "test.go")
+ dst := filepath.Join(dir, "out.o")
+
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := range testfile {
+ f.Write([]byte(testfile[i].line))
+ f.Write([]byte{'\n'})
+ }
+ f.Close()
+
+ args := []string{"build"}
+ if !optimized {
+ args = append(args, "-gcflags=-N -l")
+ }
+ args = append(args, "-o", dst, src)
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), args...)
+ if b, err := cmd.CombinedOutput(); err != nil {
+ t.Logf("build: %s\n", string(b))
+ t.Fatal(err)
+ }
+
+ pkg, err := objfile.Open(dst)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return src, pkg
+}
+
+// TestEmptyDwarfRanges tests that no list entry in debug_ranges has start == end.
+// See issue #23928.
+func TestEmptyDwarfRanges(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
+ t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
+ }
+
+ _, f := gobuild(t, t.TempDir(), true, []testline{{line: "package main"}, {line: "func main(){ println(\"hello\") }"}})
+ defer f.Close()
+
+ dwarfData, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfReader := dwarfData.Reader()
+
+ for {
+ entry, err := dwarfReader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ ranges, err := dwarfData.Ranges(entry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ranges == nil {
+ continue
+ }
+
+ for _, rng := range ranges {
+ if rng[0] == rng[1] {
+ t.Errorf("range entry with start == end: %v", rng)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/escape/assign.go b/src/cmd/compile/internal/escape/assign.go
new file mode 100644
index 0000000..6af5388
--- /dev/null
+++ b/src/cmd/compile/internal/escape/assign.go
@@ -0,0 +1,128 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// addr evaluates an addressable expression n and returns a hole
+// that represents storing into the represented location.
+func (e *escape) addr(n ir.Node) hole {
+ if n == nil || ir.IsBlank(n) {
+ // Can happen in select case, range, maybe others.
+ return e.discardHole()
+ }
+
+ k := e.heapHole()
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected addr: %v", n)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PEXTERN {
+ break
+ }
+ k = e.oldLoc(n).asHole()
+ case ir.OLINKSYMOFFSET:
+ break
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ k = e.addr(n.X)
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Index)
+ if n.X.Type().IsArray() {
+ k = e.addr(n.X)
+ } else {
+ e.mutate(n.X)
+ }
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.mutate(n.X)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.mutate(n.X)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.assignHeap(n.Index, "key of map put", n)
+ }
+
+ return k
+}
+
+func (e *escape) mutate(n ir.Node) {
+ e.expr(e.mutatorHole(), n)
+}
+
+func (e *escape) addrs(l ir.Nodes) []hole {
+ var ks []hole
+ for _, n := range l {
+ ks = append(ks, e.addr(n))
+ }
+ return ks
+}
+
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
+ e.expr(e.heapHole().note(where, why), src)
+}
+
+// assignList evaluates the assignment dsts... = srcs....
+func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
+ ks := e.addrs(dsts)
+ for i, k := range ks {
+ var src ir.Node
+ if i < len(srcs) {
+ src = srcs[i]
+ }
+
+ if dst := dsts[i]; dst != nil {
+ // Detect implicit conversion of uintptr to unsafe.Pointer when
+ // storing into reflect.{Slice,String}Header.
+ if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+ e.unsafeValue(e.heapHole().note(where, why), src)
+ continue
+ }
+
+ // Filter out some no-op assignments for escape analysis.
+ if src != nil && isSelfAssign(dst, src) {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+ }
+ k = e.discardHole()
+ }
+ }
+
+ e.expr(k.note(where, why), src)
+ }
+
+ e.reassigned(ks, where)
+}
+
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+ if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+ if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+ // Zero-value assignment for variable declared without an
+ // explicit initial value. Assume this is its initialization
+ // statement.
+ return
+ }
+ }
+
+ for _, k := range ks {
+ loc := k.dst
+ // Variables declared by range statements are assigned on every iteration.
+ if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+ continue
+ }
+ loc.reassigned = true
+ }
+}
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
new file mode 100644
index 0000000..4a3753a
--- /dev/null
+++ b/src/cmd/compile/internal/escape/call.go
@@ -0,0 +1,361 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// call evaluates a call expressions, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flows.
+func (e *escape) call(ks []hole, call ir.Node) {
+ argument := func(k hole, arg ir.Node) {
+ // TODO(mdempsky): Should be "call argument".
+ e.expr(k.note(call, "call parameter"), arg)
+ }
+
+ switch call.Op() {
+ default:
+ ir.Dump("esc", call)
+ base.Fatalf("unexpected call op: %v", call.Op())
+
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ typecheck.AssertFixedCall(call)
+
+ // Pick out the function callee, if statically known.
+ //
+ // TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
+ // functions (e.g., runtime builtins, method wrappers, generated
+ // eq/hash functions) don't have it set. Investigate whether
+ // that's a concern.
+ var fn *ir.Name
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ v := ir.StaticValue(call.Fun)
+ fn = ir.StaticCalleeName(v)
+ }
+
+ fntype := call.Fun.Type()
+ if fn != nil {
+ fntype = fn.Type()
+ }
+
+ if ks != nil && fn != nil && e.inMutualBatch(fn) {
+ for i, result := range fn.Type().Results() {
+ e.expr(ks[i], result.Nname.(*ir.Name))
+ }
+ }
+
+ var recvArg ir.Node
+ if call.Op() == ir.OCALLFUNC {
+ // Evaluate callee function expression.
+ calleeK := e.discardHole()
+ if fn == nil { // unknown callee
+ for _, k := range ks {
+ if k.dst != &e.blankLoc {
+ // The results flow somewhere, but we don't statically
+ // know the callee function. If a closure flows here, we
+ // need to conservatively assume its results might flow to
+ // the heap.
+ calleeK = e.calleeHole().note(call, "callee operand")
+ break
+ }
+ }
+ }
+ e.expr(calleeK, call.Fun)
+ } else {
+ recvArg = call.Fun.(*ir.SelectorExpr).X
+ }
+
+ // argumentParam handles escape analysis of assigning a call
+ // argument to its corresponding parameter.
+ argumentParam := func(param *types.Field, arg ir.Node) {
+ e.rewriteArgument(arg, call, fn)
+ argument(e.tagHole(ks, fn, param), arg)
+ }
+
+ args := call.Args
+ if recvParam := fntype.Recv(); recvParam != nil {
+ if recvArg == nil {
+ // Function call using method expression. Receiver argument is
+ // at the front of the regular arguments list.
+ recvArg, args = args[0], args[1:]
+ }
+
+ argumentParam(recvParam, recvArg)
+ }
+
+ for i, param := range fntype.Params() {
+ argumentParam(param, args[i])
+ }
+
+ case ir.OINLCALL:
+ call := call.(*ir.InlinedCallExpr)
+ e.stmts(call.Body)
+ for i, result := range call.ReturnVars {
+ k := e.discardHole()
+ if ks != nil {
+ k = ks[i]
+ }
+ e.expr(k, result)
+ }
+
+ case ir.OAPPEND:
+ call := call.(*ir.CallExpr)
+ args := call.Args
+
+ // Appendee slice may flow directly to the result, if
+ // it has enough capacity. Alternatively, a new heap
+ // slice might be allocated, and all slice elements
+ // might flow to heap.
+ appendeeK := e.teeHole(ks[0], e.mutatorHole())
+ if args[0].Type().Elem().HasPointers() {
+ appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+ }
+ argument(appendeeK, args[0])
+
+ if call.IsDDD {
+ appendedK := e.discardHole()
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
+ appendedK = e.heapHole().deref(call, "appended slice...")
+ }
+ argument(appendedK, args[1])
+ } else {
+ for i := 1; i < len(args); i++ {
+ argument(e.heapHole(), args[i])
+ }
+ }
+ e.discard(call.RType)
+
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ argument(e.mutatorHole(), call.X)
+
+ copiedK := e.discardHole()
+ if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
+ copiedK = e.heapHole().deref(call, "copied slice")
+ }
+ argument(copiedK, call.Y)
+ e.discard(call.RType)
+
+ case ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ argument(e.heapHole(), call.X)
+
+ case ir.OCOMPLEX:
+ call := call.(*ir.BinaryExpr)
+ e.discard(call.X)
+ e.discard(call.Y)
+
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ call := call.(*ir.CallExpr)
+ for _, arg := range call.Args {
+ e.discard(arg)
+ }
+ e.discard(call.RType)
+
+ case ir.OMIN, ir.OMAX:
+ call := call.(*ir.CallExpr)
+ for _, arg := range call.Args {
+ argument(ks[0], arg)
+ }
+ e.discard(call.RType)
+
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ call := call.(*ir.UnaryExpr)
+ e.discard(call.X)
+
+ case ir.OCLEAR:
+ call := call.(*ir.UnaryExpr)
+ argument(e.mutatorHole(), call.X)
+
+ case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
+ call := call.(*ir.UnaryExpr)
+ argument(ks[0], call.X)
+
+ case ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING:
+ call := call.(*ir.BinaryExpr)
+ argument(ks[0], call.X)
+ e.discard(call.Y)
+ e.discard(call.RType)
+ }
+}
+
+// goDeferStmt analyzes a "go" or "defer" statement.
+func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
+ k := e.heapHole()
+ if n.Op() == ir.ODEFER && e.loopDepth == 1 && n.DeferAt == nil {
+ // Top-level defer arguments don't escape to the heap,
+ // but they do need to last until they're invoked.
+ k = e.later(e.discardHole())
+
+ // force stack allocation of defer record, unless
+ // open-coded defers are used (see ssa.go)
+ n.SetEsc(ir.EscNever)
+ }
+
+ // If the function is already a zero argument/result function call,
+ // just escape analyze it normally.
+ //
+ // Note that the runtime is aware of this optimization for
+ // "go" statements that start in reflect.makeFuncStub or
+ // reflect.methodValueCall.
+
+ call, ok := n.Call.(*ir.CallExpr)
+ if !ok || call.Op() != ir.OCALLFUNC {
+ base.FatalfAt(n.Pos(), "expected function call: %v", n.Call)
+ }
+ if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() != 0 {
+ base.FatalfAt(n.Pos(), "expected signature without parameters or results: %v", sig)
+ }
+
+ if clo, ok := call.Fun.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+
+ e.expr(k, call.Fun)
+}
+
+// rewriteArgument rewrites the argument arg of the given call expression.
+// fn is the static callee function, if known.
+func (e *escape) rewriteArgument(arg ir.Node, call *ir.CallExpr, fn *ir.Name) {
+ if fn == nil || fn.Func == nil {
+ return
+ }
+ pragma := fn.Func.Pragma
+ if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
+ return
+ }
+
+ // unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
+ // functions, so that ptr is kept alive and/or escaped as
+ // appropriate. unsafeUintptr also reports whether it modified arg0.
+ unsafeUintptr := func(arg ir.Node) {
+ // If the argument is really a pointer being converted to uintptr,
+ // arrange for the pointer to be kept alive until the call
+ // returns, by copying it into a temp and marking that temp still
+ // alive when we pop the temp stack.
+ conv, ok := arg.(*ir.ConvExpr)
+ if !ok || conv.Op() != ir.OCONVNOP {
+ return // not a conversion
+ }
+ if !conv.X.Type().IsUnsafePtr() || !conv.Type().IsUintptr() {
+ return // not an unsafe.Pointer->uintptr conversion
+ }
+
+ // Create and declare a new pointer-typed temp variable.
+ //
+ // TODO(mdempsky): This potentially violates the Go spec's order
+ // of evaluations, by evaluating arg.X before any other
+ // operands.
+ tmp := e.copyExpr(conv.Pos(), conv.X, call.PtrInit())
+ conv.X = tmp
+
+ k := e.mutatorHole()
+ if pragma&ir.UintptrEscapes != 0 {
+ k = e.heapHole().note(conv, "//go:uintptrescapes")
+ }
+ e.flow(k, e.oldLoc(tmp))
+
+ if pragma&ir.UintptrKeepAlive != 0 {
+ tmp.SetAddrtaken(true) // ensure SSA keeps the tmp variable
+ call.KeepAlive = append(call.KeepAlive, tmp)
+ }
+ }
+
+ // For variadic functions, the compiler has already rewritten:
+ //
+ // f(a, b, c)
+ //
+ // to:
+ //
+ // f([]T{a, b, c}...)
+ //
+ // So we need to look into slice elements to handle uintptr(ptr)
+ // arguments to variadic syscall-like functions correctly.
+ if arg.Op() == ir.OSLICELIT {
+ list := arg.(*ir.CompLitExpr).List
+ for _, el := range list {
+ if el.Op() == ir.OKEY {
+ el = el.(*ir.KeyExpr).Value
+ }
+ unsafeUintptr(el)
+ }
+ } else {
+ unsafeUintptr(arg)
+ }
+}
+
+// copyExpr creates and returns a new temporary variable within fn;
+// appends statements to init to declare and initialize it to expr;
+// and escape analyzes the data flow.
+func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
+ if ir.HasUniquePos(expr) {
+ pos = expr.Pos()
+ }
+
+ tmp := typecheck.TempAt(pos, e.curfn, expr.Type())
+
+ stmts := []ir.Node{
+ ir.NewDecl(pos, ir.ODCL, tmp),
+ ir.NewAssignStmt(pos, tmp, expr),
+ }
+ typecheck.Stmts(stmts)
+ init.Append(stmts...)
+
+ e.newLoc(tmp, true)
+ e.stmts(stmts)
+
+ return tmp
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flows. fn is the statically-known callee function,
+// if any.
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
+ // If this is a dynamic call, we can't rely on param.Note.
+ if fn == nil {
+ return e.heapHole()
+ }
+
+ if e.inMutualBatch(fn) {
+ if param.Nname == nil {
+ return e.discardHole()
+ }
+ return e.addr(param.Nname.(*ir.Name))
+ }
+
+ // Call to previously tagged function.
+
+ var tagKs []hole
+ esc := parseLeaks(param.Note)
+
+ if x := esc.Heap(); x >= 0 {
+ tagKs = append(tagKs, e.heapHole().shift(x))
+ }
+ if x := esc.Mutator(); x >= 0 {
+ tagKs = append(tagKs, e.mutatorHole().shift(x))
+ }
+ if x := esc.Callee(); x >= 0 {
+ tagKs = append(tagKs, e.calleeHole().shift(x))
+ }
+
+ if ks != nil {
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ tagKs = append(tagKs, ks[i].shift(x))
+ }
+ }
+ }
+
+ return e.teeHole(tagKs...)
+}
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
new file mode 100644
index 0000000..7df367c
--- /dev/null
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -0,0 +1,509 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Escape analysis.
+//
+// Here we analyze functions to determine which Go variables
+// (including implicit allocations such as calls to "new" or "make",
+// composite literals, etc.) can be allocated on the stack. The two
+// key invariants we have to ensure are: (1) pointers to stack objects
+// cannot be stored in the heap, and (2) pointers to a stack object
+// cannot outlive that object (e.g., because the declaring function
+// returned and destroyed the object's stack frame, or its space is
+// reused across loop iterations for logically distinct variables).
+//
+// We implement this with a static data-flow analysis of the AST.
+// First, we construct a directed weighted graph where vertices
+// (termed "locations") represent variables allocated by statements
+// and expressions, and edges represent assignments between variables
+// (with weights representing addressing/dereference counts).
+//
+// Next we walk the graph looking for assignment paths that might
+// violate the invariants stated above. If a variable v's address is
+// stored in the heap or elsewhere that may outlive it, then v is
+// marked as requiring heap allocation.
+//
+// To support interprocedural analysis, we also record data-flow from
+// each function's parameters to the heap and to its result
+// parameters. This information is summarized as "parameter tags",
+// which are used at static call sites to improve escape analysis of
+// function arguments.
+
+// Constructing the location graph.
+//
+// Every allocating statement (e.g., variable declaration) or
+// expression (e.g., "new" or "make") is first mapped to a unique
+// "location."
+//
+// We also model every Go assignment as a directed edges between
+// locations. The number of dereference operations minus the number of
+// addressing operations is recorded as the edge's weight (termed
+// "derefs"). For example:
+//
+// p = &q // -1
+// p = q // 0
+// p = *q // 1
+// p = **q // 2
+//
+// p = **&**&q // 2
+//
+// Note that the & operator can only be applied to addressable
+// expressions, and the expression &x itself is not addressable, so
+// derefs cannot go below -1.
+//
+// Every Go language construct is lowered into this representation,
+// generally without sensitivity to flow, path, or context; and
+// without distinguishing elements within a compound variable. For
+// example:
+//
+// var x struct { f, g *int }
+// var u []*int
+//
+// x.f = u[0]
+//
+// is modeled simply as
+//
+// x = *u
+//
+// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
+// u[2], etc. However, we do record the implicit dereference involved
+// in indexing a slice.
+
+// A batch holds escape analysis state that's shared across an entire
+// batch of functions being analyzed at once.
+type batch struct {
+ allLocs []*location
+ closures []closure
+
+ heapLoc location
+ mutatorLoc location
+ calleeLoc location
+ blankLoc location
+}
+
+// A closure holds a closure expression and its spill hole (i.e.,
+// where the hole representing storing into its closure record).
+type closure struct {
+ k hole
+ clo *ir.ClosureExpr
+}
+
+// An escape holds state specific to a single function being analyzed
+// within a batch.
+type escape struct {
+ *batch
+
+ curfn *ir.Func // function being analyzed
+
+ labels map[*types.Sym]labelState // known labels
+
+ // loopDepth counts the current loop nesting depth within
+ // curfn. It increments within each "for" loop and at each
+ // label with a corresponding backwards "goto" (i.e.,
+ // unstructured loop).
+ loopDepth int
+}
+
+func Funcs(all []*ir.Func) {
+ ir.VisitFuncsBottomUp(all, Batch)
+}
+
+// Batch performs escape analysis on a minimal batch of
+// functions.
+func Batch(fns []*ir.Func, recursive bool) {
+ var b batch
+ b.heapLoc.attrs = attrEscapes | attrPersists | attrMutates | attrCalls
+ b.mutatorLoc.attrs = attrMutates
+ b.calleeLoc.attrs = attrCalls
+
+ // Construct data-flow graph from syntax trees.
+ for _, fn := range fns {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore escape %v", fn)
+ ir.Dump(s, fn)
+ }
+ b.initFunc(fn)
+ }
+ for _, fn := range fns {
+ if !fn.IsHiddenClosure() {
+ b.walkFunc(fn)
+ }
+ }
+
+ // We've walked the function bodies, so we've seen everywhere a
+ // variable might be reassigned or have it's address taken. Now we
+ // can decide whether closures should capture their free variables
+ // by value or reference.
+ for _, closure := range b.closures {
+ b.flowClosure(closure.k, closure.clo)
+ }
+ b.closures = nil
+
+ for _, loc := range b.allLocs {
+ if why := HeapAllocReason(loc.n); why != "" {
+ b.flow(b.heapHole().addr(loc.n, why), loc)
+ }
+ }
+
+ b.walkAll()
+ b.finish(fns)
+}
+
+func (b *batch) with(fn *ir.Func) *escape {
+ return &escape{
+ batch: b,
+ curfn: fn,
+ loopDepth: 1,
+ }
+}
+
+func (b *batch) initFunc(fn *ir.Func) {
+ e := b.with(fn)
+ if fn.Esc() != escFuncUnknown {
+ base.Fatalf("unexpected node: %v", fn)
+ }
+ fn.SetEsc(escFuncPlanned)
+ if base.Flag.LowerM > 3 {
+ ir.Dump("escAnalyze", fn)
+ }
+
+ // Allocate locations for local variables.
+ for _, n := range fn.Dcl {
+ e.newLoc(n, true)
+ }
+
+ // Also for hidden parameters (e.g., the ".this" parameter to a
+ // method value wrapper).
+ if fn.OClosure == nil {
+ for _, n := range fn.ClosureVars {
+ e.newLoc(n.Canonical(), true)
+ }
+ }
+
+ // Initialize resultIndex for result parameters.
+ for i, f := range fn.Type().Results() {
+ e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
+ }
+}
+
+func (b *batch) walkFunc(fn *ir.Func) {
+ e := b.with(fn)
+ fn.SetEsc(escFuncStarted)
+
+ // Identify labels that mark the head of an unstructured loop.
+ ir.Visit(fn, func(n ir.Node) {
+ switch n.Op() {
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ if n.Label.IsBlank() {
+ break
+ }
+ if e.labels == nil {
+ e.labels = make(map[*types.Sym]labelState)
+ }
+ e.labels[n.Label] = nonlooping
+
+ case ir.OGOTO:
+ // If we visited the label before the goto,
+ // then this is a looping label.
+ n := n.(*ir.BranchStmt)
+ if e.labels[n.Label] == nonlooping {
+ e.labels[n.Label] = looping
+ }
+ }
+ })
+
+ e.block(fn.Body)
+
+ if len(e.labels) != 0 {
+ base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
+ }
+}
+
+func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
+ for _, cv := range clo.Func.ClosureVars {
+ n := cv.Canonical()
+ loc := b.oldLoc(cv)
+ if !loc.captured {
+ base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv)
+ }
+
+ // Capture by value for variables <= 128 bytes that are never reassigned.
+ n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
+ if !n.Byval() {
+ n.SetAddrtaken(true)
+ if n.Sym().Name == typecheck.LocalDictName {
+ base.FatalfAt(n.Pos(), "dictionary variable not captured by value")
+ }
+ }
+
+ if base.Flag.LowerM > 1 {
+ how := "ref"
+ if n.Byval() {
+ how = "value"
+ }
+ base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size())
+ }
+
+ // Flow captured variables to closure.
+ k := k
+ if !cv.Byval() {
+ k = k.addr(cv, "reference")
+ }
+ b.flow(k.note(cv, "captured by a closure"), loc)
+ }
+}
+
+func (b *batch) finish(fns []*ir.Func) {
+ // Record parameter tags for package export data.
+ for _, fn := range fns {
+ fn.SetEsc(escFuncTagged)
+
+ for i, param := range fn.Type().RecvParams() {
+ param.Note = b.paramTag(fn, 1+i, param)
+ }
+ }
+
+ for _, loc := range b.allLocs {
+ n := loc.n
+ if n == nil {
+ continue
+ }
+
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ n.Opt = nil
+ }
+
+ // Update n.Esc based on escape analysis results.
+
+ // Omit escape diagnostics for go/defer wrappers, at least for now.
+ // Historically, we haven't printed them, and test cases don't expect them.
+ // TODO(mdempsky): Update tests to expect this.
+ goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
+
+ if loc.hasAttr(attrEscapes) {
+ if n.Op() == ir.ONAME {
+ if base.Flag.CompilingRuntime {
+ base.ErrorfAt(n.Pos(), 0, "%v escapes to heap, not allowed in runtime", n)
+ }
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+ }
+ } else {
+ if base.Flag.LowerM != 0 && !goDeferWrapper {
+ base.WarnfAt(n.Pos(), "%v escapes to heap", n)
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
+ }
+ }
+ n.SetEsc(ir.EscHeap)
+ } else {
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME && !goDeferWrapper {
+ base.WarnfAt(n.Pos(), "%v does not escape", n)
+ }
+ n.SetEsc(ir.EscNone)
+ if !loc.hasAttr(attrPersists) {
+ switch n.Op() {
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ n.SetTransient(true)
+ case ir.OMETHVALUE:
+ n := n.(*ir.SelectorExpr)
+ n.SetTransient(true)
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ n.SetTransient(true)
+ }
+ }
+ }
+
+ // If the result of a string->[]byte conversion is never mutated,
+ // then it can simply reuse the string's memory directly.
+ if base.Debug.ZeroCopy != 0 {
+ if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OSTR2BYTES && !loc.hasAttr(attrMutates) {
+ if base.Flag.LowerM >= 1 {
+ base.WarnfAt(n.Pos(), "zero-copy string->[]byte conversion")
+ }
+ n.SetOp(ir.OSTR2BYTESTMP)
+ }
+ }
+ }
+}
+
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (b *batch) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+ if fn.Defn.Esc() == escFuncUnknown {
+ base.FatalfAt(fn.Pos(), "graph inconsistency: %v", fn)
+ }
+ return true
+ }
+ return false
+}
+
+const (
+ escFuncUnknown = 0 + iota
+ escFuncPlanned
+ escFuncStarted
+ escFuncTagged
+)
+
+// Mark labels that have no backjumps to them as not increasing e.loopdepth.
+type labelState int
+
+const (
+ looping labelState = 1 + iota
+ nonlooping
+)
+
+func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+ name := func() string {
+ if f.Nname != nil {
+ return f.Nname.Sym().Name
+ }
+ return fmt.Sprintf("arg#%d", narg)
+ }
+
+ // Only report diagnostics for user code;
+ // not for wrappers generated around them.
+ // TODO(mdempsky): Generalize this.
+ diagnose := base.Flag.LowerM != 0 && !(fn.Wrapper() || fn.Dupok())
+
+ if len(fn.Body) == 0 {
+ // Assume that uintptr arguments must be held live across the call.
+ // This is most important for syscall.Syscall.
+ // See golang.org/issue/13372.
+ // This really doesn't have much to do with escape analysis per se,
+ // but we are reusing the ability to annotate an individual function
+ // argument and pass those annotations along to importing code.
+ fn.Pragma |= ir.UintptrKeepAlive
+
+ if f.Type.IsUintptr() {
+ if diagnose {
+ base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+ }
+ return ""
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ var esc leaks
+
+ // External functions are assumed unsafe, unless
+ // //go:noescape is given before the declaration.
+ if fn.Pragma&ir.Noescape != 0 {
+ if diagnose && f.Sym != nil {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ esc.AddMutator(0)
+ esc.AddCallee(0)
+ } else {
+ if diagnose && f.Sym != nil {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ }
+ esc.AddHeap(0)
+ }
+
+ return esc.Encode()
+ }
+
+ if fn.Pragma&ir.UintptrEscapes != 0 {
+ if f.Type.IsUintptr() {
+ if diagnose {
+ base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
+ }
+ return ""
+ }
+ if f.IsDDD() && f.Type.Elem().IsUintptr() {
+ // final argument is ...uintptr.
+ if diagnose {
+ base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
+ }
+ return ""
+ }
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ // Unnamed parameters are unused and therefore do not escape.
+ if f.Sym == nil || f.Sym.IsBlank() {
+ var esc leaks
+ return esc.Encode()
+ }
+
+ n := f.Nname.(*ir.Name)
+ loc := b.oldLoc(n)
+ esc := loc.paramEsc
+ esc.Optimize()
+
+ if diagnose && !loc.hasAttr(attrEscapes) {
+ b.reportLeaks(f.Pos, name(), esc, fn.Type())
+ }
+
+ return esc.Encode()
+}
+
+func (b *batch) reportLeaks(pos src.XPos, name string, esc leaks, sig *types.Type) {
+ warned := false
+ if x := esc.Heap(); x >= 0 {
+ if x == 0 {
+ base.WarnfAt(pos, "leaking param: %v", name)
+ } else {
+ // TODO(mdempsky): Mention level=x like below?
+ base.WarnfAt(pos, "leaking param content: %v", name)
+ }
+ warned = true
+ }
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ res := sig.Result(i).Nname.Sym().Name
+ base.WarnfAt(pos, "leaking param: %v to result %v level=%d", name, res, x)
+ warned = true
+ }
+ }
+
+ if base.Debug.EscapeMutationsCalls <= 0 {
+ if !warned {
+ base.WarnfAt(pos, "%v does not escape", name)
+ }
+ return
+ }
+
+ if x := esc.Mutator(); x >= 0 {
+ base.WarnfAt(pos, "mutates param: %v derefs=%v", name, x)
+ warned = true
+ }
+ if x := esc.Callee(); x >= 0 {
+ base.WarnfAt(pos, "calls param: %v derefs=%v", name, x)
+ warned = true
+ }
+
+ if !warned {
+ base.WarnfAt(pos, "%v does not escape, mutate, or call", name)
+ }
+}
diff --git a/src/cmd/compile/internal/escape/expr.go b/src/cmd/compile/internal/escape/expr.go
new file mode 100644
index 0000000..6aa5ad7
--- /dev/null
+++ b/src/cmd/compile/internal/escape/expr.go
@@ -0,0 +1,341 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *escape) expr(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+ e.stmts(n.Init())
+ e.exprSkipInit(k, n)
+}
+
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if k.derefs >= 0 && !n.Type().IsUntyped() && !n.Type().HasPointers() {
+ k.dst = &e.blankLoc
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
+
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
+ // nop
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
+ return
+ }
+ e.flow(k, e.oldLoc(n))
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ e.expr(k.addr(n, "address-of"), n.X) // "address-of"
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.expr(k.deref(n, "indirection"), n.X) // "indirection"
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.note(n, "dot"), n.X)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ // n.T doesn't need to be tracked; it always points to read-only storage.
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.X)
+ } else {
+ // TODO(mdempsky): Fix why reason text.
+ e.expr(k.deref(n, "dot of pointer"), n.X)
+ }
+ e.discard(n.Index)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.discard(n.Index)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ e.expr(k.note(n, "slice"), n.X)
+ e.discard(n.Low)
+ e.discard(n.High)
+ e.discard(n.Max)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if (ir.ShouldCheckPtr(e.curfn, 2) || ir.ShouldAsanCheckPtr(e.curfn)) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
+ // When -d=checkptr=2 or -asan is enabled,
+ // treat conversions to unsafe.Pointer as an
+ // escaping operation. This allows better
+ // runtime instrumentation, since we can more
+ // easily detect object boundaries on the heap
+ // than the stack.
+ e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+ e.unsafeValue(k, n.X)
+ } else {
+ e.expr(k, n.X)
+ }
+ case ir.OCONVIFACE:
+ n := n.(*ir.ConvExpr)
+ if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
+ k = e.spill(k, n)
+ }
+ e.expr(k.note(n, "interface-converted"), n.X)
+ case ir.OMAKEFACE:
+ n := n.(*ir.BinaryExpr)
+ // Note: n.X is not needed because it can never point to memory that might escape.
+ e.expr(k, n.Y)
+ case ir.OITAB, ir.OIDATA, ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ e.expr(k, n.X)
+ case ir.OSLICE2ARR:
+ // Converting a slice to array is effectively a deref.
+ n := n.(*ir.ConvExpr)
+ e.expr(k.deref(n, "slice-to-array"), n.X)
+ case ir.OSLICE2ARRPTR:
+ // the slice pointer flows directly to the result
+ n := n.(*ir.ConvExpr)
+ e.expr(k, n.X)
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+
+ case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL,
+ ir.OLEN, ir.OCAP, ir.OMIN, ir.OMAX, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVERFP,
+ ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
+ e.call([]hole{k}, n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ e.spill(k, n)
+
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+ e.discard(n.Cap)
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ e.discard(n.Len)
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+
+ case ir.OMETHVALUE:
+ // Flow the receiver argument to both the closure and
+ // to the receiver parameter.
+
+ n := n.(*ir.SelectorExpr)
+ closureK := e.spill(k, n)
+
+ m := n.Selection
+
+ // We don't know how the method value will be called
+ // later, so conservatively assume the result
+ // parameters all flow to the heap.
+ //
+ // TODO(mdempsky): Change ks into a callback, so that
+ // we don't have to create this slice?
+ var ks []hole
+ for i := m.Type.NumResults(); i > 0; i-- {
+ ks = append(ks, e.heapHole())
+ }
+ name, _ := m.Nname.(*ir.Name)
+ paramK := e.tagHole(ks, name, m.Type.Recv())
+
+ e.expr(e.teeHole(paramK, closureK), n.X)
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ e.expr(e.spill(k, n), n.X)
+
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "array literal element"), elt)
+ }
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ k = e.spill(k, n)
+
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "slice-literal-element"), elt)
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ e.spill(k, n)
+
+ // Map keys and values are always stored in the heap.
+ for _, elt := range n.List {
+ elt := elt.(*ir.KeyExpr)
+ e.assignHeap(elt.Key, "map literal key", n)
+ e.assignHeap(elt.Value, "map literal value", n)
+ }
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ k = e.spill(k, n)
+ e.closures = append(e.closures, closure{k, n})
+
+ if fn := n.Func; fn.IsHiddenClosure() {
+ for _, cv := range fn.ClosureVars {
+ if loc := e.oldLoc(cv); !loc.captured {
+ loc.captured = true
+
+ // Ignore reassignments to the variable in straightline code
+ // preceding the first capture by a closure.
+ if loc.loopDepth == e.loopDepth {
+ loc.reassigned = false
+ }
+ }
+ }
+
+ for _, n := range fn.Dcl {
+ // Add locations for local variables of the
+ // closure, if needed, in case we're not including
+ // the closure func in the batch for escape
+ // analysis (happens for escape analysis called
+ // from reflectdata.methodWrapper)
+ if n.Op() == ir.ONAME && n.Opt == nil {
+ e.with(fn).newLoc(n, true)
+ }
+ }
+ e.walkFunc(fn)
+ }
+
+ case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ e.spill(k, n)
+ e.discard(n.X)
+
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ e.spill(k, n)
+
+ // Arguments of OADDSTR never escape;
+ // runtime.concatstrings makes sure of that.
+ e.discards(n.List)
+
+ case ir.ODYNAMICTYPE:
+ // Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section
+ }
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
+func (e *escape) unsafeValue(k hole, n ir.Node) {
+ if n.Type().Kind() != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
+ }
+ if k.addrtaken {
+ base.Fatalf("unexpected addrtaken")
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Type().IsUnsafePtr() {
+ e.expr(k, n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ if ir.IsReflectHeaderDataField(n) {
+ e.expr(k.deref(n, "reflect.Header.Data"), n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ e.unsafeValue(k, n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ e.unsafeValue(k, n.Y)
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ // RHS need not be uintptr-typed (#32959) and can't meaningfully
+ // flow pointers anyway.
+ e.discard(n.Y)
+ default:
+ e.exprSkipInit(e.discardHole(), n)
+ }
+}
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *escape) discard(n ir.Node) {
+ e.expr(e.discardHole(), n)
+}
+
+func (e *escape) discards(l ir.Nodes) {
+ for _, n := range l {
+ e.discard(n)
+ }
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *escape) spill(k hole, n ir.Node) hole {
+ loc := e.newLoc(n, false)
+ e.flow(k.addr(n, "spill"), loc)
+ return loc.asHole()
+}
diff --git a/src/cmd/compile/internal/escape/graph.go b/src/cmd/compile/internal/escape/graph.go
new file mode 100644
index 0000000..75e2546
--- /dev/null
+++ b/src/cmd/compile/internal/escape/graph.go
@@ -0,0 +1,376 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+// if x {
+// e.discard(n.Left)
+// } else {
+// e.value(k, n.Left)
+// }
+//
+// or
+//
+// if x {
+// k = e.discardHole()
+// }
+// e.value(k, n.Left)
+//
+// Do NOT write:
+//
+// // BAD: possibly loses side-effects within n.Left
+// if !x {
+// e.value(k, n.Left)
+// }
+
+// A location represents an abstract location that stores a Go
+// variable.
+type location struct {
+ n ir.Node // represented variable or expression, if any
+ curfn *ir.Func // enclosing function
+ edges []edge // incoming edges
+ loopDepth int // loopDepth at declaration
+
+ // resultIndex records the tuple index (starting at 1) for
+ // PPARAMOUT variables within their function's result type.
+ // For non-PPARAMOUT variables it's 0.
+ resultIndex int
+
+ // derefs and walkgen are used during walkOne to track the
+ // minimal dereferences from the walk root.
+ derefs int // >= -1
+ walkgen uint32
+
+ // dst and dstEdgeindex track the next immediate assignment
+ // destination location during walkone, along with the index
+ // of the edge pointing back to this location.
+ dst *location
+ dstEdgeIdx int
+
+ // queued is used by walkAll to track whether this location is
+ // in the walk queue.
+ queued bool
+
+ // attrs is a bitset of location attributes.
+ attrs locAttr
+
+ // paramEsc records the represented parameter's leak set.
+ paramEsc leaks
+
+ captured bool // has a closure captured this variable?
+ reassigned bool // has this variable been reassigned?
+ addrtaken bool // has this variable's address been taken?
+}
+
+type locAttr uint8
+
+const (
+ // attrEscapes indicates whether the represented variable's address
+ // escapes; that is, whether the variable must be heap allocated.
+ attrEscapes locAttr = 1 << iota
+
+ // attrPersists indicates whether the represented expression's
+ // address outlives the statement; that is, whether its storage
+ // cannot be immediately reused.
+ attrPersists
+
+ // attrMutates indicates whether pointers that are reachable from
+ // this location may have their addressed memory mutated. This is
+ // used to detect string->[]byte conversions that can be safely
+ // optimized away.
+ attrMutates
+
+ // attrCalls indicates whether closures that are reachable from this
+ // location may be called without tracking their results. This is
+ // used to better optimize indirect closure calls.
+ attrCalls
+)
+
+func (l *location) hasAttr(attr locAttr) bool { return l.attrs&attr != 0 }
+
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+ src *location
+ derefs int // >= -1
+ notes *note
+}
+
+func (l *location) asHole() hole {
+ return hole{dst: l}
+}
+
+// leak records that parameter l leaks to sink.
+func (l *location) leakTo(sink *location, derefs int) {
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ ri := sink.resultIndex - 1
+ if ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
+// leakTo records that parameter l leaks to sink.
+func (b *batch) leakTo(l, sink *location, derefs int) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.hasAttr(attrEscapes) {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(sink), derefs)
+ }
+ explanation := b.explainPath(sink, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(sink), derefs), explanation)
+ }
+ }
+
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.hasAttr(attrEscapes) && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ if ri := sink.resultIndex - 1; ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
+func (l *location) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
+}
+
+// A hole represents a context for evaluation of a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
+type hole struct {
+ dst *location
+ derefs int // >= -1
+ notes *note
+
+ // addrtaken indicates whether this context is taking the address of
+ // the expression, independent of whether the address will actually
+ // be stored into a variable.
+ addrtaken bool
+}
+
+type note struct {
+ next *note
+ where ir.Node
+ why string
+}
+
+func (k hole) note(where ir.Node, why string) hole {
+ if where == nil || why == "" {
+ base.Fatalf("note: missing where/why")
+ }
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ k.notes = &note{
+ next: k.notes,
+ where: where,
+ why: why,
+ }
+ }
+ return k
+}
+
+func (k hole) shift(delta int) hole {
+ k.derefs += delta
+ if k.derefs < -1 {
+ base.Fatalf("derefs underflow: %v", k.derefs)
+ }
+ k.addrtaken = delta < 0
+ return k
+}
+
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
+
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+ if !t.IsInterface() && !types.IsDirectIface(t) {
+ k = k.shift(1)
+ }
+ return k.note(where, why)
+}
+
+func (b *batch) flow(k hole, src *location) {
+ if k.addrtaken {
+ src.addrtaken = true
+ }
+
+ dst := k.dst
+ if dst == &b.blankLoc {
+ return
+ }
+ if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+ return
+ }
+ if dst.hasAttr(attrEscapes) && k.derefs < 0 { // dst = &src
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ pos := base.FmtPos(src.n.Pos())
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+ }
+ explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ }
+
+ }
+ src.attrs |= attrEscapes | attrPersists | attrMutates | attrCalls
+ return
+ }
+
+ // TODO(mdempsky): Deduplicate edges?
+ dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
+func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
+func (b *batch) mutatorHole() hole { return b.mutatorLoc.asHole() }
+func (b *batch) calleeHole() hole { return b.calleeLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
+
+func (b *batch) oldLoc(n *ir.Name) *location {
+ if n.Canonical().Opt == nil {
+ base.FatalfAt(n.Pos(), "%v has no location", n)
+ }
+ return n.Canonical().Opt.(*location)
+}
+
+func (e *escape) newLoc(n ir.Node, persists bool) *location {
+ if e.curfn == nil {
+ base.Fatalf("e.curfn isn't set")
+ }
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), 0, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
+ }
+
+ if n != nil && n.Op() == ir.ONAME {
+ if canon := n.(*ir.Name).Canonical(); n != canon {
+ base.FatalfAt(n.Pos(), "newLoc on non-canonical %v (canonical is %v)", n, canon)
+ }
+ }
+ loc := &location{
+ n: n,
+ curfn: e.curfn,
+ loopDepth: e.loopDepth,
+ }
+ if persists {
+ loc.attrs |= attrPersists
+ }
+ e.allLocs = append(e.allLocs, loc)
+ if n != nil {
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM && n.Curfn == nil {
+ // ok; hidden parameter
+ } else if n.Curfn != e.curfn {
+ base.FatalfAt(n.Pos(), "curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
+ }
+
+ if n.Opt != nil {
+ base.FatalfAt(n.Pos(), "%v already has a location", n)
+ }
+ n.Opt = loc
+ }
+ }
+ return loc
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
+func (e *escape) teeHole(ks ...hole) hole {
+ if len(ks) == 0 {
+ return e.discardHole()
+ }
+ if len(ks) == 1 {
+ return ks[0]
+ }
+ // TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+ // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+ // new temporary location ltmp, wire it into place, and return
+ // a hole for "ltmp = _".
+ loc := e.newLoc(nil, false)
+ for _, k := range ks {
+ // N.B., "p = &q" and "p = &tmp; tmp = q" are not
+ // semantically equivalent. To combine holes like "l1
+ // = _" and "l2 = &_", we'd need to wire them as "l1 =
+ // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+ // instead.
+ if k.derefs < 0 {
+ base.Fatalf("teeHole: negative derefs")
+ }
+
+ e.flow(k, loc)
+ }
+ return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *escape) later(k hole) hole {
+ loc := e.newLoc(nil, true)
+ e.flow(k, loc)
+ return loc.asHole()
+}
+
+// Fmt is called from node printing to print information about escape analysis results.
+func Fmt(n ir.Node) string {
+ text := ""
+ switch n.Esc() {
+ case ir.EscUnknown:
+ break
+
+ case ir.EscHeap:
+ text = "esc(h)"
+
+ case ir.EscNone:
+ text = "esc(no)"
+
+ case ir.EscNever:
+ text = "esc(N)"
+
+ default:
+ text = fmt.Sprintf("esc(%d)", n.Esc())
+ }
+
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
+ if text != "" {
+ text += " "
+ }
+ text += fmt.Sprintf("ld(%d)", loc.loopDepth)
+ }
+ }
+
+ return text
+}
diff --git a/src/cmd/compile/internal/escape/leaks.go b/src/cmd/compile/internal/escape/leaks.go
new file mode 100644
index 0000000..942f87d
--- /dev/null
+++ b/src/cmd/compile/internal/escape/leaks.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "math"
+ "strings"
+)
+
+// A leaks represents a set of assignment flows from a parameter to
+// the heap, mutator, callee, or to any of its function's (first
+// numEscResults) result parameters.
+type leaks [8]uint8
+
+const (
+ leakHeap = iota
+ leakMutator
+ leakCallee
+ leakResult0
+)
+
+const numEscResults = len(leaks{}) - leakResult0
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l leaks) Heap() int { return l.get(leakHeap) }
+
+// Mutator returns the minimum deref count of any assignment flow from
+// l to the pointer operand of an indirect assignment statement. If no
+// such flows exist, Mutator returns -1.
+func (l leaks) Mutator() int { return l.get(leakMutator) }
+
+// Callee returns the minimum deref count of any assignment flow from
+// l to the callee operand of call expression. If no such flows exist,
+// Callee returns -1.
+func (l leaks) Callee() int { return l.get(leakCallee) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l leaks) Result(i int) int { return l.get(leakResult0 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *leaks) AddHeap(derefs int) { l.add(leakHeap, derefs) }
+
+// AddMutator adds a flow from l to the mutator (i.e., a pointer
+// operand of an indirect assignment statement).
+func (l *leaks) AddMutator(derefs int) { l.add(leakMutator, derefs) }
+
+// AddCallee adds an assignment flow from l to the callee operand of a
+// call expression.
+func (l *leaks) AddCallee(derefs int) { l.add(leakCallee, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *leaks) AddResult(i, derefs int) { l.add(leakResult0+i, derefs) }
+
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *leaks) add(i, derefs int) {
+ if old := l.get(i); old < 0 || derefs < old {
+ l.set(i, derefs)
+ }
+}
+
+func (l *leaks) set(i, derefs int) {
+ v := derefs + 1
+ if v < 0 {
+ base.Fatalf("invalid derefs count: %v", derefs)
+ }
+ if v > math.MaxUint8 {
+ v = math.MaxUint8
+ }
+
+ l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *leaks) Optimize() {
+ // If we have a path to the heap, then there's no use in
+ // keeping equal or longer paths elsewhere.
+ if x := l.Heap(); x >= 0 {
+ for i := 1; i < len(*l); i++ {
+ if l.get(i) >= x {
+ l.set(i, -1)
+ }
+ }
+ }
+}
+
+var leakTagCache = map[leaks]string{}
+
+// Encode converts l into a binary string for export data.
+func (l leaks) Encode() string {
+ if l.Heap() == 0 {
+ // Space optimization: empty string encodes more
+ // efficiently in export data.
+ return ""
+ }
+ if s, ok := leakTagCache[l]; ok {
+ return s
+ }
+
+ n := len(l)
+ for n > 0 && l[n-1] == 0 {
+ n--
+ }
+ s := "esc:" + string(l[:n])
+ leakTagCache[l] = s
+ return s
+}
+
+// parseLeaks parses a binary string representing a leaks.
+func parseLeaks(s string) leaks {
+ var l leaks
+ if !strings.HasPrefix(s, "esc:") {
+ l.AddHeap(0)
+ return l
+ }
+ copy(l[:], s[4:])
+ return l
+}
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
new file mode 100644
index 0000000..2675a16
--- /dev/null
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -0,0 +1,326 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (b *batch) walkAll() {
+ // We use a work queue to keep track of locations that we need
+ // to visit, and repeatedly walk until we reach a fixed point.
+ //
+ // We walk once from each location (including the heap), and
+ // then re-enqueue each location on its transition from
+ // !persists->persists and !escapes->escapes, which can each
+ // happen at most once. So we take Θ(len(e.allLocs)) walks.
+
+ // LIFO queue, has enough room for e.allLocs and e.heapLoc.
+ todo := make([]*location, 0, len(b.allLocs)+1)
+ enqueue := func(loc *location) {
+ if !loc.queued {
+ todo = append(todo, loc)
+ loc.queued = true
+ }
+ }
+
+ for _, loc := range b.allLocs {
+ enqueue(loc)
+ }
+ enqueue(&b.mutatorLoc)
+ enqueue(&b.calleeLoc)
+ enqueue(&b.heapLoc)
+
+ var walkgen uint32
+ for len(todo) > 0 {
+ root := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+ root.queued = false
+
+ walkgen++
+ b.walkOne(root, walkgen, enqueue)
+ }
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
+ // The data flow graph has negative edges (from addressing
+ // operations), so we use the Bellman-Ford algorithm. However,
+ // we don't have to worry about infinite negative cycles since
+ // we bound intermediate dereference counts to 0.
+
+ root.walkgen = walkgen
+ root.derefs = 0
+ root.dst = nil
+
+ if root.hasAttr(attrCalls) {
+ if clo, ok := root.n.(*ir.ClosureExpr); ok {
+ if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
+ fn.SetClosureResultsLost(true)
+
+ // Re-flow from the closure's results, now that we're aware
+ // we lost track of them.
+ for _, result := range fn.Type().Results() {
+ enqueue(b.oldLoc(result.Nname.(*ir.Name)))
+ }
+ }
+ }
+ }
+
+ todo := []*location{root} // LIFO queue
+ for len(todo) > 0 {
+ l := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ derefs := l.derefs
+ var newAttrs locAttr
+
+ // If l.derefs < 0, then l's address flows to root.
+ addressOf := derefs < 0
+ if addressOf {
+ // For a flow path like "root = &l; l = x",
+ // l's address flows to root, but x's does
+ // not. We recognize this by lower bounding
+ // derefs at 0.
+ derefs = 0
+
+ // If l's address flows somewhere that
+ // outlives it, then l needs to be heap
+ // allocated.
+ if b.outlives(root, l) {
+ if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ }
+ }
+ newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
+ } else
+ // If l's address flows to a persistent location, then l needs
+ // to persist too.
+ if root.hasAttr(attrPersists) {
+ newAttrs |= attrPersists
+ }
+ }
+
+ if derefs == 0 {
+ newAttrs |= root.attrs & (attrMutates | attrCalls)
+ }
+
+ // l's value flows to root. If l is a function
+ // parameter and root is the heap or a
+ // corresponding result parameter, then record
+ // that value flow for tagging the function
+ // later.
+ if l.isName(ir.PPARAM) {
+ if b.outlives(root, l) {
+ if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+ }
+ }
+ l.leakTo(root, derefs)
+ }
+ if root.hasAttr(attrMutates) {
+ l.paramEsc.AddMutator(derefs)
+ }
+ if root.hasAttr(attrCalls) {
+ l.paramEsc.AddCallee(derefs)
+ }
+ }
+
+ if newAttrs&^l.attrs != 0 {
+ l.attrs |= newAttrs
+ enqueue(l)
+ if l.attrs&attrEscapes != 0 {
+ continue
+ }
+ }
+
+ for i, edge := range l.edges {
+ if edge.src.hasAttr(attrEscapes) {
+ continue
+ }
+ d := derefs + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > d {
+ edge.src.walkgen = walkgen
+ edge.src.derefs = d
+ edge.src.dst = l
+ edge.src.dstEdgeIdx = i
+ todo = append(todo, edge.src)
+ }
+ }
+ }
+}
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
+ visited := make(map[*location]bool)
+ pos := base.FmtPos(src.n.Pos())
+ var explanation []*logopt.LoggedOpt
+ for {
+ // Prevent infinite loop.
+ if visited[src] {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+ }
+ break
+ }
+ visited[src] = true
+ dst := src.dst
+ edge := &dst.edges[src.dstEdgeIdx]
+ if edge.src != src {
+ base.Fatalf("path inconsistency: %v != %v", edge.src, src)
+ }
+
+ explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+ if dst == root {
+ break
+ }
+ src = dst
+ }
+
+ return explanation
+}
+
+func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+ ops := "&"
+ if derefs >= 0 {
+ ops = strings.Repeat("*", derefs)
+ }
+ print := base.Flag.LowerM >= 2
+
+ flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
+ if print {
+ fmt.Printf("%s:%s\n", pos, flow)
+ }
+ if logopt.Enabled() {
+ var epos src.XPos
+ if notes != nil {
+ epos = notes.where.Pos()
+ } else if srcloc != nil && srcloc.n != nil {
+ epos = srcloc.n.Pos()
+ }
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
+ }
+
+ for note := notes; note != nil; note = note.next {
+ if print {
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ notePos := note.where.Pos()
+ explanation = append(explanation, logopt.NewLoggedOpt(notePos, notePos, "escflow", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf(" from %v (%v)", note.where, note.why)))
+ }
+ }
+ return explanation
+}
+
+func (b *batch) explainLoc(l *location) string {
+ if l == &b.heapLoc {
+ return "{heap}"
+ }
+ if l.n == nil {
+ // TODO(mdempsky): Omit entirely.
+ return "{temp}"
+ }
+ if l.n.Op() == ir.ONAME {
+ return fmt.Sprintf("%v", l.n)
+ }
+ return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (b *batch) outlives(l, other *location) bool {
+ // The heap outlives everything.
+ if l.hasAttr(attrEscapes) {
+ return true
+ }
+
+ // Pseudo-locations that don't really exist.
+ if l == &b.mutatorLoc || l == &b.calleeLoc {
+ return false
+ }
+
+ // We don't know what callers do with returned values, so
+ // pessimistically we need to assume they flow to the heap and
+ // outlive everything too.
+ if l.isName(ir.PPARAMOUT) {
+ // Exception: Closures can return locations allocated outside of
+ // them without forcing them to the heap, if we can statically
+ // identify all call sites. For example:
+ //
+ // var u int // okay to stack allocate
+ // fn := func() *int { return &u }()
+ // *fn() = 42
+ if containsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
+ return false
+ }
+
+ return true
+ }
+
+ // If l and other are within the same function, then l
+ // outlives other if it was declared outside other's loop
+ // scope. For example:
+ //
+ // var l *int
+ // for {
+ // l = new(int) // must heap allocate: outlives for loop
+ // }
+ if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+ return true
+ }
+
+ // If other is declared within a child closure of where l is
+ // declared, then l outlives it. For example:
+ //
+ // var l *int
+ // func() {
+ // l = new(int) // must heap allocate: outlives call frame (if not inlined)
+ // }()
+ if containsClosure(l.curfn, other.curfn) {
+ return true
+ }
+
+ return false
+}
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *ir.Func) bool {
+ // Common cases.
+ if f == c || c.OClosure == nil {
+ return false
+ }
+
+ // Closures within function Foo are named like "Foo.funcN..."
+ // TODO(mdempsky): Better way to recognize this.
+ fn := f.Sym().Name
+ cn := c.Sym().Name
+ return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
new file mode 100644
index 0000000..b766864
--- /dev/null
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -0,0 +1,218 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "fmt"
+)
+
+// stmt evaluates a single Go statement.
+func (e *escape) stmt(n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected stmt: %v", n)
+
+ case ir.OFALL, ir.OINLMARK:
+ // nop
+
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+ // TODO(mdempsky): Handle dead code?
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ e.stmts(n.List)
+
+ case ir.ODCL:
+ // Record loop depth at declaration.
+ n := n.(*ir.Decl)
+ if !ir.IsBlank(n.X) {
+ e.dcl(n.X)
+ }
+
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ if n.Label.IsBlank() {
+ break
+ }
+ switch e.labels[n.Label] {
+ case nonlooping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
+ }
+ case looping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
+ }
+ e.loopDepth++
+ default:
+ base.Fatalf("label %v missing tag", n.Label)
+ }
+ delete(e.labels, n.Label)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ e.discard(n.Cond)
+ e.block(n.Body)
+ e.block(n.Else)
+
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis
+ e.loopDepth++
+ e.discard(n.Cond)
+ e.stmt(n.Post)
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.ORANGE:
+ // for Key, Value = range X { Body }
+ n := n.(*ir.RangeStmt)
+ base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis
+
+ // X is evaluated outside the loop and persists until the loop
+ // terminates.
+ tmp := e.newLoc(nil, true)
+ e.expr(tmp.asHole(), n.X)
+
+ e.loopDepth++
+ ks := e.addrs([]ir.Node{n.Key, n.Value})
+ if n.X.Type().IsArray() {
+ e.flow(ks[1].note(n, "range"), tmp)
+ } else {
+ e.flow(ks[1].deref(n, "range-deref"), tmp)
+ }
+ e.reassigned(ks, n)
+
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+
+ if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+ var ks []hole
+ if guard.Tag != nil {
+ for _, cas := range n.Cases {
+ cv := cas.Var
+ k := e.dcl(cv) // type switch variables have no ODCL.
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+ }
+ }
+ }
+ e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
+ } else {
+ e.discard(n.Tag)
+ }
+
+ for _, cas := range n.Cases {
+ e.discards(cas.List)
+ e.block(cas.Body)
+ }
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ for _, cas := range n.Cases {
+ e.stmt(cas.Comm)
+ e.block(cas.Body)
+ }
+ case ir.ORECV:
+ // TODO(mdempsky): Consider e.discard(n.Left).
+ n := n.(*ir.UnaryExpr)
+ e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ e.discard(n.Chan)
+ e.assignHeap(n.Value, "send", n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ // TODO(mdempsky): Worry about OLSH/ORSH?
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
+
+ case ir.OAS2DOTTYPE: // v, ok = x.(type)
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
+ case ir.OAS2MAPR: // v, ok = m[k]
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
+ case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
+
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ e.stmts(n.Rhs[0].Init())
+ ks := e.addrs(n.Lhs)
+ e.call(ks, n.Rhs[0])
+ e.reassigned(ks, n)
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ results := e.curfn.Type().Results()
+ dsts := make([]ir.Node, len(results))
+ for i, res := range results {
+ dsts[i] = res.Nname.(*ir.Name)
+ }
+ e.assignList(dsts, n.Results, "return", n)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLEAR, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ e.call(nil, n)
+ case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ e.goDeferStmt(n)
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ e.call(nil, n.Call)
+ }
+}
+
+func (e *escape) stmts(l ir.Nodes) {
+ for _, n := range l {
+ e.stmt(n)
+ }
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *escape) block(l ir.Nodes) {
+ old := e.loopDepth
+ e.stmts(l)
+ e.loopDepth = old
+}
+
+func (e *escape) dcl(n *ir.Name) hole {
+ if n.Curfn != e.curfn || n.IsClosureVar() {
+ base.Fatalf("bad declaration of %v", n)
+ }
+ loc := e.oldLoc(n)
+ loc.loopDepth = e.loopDepth
+ return loc.asHole()
+}
diff --git a/src/cmd/compile/internal/escape/utils.go b/src/cmd/compile/internal/escape/utils.go
new file mode 100644
index 0000000..bd1d2c2
--- /dev/null
+++ b/src/cmd/compile/internal/escape/utils.go
@@ -0,0 +1,222 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis,
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that order will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ var dstX ir.Node
+ switch dst.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ dst := dst.(*ir.StarExpr)
+ dstX = dst.X
+ case ir.ODOTPTR:
+ dst := dst.(*ir.SelectorExpr)
+ dstX = dst.X
+ }
+ if dstX.Op() != ir.ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op() {
+ case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+ // OK.
+ case ir.OSLICEARR, ir.OSLICE3ARR:
+ // Since arrays are embedded into containing object,
+ // slice of non-pointer array will introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ src := src.(*ir.SliceExpr)
+ if src.X.Op() == ir.OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ var baseX ir.Node
+ switch base := src.(*ir.SliceExpr).X; base.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ base := base.(*ir.StarExpr)
+ baseX = base.X
+ case ir.ODOTPTR:
+ base := base.(*ir.SelectorExpr)
+ baseX = base.X
+ }
+ if baseX.Op() != ir.ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op() != src.Op() {
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ switch dst.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ dst := dst.(*ir.SelectorExpr)
+ src := src.(*ir.SelectorExpr)
+ return ir.SameSafeExpr(dst.X, src.X)
+ case ir.OINDEX:
+ dst := dst.(*ir.IndexExpr)
+ src := src.(*ir.IndexExpr)
+ if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
+ return false
+ }
+ return ir.SameSafeExpr(dst.X, src.X)
+ default:
+ return false
+ }
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+
+ // TODO(rsc): It seems like it should be possible to replace this with
+ // an ir.Any looking for any op that's not the ones in the case statement.
+ // But that produces changes in the compiled output detected by buildall.
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return false
+
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
+
+ case ir.OCONVNOP, ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG:
+ n := n.(*ir.UnaryExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return mayAffectMemory(n.X)
+
+ default:
+ return true
+ }
+}
+
+// HeapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it doesn't.
+func HeapAllocReason(n ir.Node) string {
+ if n == nil || n.Type() == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+ return ""
+ }
+ }
+
+ if n.Type().Size() > ir.MaxStackVarSize {
+ return "too large for stack"
+ }
+ if n.Type().Alignment() > int64(types.PtrSize) {
+ return "too aligned for stack"
+ }
+
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Alignment() > int64(types.PtrSize) {
+ return "too aligned for stack"
+ }
+
+ if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OMAKESLICE {
+ n := n.(*ir.MakeExpr)
+ r := n.Cap
+ if r == nil {
+ r = n.Len
+ }
+ if !ir.IsSmallIntConst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type(); t.Elem().Size() != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Size() {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
new file mode 100644
index 0000000..0f57f8c
--- /dev/null
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -0,0 +1,198 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "internal/race"
+ "math/rand"
+ "sort"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/walk"
+ "cmd/internal/obj"
+)
+
+// "Portable" code generation.
+
+var (
+ compilequeue []*ir.Func // functions waiting to be compiled
+)
+
+func enqueueFunc(fn *ir.Func) {
+ if ir.CurFunc != nil {
+ base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
+ }
+
+ if ir.FuncName(fn) == "_" {
+ // Skip compiling blank functions.
+ // Frontend already reported any spec-mandated errors (#29870).
+ return
+ }
+
+ // Don't try compiling dead hidden closure.
+ if fn.IsDeadcodeClosure() {
+ return
+ }
+
+ if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
+ return // we'll get this as part of its enclosing function
+ }
+
+ if ssagen.CreateWasmImportWrapper(fn) {
+ return
+ }
+
+ if len(fn.Body) == 0 {
+ // Initialize ABI wrappers if necessary.
+ ir.InitLSym(fn, false)
+ types.CalcSize(fn.Type())
+ a := ssagen.AbiForBodylessFuncStackMap(fn)
+ abiInfo := a.ABIAnalyzeFuncType(fn.Type()) // abiInfo has spill/home locations for wrapper
+ liveness.WriteFuncMap(fn, abiInfo)
+ if fn.ABI == obj.ABI0 {
+ x := ssagen.EmitArgInfo(fn, abiInfo)
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL)
+ }
+ return
+ }
+
+ errorsBefore := base.Errors()
+
+ todo := []*ir.Func{fn}
+ for len(todo) > 0 {
+ next := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ prepareFunc(next)
+ todo = append(todo, next.Closures...)
+ }
+
+ if base.Errors() > errorsBefore {
+ return
+ }
+
+ // Enqueue just fn itself. compileFunctions will handle
+ // scheduling compilation of its closures after it's done.
+ compilequeue = append(compilequeue, fn)
+}
+
+// prepareFunc handles any remaining frontend compilation tasks that
+// aren't yet safe to perform concurrently.
+func prepareFunc(fn *ir.Func) {
+ // Set up the function's LSym early to avoid data races with the assemblers.
+ // Do this before walk, as walk needs the LSym to set attributes/relocations
+ // (e.g. in MarkTypeUsedInInterface).
+ ir.InitLSym(fn, true)
+
+ // If this function is a compiler-generated outlined global map
+ // initializer function, register its LSym for later processing.
+ if staticinit.MapInitToVar != nil {
+ if _, ok := staticinit.MapInitToVar[fn]; ok {
+ ssagen.RegisterMapInitLsym(fn.Linksym())
+ }
+ }
+
+ // Calculate parameter offsets.
+ types.CalcSize(fn.Type())
+
+ ir.CurFunc = fn
+ walk.Walk(fn)
+ ir.CurFunc = nil // enforce no further uses of CurFunc
+}
+
+// compileFunctions compiles all functions in compilequeue.
+// It fans out nBackendWorkers to do the work
+// and waits for them to complete.
+func compileFunctions() {
+ if race.Enabled {
+ // Randomize compilation order to try to shake out races.
+ tmp := make([]*ir.Func, len(compilequeue))
+ perm := rand.Perm(len(compilequeue))
+ for i, v := range perm {
+ tmp[v] = compilequeue[i]
+ }
+ copy(compilequeue, tmp)
+ } else {
+ // Compile the longest functions first,
+ // since they're most likely to be the slowest.
+ // This helps avoid stragglers.
+ sort.Slice(compilequeue, func(i, j int) bool {
+ return len(compilequeue[i].Body) > len(compilequeue[j].Body)
+ })
+ }
+
+ // By default, we perform work right away on the current goroutine
+ // as the solo worker.
+ queue := func(work func(int)) {
+ work(0)
+ }
+
+ if nWorkers := base.Flag.LowerC; nWorkers > 1 {
+ // For concurrent builds, we allow the work queue
+ // to grow arbitrarily large, but only nWorkers work items
+ // can be running concurrently.
+ workq := make(chan func(int))
+ done := make(chan int)
+ go func() {
+ ids := make([]int, nWorkers)
+ for i := range ids {
+ ids[i] = i
+ }
+ var pending []func(int)
+ for {
+ select {
+ case work := <-workq:
+ pending = append(pending, work)
+ case id := <-done:
+ ids = append(ids, id)
+ }
+ for len(pending) > 0 && len(ids) > 0 {
+ work := pending[len(pending)-1]
+ id := ids[len(ids)-1]
+ pending = pending[:len(pending)-1]
+ ids = ids[:len(ids)-1]
+ go func() {
+ work(id)
+ done <- id
+ }()
+ }
+ }
+ }()
+ queue = func(work func(int)) {
+ workq <- work
+ }
+ }
+
+ var wg sync.WaitGroup
+ var compile func([]*ir.Func)
+ compile = func(fns []*ir.Func) {
+ wg.Add(len(fns))
+ for _, fn := range fns {
+ fn := fn
+ queue(func(worker int) {
+ ssagen.Compile(fn, worker)
+ compile(fn.Closures)
+ wg.Done()
+ })
+ }
+ }
+
+ types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
+ base.Ctxt.InParallel = true
+
+ compile(compilequeue)
+ compilequeue = nil
+ wg.Wait()
+
+ base.Ctxt.InParallel = false
+ types.CalcSizeDisabled = false
+}
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
new file mode 100644
index 0000000..c93f008
--- /dev/null
+++ b/src/cmd/compile/internal/gc/export.go
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+)
+
+func dumpasmhdr() {
+ b, err := bio.Create(base.Flag.AsmHdr)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
+ for _, n := range typecheck.Target.AsmHdrDecls {
+ if n.Sym().IsBlank() {
+ continue
+ }
+ switch n.Op() {
+ case ir.OLITERAL:
+ t := n.Val().Kind()
+ if t == constant.Float || t == constant.Complex {
+ break
+ }
+ fmt.Fprintf(b, "#define const_%s %v\n", n.Sym().Name, n.Val().ExactString())
+
+ case ir.OTYPE:
+ t := n.Type()
+ if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
+ break
+ }
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Size()))
+ for _, f := range t.Fields() {
+ if !f.Sym.IsBlank() {
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
+ }
+ }
+ }
+ }
+
+ b.Close()
+}
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
new file mode 100644
index 0000000..7e5069f
--- /dev/null
+++ b/src/cmd/compile/internal/gc/main.go
@@ -0,0 +1,391 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/coverage"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/inline/interleaved"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/loopvar"
+ "cmd/compile/internal/noder"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/pkginit"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/rttype"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "log"
+ "os"
+ "runtime"
+)
+
+// handlePanic ensures that we print out an "internal compiler error" for any panic
+// or runtime exception during front-end compiler processing (unless there have
+// already been some compiler errors). It may also be invoked from the explicit panic in
+// hcrash(), in which case, we pass the panic on through.
+func handlePanic() {
+ if err := recover(); err != nil {
+ if err == "-h" {
+ // Force real panic now with -h option (hcrash) - the error
+ // information will have already been printed.
+ panic(err)
+ }
+ base.Fatalf("panic: %v", err)
+ }
+}
+
+// Main parses flags and Go source files specified in the command-line
+// arguments, type-checks the parsed Go package, compiles functions to machine
+// code, and finally writes the compiled package definition to disk.
+func Main(archInit func(*ssagen.ArchInfo)) {
+ base.Timer.Start("fe", "init")
+
+ defer handlePanic()
+
+ archInit(&ssagen.Arch)
+
+ base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+
+ // UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump
+ // on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag
+ // to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project.
+ // See bugs 31188 and 21945 (CLs 170638, 98075, 72371).
+ base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin
+
+ base.DebugSSA = ssa.PhaseOption
+ base.ParseFlags()
+
+ if os.Getenv("GOGC") == "" { // GOGC set disables starting heap adjustment
+ // More processors will use more heap, but assume that more memory is available.
+ // So 1 processor -> 40MB, 4 -> 64MB, 12 -> 128MB
+ base.AdjustStartingHeap(uint64(32+8*base.Flag.LowerC) << 20)
+ }
+
+ types.LocalPkg = types.NewPkg(base.Ctxt.Pkgpath, "")
+
+ // pseudo-package, for scoping
+ types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
+ types.BuiltinPkg.Prefix = "go:builtin"
+
+ // pseudo-package, accessed by import "unsafe"
+ types.UnsafePkg = types.NewPkg("unsafe", "unsafe")
+
+ // Pseudo-package that contains the compiler's builtin
+ // declarations for package runtime. These are declared in a
+ // separate package to avoid conflicts with package runtime's
+ // actual declarations, which may differ intentionally but
+ // insignificantly.
+ ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
+ ir.Pkgs.Runtime.Prefix = "runtime"
+
+ // pseudo-packages used in symbol tables
+ ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
+ ir.Pkgs.Itab.Prefix = "go:itab"
+
+ // pseudo-package used for methods with anonymous receivers
+ ir.Pkgs.Go = types.NewPkg("go", "")
+
+ // pseudo-package for use with code coverage instrumentation.
+ ir.Pkgs.Coverage = types.NewPkg("go.coverage", "runtime/coverage")
+ ir.Pkgs.Coverage.Prefix = "runtime/coverage"
+
+ // Record flags that affect the build result. (And don't
+ // record flags that don't, since that would cause spurious
+ // changes in the binary.)
+ dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "asan", "shared", "dynlink", "dwarf", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
+
+ if !base.EnableTrace && base.Flag.LowerT {
+ log.Fatalf("compiler not built with support for -t")
+ }
+
+ // Enable inlining (after RecordFlags, to avoid recording the rewritten -l). For now:
+ // default: inlining on. (Flag.LowerL == 1)
+ // -l: inlining off (Flag.LowerL == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
+ if base.Flag.LowerL <= 1 {
+ base.Flag.LowerL = 1 - base.Flag.LowerL
+ }
+
+ if base.Flag.SmallFrames {
+ ir.MaxStackVarSize = 128 * 1024
+ ir.MaxImplicitStackVarSize = 16 * 1024
+ }
+
+ if base.Flag.Dwarf {
+ base.Ctxt.DebugInfo = dwarfgen.Info
+ base.Ctxt.GenAbstractFunc = dwarfgen.AbstractFunc
+ base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
+ } else {
+ // turn off inline generation if no dwarf at all
+ base.Flag.GenDwarfInl = 0
+ base.Ctxt.Flag_locationlists = false
+ }
+ if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 {
+ log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name)
+ }
+
+ types.ParseLangFlag()
+
+ symABIs := ssagen.NewSymABIs()
+ if base.Flag.SymABIs != "" {
+ symABIs.ReadSymABIs(base.Flag.SymABIs)
+ }
+
+ if objabi.LookupPkgSpecial(base.Ctxt.Pkgpath).NoInstrument {
+ base.Flag.Race = false
+ base.Flag.MSan = false
+ base.Flag.ASan = false
+ }
+
+ ssagen.Arch.LinkArch.Init(base.Ctxt)
+ startProfile()
+ if base.Flag.Race || base.Flag.MSan || base.Flag.ASan {
+ base.Flag.Cfg.Instrumenting = true
+ }
+ if base.Flag.Dwarf {
+ dwarf.EnableLogging(base.Debug.DwarfInl != 0)
+ }
+ if base.Debug.SoftFloat != 0 {
+ ssagen.Arch.SoftFloat = true
+ }
+
+ if base.Flag.JSON != "" { // parse version,destination from json logging optimization.
+ logopt.LogJsonOption(base.Flag.JSON)
+ }
+
+ ir.EscFmt = escape.Fmt
+ ir.IsIntrinsicCall = ssagen.IsIntrinsicCall
+ inline.SSADumpInline = ssagen.DumpInline
+ ssagen.InitEnv()
+ ssagen.InitTables()
+
+ types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+ types.RegSize = ssagen.Arch.LinkArch.RegSize
+ types.MaxWidth = ssagen.Arch.MAXWIDTH
+
+ typecheck.Target = new(ir.Package)
+
+ base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
+
+ typecheck.InitUniverse()
+ typecheck.InitRuntime()
+ rttype.Init()
+
+ // Parse and typecheck input.
+ noder.LoadPackage(flag.Args())
+
+ // As a convenience to users (toolchain maintainers, in particular),
+ // when compiling a package named "main", we default the package
+ // path to "main" if the -p flag was not specified.
+ if base.Ctxt.Pkgpath == obj.UnlinkablePkg && types.LocalPkg.Name == "main" {
+ base.Ctxt.Pkgpath = "main"
+ types.LocalPkg.Path = "main"
+ types.LocalPkg.Prefix = "main"
+ }
+
+ dwarfgen.RecordPackageName()
+
+ // Prepare for backend processing.
+ ssagen.InitConfig()
+
+ // Apply coverage fixups, if applicable.
+ coverage.Fixup()
+
+ // Read profile file and build profile-graph and weighted-call-graph.
+ base.Timer.Start("fe", "pgo-load-profile")
+ var profile *pgo.Profile
+ if base.Flag.PgoProfile != "" {
+ var err error
+ profile, err = pgo.New(base.Flag.PgoProfile)
+ if err != nil {
+ log.Fatalf("%s: PGO error: %v", base.Flag.PgoProfile, err)
+ }
+ }
+
+ // Interleaved devirtualization and inlining.
+ base.Timer.Start("fe", "devirtualize-and-inline")
+ interleaved.DevirtualizeAndInlinePackage(typecheck.Target, profile)
+
+ noder.MakeWrappers(typecheck.Target) // must happen after inlining
+
+ // Get variable capture right in for loops.
+ var transformed []loopvar.VarAndLoop
+ for _, fn := range typecheck.Target.Funcs {
+ transformed = append(transformed, loopvar.ForCapture(fn)...)
+ }
+ ir.CurFunc = nil
+
+ // Build init task, if needed.
+ pkginit.MakeTask()
+
+ // Generate ABI wrappers. Must happen before escape analysis
+ // and doesn't benefit from dead-coding or inlining.
+ symABIs.GenABIWrappers()
+
+ // Escape analysis.
+ // Required for moving heap allocations onto stack,
+ // which in turn is required by the closure implementation,
+ // which stores the addresses of stack variables into the closure.
+ // If the closure does not escape, it needs to be on the stack
+ // or else the stack copier will not update it.
+ // Large values are also moved off stack in escape analysis;
+ // because large values may contain pointers, it must happen early.
+ base.Timer.Start("fe", "escapes")
+ escape.Funcs(typecheck.Target.Funcs)
+
+ loopvar.LogTransformations(transformed)
+
+ // Collect information for go:nowritebarrierrec
+ // checking. This must happen before transforming closures during Walk
+ // We'll do the final check after write barriers are
+ // inserted.
+ if base.Flag.CompilingRuntime {
+ ssagen.EnableNoWriteBarrierRecCheck()
+ }
+
+ ir.CurFunc = nil
+
+ reflectdata.WriteBasicTypes()
+
+ // Compile top-level declarations.
+ //
+ // There are cyclic dependencies between all of these phases, so we
+ // need to iterate all of them until we reach a fixed point.
+ base.Timer.Start("be", "compilefuncs")
+ for nextFunc, nextExtern := 0, 0; ; {
+ reflectdata.WriteRuntimeTypes()
+
+ if nextExtern < len(typecheck.Target.Externs) {
+ switch n := typecheck.Target.Externs[nextExtern]; n.Op() {
+ case ir.ONAME:
+ dumpGlobal(n)
+ case ir.OLITERAL:
+ dumpGlobalConst(n)
+ case ir.OTYPE:
+ reflectdata.NeedRuntimeType(n.Type())
+ }
+ nextExtern++
+ continue
+ }
+
+ if nextFunc < len(typecheck.Target.Funcs) {
+ enqueueFunc(typecheck.Target.Funcs[nextFunc])
+ nextFunc++
+ continue
+ }
+
+ // The SSA backend supports using multiple goroutines, so keep it
+ // as late as possible to maximize how much work we can batch and
+ // process concurrently.
+ if len(compilequeue) != 0 {
+ compileFunctions()
+ continue
+ }
+
+ // Finalize DWARF inline routine DIEs, then explicitly turn off
+ // further DWARF inlining generation to avoid problems with
+ // generated method wrappers.
+ //
+ // Note: The DWARF fixup code for inlined calls currently doesn't
+ // allow multiple invocations, so we intentionally run it just
+ // once after everything else. Worst case, some generated
+ // functions have slightly larger DWARF DIEs.
+ if base.Ctxt.DwFixups != nil {
+ base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+ base.Ctxt.DwFixups = nil
+ base.Flag.GenDwarfInl = 0
+ continue // may have called reflectdata.TypeLinksym (#62156)
+ }
+
+ break
+ }
+
+ base.Timer.AddEvent(int64(len(typecheck.Target.Funcs)), "funcs")
+
+ if base.Flag.CompilingRuntime {
+ // Write barriers are now known. Check the call graph.
+ ssagen.NoWriteBarrierRecCheck()
+ }
+
+ // Add keep relocations for global maps.
+ if base.Debug.WrapGlobalMapCtl != 1 {
+ staticinit.AddKeepRelocations()
+ }
+
+ // Write object data to disk.
+ base.Timer.Start("be", "dumpobj")
+ dumpdata()
+ base.Ctxt.NumberSyms()
+ dumpobj()
+ if base.Flag.AsmHdr != "" {
+ dumpasmhdr()
+ }
+
+ ssagen.CheckLargeStacks()
+ typecheck.CheckFuncStack()
+
+ if len(compilequeue) != 0 {
+ base.Fatalf("%d uncompiled functions", len(compilequeue))
+ }
+
+ logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath)
+ base.ExitIfErrors()
+
+ base.FlushErrors()
+ base.Timer.Stop()
+
+ if base.Flag.Bench != "" {
+ if err := writebench(base.Flag.Bench); err != nil {
+ log.Fatalf("cannot write benchmark data: %v", err)
+ }
+ }
+}
+
+func writebench(filename string) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "commit:", buildcfg.Version)
+ fmt.Fprintln(&buf, "goos:", runtime.GOOS)
+ fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
+ base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
+
+ n, err := f.Write(buf.Bytes())
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() {
+ panic("bad writer")
+ }
+
+ return f.Close()
+}
+
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
+}
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
new file mode 100644
index 0000000..e090caf
--- /dev/null
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -0,0 +1,284 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/noder"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/pkginit"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/archive"
+ "cmd/internal/bio"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+// These modes say which kind of object file to generate.
+// The default use of the toolchain is to set both bits,
+// generating a combined compiler+linker object, one that
+// serves to describe the package to both the compiler and the linker.
+// In fact the compiler and linker read nearly disjoint sections of
+// that file, though, so in a distributed build setting it can be more
+// efficient to split the output into two files, supplying the compiler
+// object only to future compilations and the linker object only to
+// future links.
+//
+// By default a combined object is written, but if -linkobj is specified
+// on the command line then the default -o output is a compiler object
+// and the -linkobj output is a linker object.
+const (
+ modeCompilerObj = 1 << iota
+ modeLinkerObj
+)
+
+func dumpobj() {
+ if base.Flag.LinkObj == "" {
+ dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
+ return
+ }
+ dumpobj1(base.Flag.LowerO, modeCompilerObj)
+ dumpobj1(base.Flag.LinkObj, modeLinkerObj)
+}
+
+func dumpobj1(outfile string, mode int) {
+ bout, err := bio.Create(outfile)
+ if err != nil {
+ base.FlushErrors()
+ fmt.Printf("can't create %s: %v\n", outfile, err)
+ base.ErrorExit()
+ }
+ defer bout.Close()
+ bout.WriteString("!<arch>\n")
+
+ if mode&modeCompilerObj != 0 {
+ start := startArchiveEntry(bout)
+ dumpCompilerObj(bout)
+ finishArchiveEntry(bout, start, "__.PKGDEF")
+ }
+ if mode&modeLinkerObj != 0 {
+ start := startArchiveEntry(bout)
+ dumpLinkerObj(bout)
+ finishArchiveEntry(bout, start, "_go_.o")
+ }
+}
+
+func printObjHeader(bout *bio.Writer) {
+ bout.WriteString(objabi.HeaderString())
+ if base.Flag.BuildID != "" {
+ fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
+ }
+ if types.LocalPkg.Name == "main" {
+ fmt.Fprintf(bout, "main\n")
+ }
+ fmt.Fprintf(bout, "\n") // header ends with blank line
+}
+
+func startArchiveEntry(bout *bio.Writer) int64 {
+ var arhdr [archive.HeaderSize]byte
+ bout.Write(arhdr[:])
+ return bout.Offset()
+}
+
+func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
+ bout.Flush()
+ size := bout.Offset() - start
+ if size&1 != 0 {
+ bout.WriteByte(0)
+ }
+ bout.MustSeek(start-archive.HeaderSize, 0)
+
+ var arhdr [archive.HeaderSize]byte
+ archive.FormatHeader(arhdr[:], name, size)
+ bout.Write(arhdr[:])
+ bout.Flush()
+ bout.MustSeek(start+size+(size&1), 0)
+}
+
+func dumpCompilerObj(bout *bio.Writer) {
+ printObjHeader(bout)
+ noder.WriteExports(bout)
+}
+
+func dumpdata() {
+ reflectdata.WriteGCSymbols()
+ reflectdata.WritePluginTable()
+ dumpembeds()
+
+ if reflectdata.ZeroSize > 0 {
+ zero := base.PkgLinksym("go:map", "zero", obj.ABI0)
+ objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
+ zero.Set(obj.AttrStatic, true)
+ }
+
+ staticdata.WriteFuncSyms()
+ addGCLocals()
+}
+
+func dumpLinkerObj(bout *bio.Writer) {
+ printObjHeader(bout)
+
+ if len(typecheck.Target.CgoPragmas) != 0 {
+ // write empty export section; must be before cgo section
+ fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
+ fmt.Fprintf(bout, "\n$$ // cgo\n")
+ if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
+ base.Fatalf("serializing pragcgobuf: %v", err)
+ }
+ fmt.Fprintf(bout, "\n$$\n\n")
+ }
+
+ fmt.Fprintf(bout, "\n!\n")
+
+ obj.WriteObjFile(base.Ctxt, bout)
+}
+
+func dumpGlobal(n *ir.Name) {
+ if n.Type() == nil {
+ base.Fatalf("external %v nil type\n", n)
+ }
+ if n.Class == ir.PFUNC {
+ return
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+ return
+ }
+ types.CalcSize(n.Type())
+ ggloblnod(n)
+ if n.CoverageCounter() || n.CoverageAuxVar() || n.Linksym().Static() {
+ return
+ }
+ base.Ctxt.DwarfGlobal(types.TypeSymName(n.Type()), n.Linksym())
+}
+
+func dumpGlobalConst(n *ir.Name) {
+ // only export typed constants
+ t := n.Type()
+ if t == nil {
+ return
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+ return
+ }
+ // only export integer constants for now
+ if !t.IsInteger() {
+ return
+ }
+ v := n.Val()
+ if t.IsUntyped() {
+ // Export untyped integers as int (if they fit).
+ t = types.Types[types.TINT]
+ if ir.ConstOverflow(v, t) {
+ return
+ }
+ } else {
+ // If the type of the constant is an instantiated generic, we need to emit
+ // that type so the linker knows about it. See issue 51245.
+ _ = reflectdata.TypeLinksym(t)
+ }
+ base.Ctxt.DwarfIntConst(n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
+}
+
+// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
+//
+// This is done during the sequential phase after compilation, since
+// global symbols can't be declared during parallel compilation.
+func addGCLocals() {
+ for _, s := range base.Ctxt.Text {
+ fn := s.Func()
+ if fn == nil {
+ continue
+ }
+ for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
+ if gcsym != nil && !gcsym.OnList() {
+ objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+ }
+ }
+ if x := fn.StackObjects; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.OpenCodedDeferInfo; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ }
+ if x := fn.ArgInfo; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.ArgLiveInfo; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.WrapInfo; x != nil && !x.OnList() {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
+ for _, jt := range fn.JumpTables {
+ objw.Global(jt.Sym, int32(len(jt.Targets)*base.Ctxt.Arch.PtrSize), obj.RODATA)
+ }
+ }
+}
+
+func ggloblnod(nam *ir.Name) {
+ s := nam.Linksym()
+
+ // main_inittask and runtime_inittask in package runtime (and in
+ // test/initempty.go) aren't real variable declarations, but
+ // linknamed variables pointing to the compiler's generated
+ // .inittask symbol. The real symbol was already written out in
+ // pkginit.Task, so we need to avoid writing them out a second time
+ // here, otherwise base.Ctxt.Globl will fail.
+ if strings.HasSuffix(s.Name, "..inittask") && s.OnList() {
+ return
+ }
+
+ s.Gotype = reflectdata.TypeLinksym(nam.Type())
+ flags := 0
+ if nam.Readonly() {
+ flags = obj.RODATA
+ }
+ if nam.Type() != nil && !nam.Type().HasPointers() {
+ flags |= obj.NOPTR
+ }
+ size := nam.Type().Size()
+ linkname := nam.Sym().Linkname
+ name := nam.Sym().Name
+
+ // We've skipped linkname'd globals's instrument, so we can skip them here as well.
+ if base.Flag.ASan && linkname == "" && pkginit.InstrumentGlobalsMap[name] != nil {
+ // Write the new size of instrumented global variables that have
+ // trailing redzones into object file.
+ rzSize := pkginit.GetRedzoneSizeForGlobal(size)
+ sizeWithRZ := rzSize + size
+ base.Ctxt.Globl(s, sizeWithRZ, flags)
+ } else {
+ base.Ctxt.Globl(s, size, flags)
+ }
+ if nam.Libfuzzer8BitCounter() {
+ s.Type = objabi.SLIBFUZZER_8BIT_COUNTER
+ }
+ if nam.CoverageCounter() {
+ s.Type = objabi.SCOVERAGE_COUNTER
+ }
+ if nam.Sym().Linkname != "" {
+ // Make sure linkname'd symbol is non-package. When a symbol is
+ // both imported and linkname'd, s.Pkg may not set to "_" in
+ // types.Sym.Linksym because LSym already exists. Set it here.
+ s.Pkg = "_"
+ }
+}
+
+func dumpembeds() {
+ for _, v := range typecheck.Target.Embeds {
+ staticdata.WriteEmbed(v)
+ }
+}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
new file mode 100644
index 0000000..b82a983
--- /dev/null
+++ b/src/cmd/compile/internal/gc/util.go
@@ -0,0 +1,117 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "runtime/pprof"
+ tracepkg "runtime/trace"
+ "strings"
+
+ "cmd/compile/internal/base"
+)
+
+func profileName(fn, suffix string) string {
+ if strings.HasSuffix(fn, string(os.PathSeparator)) {
+ err := os.MkdirAll(fn, 0755)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ }
+ if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() {
+ fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+suffix)
+ }
+ return fn
+}
+
+func startProfile() {
+ if base.Flag.CPUProfile != "" {
+ fn := profileName(base.Flag.CPUProfile, ".cpuprof")
+ f, err := os.Create(fn)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ base.Fatalf("%v", err)
+ }
+ base.AtExit(pprof.StopCPUProfile)
+ }
+ if base.Flag.MemProfile != "" {
+ if base.Flag.MemProfileRate != 0 {
+ runtime.MemProfileRate = base.Flag.MemProfileRate
+ }
+ const (
+ gzipFormat = 0
+ textFormat = 1
+ )
+ // compilebench parses the memory profile to extract memstats,
+ // which are only written in the legacy (text) pprof format.
+ // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
+ // gzipFormat is what most people want, otherwise
+ var format = textFormat
+ fn := base.Flag.MemProfile
+ if strings.HasSuffix(fn, string(os.PathSeparator)) {
+ err := os.MkdirAll(fn, 0755)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ }
+ if fi, statErr := os.Stat(fn); statErr == nil && fi.IsDir() {
+ fn = filepath.Join(fn, url.PathEscape(base.Ctxt.Pkgpath)+".memprof")
+ format = gzipFormat
+ }
+
+ f, err := os.Create(fn)
+
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ base.AtExit(func() {
+ // Profile all outstanding allocations.
+ runtime.GC()
+ if err := pprof.Lookup("heap").WriteTo(f, format); err != nil {
+ base.Fatalf("%v", err)
+ }
+ })
+ } else {
+ // Not doing memory profiling; disable it entirely.
+ runtime.MemProfileRate = 0
+ }
+ if base.Flag.BlockProfile != "" {
+ f, err := os.Create(profileName(base.Flag.BlockProfile, ".blockprof"))
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ runtime.SetBlockProfileRate(1)
+ base.AtExit(func() {
+ pprof.Lookup("block").WriteTo(f, 0)
+ f.Close()
+ })
+ }
+ if base.Flag.MutexProfile != "" {
+ f, err := os.Create(profileName(base.Flag.MutexProfile, ".mutexprof"))
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ runtime.SetMutexProfileFraction(1)
+ base.AtExit(func() {
+ pprof.Lookup("mutex").WriteTo(f, 0)
+ f.Close()
+ })
+ }
+ if base.Flag.TraceProfile != "" {
+ f, err := os.Create(profileName(base.Flag.TraceProfile, ".trace"))
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ if err := tracepkg.Start(f); err != nil {
+ base.Fatalf("%v", err)
+ }
+ base.AtExit(tracepkg.Stop)
+ }
+}
diff --git a/src/cmd/compile/internal/importer/exportdata.go b/src/cmd/compile/internal/importer/exportdata.go
new file mode 100644
index 0000000..42fc5c9
--- /dev/null
+++ b/src/cmd/compile/internal/importer/exportdata.go
@@ -0,0 +1,95 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FindExportData.
+
+package importer
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ size, err = strconv.Atoi(s)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = fmt.Errorf("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the string before the export data, either "$$" or "$$B".
+//
+// If size is non-negative, it's the number of bytes of export data
+// still available to read from r.
+func FindExportData(r *bufio.Reader) (hdr string, size int, err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ if string(line) == "!<arch>\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, size, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = fmt.Errorf("not a Go object file")
+ return
+ }
+ size -= len(line)
+
+ // Skip over object header to export data.
+ // Begins after first line starting with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ size -= len(line)
+ }
+ hdr = string(line)
+
+ return
+}
diff --git a/src/cmd/compile/internal/importer/gcimporter.go b/src/cmd/compile/internal/importer/gcimporter.go
new file mode 100644
index 0000000..1f7b49c
--- /dev/null
+++ b/src/cmd/compile/internal/importer/gcimporter.go
@@ -0,0 +1,253 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package importer implements Import for gc-generated object files.
+package importer
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/build"
+ "internal/pkgbits"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "cmd/compile/internal/types2"
+)
+
+var exportMap sync.Map // package dir → func() (string, error)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+func lookupGorootExport(pkgDir string) (string, error) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ err error
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
+ listOnce.Do(func() {
+ cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
+ var output []byte
+ output, err = cmd.Output()
+ if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ err = errors.New(string(ee.Stderr))
+ }
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, err
+ })
+ }
+
+ return f.(func() (string, error))()
+}
+
+var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+func FindPkg(path, srcDir string) (filename, id string, err error) {
+ if path == "" {
+ return "", "", errors.New("path is empty")
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ var bp *build.Package
+ bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ if bp.Goroot && bp.Dir != "" {
+ filename, err = lookupGorootExport(bp.Dir)
+ if err == nil {
+ _, err = os.Stat(filename)
+ }
+ if err == nil {
+ return filename, bp.ImportPath, nil
+ }
+ }
+ goto notfound
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ }
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ f, statErr := os.Stat(filename)
+ if statErr == nil && !f.IsDir() {
+ return filename, id, nil
+ }
+ if err == nil {
+ err = statErr
+ }
+ }
+
+notfound:
+ if err == nil {
+ return "", path, fmt.Errorf("can't find import: %q", path)
+ }
+ return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+func Import(packages map[string]*types2.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types2.Package, err error) {
+ var rc io.ReadCloser
+ var id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types2.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ var filename string
+ filename, id, err = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types2.Unsafe, nil
+ }
+ return nil, err
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ buf := bufio.NewReader(rc)
+ hdr, size, err := FindExportData(buf)
+ if err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$\n":
+ err = fmt.Errorf("import %q: old textual export format no longer supported (recompile library)", path)
+
+ case "$$B\n":
+ var data []byte
+ var r io.Reader = buf
+ if size >= 0 {
+ r = io.LimitReader(r, int64(size))
+ }
+ data, err = io.ReadAll(r)
+ if err != nil {
+ break
+ }
+
+ if len(data) == 0 {
+ err = fmt.Errorf("import %q: missing export data", path)
+ break
+ }
+ exportFormat := data[0]
+ s := string(data[1:])
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ switch exportFormat {
+ case 'u':
+ s = s[:strings.Index(s, "\n$$\n")]
+ input := pkgbits.NewPkgDecoder(id, s)
+ pkg = ReadPackage(nil, packages, input)
+ case 'i':
+ pkg, err = ImportData(packages, s, id)
+ default:
+ err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
+ }
+
+ default:
+ err = fmt.Errorf("import %q: unknown export data header: %q", path, hdr)
+ }
+
+ return
+}
+
+type byPath []*types2.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
new file mode 100644
index 0000000..7fe4445
--- /dev/null
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -0,0 +1,608 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importer
+
+import (
+ "bytes"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "go/build"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestMain(m *testing.M) {
+ build.Default.GOROOT = testenv.GOROOT(nil)
+ os.Exit(m.Run())
+}
+
+// compile runs the compiler on filename, with dirname as the working directory,
+// and writes the output file to outdirname.
+// compile gives the resulting package a packagepath of testdata/<filebasename>.
+func compile(t *testing.T, dirname, filename, outdirname string, packagefiles map[string]string) string {
+ // filename must end with ".go"
+ basename, ok := strings.CutSuffix(filepath.Base(filename), ".go")
+ if !ok {
+ t.Helper()
+ t.Fatalf("filename doesn't end in .go: %s", filename)
+ }
+ objname := basename + ".o"
+ outname := filepath.Join(outdirname, objname)
+ pkgpath := path.Join("testdata", basename)
+
+ importcfgfile := os.DevNull
+ if len(packagefiles) > 0 {
+ importcfgfile = filepath.Join(outdirname, basename) + ".importcfg"
+ importcfg := new(bytes.Buffer)
+ for k, v := range packagefiles {
+ fmt.Fprintf(importcfg, "packagefile %s=%s\n", k, v)
+ }
+ if err := os.WriteFile(importcfgfile, importcfg.Bytes(), 0655); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p", pkgpath, "-D", "testdata", "-importcfg", importcfgfile, "-o", outname, filename)
+ cmd.Dir = dirname
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Helper()
+ t.Logf("%s", out)
+ t.Fatalf("go tool compile %s failed: %s", filename, err)
+ }
+ return outname
+}
+
+func testPath(t *testing.T, path, srcDir string) *types2.Package {
+ t0 := time.Now()
+ pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return nil
+ }
+ t.Logf("testPath(%s): %v", path, time.Since(t0))
+ return pkg
+}
+
+func mktmpdir(t *testing.T) string {
+ tmpdir := t.TempDir()
+ if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil {
+ t.Fatal("mktmpdir:", err)
+ }
+ return tmpdir
+}
+
+func TestImportTestdata(t *testing.T) {
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ testenv.MustHaveGoBuild(t)
+
+ testfiles := map[string][]string{
+ "exports.go": {"go/ast", "go/token"},
+ "generics.go": nil,
+ }
+ if true /* was goexperiment.Unified */ {
+ // TODO(mdempsky): Fix test below to flatten the transitive
+ // Package.Imports graph. Unified IR is more precise about
+ // recreating the package import graph.
+ testfiles["exports.go"] = []string{"go/ast"}
+ }
+
+ for testfile, wantImports := range testfiles {
+ tmpdir := mktmpdir(t)
+
+ importMap := map[string]string{}
+ for _, pkg := range wantImports {
+ export, _, err := FindPkg(pkg, "testdata")
+ if export == "" {
+ t.Fatalf("no export data found for %s: %v", pkg, err)
+ }
+ importMap[pkg] = export
+ }
+
+ compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"), importMap)
+ path := "./testdata/" + strings.TrimSuffix(testfile, ".go")
+
+ if pkg := testPath(t, path, tmpdir); pkg != nil {
+ // The package's Imports list must include all packages
+ // explicitly imported by testfile, plus all packages
+ // referenced indirectly via exported objects in testfile.
+ got := fmt.Sprint(pkg.Imports())
+ for _, want := range wantImports {
+ if !strings.Contains(got, want) {
+ t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestVersionHandling(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ const dir = "./testdata/versions"
+ list, err := os.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tmpdir := mktmpdir(t)
+ corruptdir := filepath.Join(tmpdir, "testdata", "versions")
+ if err := os.Mkdir(corruptdir, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, f := range list {
+ name := f.Name()
+ if !strings.HasSuffix(name, ".a") {
+ continue // not a package file
+ }
+ if strings.Contains(name, "corrupted") {
+ continue // don't process a leftover corrupted file
+ }
+ pkgpath := "./" + name[:len(name)-2]
+
+ if testing.Verbose() {
+ t.Logf("importing %s", name)
+ }
+
+ // test that export data can be imported
+ _, err := Import(make(map[string]*types2.Package), pkgpath, dir, nil)
+ if err != nil {
+ // ok to fail if it fails with a no longer supported error for select files
+ if strings.Contains(err.Error(), "no longer supported") {
+ switch name {
+ case "test_go1.7_0.a", "test_go1.7_1.a",
+ "test_go1.8_4.a", "test_go1.8_5.a",
+ "test_go1.11_6b.a", "test_go1.11_999b.a":
+ continue
+ }
+ // fall through
+ }
+ // ok to fail if it fails with a newer version error for select files
+ if strings.Contains(err.Error(), "newer version") {
+ switch name {
+ case "test_go1.11_999i.a":
+ continue
+ }
+ // fall through
+ }
+ t.Errorf("import %q failed: %v", pkgpath, err)
+ continue
+ }
+
+ // create file with corrupted export data
+ // 1) read file
+ data, err := os.ReadFile(filepath.Join(dir, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // 2) find export data
+ i := bytes.Index(data, []byte("\n$$B\n")) + 5
+ j := bytes.Index(data[i:], []byte("\n$$\n")) + i
+ if i < 0 || j < 0 || i > j {
+ t.Fatalf("export data section not found (i = %d, j = %d)", i, j)
+ }
+ // 3) corrupt the data (increment every 7th byte)
+ for k := j - 13; k >= i; k -= 7 {
+ data[k]++
+ }
+ // 4) write the file
+ pkgpath += "_corrupted"
+ filename := filepath.Join(corruptdir, pkgpath) + ".a"
+ os.WriteFile(filename, data, 0666)
+
+ // test that importing the corrupted file results in an error
+ _, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
+ if err == nil {
+ t.Errorf("import corrupted %q succeeded", pkgpath)
+ } else if msg := err.Error(); !strings.Contains(msg, "version skew") {
+ t.Errorf("import %q error incorrect (%s)", pkgpath, msg)
+ }
+ }
+}
+
+func TestImportStdLib(t *testing.T) {
+ if testing.Short() {
+ t.Skip("the imports can be expensive, and this test is especially slow when the build cache is empty")
+ }
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // Get list of packages in stdlib. Filter out test-only packages with {{if .GoFiles}} check.
+ var stderr bytes.Buffer
+ cmd := exec.Command("go", "list", "-f", "{{if .GoFiles}}{{.ImportPath}}{{end}}", "std")
+ cmd.Stderr = &stderr
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatalf("failed to run go list to determine stdlib packages: %v\nstderr:\n%v", err, stderr.String())
+ }
+ pkgs := strings.Fields(string(out))
+
+ var nimports int
+ for _, pkg := range pkgs {
+ t.Run(pkg, func(t *testing.T) {
+ if testPath(t, pkg, filepath.Join(testenv.GOROOT(t), "src", path.Dir(pkg))) != nil {
+ nimports++
+ }
+ })
+ }
+ const minPkgs = 225 // 'GOOS=plan9 go1.18 list std | wc -l' reports 228; most other platforms have more.
+ if len(pkgs) < minPkgs {
+ t.Fatalf("too few packages (%d) were imported", nimports)
+ }
+
+ t.Logf("tested %d imports", nimports)
+}
+
+var importedObjectTests = []struct {
+ name string
+ want string
+}{
+ // non-interfaces
+ {"crypto.Hash", "type Hash uint"},
+ {"go/ast.ObjKind", "type ObjKind int"},
+ {"go/types.Qualifier", "type Qualifier func(*Package) string"},
+ {"go/types.Comparable", "func Comparable(T Type) bool"},
+ {"math.Pi", "const Pi untyped float"},
+ {"math.Sin", "func Sin(x float64) float64"},
+ {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
+ {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string, err error)"},
+
+ // interfaces
+ {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
+ {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
+ {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
+ {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
+ {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
+ {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+}
+
+func TestImportedTypes(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ for _, test := range importedObjectTests {
+ s := strings.Split(test.name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := Import(make(map[string]*types2.Package), importPath, ".", nil)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup(objName)
+ if obj == nil {
+ t.Errorf("%s: object not found", test.name)
+ continue
+ }
+
+ got := types2.ObjectString(obj, types2.RelativeTo(pkg))
+ if got != test.want {
+ t.Errorf("%s: got %q; want %q", test.name, got, test.want)
+ }
+
+ if named, _ := obj.Type().(*types2.Named); named != nil {
+ verifyInterfaceMethodRecvs(t, named, 0)
+ }
+ }
+}
+
+// verifyInterfaceMethodRecvs verifies that method receiver types
+// are named if the methods belong to a named interface type.
+func verifyInterfaceMethodRecvs(t *testing.T, named *types2.Named, level int) {
+ // avoid endless recursion in case of an embedding bug that lead to a cycle
+ if level > 10 {
+ t.Errorf("%s: embeds itself", named)
+ return
+ }
+
+ iface, _ := named.Underlying().(*types2.Interface)
+ if iface == nil {
+ return // not an interface
+ }
+
+ // The unified IR importer always sets interface method receiver
+ // parameters to point to the Interface type, rather than the Named.
+ // See #49906.
+ //
+ // TODO(mdempsky): This is only true for the types2 importer. For
+ // the go/types importer, we duplicate the Interface and rewrite its
+ // receiver methods to match historical behavior.
+ var want types2.Type = named
+ if true /* was goexperiment.Unified */ {
+ want = iface
+ }
+
+ // check explicitly declared methods
+ for i := 0; i < iface.NumExplicitMethods(); i++ {
+ m := iface.ExplicitMethod(i)
+ recv := m.Type().(*types2.Signature).Recv()
+ if recv == nil {
+ t.Errorf("%s: missing receiver type", m)
+ continue
+ }
+ if recv.Type() != want {
+ t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named)
+ }
+ }
+
+ // check embedded interfaces (if they are named, too)
+ for i := 0; i < iface.NumEmbeddeds(); i++ {
+ // embedding of interfaces cannot have cycles; recursion will terminate
+ if etype, _ := iface.EmbeddedType(i).(*types2.Named); etype != nil {
+ verifyInterfaceMethodRecvs(t, etype, level+1)
+ }
+ }
+}
+
+func TestIssue5815(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ pkg := importPkg(t, "strings", ".")
+
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ if obj.Pkg() == nil {
+ t.Errorf("no pkg for %s", obj)
+ }
+ if tname, _ := obj.(*types2.TypeName); tname != nil {
+ named := tname.Type().(*types2.Named)
+ for i := 0; i < named.NumMethods(); i++ {
+ m := named.Method(i)
+ if m.Pkg() == nil {
+ t.Errorf("no pkg for %s", m)
+ }
+ }
+ }
+ }
+}
+
+// Smoke test to ensure that imported methods get the correct package.
+func TestCorrectMethodPackage(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ imports := make(map[string]*types2.Package)
+ _, err := Import(imports, "net/http", ".", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mutex := imports["sync"].Scope().Lookup("Mutex").(*types2.TypeName).Type()
+ obj, _, _ := types2.LookupFieldOrMethod(types2.NewPointer(mutex), false, nil, "Lock")
+ lock := obj.(*types2.Func)
+ if got, want := lock.Pkg().Path(), "sync"; got != want {
+ t.Errorf("got package path %q; want %q", got, want)
+ }
+}
+
+func TestIssue13566(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ tmpdir := mktmpdir(t)
+ testoutdir := filepath.Join(tmpdir, "testdata")
+
+ // b.go needs to be compiled from the output directory so that the compiler can
+ // find the compiled package a. We pass the full path to compile() so that we
+ // don't have to copy the file to that directory.
+ bpath, err := filepath.Abs(filepath.Join("testdata", "b.go"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ jsonExport, _, err := FindPkg("encoding/json", "testdata")
+ if jsonExport == "" {
+ t.Fatalf("no export data found for encoding/json: %v", err)
+ }
+
+ compile(t, "testdata", "a.go", testoutdir, map[string]string{"encoding/json": jsonExport})
+ compile(t, testoutdir, bpath, testoutdir, map[string]string{"testdata/a": filepath.Join(testoutdir, "a.o")})
+
+ // import must succeed (test for issue at hand)
+ pkg := importPkg(t, "./testdata/b", tmpdir)
+
+ // make sure all indirectly imported packages have names
+ for _, imp := range pkg.Imports() {
+ if imp.Name() == "" {
+ t.Errorf("no name for %s package", imp.Path())
+ }
+ }
+}
+
+func TestIssue13898(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // import go/internal/gcimporter which imports go/types partially
+ imports := make(map[string]*types2.Package)
+ _, err := Import(imports, "go/internal/gcimporter", ".", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for go/types package
+ var goTypesPkg *types2.Package
+ for path, pkg := range imports {
+ if path == "go/types" {
+ goTypesPkg = pkg
+ break
+ }
+ }
+ if goTypesPkg == nil {
+ t.Fatal("go/types not found")
+ }
+
+ // look for go/types.Object type
+ obj := lookupObj(t, goTypesPkg.Scope(), "Object")
+ typ, ok := obj.Type().(*types2.Named)
+ if !ok {
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
+ }
+
+ // lookup go/types.Object.Pkg method
+ m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
+ if m == nil {
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ }
+
+ // the method must belong to go/types
+ if m.Pkg().Path() != "go/types" {
+ t.Fatalf("found %v; want go/types", m.Pkg())
+ }
+}
+
+func TestIssue15517(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ tmpdir := mktmpdir(t)
+
+ compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"), nil)
+
+ // Multiple imports of p must succeed without redeclaration errors.
+ // We use an import path that's not cleaned up so that the eventual
+ // file path for the package is different from the package path; this
+ // will expose the error if it is present.
+ //
+ // (Issue: Both the textual and the binary importer used the file path
+ // of the package to be imported as key into the shared packages map.
+ // However, the binary importer then used the package path to identify
+ // the imported package to mark it as complete; effectively marking the
+ // wrong package as complete. By using an "unclean" package path, the
+ // file and package path are different, exposing the problem if present.
+ // The same issue occurs with vendoring.)
+ imports := make(map[string]*types2.Package)
+ for i := 0; i < 3; i++ {
+ if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIssue15920(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ compileAndImportPkg(t, "issue15920")
+}
+
+func TestIssue20046(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // "./issue20046".V.M must exist
+ pkg := compileAndImportPkg(t, "issue20046")
+ obj := lookupObj(t, pkg.Scope(), "V")
+ if m, index, indirect := types2.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
+ t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
+ }
+}
+func TestIssue25301(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ compileAndImportPkg(t, "issue25301")
+}
+
+func TestIssue25596(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ compileAndImportPkg(t, "issue25596")
+}
+
+func importPkg(t *testing.T, path, srcDir string) *types2.Package {
+ pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
+ if err != nil {
+ t.Helper()
+ t.Fatal(err)
+ }
+ return pkg
+}
+
+func compileAndImportPkg(t *testing.T, name string) *types2.Package {
+ t.Helper()
+ tmpdir := mktmpdir(t)
+ compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"), nil)
+ return importPkg(t, "./testdata/"+name, tmpdir)
+}
+
+func lookupObj(t *testing.T, scope *types2.Scope, name string) types2.Object {
+ if obj := scope.Lookup(name); obj != nil {
+ return obj
+ }
+ t.Helper()
+ t.Fatalf("%s not found", name)
+ return nil
+}
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
new file mode 100644
index 0000000..4981347
--- /dev/null
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -0,0 +1,793 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/typecheck/iexport.go for the export data format.
+
+package importer
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types2"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "sort"
+ "strings"
+)
+
+type intReader struct {
+ *strings.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+ iexportVersionGo1_18 = 2
+
+ iexportVersionCurrent = 2
+)
+
+type ident struct {
+ pkg *types2.Package
+ name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType
+ unionType
+)
+
+// ImportData imports a package from the serialized package data
+// and returns the number of bytes consumed and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ defer func() {
+ if e := recover(); e != nil {
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+
+ r := &intReader{strings.NewReader(data), path}
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ errorf("unknown iexport format version %d", version)
+ }
+
+ sLen := int64(r.uint64())
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ r.Seek(sLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
+
+ stringData: stringData,
+ pkgCache: make(map[uint64]*types2.Package),
+ posBaseCache: make(map[uint64]*syntax.PosBase),
+
+ declData: declData,
+ pkgIndex: make(map[*types2.Package]map[string]uint64),
+ typCache: make(map[uint64]types2.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name (name with subscript).
+ tparamIndex: make(map[ident]*types2.TypeParam),
+ }
+
+ for i, pt := range predeclared {
+ p.typCache[uint64(i)] = pt
+ }
+
+ pkgList := make([]*types2.Package, r.uint64())
+ for i := range pkgList {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = int(r.uint64()) // was package height, but not necessary anymore.
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ pkg := imports[pkgPath]
+ if pkg == nil {
+ pkg = types2.NewPackage(pkgPath, pkgName)
+ imports[pkgPath] = pkg
+ } else {
+ if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+ }
+
+ p.pkgCache[pkgPathOff] = pkg
+
+ nameIndex := make(map[string]uint64)
+ for nSyms := r.uint64(); nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ p.pkgIndex[pkg] = nameIndex
+ pkgList[i] = pkg
+ }
+
+ localpkg := pkgList[0]
+
+ names := make([]string, 0, len(p.pkgIndex[localpkg]))
+ for name := range p.pkgIndex[localpkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(localpkg, name)
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the 'P' case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+ // record all referenced packages as imports
+ list := append(([]*types2.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ localpkg.SetImports(list)
+
+ // package was imported completely and without errors
+ localpkg.MarkComplete()
+
+ return localpkg, nil
+}
+
+type setConstraintArgs struct {
+ t *types2.TypeParam
+ constraint types2.Type
+}
+
+type iimporter struct {
+ exportVersion int64
+ ipath string
+ version int
+
+ stringData string
+ pkgCache map[uint64]*types2.Package
+ posBaseCache map[uint64]*syntax.PosBase
+
+ declData string
+ pkgIndex map[*types2.Package]map[string]uint64
+ typCache map[uint64]types2.Type
+ tparamIndex map[ident]*types2.TypeParam
+
+ interfaceList []*types2.Interface
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+}
+
+func (p *iimporter) doDecl(pkg *types2.Package, name string) {
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
+
+ slen, n := binary.Uvarint(x[:n])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ return p.stringData[spos : spos+slen]
+}
+
+func (p *iimporter) pkgAt(off uint64) *types2.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+ filename := p.stringAt(off)
+ posBase := syntax.NewTrimmedFileBase(filename, true)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
+func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types2.Named, rhs types2.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := rhs.(*types2.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+ p *iimporter
+ declReader strings.Reader
+ currPkg *types2.Package
+ prevPosBase *syntax.PosBase
+ prevLine int64
+ prevColumn int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ r.declare(types2.NewTypeName(pos, r.currPkg, name, typ))
+
+ case 'C':
+ typ, val := r.value()
+
+ r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
+
+ case 'F', 'G':
+ var tparams []*types2.TypeParam
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
+
+ case 'T', 'U':
+ // Types can be recursive. We need to setup a stub
+ // declaration before recursing.
+ obj := types2.NewTypeName(pos, r.currPkg, name, nil)
+ named := types2.NewNamed(obj, nil, nil)
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == 'U' {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ targs := baseType(recv.Type()).TypeArgs()
+ var rparams []*types2.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*types2.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i], _ = targs.At(i).(*types2.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := typecheck.TparamName(name)
+ if name0 == "" {
+ errorf("malformed type parameter export name %s: missing prefix", name)
+ }
+
+ tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
+ t := types2.NewTypeParam(tn, nil)
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+
+ var implicit bool
+ if r.p.exportVersion >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := constraint.(*types2.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ iface.MarkImplicit()
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case 'V':
+ typ := r.typ()
+
+ r.declare(types2.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types2.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types2.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.exportVersion >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types2.Basic); b.Info() & types2.IsConstType {
+ case types2.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types2.IsString:
+ val = constant.MakeString(r.string())
+
+ case types2.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types2.IsFloat:
+ val = r.mpfloat(b)
+
+ case types2.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types2.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types2.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types2.Float32, types2.Complex64:
+ return true, 3
+ case types2.Float64, types2.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types2.IsUnsigned) == 0
+ switch b.Kind() {
+ case types2.Int8, types2.Uint8:
+ maxBytes = 1
+ case types2.Int16, types2.Uint16:
+ maxBytes = 2
+ case types2.Int32, types2.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types2.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (r *importReader) mpfloat(typ *types2.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types2.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() syntax.Pos {
+ if r.p.version >= 1 {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
+ return syntax.Pos{}
+ }
+
+ return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
+}
+
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevPosBase = r.posBase()
+ r.prevLine = l
+ }
+}
+
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevPosBase = r.posBase()
+ }
+ }
+}
+
+func (r *importReader) typ() types2.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types2.Type) bool {
+ _, ok := t.(*types2.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
+
+func (r *importReader) doType(base *types2.Named) types2.Type {
+ switch k := r.kind(); k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types2.TypeName).Type()
+ case pointerType:
+ return types2.NewPointer(r.typ())
+ case sliceType:
+ return types2.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types2.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types2.NewChan(dir, r.typ())
+ case mapType:
+ return types2.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types2.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ fields[i] = types2.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ tags[i] = tag
+ }
+ return types2.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types2.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types2.Func, r.uint64())
+ for i := range methods {
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types2.Var
+ if base != nil {
+ recv = types2.NewVar(syntax.Pos{}, r.currPkg, "", base)
+ }
+
+ msig := r.signature(recv, nil, nil)
+ methods[i] = types2.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+
+ typ := types2.NewInterfaceType(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ len := r.uint64()
+ targs := make([]types2.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *Context
+ t, _ := types2.Instantiate(nil, baseType, targs, false)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types2.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types2.NewTerm(r.bool(), r.typ())
+ }
+ return types2.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types2.Var, rparams, tparams []*types2.TypeParam) *types2.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types2.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types2.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types2.TypeParam, n)
+ for i := range xs {
+ xs[i] = r.typ().(*types2.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types2.Tuple {
+ xs := make([]*types2.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types2.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types2.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types2.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+func baseType(typ types2.Type) *types2.Named {
+ // pointer receivers are never types2.Named types
+ if p, _ := typ.(*types2.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types2.Named types
+ n, _ := typ.(*types2.Named)
+ return n
+}
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
new file mode 100644
index 0000000..5810f5e
--- /dev/null
+++ b/src/cmd/compile/internal/importer/support.go
@@ -0,0 +1,152 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements support functionality for iimport.go.
+
+package importer
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "go/token"
+ "internal/pkgbits"
+ "sync"
+)
+
+func assert(p bool) {
+ base.Assert(p)
+}
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we
+ // reserve maxlines positions per file.
+ const maxlines = 64 * 1024
+ f := s.files[file]
+ if f == nil {
+ f = s.fset.AddFile(file, -1, maxlines)
+ s.files[file] = f
+ // Allocate the fake linebreak indices on first use.
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme?
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ f.SetLines(fakeLines)
+ }
+
+ if line > maxlines {
+ line = 1
+ }
+
+ // Treat the file as if it contained only newlines
+ // and column=1: use the line number as the offset.
+ return f.Pos(line - 1)
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types2.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types2.RecvOnly
+ case 2 /* Csend */ :
+ return types2.SendOnly
+ case 3 /* Cboth */ :
+ return types2.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
+var predeclared = []types2.Type{
+ // basic types
+ types2.Typ[types2.Bool],
+ types2.Typ[types2.Int],
+ types2.Typ[types2.Int8],
+ types2.Typ[types2.Int16],
+ types2.Typ[types2.Int32],
+ types2.Typ[types2.Int64],
+ types2.Typ[types2.Uint],
+ types2.Typ[types2.Uint8],
+ types2.Typ[types2.Uint16],
+ types2.Typ[types2.Uint32],
+ types2.Typ[types2.Uint64],
+ types2.Typ[types2.Uintptr],
+ types2.Typ[types2.Float32],
+ types2.Typ[types2.Float64],
+ types2.Typ[types2.Complex64],
+ types2.Typ[types2.Complex128],
+ types2.Typ[types2.String],
+
+ // basic type aliases
+ types2.Universe.Lookup("byte").Type(),
+ types2.Universe.Lookup("rune").Type(),
+
+ // error
+ types2.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types2.Typ[types2.UntypedBool],
+ types2.Typ[types2.UntypedInt],
+ types2.Typ[types2.UntypedRune],
+ types2.Typ[types2.UntypedFloat],
+ types2.Typ[types2.UntypedComplex],
+ types2.Typ[types2.UntypedString],
+ types2.Typ[types2.UntypedNil],
+
+ // package unsafe
+ types2.Typ[types2.UnsafePointer],
+
+ // invalid type
+ types2.Typ[types2.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ // not to be confused with the universe any
+ anyType{},
+
+ // comparable
+ types2.Universe.Lookup("comparable").Type(),
+
+ // any
+ types2.Universe.Lookup("any").Type(),
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types2.Type { return t }
+func (t anyType) String() string { return "any" }
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+ idx pkgbits.Index
+ needed bool
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
diff --git a/src/cmd/compile/internal/importer/testdata/a.go b/src/cmd/compile/internal/importer/testdata/a.go
new file mode 100644
index 0000000..56e4292
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/a.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue13566
+
+package a
+
+import "encoding/json"
+
+type A struct {
+ a *A
+ json json.RawMessage
+}
diff --git a/src/cmd/compile/internal/importer/testdata/b.go b/src/cmd/compile/internal/importer/testdata/b.go
new file mode 100644
index 0000000..4196678
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/b.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue13566
+
+package b
+
+import "./a"
+
+type A a.A
diff --git a/src/cmd/compile/internal/importer/testdata/exports.go b/src/cmd/compile/internal/importer/testdata/exports.go
new file mode 100644
index 0000000..91598c0
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/exports.go
@@ -0,0 +1,91 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package exports
+
+import "go/ast"
+
+// Issue 3682: Correctly read dotted identifiers from export data.
+const init1 = 0
+
+func init() {}
+
+const (
+ C0 int = 0
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456e+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
+ C8 = 42
+ C9 int = 42
+ C10 float64 = 42
+)
+
+type (
+ T1 int
+ T2 [10]int
+ T3 []int
+ T4 *int
+ T5 chan int
+ T6a chan<- int
+ T6b chan (<-chan int)
+ T6c chan<- (chan int)
+ T7 <-chan *ast.File
+ T8 struct{}
+ T9 struct {
+ a int
+ b, c float32
+ d []string `go:"tag"`
+ }
+ T10 struct {
+ T8
+ T9
+ _ *T10
+ }
+ T11 map[int]string
+ T12 interface{}
+ T13 interface {
+ m1()
+ m2(int) float32
+ }
+ T14 interface {
+ T12
+ T13
+ m3(x ...struct{}) []T9
+ }
+ T15 func()
+ T16 func(int)
+ T17 func(x int)
+ T18 func() float32
+ T19 func() (x float32)
+ T20 func(...interface{})
+ T21 struct{ next *T21 }
+ T22 struct{ link *T23 }
+ T23 struct{ link *T22 }
+ T24 *T24
+ T25 *T26
+ T26 *T27
+ T27 *T25
+ T28 func(T28) T28
+)
+
+var (
+ V0 int
+ V1 = -991.0
+ V2 float32 = 1.2
+)
+
+func F1() {}
+func F2(x int) {}
+func F3() int { return 0 }
+func F4() float32 { return 0 }
+func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
+
+func (p *T1) M1()
diff --git a/src/cmd/compile/internal/importer/testdata/generics.go b/src/cmd/compile/internal/importer/testdata/generics.go
new file mode 100644
index 0000000..00bf040
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/generics.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package generics
+
+type Any any
+
+var x any
+
+type T[A, B any] struct {
+ Left A
+ Right B
+}
+
+var X T[int, string] = T[int, string]{1, "hi"}
+
+func ToInt[P interface{ ~int }](p P) int { return int(p) }
+
+var IntID = ToInt[int]
+
+type G[C comparable] int
+
+func ImplicitFunc[T ~int]() {}
+
+type ImplicitType[T ~int] int
diff --git a/src/cmd/compile/internal/importer/testdata/issue15920.go b/src/cmd/compile/internal/importer/testdata/issue15920.go
new file mode 100644
index 0000000..c70f7d8
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue15920.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The underlying type of Error is the underlying type of error.
+// Make sure we can import this again without problems.
+type Error error
+
+func F() Error { return nil }
diff --git a/src/cmd/compile/internal/importer/testdata/issue20046.go b/src/cmd/compile/internal/importer/testdata/issue20046.go
new file mode 100644
index 0000000..c63ee82
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue20046.go
@@ -0,0 +1,9 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var V interface {
+ M()
+}
diff --git a/src/cmd/compile/internal/importer/testdata/issue25301.go b/src/cmd/compile/internal/importer/testdata/issue25301.go
new file mode 100644
index 0000000..e3dc98b
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue25301.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue25301
+
+type (
+ A = interface {
+ M()
+ }
+ T interface {
+ A
+ }
+ S struct{}
+)
+
+func (S) M() { println("m") }
diff --git a/src/cmd/compile/internal/importer/testdata/issue25596.go b/src/cmd/compile/internal/importer/testdata/issue25596.go
new file mode 100644
index 0000000..8923373
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue25596.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue25596
+
+type E interface {
+ M() T
+}
+
+type T interface {
+ E
+}
diff --git a/src/cmd/compile/internal/importer/testdata/p.go b/src/cmd/compile/internal/importer/testdata/p.go
new file mode 100644
index 0000000..9e2e705
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/p.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue15517
+
+package p
+
+const C = 0
+
+var V int
+
+func F() {}
diff --git a/src/cmd/compile/internal/importer/testdata/versions/test.go b/src/cmd/compile/internal/importer/testdata/versions/test.go
new file mode 100644
index 0000000..227fc09
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/versions/test.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To create a test case for a new export format version,
+// build this package with the latest compiler and store
+// the resulting .a file appropriately named in the versions
+// directory. The VersionHandling test will pick it up.
+//
+// In the testdata/versions:
+//
+// go build -o test_go1.$X_$Y.a test.go
+//
+// with $X = Go version and $Y = export format version
+// (add 'b' or 'i' to distinguish between binary and
+// indexed format starting with 1.11 as long as both
+// formats are supported).
+//
+// Make sure this source is extended such that it exercises
+// whatever export format change has taken place.
+
+package test
+
+// Any release before and including Go 1.7 didn't encode
+// the package for a blank struct field.
+type BlankField struct {
+ _ int
+}
diff --git a/src/cmd/compile/internal/importer/ureader.go b/src/cmd/compile/internal/importer/ureader.go
new file mode 100644
index 0000000..f5c2f41
--- /dev/null
+++ b/src/cmd/compile/internal/importer/ureader.go
@@ -0,0 +1,535 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importer
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+ "internal/pkgbits"
+)
+
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ ctxt *types2.Context
+ imports map[string]*types2.Package
+
+ posBases []*syntax.PosBase
+ pkgs []*types2.Package
+ typs []types2.Type
+}
+
+func ReadPackage(ctxt *types2.Context, imports map[string]*types2.Package, input pkgbits.PkgDecoder) *types2.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
+
+ ctxt: ctxt,
+ imports: imports,
+
+ posBases: make([]*syntax.PosBase, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types2.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types2.Type, input.NumElems(pkgbits.RelocType)),
+ }
+
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+ pkg := r.pkg()
+ r.Bool() // TODO(mdempsky): Remove; was "has init"
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+}
+
+type readerDict struct {
+ bounds []typeInfo
+
+ tparams []*types2.TypeParam
+
+ derived []derivedInfo
+ derivedTypes []types2.Type
+}
+
+type readerTypeBound struct {
+ derived bool
+ boundIdx int
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.TempDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+ pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() syntax.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return syntax.Pos{}
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return syntax.MakePos(posBase, line, col)
+}
+
+func (r *reader) posBase() *syntax.PosBase {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) *syntax.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+ var b *syntax.PosBase
+ {
+ r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+ filename := r.String()
+
+ if r.Bool() {
+ b = syntax.NewTrimmedFileBase(filename, true)
+ } else {
+ pos := r.pos()
+ line := r.Uint()
+ col := r.Uint()
+ b = syntax.NewLineBase(pos, filename, true, line, col)
+ }
+ pr.retireReader(r)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types2.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types2.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types2.Package {
+ path := r.String()
+ switch path {
+ case "":
+ path = r.p.PkgPath()
+ case "builtin":
+ return nil // universe
+ case "unsafe":
+ return types2.Unsafe
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.String()
+ pkg := types2.NewPackage(path, name)
+ r.p.imports[path] = pkg
+
+ // TODO(mdempsky): The list of imported packages is important for
+ // go/types, but we could probably skip populating it for types2.
+ imports := make([]*types2.Package, r.Len())
+ for i := range imports {
+ imports[i] = r.pkg()
+ }
+ pkg.SetImports(imports)
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() types2.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types2.Type {
+ idx := info.idx
+ var where *types2.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ var typ types2.Type
+ {
+ r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ = r.doTyp()
+ assert(typ != nil)
+ pr.retireReader(r)
+ }
+
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader) doTyp() (res types2.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case pkgbits.TypeBasic:
+ return types2.Typ[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types2.TypeName)
+ if len(targs) != 0 {
+ t, _ := types2.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
+
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
+ return types2.NewArray(r.typ(), len)
+ case pkgbits.TypeChan:
+ dir := types2.ChanDir(r.Len())
+ return types2.NewChan(dir, r.typ())
+ case pkgbits.TypeMap:
+ return types2.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types2.NewPointer(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil, nil, nil)
+ case pkgbits.TypeSlice:
+ return types2.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) structType() *types2.Struct {
+ fields := make([]*types2.Var, r.Len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.String()
+ embedded := r.Bool()
+
+ fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types2.NewStruct(fields, tags)
+}
+
+func (r *reader) unionType() *types2.Union {
+ terms := make([]*types2.Term, r.Len())
+ for i := range terms {
+ terms[i] = types2.NewTerm(r.Bool(), r.typ())
+ }
+ return types2.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types2.Interface {
+ methods := make([]*types2.Func, r.Len())
+ embeddeds := make([]types2.Type, r.Len())
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types2.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ iface := types2.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+ return iface
+}
+
+func (r *reader) signature(recv *types2.Var, rtparams, tparams []*types2.TypeParam) *types2.Signature {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.Bool()
+
+ return types2.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types2.Tuple {
+ r.Sync(pkgbits.SyncParams)
+ params := make([]*types2.Var, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+ return types2.NewTuple(params...)
+}
+
+func (r *reader) param() *types2.Var {
+ r.Sync(pkgbits.SyncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types2.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types2.Object, []types2.Type) {
+ r.Sync(pkgbits.SyncObject)
+
+ assert(!r.Bool())
+
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ obj := pkg.Scope().Lookup(name)
+
+ targs := make([]types2.Type, r.Len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types2.Package, string) {
+ var objPkg *types2.Package
+ var objName string
+ var tag pkgbits.CodeObj
+ {
+ rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+ objPkg, objName = rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+ pr.retireReader(rname)
+ }
+
+ if tag == pkgbits.ObjStub {
+ base.Assertf(objPkg == nil || objPkg == types2.Unsafe, "unexpected stub package: %v", objPkg)
+ return objPkg, objName
+ }
+
+ objPkg.Scope().InsertLazy(objName, func() types2.Object {
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
+
+ switch tag {
+ default:
+ panic("weird")
+
+ case pkgbits.ObjAlias:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewTypeName(pos, objPkg, objName, typ)
+
+ case pkgbits.ObjConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.Value()
+ return types2.NewConst(pos, objPkg, objName, typ, val)
+
+ case pkgbits.ObjFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ return types2.NewFunc(pos, objPkg, objName, sig)
+
+ case pkgbits.ObjType:
+ pos := r.pos()
+
+ return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeParam, underlying types2.Type, methods []*types2.Func) {
+ tparams = r.typeParamNames()
+
+ // TODO(mdempsky): Rewrite receiver types to underlying is an
+ // Interface? The go/types importer does this (I think because
+ // unit tests expected that), but cmd/compile doesn't care
+ // about it, so maybe we can avoid worrying about that here.
+ underlying = r.typ().Underlying()
+
+ methods = make([]*types2.Func, r.Len())
+ for i := range methods {
+ methods[i] = r.method()
+ }
+
+ return
+ })
+
+ case pkgbits.ObjVar:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewVar(pos, objPkg, objName, typ)
+ }
+ })
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+ var dict readerDict
+ {
+ r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+
+ if implicits := r.Len(); implicits != 0 {
+ base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.Len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]types2.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ }
+
+ pr.retireReader(r)
+ }
+ // function references follow, but reader doesn't need those
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() []*types2.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implement type parameters. This is currently fine, because
+ // reader is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types2.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types2.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types2.NewTypeParam(tname, nil)
+ }
+
+ for i, bound := range r.dict.bounds {
+ r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict))
+ }
+
+ return r.dict.tparams
+}
+
+func (r *reader) method() *types2.Func {
+ r.Sync(pkgbits.SyncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rtparams := r.typeParamNames()
+ sig := r.signature(r.param(), rtparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go.
+ return types2.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types2.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types2.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types2.Package, string) { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types2.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
+}
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
new file mode 100644
index 0000000..b365008
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -0,0 +1,1217 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first CanInline determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then InlineCalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and
+// are not supported.
+// 0: disabled
+// 1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default)
+// 2: (unassigned)
+// 3: (unassigned)
+// 4: allow non-leaf functions
+//
+// At some point this may get another default and become switch-offable with -N.
+//
+// The -d typcheckinl flag enables early typechecking of all imported bodies,
+// which is useful to flush out bugs.
+//
+// The Debug.m flag enables diagnostic output. a single -m is useful for verifying
+// which calls get inlined or not, more is for debugging, and may go away at any point.
+
+package inline
+
+import (
+ "fmt"
+ "go/constant"
+ "internal/buildcfg"
+ "strconv"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/inline/inlheur"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// Inlining budget parameters, gathered in one place
+const (
+ inlineMaxBudget = 80
+ inlineExtraAppendCost = 0
+ // default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
+ inlineExtraCallCost = 57 // 57 was benchmarked to provided most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
+ inlineExtraPanicCost = 1 // do not penalize inlining panics.
+ inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
+
+ inlineBigFunctionNodes = 5000 // Functions with this many nodes are considered "big".
+ inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function.
+)
+
+var (
+ // List of all hot callee nodes.
+ // TODO(prattmic): Make this non-global.
+ candHotCalleeMap = make(map[*pgo.IRNode]struct{})
+
+ // List of all hot call sites. CallSiteInfo.Callee is always nil.
+ // TODO(prattmic): Make this non-global.
+ candHotEdgeMap = make(map[pgo.CallSiteInfo]struct{})
+
+ // Threshold in percentage for hot callsite inlining.
+ inlineHotCallSiteThresholdPercent float64
+
+ // Threshold in CDF percentage for hot callsite inlining,
+ // that is, for a threshold of X the hottest callsites that
+ // make up the top X% of total edge weight will be
+ // considered hot for inlining candidates.
+ inlineCDFHotCallSiteThresholdPercent = float64(99)
+
+ // Budget increased due to hotness.
+ inlineHotMaxBudget int32 = 2000
+)
+
+// PGOInlinePrologue records the hot callsites from ir-graph.
+func PGOInlinePrologue(p *pgo.Profile, funcs []*ir.Func) {
+ if base.Debug.PGOInlineCDFThreshold != "" {
+ if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 {
+ inlineCDFHotCallSiteThresholdPercent = s
+ } else {
+ base.Fatalf("invalid PGOInlineCDFThreshold, must be between 0 and 100")
+ }
+ }
+ var hotCallsites []pgo.NamedCallEdge
+ inlineHotCallSiteThresholdPercent, hotCallsites = hotNodesFromCDF(p)
+ if base.Debug.PGODebug > 0 {
+ fmt.Printf("hot-callsite-thres-from-CDF=%v\n", inlineHotCallSiteThresholdPercent)
+ }
+
+ if x := base.Debug.PGOInlineBudget; x != 0 {
+ inlineHotMaxBudget = int32(x)
+ }
+
+ for _, n := range hotCallsites {
+ // mark inlineable callees from hot edges
+ if callee := p.WeightedCG.IRNodes[n.CalleeName]; callee != nil {
+ candHotCalleeMap[callee] = struct{}{}
+ }
+ // mark hot call sites
+ if caller := p.WeightedCG.IRNodes[n.CallerName]; caller != nil && caller.AST != nil {
+ csi := pgo.CallSiteInfo{LineOffset: n.CallSiteOffset, Caller: caller.AST}
+ candHotEdgeMap[csi] = struct{}{}
+ }
+ }
+
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("hot-cg before inline in dot format:")
+ p.PrintWeightedCallGraphDOT(inlineHotCallSiteThresholdPercent)
+ }
+}
+
+// hotNodesFromCDF computes an edge weight threshold and the list of hot
+// nodes that make up the given percentage of the CDF. The threshold, as
+// a percent, is the lower bound of weight for nodes to be considered hot
+// (currently only used in debug prints) (in case of equal weights,
+// comparing with the threshold may not accurately reflect which nodes are
+// considiered hot).
+func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) {
+ cum := int64(0)
+ for i, n := range p.NamedEdgeMap.ByWeight {
+ w := p.NamedEdgeMap.Weight[n]
+ cum += w
+ if pgo.WeightInPercentage(cum, p.TotalWeight) > inlineCDFHotCallSiteThresholdPercent {
+ // nodes[:i+1] to include the very last node that makes it to go over the threshold.
+ // (Say, if the CDF threshold is 50% and one hot node takes 60% of weight, we want to
+ // include that node instead of excluding it.)
+ return pgo.WeightInPercentage(w, p.TotalWeight), p.NamedEdgeMap.ByWeight[:i+1]
+ }
+ }
+ return 0, p.NamedEdgeMap.ByWeight
+}
+
+// CanInlineFuncs computes whether a batch of functions are inlinable.
+func CanInlineFuncs(funcs []*ir.Func, profile *pgo.Profile) {
+ if profile != nil {
+ PGOInlinePrologue(profile, funcs)
+ }
+
+ ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) {
+ CanInlineSCC(list, recursive, profile)
+ })
+}
+
+// CanInlineSCC computes the inlinability of functions within an SCC
+// (strongly connected component).
+//
+// CanInlineSCC is designed to be used by ir.VisitFuncsBottomUp
+// callbacks.
+func CanInlineSCC(funcs []*ir.Func, recursive bool, profile *pgo.Profile) {
+ if base.Flag.LowerL == 0 {
+ return
+ }
+
+ numfns := numNonClosures(funcs)
+
+ for _, fn := range funcs {
+ if !recursive || numfns > 1 {
+ // We allow inlining if there is no
+ // recursion, or the recursion cycle is
+ // across more than one function.
+ CanInline(fn, profile)
+ } else {
+ if base.Flag.LowerM > 1 && fn.OClosure == nil {
+ fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(fn), fn.Nname)
+ }
+ }
+ if inlheur.Enabled() {
+ analyzeFuncProps(fn, profile)
+ }
+ }
+}
+
+// GarbageCollectUnreferencedHiddenClosures makes a pass over all the
+// top-level (non-hidden-closure) functions looking for nested closure
+// functions that are reachable, then sweeps through the Target.Decls
+// list and marks any non-reachable hidden closure function as dead.
+// See issues #59404 and #59638 for more context.
+func GarbageCollectUnreferencedHiddenClosures() {
+
+ liveFuncs := make(map[*ir.Func]bool)
+
+ var markLiveFuncs func(fn *ir.Func)
+ markLiveFuncs = func(fn *ir.Func) {
+ if liveFuncs[fn] {
+ return
+ }
+ liveFuncs[fn] = true
+ ir.Visit(fn, func(n ir.Node) {
+ if clo, ok := n.(*ir.ClosureExpr); ok {
+ markLiveFuncs(clo.Func)
+ }
+ })
+ }
+
+ for i := 0; i < len(typecheck.Target.Funcs); i++ {
+ fn := typecheck.Target.Funcs[i]
+ if fn.IsHiddenClosure() {
+ continue
+ }
+ markLiveFuncs(fn)
+ }
+
+ for i := 0; i < len(typecheck.Target.Funcs); i++ {
+ fn := typecheck.Target.Funcs[i]
+ if !fn.IsHiddenClosure() {
+ continue
+ }
+ if fn.IsDeadcodeClosure() {
+ continue
+ }
+ if liveFuncs[fn] {
+ continue
+ }
+ fn.SetIsDeadcodeClosure(true)
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: unreferenced closure %v marked as dead\n", ir.Line(fn), fn)
+ }
+ if fn.Inl != nil && fn.LSym == nil {
+ ir.InitLSym(fn, true)
+ }
+ }
+}
+
+// inlineBudget determines the max budget for function 'fn' prior to
+// analyzing the hairyness of the body of 'fn'. We pass in the pgo
+// profile if available (which can change the budget), also a
+// 'relaxed' flag, which expands the budget slightly to allow for the
+// possibility that a call to the function might have its score
+// adjusted downwards. If 'verbose' is set, then print a remark where
+// we boost the budget due to PGO.
+func inlineBudget(fn *ir.Func, profile *pgo.Profile, relaxed bool, verbose bool) int32 {
+ // Update the budget for profile-guided inlining.
+ budget := int32(inlineMaxBudget)
+ if profile != nil {
+ if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok {
+ if _, ok := candHotCalleeMap[n]; ok {
+ budget = int32(inlineHotMaxBudget)
+ if verbose {
+ fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn))
+ }
+ }
+ }
+ }
+ if relaxed {
+ budget += inlheur.BudgetExpansion(inlineMaxBudget)
+ }
+ return budget
+}
+
+// CanInline determines whether fn is inlineable.
+// If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl.
+// fn and fn.Body will already have been typechecked.
+func CanInline(fn *ir.Func, profile *pgo.Profile) {
+ if fn.Nname == nil {
+ base.Fatalf("CanInline no nname %+v", fn)
+ }
+
+ var reason string // reason, if any, that the function was not inlined
+ if base.Flag.LowerM > 1 || logopt.Enabled() {
+ defer func() {
+ if reason != "" {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
+ }
+ }
+ }()
+ }
+
+ reason = InlineImpossible(fn)
+ if reason != "" {
+ return
+ }
+ if fn.Typecheck() == 0 {
+ base.Fatalf("CanInline on non-typechecked function %v", fn)
+ }
+
+ n := fn.Nname
+ if n.Func.InlinabilityChecked() {
+ return
+ }
+ defer n.Func.SetInlinabilityChecked(true)
+
+ cc := int32(inlineExtraCallCost)
+ if base.Flag.LowerL == 4 {
+ cc = 1 // this appears to yield better performance than 0.
+ }
+
+ // Used a "relaxed" inline budget if the new inliner is enabled.
+ relaxed := inlheur.Enabled()
+
+ // Compute the inline budget for this func.
+ budget := inlineBudget(fn, profile, relaxed, base.Debug.PGODebug > 0)
+
+ // At this point in the game the function we're looking at may
+ // have "stale" autos, vars that still appear in the Dcl list, but
+ // which no longer have any uses in the function body (due to
+ // elimination by deadcode). We'd like to exclude these dead vars
+ // when creating the "Inline.Dcl" field below; to accomplish this,
+ // the hairyVisitor below builds up a map of used/referenced
+ // locals, and we use this map to produce a pruned Inline.Dcl
+ // list. See issue 25459 for more context.
+
+ visitor := hairyVisitor{
+ curFunc: fn,
+ isBigFunc: IsBigFunc(fn),
+ budget: budget,
+ maxBudget: budget,
+ extraCallCost: cc,
+ profile: profile,
+ }
+ if visitor.tooHairy(fn) {
+ reason = visitor.reason
+ return
+ }
+
+ n.Func.Inl = &ir.Inline{
+ Cost: budget - visitor.budget,
+ Dcl: pruneUnusedAutos(n.Func.Dcl, &visitor),
+ HaveDcl: true,
+
+ CanDelayResults: canDelayResults(fn),
+ }
+ if base.Flag.LowerM != 0 || logopt.Enabled() {
+ noteInlinableFunc(n, fn, budget-visitor.budget)
+ }
+}
+
+// noteInlinableFunc issues a message to the user that the specified
+// function is inlinable.
+func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
+ } else if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
+ }
+ // JSON optimization log output.
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", cost))
+ }
+}
+
+// InlineImpossible returns a non-empty reason string if fn is impossible to
+// inline regardless of cost or contents.
+func InlineImpossible(fn *ir.Func) string {
+ var reason string // reason, if any, that the function can not be inlined.
+ if fn.Nname == nil {
+ reason = "no name"
+ return reason
+ }
+
+ // If marked "go:noinline", don't inline.
+ if fn.Pragma&ir.Noinline != 0 {
+ reason = "marked go:noinline"
+ return reason
+ }
+
+ // If marked "go:norace" and -race compilation, don't inline.
+ if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
+ reason = "marked go:norace with -race compilation"
+ return reason
+ }
+
+ // If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
+ if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
+ reason = "marked go:nocheckptr"
+ return reason
+ }
+
+ // If marked "go:cgo_unsafe_args", don't inline, since the function
+ // makes assumptions about its argument frame layout.
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ reason = "marked go:cgo_unsafe_args"
+ return reason
+ }
+
+ // If marked as "go:uintptrkeepalive", don't inline, since the keep
+ // alive information is lost during inlining.
+ //
+ // TODO(prattmic): This is handled on calls during escape analysis,
+ // which is after inlining. Move prior to inlining so the keep-alive is
+ // maintained after inlining.
+ if fn.Pragma&ir.UintptrKeepAlive != 0 {
+ reason = "marked as having a keep-alive uintptr argument"
+ return reason
+ }
+
+ // If marked as "go:uintptrescapes", don't inline, since the escape
+ // information is lost during inlining.
+ if fn.Pragma&ir.UintptrEscapes != 0 {
+ reason = "marked as having an escaping uintptr argument"
+ return reason
+ }
+
+ // The nowritebarrierrec checker currently works at function
+ // granularity, so inlining yeswritebarrierrec functions can confuse it
+ // (#22342). As a workaround, disallow inlining them for now.
+ if fn.Pragma&ir.Yeswritebarrierrec != 0 {
+ reason = "marked go:yeswritebarrierrec"
+ return reason
+ }
+
+ // If a local function has no fn.Body (is defined outside of Go), cannot inline it.
+ // Imported functions don't have fn.Body but might have inline body in fn.Inl.
+ if len(fn.Body) == 0 && !typecheck.HaveInlineBody(fn) {
+ reason = "no function body"
+ return reason
+ }
+
+ return ""
+}
+
+// canDelayResults reports whether inlined calls to fn can delay
+// declaring the result parameter until the "return" statement.
+func canDelayResults(fn *ir.Func) bool {
+ // We can delay declaring+initializing result parameters if:
+ // (1) there's exactly one "return" statement in the inlined function;
+ // (2) it's not an empty return statement (#44355); and
+ // (3) the result parameters aren't named.
+
+ nreturns := 0
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ if n, ok := n.(*ir.ReturnStmt); ok {
+ nreturns++
+ if len(n.Results) == 0 {
+ nreturns++ // empty return statement (case 2)
+ }
+ }
+ })
+
+ if nreturns != 1 {
+ return false // not exactly one return statement (case 1)
+ }
+
+ // temporaries for return values.
+ for _, param := range fn.Type().Results() {
+ if sym := param.Sym; sym != nil && !sym.IsBlank() {
+ return false // found a named result parameter (case 3)
+ }
+ }
+
+ return true
+}
+
+// hairyVisitor visits a function body to determine its inlining
+// hairiness and whether or not it can be inlined.
+type hairyVisitor struct {
+ // This is needed to access the current caller in the doNode function.
+ curFunc *ir.Func
+ isBigFunc bool
+ budget int32
+ maxBudget int32
+ reason string
+ extraCallCost int32
+ usedLocals ir.NameSet
+ do func(ir.Node) bool
+ profile *pgo.Profile
+}
+
+func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
+ v.do = v.doNode // cache closure
+ if ir.DoChildren(fn, v.do) {
+ return true
+ }
+ if v.budget < 0 {
+ v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", v.maxBudget-v.budget, v.maxBudget)
+ return true
+ }
+ return false
+}
+
+// doNode visits n and its children, updates the state in v, and returns true if
+// n makes the current function too hairy for inlining.
+func (v *hairyVisitor) doNode(n ir.Node) bool {
+ if n == nil {
+ return false
+ }
+opSwitch:
+ switch n.Op() {
+ // Call is okay if inlinable and we have the budget for the body.
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ // Functions that call runtime.getcaller{pc,sp} can not be inlined
+ // because getcaller{pc,sp} expect a pointer to the caller's first argument.
+ //
+ // runtime.throw is a "cheap call" like panic in normal code.
+ var cheap bool
+ if n.Fun.Op() == ir.ONAME {
+ name := n.Fun.(*ir.Name)
+ if name.Class == ir.PFUNC {
+ switch fn := types.RuntimeSymName(name.Sym()); fn {
+ case "getcallerpc", "getcallersp":
+ v.reason = "call to " + fn
+ return true
+ case "throw":
+ v.budget -= inlineExtraThrowCost
+ break opSwitch
+ case "panicrangeexit":
+ cheap = true
+ }
+ // Special case for reflect.noescape. It does just type
+ // conversions to appease the escape analysis, and doesn't
+ // generate code.
+ if types.ReflectSymName(name.Sym()) == "noescape" {
+ cheap = true
+ }
+ }
+ // Special case for coverage counter updates; although
+ // these correspond to real operations, we treat them as
+ // zero cost for the moment. This is due to the existence
+ // of tests that are sensitive to inlining-- if the
+ // insertion of coverage instrumentation happens to tip a
+ // given function over the threshold and move it from
+ // "inlinable" to "not-inlinable", this can cause changes
+ // in allocation behavior, which can then result in test
+ // failures (a good example is the TestAllocations in
+ // crypto/ed25519).
+ if isAtomicCoverageCounterUpdate(n) {
+ return false
+ }
+ }
+ if n.Fun.Op() == ir.OMETHEXPR {
+ if meth := ir.MethodExprName(n.Fun); meth != nil {
+ if fn := meth.Func; fn != nil {
+ s := fn.Sym()
+ if types.RuntimeSymName(s) == "heapBits.nextArena" {
+ // Special case: explicitly allow mid-stack inlining of
+ // runtime.heapBits.next even though it calls slow-path
+ // runtime.heapBits.nextArena.
+ cheap = true
+ }
+ // Special case: on architectures that can do unaligned loads,
+ // explicitly mark encoding/binary methods as cheap,
+ // because in practice they are, even though our inlining
+ // budgeting system does not see that. See issue 42958.
+ if base.Ctxt.Arch.CanMergeLoads && s.Pkg.Path == "encoding/binary" {
+ switch s.Name {
+ case "littleEndian.Uint64", "littleEndian.Uint32", "littleEndian.Uint16",
+ "bigEndian.Uint64", "bigEndian.Uint32", "bigEndian.Uint16",
+ "littleEndian.PutUint64", "littleEndian.PutUint32", "littleEndian.PutUint16",
+ "bigEndian.PutUint64", "bigEndian.PutUint32", "bigEndian.PutUint16",
+ "littleEndian.AppendUint64", "littleEndian.AppendUint32", "littleEndian.AppendUint16",
+ "bigEndian.AppendUint64", "bigEndian.AppendUint32", "bigEndian.AppendUint16":
+ cheap = true
+ }
+ }
+ }
+ }
+ }
+ if cheap {
+ break // treat like any other node, that is, cost of 1
+ }
+
+ if ir.IsIntrinsicCall(n) {
+ // Treat like any other node.
+ break
+ }
+
+ if callee := inlCallee(v.curFunc, n.Fun, v.profile); callee != nil && typecheck.HaveInlineBody(callee) {
+ // Check whether we'd actually inline this call. Set
+ // log == false since we aren't actually doing inlining
+ // yet.
+ if ok, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok {
+ // mkinlcall would inline this call [1], so use
+ // the cost of the inline body as the cost of
+ // the call, as that is what will actually
+ // appear in the code.
+ //
+ // [1] This is almost a perfect match to the
+ // mkinlcall logic, except that
+ // canInlineCallExpr considers inlining cycles
+ // by looking at what has already been inlined.
+ // Since we haven't done any inlining yet we
+ // will miss those.
+ v.budget -= callee.Inl.Cost
+ break
+ }
+ }
+
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+
+ // Things that are too hairy, irrespective of the budget
+ case ir.OCALL, ir.OCALLINTER:
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
+ // Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
+ // Before CL 284412, these conversions were introduced later in the
+ // compiler, so they didn't count against inlining budget.
+ v.budget++
+ }
+ v.budget -= inlineExtraPanicCost
+
+ case ir.ORECOVER:
+ base.FatalfAt(n.Pos(), "ORECOVER missed typecheck")
+ case ir.ORECOVERFP:
+ // recover matches the argument frame pointer to find
+ // the right panic value, so it needs an argument frame.
+ v.reason = "call to recover"
+ return true
+
+ case ir.OCLOSURE:
+ if base.Debug.InlFuncsWithClosures == 0 {
+ v.reason = "not inlining functions with closures"
+ return true
+ }
+
+ // TODO(danscales): Maybe make budget proportional to number of closure
+ // variables, e.g.:
+ //v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
+ // TODO(austin): However, if we're able to inline this closure into
+ // v.curFunc, then we actually pay nothing for the closure captures. We
+ // should try to account for that if we're going to account for captures.
+ v.budget -= 15
+
+ case ir.OGO, ir.ODEFER, ir.OTAILCALL:
+ v.reason = "unhandled op " + n.Op().String()
+ return true
+
+ case ir.OAPPEND:
+ v.budget -= inlineExtraAppendCost
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ // Make "&s.f" cost 0 when f's offset is zero.
+ if dot, ok := n.X.(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOT || dot.Op() == ir.ODOTPTR) {
+ if _, ok := dot.X.(*ir.Name); ok && dot.Selection.Offset == 0 {
+ v.budget += 2 // undo ir.OADDR+ir.ODOT/ir.ODOTPTR
+ }
+ }
+
+ case ir.ODEREF:
+ // *(*X)(unsafe.Pointer(&x)) is low-cost
+ n := n.(*ir.StarExpr)
+
+ ptr := n.X
+ for ptr.Op() == ir.OCONVNOP {
+ ptr = ptr.(*ir.ConvExpr).X
+ }
+ if ptr.Op() == ir.OADDR {
+ v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR
+ }
+
+ case ir.OCONVNOP:
+ // This doesn't produce code, but the children might.
+ v.budget++ // undo default cost
+
+ case ir.OFALL, ir.OTYPE:
+ // These nodes don't produce code; omit from inlining budget.
+ return false
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ if ir.IsConst(n.Cond, constant.Bool) {
+ // This if and the condition cost nothing.
+ if doList(n.Init(), v.do) {
+ return true
+ }
+ if ir.BoolVal(n.Cond) {
+ return doList(n.Body, v.do)
+ } else {
+ return doList(n.Else, v.do)
+ }
+ }
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PAUTO {
+ v.usedLocals.Add(n)
+ }
+
+ case ir.OBLOCK:
+ // The only OBLOCK we should see at this point is an empty one.
+ // In any event, let the visitList(n.List()) below take care of the statements,
+ // and don't charge for the OBLOCK itself. The ++ undoes the -- below.
+ v.budget++
+
+ case ir.OMETHVALUE, ir.OSLICELIT:
+ v.budget-- // Hack for toolstash -cmp.
+
+ case ir.OMETHEXPR:
+ v.budget++ // Hack for toolstash -cmp.
+
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+
+ // Unified IR unconditionally rewrites:
+ //
+ // a, b = f()
+ //
+ // into:
+ //
+ // DCL tmp1
+ // DCL tmp2
+ // tmp1, tmp2 = f()
+ // a, b = tmp1, tmp2
+ //
+ // so that it can insert implicit conversions as necessary. To
+ // minimize impact to the existing inlining heuristics (in
+ // particular, to avoid breaking the existing inlinability regress
+ // tests), we need to compensate for this here.
+ //
+ // See also identical logic in IsBigFunc.
+ if len(n.Rhs) > 0 {
+ if init := n.Rhs[0].Init(); len(init) == 1 {
+ if _, ok := init[0].(*ir.AssignListStmt); ok {
+ // 4 for each value, because each temporary variable now
+ // appears 3 times (DCL, LHS, RHS), plus an extra DCL node.
+ //
+ // 1 for the extra "tmp1, tmp2 = f()" assignment statement.
+ v.budget += 4*int32(len(n.Lhs)) + 1
+ }
+ }
+ }
+
+ case ir.OAS:
+ // Special case for coverage counter updates and coverage
+ // function registrations. Although these correspond to real
+ // operations, we treat them as zero cost for the moment. This
+ // is primarily due to the existence of tests that are
+ // sensitive to inlining-- if the insertion of coverage
+ // instrumentation happens to tip a given function over the
+ // threshold and move it from "inlinable" to "not-inlinable",
+ // this can cause changes in allocation behavior, which can
+ // then result in test failures (a good example is the
+ // TestAllocations in crypto/ed25519).
+ n := n.(*ir.AssignStmt)
+ if n.X.Op() == ir.OINDEX && isIndexingCoverageCounter(n.X) {
+ return false
+ }
+ }
+
+ v.budget--
+
+ // When debugging, don't stop early, to get full cost of inlining this function
+ if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
+ v.reason = "too expensive"
+ return true
+ }
+
+ return ir.DoChildren(n, v.do)
+}
+
+// IsBigFunc reports whether fn is a "big" function.
+//
+// Note: The criteria for "big" is heuristic and subject to change.
+func IsBigFunc(fn *ir.Func) bool {
+ budget := inlineBigFunctionNodes
+ return ir.Any(fn, func(n ir.Node) bool {
+ // See logic in hairyVisitor.doNode, explaining unified IR's
+ // handling of "a, b = f()" assignments.
+ if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 {
+ if init := n.Rhs[0].Init(); len(init) == 1 {
+ if _, ok := init[0].(*ir.AssignListStmt); ok {
+ budget += 4*len(n.Lhs) + 1
+ }
+ }
+ }
+
+ budget--
+ return budget <= 0
+ })
+}
+
+// TryInlineCall returns an inlined call expression for call, or nil
+// if inlining is not possible.
+func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgo.Profile) *ir.InlinedCallExpr {
+ if base.Flag.LowerL == 0 {
+ return nil
+ }
+ if call.Op() != ir.OCALLFUNC {
+ return nil
+ }
+ if call.GoDefer || call.NoInline {
+ return nil
+ }
+
+ // Prevent inlining some reflect.Value methods when using checkptr,
+ // even when package reflect was compiled without it (#35073).
+ if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR {
+ if method := ir.MethodExprName(call.Fun); method != nil {
+ switch types.ReflectSymName(method.Sym()) {
+ case "Value.UnsafeAddr", "Value.Pointer":
+ return nil
+ }
+ }
+ }
+
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun)
+ }
+ if ir.IsIntrinsicCall(call) {
+ return nil
+ }
+ if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) {
+ return mkinlcall(callerfn, call, fn, bigCaller)
+ }
+ return nil
+}
+
+// inlCallee takes a function-typed expression and returns the underlying function ONAME
+// that it refers to if statically known. Otherwise, it returns nil.
+func inlCallee(caller *ir.Func, fn ir.Node, profile *pgo.Profile) (res *ir.Func) {
+ fn = ir.StaticValue(fn)
+ switch fn.Op() {
+ case ir.OMETHEXPR:
+ fn := fn.(*ir.SelectorExpr)
+ n := ir.MethodExprName(fn)
+ // Check that receiver type matches fn.X.
+ // TODO(mdempsky): Handle implicit dereference
+ // of pointer receiver argument?
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
+ return nil
+ }
+ return n.Func
+ case ir.ONAME:
+ fn := fn.(*ir.Name)
+ if fn.Class == ir.PFUNC {
+ return fn.Func
+ }
+ case ir.OCLOSURE:
+ fn := fn.(*ir.ClosureExpr)
+ c := fn.Func
+ if len(c.ClosureVars) != 0 && c.ClosureVars[0].Outer.Curfn != caller {
+ return nil // inliner doesn't support inlining across closure frames
+ }
+ CanInline(c, profile)
+ return c
+ }
+ return nil
+}
+
+var inlgen int
+
+// SSADumpInline gives the SSA back end a chance to dump the function
+// when producing output for debugging the compiler itself.
+var SSADumpInline = func(*ir.Func) {}
+
+// InlineCall allows the inliner implementation to be overridden.
+// If it returns nil, the function will not be inlined.
+var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ base.Fatalf("inline.InlineCall not overridden")
+ panic("unreachable")
+}
+
+// inlineCostOK returns true if call n from caller to callee is cheap enough to
+// inline. bigCaller indicates that caller is a big function.
+//
+// In addition to the "cost OK" boolean, it also returns the "max
+// cost" limit used to make the decision (which may differ depending
+// on func size), and the score assigned to this specific callsite.
+func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32) {
+ maxCost := int32(inlineMaxBudget)
+ if bigCaller {
+ // We use this to restrict inlining into very big functions.
+ // See issue 26546 and 17566.
+ maxCost = inlineBigFunctionMaxCost
+ }
+
+ metric := callee.Inl.Cost
+ if inlheur.Enabled() {
+ score, ok := inlheur.GetCallSiteScore(caller, n)
+ if ok {
+ metric = int32(score)
+ }
+ }
+
+ if metric <= maxCost {
+ // Simple case. Function is already cheap enough.
+ return true, 0, metric
+ }
+
+ // We'll also allow inlining of hot functions below inlineHotMaxBudget,
+ // but only in small functions.
+
+ lineOffset := pgo.NodeLineOffset(n, caller)
+ csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller}
+ if _, ok := candHotEdgeMap[csi]; !ok {
+ // Cold
+ return false, maxCost, metric
+ }
+
+ // Hot
+
+ if bigCaller {
+ if base.Debug.PGODebug > 0 {
+ fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
+ }
+ return false, maxCost, metric
+ }
+
+ if metric > inlineHotMaxBudget {
+ return false, inlineHotMaxBudget, metric
+ }
+
+ if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) {
+ // De-selected by PGO Hash.
+ return false, maxCost, metric
+ }
+
+ if base.Debug.PGODebug > 0 {
+ fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller))
+ }
+
+ return true, 0, metric
+}
+
+// canInlineCallsite returns true if the call n from caller to callee
+// can be inlined, plus the score computed for the call expr in
+// question. bigCaller indicates that caller is a big function. log
+// indicates that the 'cannot inline' reason should be logged.
+//
+// Preconditions: CanInline(callee) has already been called.
+func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32) {
+ if callee.Inl == nil {
+ // callee is never inlinable.
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee)))
+ }
+ return false, 0
+ }
+
+ ok, maxCost, callSiteScore := inlineCostOK(n, callerfn, callee, bigCaller)
+ if !ok {
+ // callee cost too high for this call site.
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost))
+ }
+ return false, 0
+ }
+
+ if callee == callerfn {
+ // Can't recursively inline a function into itself.
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn)))
+ }
+ return false, 0
+ }
+
+ if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) {
+ // Runtime package must not be instrumented.
+ // Instrument skips runtime package. However, some runtime code can be
+ // inlined into other packages and instrumented there. To avoid this,
+ // we disable inlining of runtime functions when instrumenting.
+ // The example that we observed is inlining of LockOSThread,
+ // which lead to false race reports on m contents.
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee)))
+ }
+ return false, 0
+ }
+
+ if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) {
+ if log && logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf(`call to into "no-race" package function %s in race build`, ir.PkgFuncName(callee)))
+ }
+ return false, 0
+ }
+
+ // Check if we've already inlined this function at this particular
+ // call site, in order to stop inlining when we reach the beginning
+ // of a recursion cycle again. We don't inline immediately recursive
+ // functions, but allow inlining if there is a recursion cycle of
+ // many functions. Most likely, the inlining will stop before we
+ // even hit the beginning of the cycle again, but this catches the
+ // unusual case.
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+ sym := callee.Linksym()
+ for inlIndex := parent; inlIndex >= 0; inlIndex = base.Ctxt.InlTree.Parent(inlIndex) {
+ if base.Ctxt.InlTree.InlinedFunction(inlIndex) == sym {
+ if log {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), callee, ir.FuncName(callerfn))
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
+ fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee)))
+ }
+ }
+ return false, 0
+ }
+ }
+
+ return true, callSiteScore
+}
+
+// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or
+// nil if it cannot be inlined. callerfn is the function that contains
+// n, and fn is the function being called.
+//
+// The result of mkinlcall MUST be assigned back to n, e.g.
+//
+// n.Left = mkinlcall(n.Left, fn, isddd)
+func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr {
+ ok, score := canInlineCallExpr(callerfn, n, fn, bigCaller, true)
+ if !ok {
+ return nil
+ }
+ typecheck.AssertFixedCall(n)
+
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+ sym := fn.Linksym()
+ inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym, ir.FuncName(fn))
+
+ closureInitLSym := func(n *ir.CallExpr, fn *ir.Func) {
+ // The linker needs FuncInfo metadata for all inlined
+ // functions. This is typically handled by gc.enqueueFunc
+ // calling ir.InitLSym for all function declarations in
+ // typecheck.Target.Decls (ir.UseClosure adds all closures to
+ // Decls).
+ //
+ // However, non-trivial closures in Decls are ignored, and are
+ // insteaded enqueued when walk of the calling function
+ // discovers them.
+ //
+ // This presents a problem for direct calls to closures.
+ // Inlining will replace the entire closure definition with its
+ // body, which hides the closure from walk and thus suppresses
+ // symbol creation.
+ //
+ // Explicitly create a symbol early in this edge case to ensure
+ // we keep this metadata.
+ //
+ // TODO: Refactor to keep a reference so this can all be done
+ // by enqueueFunc.
+
+ if n.Op() != ir.OCALLFUNC {
+ // Not a standard call.
+ return
+ }
+ if n.Fun.Op() != ir.OCLOSURE {
+ // Not a direct closure call.
+ return
+ }
+
+ clo := n.Fun.(*ir.ClosureExpr)
+ if ir.IsTrivialClosure(clo) {
+ // enqueueFunc will handle trivial closures anyways.
+ return
+ }
+
+ ir.InitLSym(fn, true)
+ }
+
+ closureInitLSym(n, fn)
+
+ if base.Flag.GenDwarfInl > 0 {
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
+ }
+ }
+
+ if base.Flag.LowerM != 0 {
+ if buildcfg.Experiment.NewInliner {
+ fmt.Printf("%v: inlining call to %v with score %d\n",
+ ir.Line(n), fn, score)
+ } else {
+ fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+ }
+ }
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
+ }
+
+ res := InlineCall(callerfn, n, fn, inlIndex)
+
+ if res == nil {
+ base.FatalfAt(n.Pos(), "inlining call to %v failed", fn)
+ }
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
+ }
+
+ if inlheur.Enabled() {
+ inlheur.UpdateCallsiteTable(callerfn, n, res)
+ }
+
+ return res
+}
+
+// CalleeEffects appends any side effects from evaluating callee to init.
+func CalleeEffects(init *ir.Nodes, callee ir.Node) {
+ for {
+ init.Append(ir.TakeInit(callee)...)
+
+ switch callee.Op() {
+ case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
+ return // done
+
+ case ir.OCONVNOP:
+ conv := callee.(*ir.ConvExpr)
+ callee = conv.X
+
+ case ir.OINLCALL:
+ ic := callee.(*ir.InlinedCallExpr)
+ init.Append(ic.Body.Take()...)
+ callee = ic.SingleResult()
+
+ default:
+ base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
+ }
+ }
+}
+
+func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
+ s := make([]*ir.Name, 0, len(ll))
+ for _, n := range ll {
+ if n.Class == ir.PAUTO {
+ if !vis.usedLocals.Has(n) {
+ // TODO(mdempsky): Simplify code after confident that this
+ // never happens anymore.
+ base.FatalfAt(n.Pos(), "unused auto: %v", n)
+ continue
+ }
+ }
+ s = append(s, n)
+ }
+ return s
+}
+
+// numNonClosures returns the number of functions in list which are not closures.
+func numNonClosures(list []*ir.Func) int {
+ count := 0
+ for _, fn := range list {
+ if fn.OClosure == nil {
+ count++
+ }
+ }
+ return count
+}
+
+func doList(list []ir.Node, do func(ir.Node) bool) bool {
+ for _, x := range list {
+ if x != nil {
+ if do(x) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// isIndexingCoverageCounter returns true if the specified node 'n' is indexing
+// into a coverage counter array.
+func isIndexingCoverageCounter(n ir.Node) bool {
+ if n.Op() != ir.OINDEX {
+ return false
+ }
+ ixn := n.(*ir.IndexExpr)
+ if ixn.X.Op() != ir.ONAME || !ixn.X.Type().IsArray() {
+ return false
+ }
+ nn := ixn.X.(*ir.Name)
+ return nn.CoverageCounter()
+}
+
+// isAtomicCoverageCounterUpdate examines the specified node to
+// determine whether it represents a call to sync/atomic.AddUint32 to
+// increment a coverage counter.
+func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool {
+ if cn.Fun.Op() != ir.ONAME {
+ return false
+ }
+ name := cn.Fun.(*ir.Name)
+ if name.Class != ir.PFUNC {
+ return false
+ }
+ fn := name.Sym().Name
+ if name.Sym().Pkg.Path != "sync/atomic" ||
+ (fn != "AddUint32" && fn != "StoreUint32") {
+ return false
+ }
+ if len(cn.Args) != 2 || cn.Args[0].Op() != ir.OADDR {
+ return false
+ }
+ adn := cn.Args[0].(*ir.AddrExpr)
+ v := isIndexingCoverageCounter(adn.X)
+ return v
+}
+
+func PostProcessCallSites(profile *pgo.Profile) {
+ if base.Debug.DumpInlCallSiteScores != 0 {
+ budgetCallback := func(fn *ir.Func, prof *pgo.Profile) (int32, bool) {
+ v := inlineBudget(fn, prof, false, false)
+ return v, v == inlineHotMaxBudget
+ }
+ inlheur.DumpInlCallSiteScores(profile, budgetCallback)
+ }
+}
+
+func analyzeFuncProps(fn *ir.Func, p *pgo.Profile) {
+ canInline := func(fn *ir.Func) { CanInline(fn, p) }
+ budgetForFunc := func(fn *ir.Func) int32 {
+ return inlineBudget(fn, p, true, false)
+ }
+ inlheur.AnalyzeFunc(fn, canInline, budgetForFunc, inlineMaxBudget)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go
new file mode 100644
index 0000000..2faf76f
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go
@@ -0,0 +1,58 @@
+// Code generated by "stringer -bitset -type ActualExprPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ActualExprConstant-1]
+ _ = x[ActualExprIsConcreteConvIface-2]
+ _ = x[ActualExprIsFunc-4]
+ _ = x[ActualExprIsInlinableFunc-8]
+}
+
+var _ActualExprPropBits_value = [...]uint64{
+ 0x1, /* ActualExprConstant */
+ 0x2, /* ActualExprIsConcreteConvIface */
+ 0x4, /* ActualExprIsFunc */
+ 0x8, /* ActualExprIsInlinableFunc */
+}
+
+const _ActualExprPropBits_name = "ActualExprConstantActualExprIsConcreteConvIfaceActualExprIsFuncActualExprIsInlinableFunc"
+
+var _ActualExprPropBits_index = [...]uint8{0, 18, 47, 63, 88}
+
+func (i ActualExprPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _ActualExprPropBits_value {
+ x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "ActualExprPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze.go b/src/cmd/compile/internal/inline/inlheur/analyze.go
new file mode 100644
index 0000000..a1b6f35
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "encoding/json"
+ "fmt"
+ "internal/buildcfg"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+const (
+ debugTraceFuncs = 1 << iota
+ debugTraceFuncFlags
+ debugTraceResults
+ debugTraceParams
+ debugTraceExprClassify
+ debugTraceCalls
+ debugTraceScoring
+)
+
+// propAnalyzer interface is used for defining one or more analyzer
+// helper objects, each tasked with computing some specific subset of
+// the properties we're interested in. The assumption is that
+// properties are independent, so each new analyzer that implements
+// this interface can operate entirely on its own. For a given analyzer
+// there will be a sequence of calls to nodeVisitPre and nodeVisitPost
+// as the nodes within a function are visited, then a followup call to
+// setResults so that the analyzer can transfer its results into the
+// final properties object.
+type propAnalyzer interface {
+ nodeVisitPre(n ir.Node)
+ nodeVisitPost(n ir.Node)
+ setResults(funcProps *FuncProps)
+}
+
+// fnInlHeur contains inline heuristics state information about a
+// specific Go function being analyzed/considered by the inliner. Note
+// that in addition to constructing a fnInlHeur object by analyzing a
+// specific *ir.Func, there is also code in the test harness
+// (funcprops_test.go) that builds up fnInlHeur's by reading in and
+// parsing a dump. This is the reason why we have file/fname/line
+// fields below instead of just an *ir.Func field.
+type fnInlHeur struct {
+ props *FuncProps
+ cstab CallSiteTab
+ fname string
+ file string
+ line uint
+}
+
+var fpmap = map[*ir.Func]fnInlHeur{}
+
+// AnalyzeFunc computes function properties for fn and its contained
+// closures, updating the global 'fpmap' table. It is assumed that
+// "CanInline" has been run on fn and on the closures that feed
+// directly into calls; other closures not directly called will also
+// be checked inlinability for inlinability here in case they are
+// returned as a result.
+func AnalyzeFunc(fn *ir.Func, canInline func(*ir.Func), budgetForFunc func(*ir.Func) int32, inlineMaxBudget int) {
+ if fpmap == nil {
+ // If fpmap is nil this indicates that the main inliner pass is
+ // complete and we're doing inlining of wrappers (no heuristics
+ // used here).
+ return
+ }
+ if fn.OClosure != nil {
+ // closures will be processed along with their outer enclosing func.
+ return
+ }
+ enableDebugTraceIfEnv()
+ if debugTrace&debugTraceFuncs != 0 {
+ fmt.Fprintf(os.Stderr, "=-= AnalyzeFunc(%v)\n", fn)
+ }
+ // Build up a list containing 'fn' and any closures it contains. Along
+ // the way, test to see whether each closure is inlinable in case
+ // we might be returning it.
+ funcs := []*ir.Func{fn}
+ ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+ if clo, ok := n.(*ir.ClosureExpr); ok {
+ funcs = append(funcs, clo.Func)
+ }
+ })
+
+ // Analyze the list of functions. We want to visit a given func
+ // only after the closures it contains have been processed, so
+ // iterate through the list in reverse order. Once a function has
+ // been analyzed, revisit the question of whether it should be
+ // inlinable; if it is over the default hairyness limit and it
+ // doesn't have any interesting properties, then we don't want
+ // the overhead of writing out its inline body.
+ nameFinder := newNameFinder(fn)
+ for i := len(funcs) - 1; i >= 0; i-- {
+ f := funcs[i]
+ if f.OClosure != nil && !f.InlinabilityChecked() {
+ canInline(f)
+ }
+ funcProps := analyzeFunc(f, inlineMaxBudget, nameFinder)
+ revisitInlinability(f, funcProps, budgetForFunc)
+ if f.Inl != nil {
+ f.Inl.Properties = funcProps.SerializeToString()
+ }
+ }
+ disableDebugTrace()
+}
+
+// TearDown is invoked at the end of the main inlining pass; doing
+// function analysis and call site scoring is unlikely to help a lot
+// after this point, so nil out fpmap and other globals to reclaim
+// storage.
+func TearDown() {
+ fpmap = nil
+ scoreCallsCache.tab = nil
+ scoreCallsCache.csl = nil
+}
+
+func analyzeFunc(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) *FuncProps {
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ return funcInlHeur.props
+ }
+ funcProps, fcstab := computeFuncProps(fn, inlineMaxBudget, nf)
+ file, line := fnFileLine(fn)
+ entry := fnInlHeur{
+ fname: fn.Sym().Name,
+ file: file,
+ line: line,
+ props: funcProps,
+ cstab: fcstab,
+ }
+ fn.SetNeverReturns(entry.props.Flags&FuncPropNeverReturns != 0)
+ fpmap[fn] = entry
+ if fn.Inl != nil && fn.Inl.Properties == "" {
+ fn.Inl.Properties = entry.props.SerializeToString()
+ }
+ return funcProps
+}
+
+// revisitInlinability revisits the question of whether to continue to
+// treat function 'fn' as an inline candidate based on the set of
+// properties we've computed for it. If (for example) it has an
+// initial size score of 150 and no interesting properties to speak
+// of, then there isn't really any point to moving ahead with it as an
+// inline candidate.
+func revisitInlinability(fn *ir.Func, funcProps *FuncProps, budgetForFunc func(*ir.Func) int32) {
+ if fn.Inl == nil {
+ return
+ }
+ maxAdj := int32(LargestNegativeScoreAdjustment(fn, funcProps))
+ budget := budgetForFunc(fn)
+ if fn.Inl.Cost+maxAdj > budget {
+ fn.Inl = nil
+ }
+}
+
+// computeFuncProps examines the Go function 'fn' and computes for it
+// a function "properties" object, to be used to drive inlining
+// heuristics. See comments on the FuncProps type for more info.
+func computeFuncProps(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*FuncProps, CallSiteTab) {
+ if debugTrace&debugTraceFuncs != 0 {
+ fmt.Fprintf(os.Stderr, "=-= starting analysis of func %v:\n%+v\n",
+ fn, fn)
+ }
+ funcProps := new(FuncProps)
+ ffa := makeFuncFlagsAnalyzer(fn)
+ analyzers := []propAnalyzer{ffa}
+ analyzers = addResultsAnalyzer(fn, analyzers, funcProps, inlineMaxBudget, nf)
+ analyzers = addParamsAnalyzer(fn, analyzers, funcProps, nf)
+ runAnalyzersOnFunction(fn, analyzers)
+ for _, a := range analyzers {
+ a.setResults(funcProps)
+ }
+ cstab := computeCallSiteTable(fn, fn.Body, nil, ffa.panicPathTable(), 0, nf)
+ return funcProps, cstab
+}
+
+func runAnalyzersOnFunction(fn *ir.Func, analyzers []propAnalyzer) {
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ for _, a := range analyzers {
+ a.nodeVisitPre(n)
+ }
+ ir.DoChildren(n, doNode)
+ for _, a := range analyzers {
+ a.nodeVisitPost(n)
+ }
+ return false
+ }
+ doNode(fn)
+}
+
+func propsForFunc(fn *ir.Func) *FuncProps {
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ return funcInlHeur.props
+ } else if fn.Inl != nil && fn.Inl.Properties != "" {
+ // FIXME: considering adding some sort of cache or table
+ // for deserialized properties of imported functions.
+ return DeserializeFromString(fn.Inl.Properties)
+ }
+ return nil
+}
+
+func fnFileLine(fn *ir.Func) (string, uint) {
+ p := base.Ctxt.InnermostPos(fn.Pos())
+ return filepath.Base(p.Filename()), p.Line()
+}
+
+func Enabled() bool {
+ return buildcfg.Experiment.NewInliner || UnitTesting()
+}
+
+func UnitTesting() bool {
+ return base.Debug.DumpInlFuncProps != "" ||
+ base.Debug.DumpInlCallSiteScores != 0
+}
+
+// DumpFuncProps computes and caches function properties for the func
+// 'fn', writing out a description of the previously computed set of
+// properties to the file given in 'dumpfile'. Used for the
+// "-d=dumpinlfuncprops=..." command line flag, intended for use
+// primarily in unit testing.
+func DumpFuncProps(fn *ir.Func, dumpfile string) {
+ if fn != nil {
+ if fn.OClosure != nil {
+ // closures will be processed along with their outer enclosing func.
+ return
+ }
+ captureFuncDumpEntry(fn)
+ ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+ if clo, ok := n.(*ir.ClosureExpr); ok {
+ captureFuncDumpEntry(clo.Func)
+ }
+ })
+ } else {
+ emitDumpToFile(dumpfile)
+ }
+}
+
+// emitDumpToFile writes out the buffer function property dump entries
+// to a file, for unit testing. Dump entries need to be sorted by
+// definition line, and due to generics we need to account for the
+// possibility that several ir.Func's will have the same def line.
+func emitDumpToFile(dumpfile string) {
+ mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
+ if dumpfile[0] == '+' {
+ dumpfile = dumpfile[1:]
+ mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE
+ }
+ if dumpfile[0] == '%' {
+ dumpfile = dumpfile[1:]
+ d, b := filepath.Dir(dumpfile), filepath.Base(dumpfile)
+ ptag := strings.ReplaceAll(types.LocalPkg.Path, "/", ":")
+ dumpfile = d + "/" + ptag + "." + b
+ }
+ outf, err := os.OpenFile(dumpfile, mode, 0644)
+ if err != nil {
+ base.Fatalf("opening function props dump file %q: %v\n", dumpfile, err)
+ }
+ defer outf.Close()
+ dumpFilePreamble(outf)
+
+ atline := map[uint]uint{}
+ sl := make([]fnInlHeur, 0, len(dumpBuffer))
+ for _, e := range dumpBuffer {
+ sl = append(sl, e)
+ atline[e.line] = atline[e.line] + 1
+ }
+ sl = sortFnInlHeurSlice(sl)
+
+ prevline := uint(0)
+ for _, entry := range sl {
+ idx := uint(0)
+ if prevline == entry.line {
+ idx++
+ }
+ prevline = entry.line
+ atl := atline[entry.line]
+ if err := dumpFnPreamble(outf, &entry, nil, idx, atl); err != nil {
+ base.Fatalf("function props dump: %v\n", err)
+ }
+ }
+ dumpBuffer = nil
+}
+
+// captureFuncDumpEntry grabs the function properties object for 'fn'
+// and enqueues it for later dumping. Used for the
+// "-d=dumpinlfuncprops=..." command line flag, intended for use
+// primarily in unit testing.
+func captureFuncDumpEntry(fn *ir.Func) {
+ // avoid capturing compiler-generated equality funcs.
+ if strings.HasPrefix(fn.Sym().Name, ".eq.") {
+ return
+ }
+ funcInlHeur, ok := fpmap[fn]
+ if !ok {
+ // Missing entry is expected for functions that are too large
+ // to inline. We still want to write out call site scores in
+ // this case however.
+ funcInlHeur = fnInlHeur{cstab: callSiteTab}
+ }
+ if dumpBuffer == nil {
+ dumpBuffer = make(map[*ir.Func]fnInlHeur)
+ }
+ if _, ok := dumpBuffer[fn]; ok {
+ return
+ }
+ if debugTrace&debugTraceFuncs != 0 {
+ fmt.Fprintf(os.Stderr, "=-= capturing dump for %v:\n", fn)
+ }
+ dumpBuffer[fn] = funcInlHeur
+}
+
+// dumpFilePreamble writes out a file-level preamble for a given
+// Go function as part of a function properties dump.
+func dumpFilePreamble(w io.Writer) {
+ fmt.Fprintf(w, "// DO NOT EDIT (use 'go test -v -update-expected' instead.)\n")
+ fmt.Fprintf(w, "// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt\n")
+ fmt.Fprintf(w, "// for more information on the format of this file.\n")
+ fmt.Fprintf(w, "// %s\n", preambleDelimiter)
+}
+
+// dumpFnPreamble writes out a function-level preamble for a given
+// Go function as part of a function properties dump. See the
+// README.txt file in testdata/props for more on the format of
+// this preamble.
+func dumpFnPreamble(w io.Writer, funcInlHeur *fnInlHeur, ecst encodedCallSiteTab, idx, atl uint) error {
+ fmt.Fprintf(w, "// %s %s %d %d %d\n",
+ funcInlHeur.file, funcInlHeur.fname, funcInlHeur.line, idx, atl)
+ // emit props as comments, followed by delimiter
+ fmt.Fprintf(w, "%s// %s\n", funcInlHeur.props.ToString("// "), comDelimiter)
+ data, err := json.Marshal(funcInlHeur.props)
+ if err != nil {
+ return fmt.Errorf("marshall error %v\n", err)
+ }
+ fmt.Fprintf(w, "// %s\n", string(data))
+ dumpCallSiteComments(w, funcInlHeur.cstab, ecst)
+ fmt.Fprintf(w, "// %s\n", fnDelimiter)
+ return nil
+}
+
+// sortFnInlHeurSlice sorts a slice of fnInlHeur based on
+// the starting line of the function definition, then by name.
+func sortFnInlHeurSlice(sl []fnInlHeur) []fnInlHeur {
+ sort.SliceStable(sl, func(i, j int) bool {
+ if sl[i].line != sl[j].line {
+ return sl[i].line < sl[j].line
+ }
+ return sl[i].fname < sl[j].fname
+ })
+ return sl
+}
+
+// delimiters written to various preambles to make parsing of
+// dumps easier.
+const preambleDelimiter = "<endfilepreamble>"
+const fnDelimiter = "<endfuncpreamble>"
+const comDelimiter = "<endpropsdump>"
+const csDelimiter = "<endcallsites>"
+
+// dumpBuffer stores up function properties dumps when
+// "-d=dumpinlfuncprops=..." is in effect.
+var dumpBuffer map[*ir.Func]fnInlHeur
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go
new file mode 100644
index 0000000..36ebe18
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "fmt"
+ "os"
+ "strings"
+)
+
+type callSiteAnalyzer struct {
+ fn *ir.Func
+ *nameFinder
+}
+
+type callSiteTableBuilder struct {
+ fn *ir.Func
+ *nameFinder
+ cstab CallSiteTab
+ ptab map[ir.Node]pstate
+ nstack []ir.Node
+ loopNest int
+ isInit bool
+}
+
+func makeCallSiteAnalyzer(fn *ir.Func) *callSiteAnalyzer {
+ return &callSiteAnalyzer{
+ fn: fn,
+ nameFinder: newNameFinder(fn),
+ }
+}
+
+func makeCallSiteTableBuilder(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) *callSiteTableBuilder {
+ isInit := fn.IsPackageInit() || strings.HasPrefix(fn.Sym().Name, "init.")
+ return &callSiteTableBuilder{
+ fn: fn,
+ cstab: cstab,
+ ptab: ptab,
+ isInit: isInit,
+ loopNest: loopNestingLevel,
+ nstack: []ir.Node{fn},
+ nameFinder: nf,
+ }
+}
+
+// computeCallSiteTable builds and returns a table of call sites for
+// the specified region in function fn. A region here corresponds to a
+// specific subtree within the AST for a function. The main intended
+// use cases are for 'region' to be either A) an entire function body,
+// or B) an inlined call expression.
+func computeCallSiteTable(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) CallSiteTab {
+ cstb := makeCallSiteTableBuilder(fn, cstab, ptab, loopNestingLevel, nf)
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ cstb.nodeVisitPre(n)
+ ir.DoChildren(n, doNode)
+ cstb.nodeVisitPost(n)
+ return false
+ }
+ for _, n := range region {
+ doNode(n)
+ }
+ return cstb.cstab
+}
+
+func (cstb *callSiteTableBuilder) flagsForNode(call *ir.CallExpr) CSPropBits {
+ var r CSPropBits
+
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= analyzing call at %s\n",
+ fmtFullPos(call.Pos()))
+ }
+
+ // Set a bit if this call is within a loop.
+ if cstb.loopNest > 0 {
+ r |= CallSiteInLoop
+ }
+
+ // Set a bit if the call is within an init function (either
+ // compiler-generated or user-written).
+ if cstb.isInit {
+ r |= CallSiteInInitFunc
+ }
+
+ // Decide whether to apply the panic path heuristic. Hack: don't
+ // apply this heuristic in the function "main.main" (mostly just
+ // to avoid annoying users).
+ if !isMainMain(cstb.fn) {
+ r = cstb.determinePanicPathBits(call, r)
+ }
+
+ return r
+}
+
+// determinePanicPathBits updates the CallSiteOnPanicPath bit within
+// "r" if we think this call is on an unconditional path to
+// panic/exit. Do this by walking back up the node stack to see if we
+// can find either A) an enclosing panic, or B) a statement node that
+// we've determined leads to a panic/exit.
+func (cstb *callSiteTableBuilder) determinePanicPathBits(call ir.Node, r CSPropBits) CSPropBits {
+ cstb.nstack = append(cstb.nstack, call)
+ defer func() {
+ cstb.nstack = cstb.nstack[:len(cstb.nstack)-1]
+ }()
+
+ for ri := range cstb.nstack[:len(cstb.nstack)-1] {
+ i := len(cstb.nstack) - ri - 1
+ n := cstb.nstack[i]
+ _, isCallExpr := n.(*ir.CallExpr)
+ _, isStmt := n.(ir.Stmt)
+ if isCallExpr {
+ isStmt = false
+ }
+
+ if debugTrace&debugTraceCalls != 0 {
+ ps, inps := cstb.ptab[n]
+ fmt.Fprintf(os.Stderr, "=-= callpar %d op=%s ps=%s inptab=%v stmt=%v\n", i, n.Op().String(), ps.String(), inps, isStmt)
+ }
+
+ if n.Op() == ir.OPANIC {
+ r |= CallSiteOnPanicPath
+ break
+ }
+ if v, ok := cstb.ptab[n]; ok {
+ if v == psCallsPanic {
+ r |= CallSiteOnPanicPath
+ break
+ }
+ if isStmt {
+ break
+ }
+ }
+ }
+ return r
+}
+
+// propsForArg returns property bits for a given call argument expression arg.
+func (cstb *callSiteTableBuilder) propsForArg(arg ir.Node) ActualExprPropBits {
+ if cval := cstb.constValue(arg); cval != nil {
+ return ActualExprConstant
+ }
+ if cstb.isConcreteConvIface(arg) {
+ return ActualExprIsConcreteConvIface
+ }
+ fname := cstb.funcName(arg)
+ if fname != nil {
+ if fn := fname.Func; fn != nil && typecheck.HaveInlineBody(fn) {
+ return ActualExprIsInlinableFunc
+ }
+ return ActualExprIsFunc
+ }
+ return 0
+}
+
+// argPropsForCall returns a slice of argument properties for the
+// expressions being passed to the callee in the specific call
+// expression; these will be stored in the CallSite object for a given
+// call and then consulted when scoring. If no arg has any interesting
+// properties we try to save some space and return a nil slice.
+func (cstb *callSiteTableBuilder) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBits {
+ rv := make([]ActualExprPropBits, len(ce.Args))
+ somethingInteresting := false
+ for idx := range ce.Args {
+ argProp := cstb.propsForArg(ce.Args[idx])
+ somethingInteresting = somethingInteresting || (argProp != 0)
+ rv[idx] = argProp
+ }
+ if !somethingInteresting {
+ return nil
+ }
+ return rv
+}
+
+func (cstb *callSiteTableBuilder) addCallSite(callee *ir.Func, call *ir.CallExpr) {
+ flags := cstb.flagsForNode(call)
+ argProps := cstb.argPropsForCall(call)
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= props %+v for call %v\n", argProps, call)
+ }
+ // FIXME: maybe bulk-allocate these?
+ cs := &CallSite{
+ Call: call,
+ Callee: callee,
+ Assign: cstb.containingAssignment(call),
+ ArgProps: argProps,
+ Flags: flags,
+ ID: uint(len(cstb.cstab)),
+ }
+ if _, ok := cstb.cstab[call]; ok {
+ fmt.Fprintf(os.Stderr, "*** cstab duplicate entry at: %s\n",
+ fmtFullPos(call.Pos()))
+ fmt.Fprintf(os.Stderr, "*** call: %+v\n", call)
+ panic("bad")
+ }
+ // Set initial score for callsite to the cost computed
+ // by CanInline; this score will be refined later based
+ // on heuristics.
+ cs.Score = int(callee.Inl.Cost)
+
+ if cstb.cstab == nil {
+ cstb.cstab = make(CallSiteTab)
+ }
+ cstb.cstab[call] = cs
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= added callsite: caller=%v callee=%v n=%s\n",
+ cstb.fn, callee, fmtFullPos(call.Pos()))
+ }
+}
+
+func (cstb *callSiteTableBuilder) nodeVisitPre(n ir.Node) {
+ switch n.Op() {
+ case ir.ORANGE, ir.OFOR:
+ if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) {
+ cstb.loopNest++
+ }
+ case ir.OCALLFUNC:
+ ce := n.(*ir.CallExpr)
+ callee := pgo.DirectCallee(ce.Fun)
+ if callee != nil && callee.Inl != nil {
+ cstb.addCallSite(callee, ce)
+ }
+ }
+ cstb.nstack = append(cstb.nstack, n)
+}
+
+func (cstb *callSiteTableBuilder) nodeVisitPost(n ir.Node) {
+ cstb.nstack = cstb.nstack[:len(cstb.nstack)-1]
+ switch n.Op() {
+ case ir.ORANGE, ir.OFOR:
+ if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) {
+ cstb.loopNest--
+ }
+ }
+}
+
+func loopBody(n ir.Node) ir.Nodes {
+ if forst, ok := n.(*ir.ForStmt); ok {
+ return forst.Body
+ }
+ if rst, ok := n.(*ir.RangeStmt); ok {
+ return rst.Body
+ }
+ return nil
+}
+
+// hasTopLevelLoopBodyReturnOrBreak examines the body of a "for" or
+// "range" loop to try to verify that it is a real loop, as opposed to
+// a construct that is syntactically loopy but doesn't actually iterate
+// multiple times, like:
+//
+// for {
+// blah()
+// return 1
+// }
+//
+// [Remark: the pattern above crops up quite a bit in the source code
+// for the compiler itself, e.g. the auto-generated rewrite code]
+//
+// Note that we don't look for GOTO statements here, so it's possible
+// we'll get the wrong result for a loop with complicated control
+// jumps via gotos.
+func hasTopLevelLoopBodyReturnOrBreak(loopBody ir.Nodes) bool {
+ for _, n := range loopBody {
+ if n.Op() == ir.ORETURN || n.Op() == ir.OBREAK {
+ return true
+ }
+ }
+ return false
+}
+
+// containingAssignment returns the top-level assignment statement
+// for a statement level function call "n". Examples:
+//
+// x := foo()
+// x, y := bar(z, baz())
+// if blah() { ...
+//
+// Here the top-level assignment statement for the foo() call is the
+// statement assigning to "x"; the top-level assignment for "bar()"
+// call is the assignment to x,y. For the baz() and blah() calls,
+// there is no top level assignment statement.
+//
+// The unstated goal here is that we want to use the containing
+// assignment to establish a connection between a given call and the
+// variables to which its results/returns are being assigned.
+//
+// Note that for the "bar" command above, the front end sometimes
+// decomposes this into two assignments, the first one assigning the
+// call to a pair of auto-temps, then the second one assigning the
+// auto-temps to the user-visible vars. This helper will return the
+// second (outer) of these two.
+func (cstb *callSiteTableBuilder) containingAssignment(n ir.Node) ir.Node {
+ parent := cstb.nstack[len(cstb.nstack)-1]
+
+ // assignsOnlyAutoTemps returns TRUE of the specified OAS2FUNC
+ // node assigns only auto-temps.
+ assignsOnlyAutoTemps := func(x ir.Node) bool {
+ alst := x.(*ir.AssignListStmt)
+ oa2init := alst.Init()
+ if len(oa2init) == 0 {
+ return false
+ }
+ for _, v := range oa2init {
+ d := v.(*ir.Decl)
+ if !ir.IsAutoTmp(d.X) {
+ return false
+ }
+ }
+ return true
+ }
+
+ // Simple case: x := foo()
+ if parent.Op() == ir.OAS {
+ return parent
+ }
+
+ // Multi-return case: x, y := bar()
+ if parent.Op() == ir.OAS2FUNC {
+ // Hack city: if the result vars are auto-temps, try looking
+ // for an outer assignment in the tree. The code shape we're
+ // looking for here is:
+ //
+ // OAS1({x,y},OCONVNOP(OAS2FUNC({auto1,auto2},OCALLFUNC(bar))))
+ //
+ if assignsOnlyAutoTemps(parent) {
+ par2 := cstb.nstack[len(cstb.nstack)-2]
+ if par2.Op() == ir.OAS2 {
+ return par2
+ }
+ if par2.Op() == ir.OCONVNOP {
+ par3 := cstb.nstack[len(cstb.nstack)-3]
+ if par3.Op() == ir.OAS2 {
+ return par3
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// UpdateCallsiteTable handles updating of callerfn's call site table
+// after an inlined has been carried out, e.g. the call at 'n' as been
+// turned into the inlined call expression 'ic' within function
+// callerfn. The chief thing of interest here is to make sure that any
+// call nodes within 'ic' are added to the call site table for
+// 'callerfn' and scored appropriately.
+func UpdateCallsiteTable(callerfn *ir.Func, n *ir.CallExpr, ic *ir.InlinedCallExpr) {
+ enableDebugTraceIfEnv()
+ defer disableDebugTrace()
+
+ funcInlHeur, ok := fpmap[callerfn]
+ if !ok {
+ // This can happen for compiler-generated wrappers.
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= early exit, no entry for caller fn %v\n", callerfn)
+ }
+ return
+ }
+
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= UpdateCallsiteTable(caller=%v, cs=%s)\n",
+ callerfn, fmtFullPos(n.Pos()))
+ }
+
+ // Mark the call in question as inlined.
+ oldcs, ok := funcInlHeur.cstab[n]
+ if !ok {
+ // This can happen for compiler-generated wrappers.
+ return
+ }
+ oldcs.aux |= csAuxInlined
+
+ if debugTrace&debugTraceCalls != 0 {
+ fmt.Fprintf(os.Stderr, "=-= marked as inlined: callee=%v %s\n",
+ oldcs.Callee, EncodeCallSiteKey(oldcs))
+ }
+
+ // Walk the inlined call region to collect new callsites.
+ var icp pstate
+ if oldcs.Flags&CallSiteOnPanicPath != 0 {
+ icp = psCallsPanic
+ }
+ var loopNestLevel int
+ if oldcs.Flags&CallSiteInLoop != 0 {
+ loopNestLevel = 1
+ }
+ ptab := map[ir.Node]pstate{ic: icp}
+ nf := newNameFinder(nil)
+ icstab := computeCallSiteTable(callerfn, ic.Body, nil, ptab, loopNestLevel, nf)
+
+ // Record parent callsite. This is primarily for debug output.
+ for _, cs := range icstab {
+ cs.parent = oldcs
+ }
+
+ // Score the calls in the inlined body. Note the setting of
+ // "doCallResults" to false here: at the moment there isn't any
+ // easy way to localize or region-ize the work done by
+ // "rescoreBasedOnCallResultUses", which currently does a walk
+ // over the entire function to look for uses of a given set of
+ // results. Similarly we're passing nil to makeCallSiteAnalyzer,
+ // so as to run name finding without the use of static value &
+ // friends.
+ csa := makeCallSiteAnalyzer(nil)
+ const doCallResults = false
+ csa.scoreCallsRegion(callerfn, ic.Body, icstab, doCallResults, ic)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
new file mode 100644
index 0000000..b7403a4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go
@@ -0,0 +1,356 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "fmt"
+ "os"
+)
+
+// funcFlagsAnalyzer computes the "Flags" value for the FuncProps
+// object we're computing. The main item of interest here is "nstate",
+// which stores the disposition of a given ir Node with respect to the
+// flags/properties we're trying to compute.
+type funcFlagsAnalyzer struct {
+ fn *ir.Func
+ nstate map[ir.Node]pstate
+ noInfo bool // set if we see something inscrutable/un-analyzable
+}
+
+// pstate keeps track of the disposition of a given node and its
+// children with respect to panic/exit calls.
+type pstate int
+
+const (
+ psNoInfo pstate = iota // nothing interesting about this node
+ psCallsPanic // node causes call to panic or os.Exit
+ psMayReturn // executing node may trigger a "return" stmt
+ psTop // dataflow lattice "top" element
+)
+
+func makeFuncFlagsAnalyzer(fn *ir.Func) *funcFlagsAnalyzer {
+ return &funcFlagsAnalyzer{
+ fn: fn,
+ nstate: make(map[ir.Node]pstate),
+ }
+}
+
+// setResults transfers func flag results to 'funcProps'.
+func (ffa *funcFlagsAnalyzer) setResults(funcProps *FuncProps) {
+ var rv FuncPropBits
+ if !ffa.noInfo && ffa.stateForList(ffa.fn.Body) == psCallsPanic {
+ rv = FuncPropNeverReturns
+ }
+ // This is slightly hacky and not at all required, but include a
+ // special case for main.main, which often ends in a call to
+ // os.Exit. People who write code like this (very common I
+ // imagine)
+ //
+ // func main() {
+ // rc = perform()
+ // ...
+ // foo()
+ // os.Exit(rc)
+ // }
+ //
+ // will be constantly surprised when foo() is inlined in many
+ // other spots in the program but not in main().
+ if isMainMain(ffa.fn) {
+ rv &^= FuncPropNeverReturns
+ }
+ funcProps.Flags = rv
+}
+
+func (ffa *funcFlagsAnalyzer) getState(n ir.Node) pstate {
+ return ffa.nstate[n]
+}
+
+func (ffa *funcFlagsAnalyzer) setState(n ir.Node, st pstate) {
+ if st != psNoInfo {
+ ffa.nstate[n] = st
+ }
+}
+
+func (ffa *funcFlagsAnalyzer) updateState(n ir.Node, st pstate) {
+ if st == psNoInfo {
+ delete(ffa.nstate, n)
+ } else {
+ ffa.nstate[n] = st
+ }
+}
+
+func (ffa *funcFlagsAnalyzer) panicPathTable() map[ir.Node]pstate {
+ return ffa.nstate
+}
+
+// blockCombine merges together states as part of a linear sequence of
+// statements, where 'pred' and 'succ' are analysis results for a pair
+// of consecutive statements. Examples:
+//
+// case 1: case 2:
+// panic("foo") if q { return x } <-pred
+// return x panic("boo") <-succ
+//
+// In case 1, since the pred state is "always panic" it doesn't matter
+// what the succ state is, hence the state for the combination of the
+// two blocks is "always panics". In case 2, because there is a path
+// to return that avoids the panic in succ, the state for the
+// combination of the two statements is "may return".
+func blockCombine(pred, succ pstate) pstate {
+ switch succ {
+ case psTop:
+ return pred
+ case psMayReturn:
+ if pred == psCallsPanic {
+ return psCallsPanic
+ }
+ return psMayReturn
+ case psNoInfo:
+ return pred
+ case psCallsPanic:
+ if pred == psMayReturn {
+ return psMayReturn
+ }
+ return psCallsPanic
+ }
+ panic("should never execute")
+}
+
+// branchCombine combines two states at a control flow branch point where
+// either p1 or p2 executes (as in an "if" statement).
+func branchCombine(p1, p2 pstate) pstate {
+ if p1 == psCallsPanic && p2 == psCallsPanic {
+ return psCallsPanic
+ }
+ if p1 == psMayReturn || p2 == psMayReturn {
+ return psMayReturn
+ }
+ return psNoInfo
+}
+
+// stateForList walks through a list of statements and computes the
+// state/diposition for the entire list as a whole, as well
+// as updating disposition of intermediate nodes.
+func (ffa *funcFlagsAnalyzer) stateForList(list ir.Nodes) pstate {
+ st := psTop
+ // Walk the list backwards so that we can update the state for
+ // earlier list elements based on what we find out about their
+ // successors. Example:
+ //
+ // if ... {
+ // L10: foo()
+ // L11: <stmt>
+ // L12: panic(...)
+ // }
+ //
+ // After combining the dispositions for line 11 and 12, we want to
+ // update the state for the call at line 10 based on that combined
+ // disposition (if L11 has no path to "return", then the call at
+ // line 10 will be on a panic path).
+ for i := len(list) - 1; i >= 0; i-- {
+ n := list[i]
+ psi := ffa.getState(n)
+ if debugTrace&debugTraceFuncFlags != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: stateForList n=%s ps=%s\n",
+ ir.Line(n), n.Op().String(), psi.String())
+ }
+ st = blockCombine(psi, st)
+ ffa.updateState(n, st)
+ }
+ if st == psTop {
+ st = psNoInfo
+ }
+ return st
+}
+
+func isMainMain(fn *ir.Func) bool {
+ s := fn.Sym()
+ return (s.Pkg.Name == "main" && s.Name == "main")
+}
+
+func isWellKnownFunc(s *types.Sym, pkg, name string) bool {
+ return s.Pkg.Path == pkg && s.Name == name
+}
+
+// isExitCall reports TRUE if the node itself is an unconditional
+// call to os.Exit(), a panic, or a function that does likewise.
+func isExitCall(n ir.Node) bool {
+ if n.Op() != ir.OCALLFUNC {
+ return false
+ }
+ cx := n.(*ir.CallExpr)
+ name := ir.StaticCalleeName(cx.Fun)
+ if name == nil {
+ return false
+ }
+ s := name.Sym()
+ if isWellKnownFunc(s, "os", "Exit") ||
+ isWellKnownFunc(s, "runtime", "throw") {
+ return true
+ }
+ if funcProps := propsForFunc(name.Func); funcProps != nil {
+ if funcProps.Flags&FuncPropNeverReturns != 0 {
+ return true
+ }
+ }
+ return name.Func.NeverReturns()
+}
+
+// pessimize is called to record the fact that we saw something in the
+// function that renders it entirely impossible to analyze.
+func (ffa *funcFlagsAnalyzer) pessimize() {
+ ffa.noInfo = true
+}
+
+// shouldVisit reports TRUE if this is an interesting node from the
+// perspective of computing function flags. NB: due to the fact that
+// ir.CallExpr implements the Stmt interface, we wind up visiting
+// a lot of nodes that we don't really need to, but these can
+// simply be screened out as part of the visit.
+func shouldVisit(n ir.Node) bool {
+ _, isStmt := n.(ir.Stmt)
+ return n.Op() != ir.ODCL &&
+ (isStmt || n.Op() == ir.OCALLFUNC || n.Op() == ir.OPANIC)
+}
+
+// nodeVisitPost helps implement the propAnalyzer interface; when
+// called on a given node, it decides the disposition of that node
+// based on the state(s) of the node's children.
+func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) {
+ if debugTrace&debugTraceFuncFlags != 0 {
+ fmt.Fprintf(os.Stderr, "=+= nodevis %v %s should=%v\n",
+ ir.Line(n), n.Op().String(), shouldVisit(n))
+ }
+ if !shouldVisit(n) {
+ return
+ }
+ var st pstate
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ if isExitCall(n) {
+ st = psCallsPanic
+ }
+ case ir.OPANIC:
+ st = psCallsPanic
+ case ir.ORETURN:
+ st = psMayReturn
+ case ir.OBREAK, ir.OCONTINUE:
+ // FIXME: this handling of break/continue is sub-optimal; we
+ // have them as "mayReturn" in order to help with this case:
+ //
+ // for {
+ // if q() { break }
+ // panic(...)
+ // }
+ //
+ // where the effect of the 'break' is to cause the subsequent
+ // panic to be skipped. One possible improvement would be to
+ // track whether the currently enclosing loop is a "for {" or
+ // a for/range with condition, then use mayReturn only for the
+ // former. Note also that "break X" or "continue X" is treated
+ // the same as "goto", since we don't have a good way to track
+ // the target of the branch.
+ st = psMayReturn
+ n := n.(*ir.BranchStmt)
+ if n.Label != nil {
+ ffa.pessimize()
+ }
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ st = ffa.stateForList(n.List)
+ case ir.OCASE:
+ if ccst, ok := n.(*ir.CaseClause); ok {
+ st = ffa.stateForList(ccst.Body)
+ } else if ccst, ok := n.(*ir.CommClause); ok {
+ st = ffa.stateForList(ccst.Body)
+ } else {
+ panic("unexpected")
+ }
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ st = branchCombine(ffa.stateForList(n.Body), ffa.stateForList(n.Else))
+ case ir.OFOR:
+ // Treat for { XXX } like a block.
+ // Treat for <cond> { XXX } like an if statement with no else.
+ n := n.(*ir.ForStmt)
+ bst := ffa.stateForList(n.Body)
+ if n.Cond == nil {
+ st = bst
+ } else {
+ if bst == psMayReturn {
+ st = psMayReturn
+ }
+ }
+ case ir.ORANGE:
+ // Treat for range { XXX } like an if statement with no else.
+ n := n.(*ir.RangeStmt)
+ if ffa.stateForList(n.Body) == psMayReturn {
+ st = psMayReturn
+ }
+ case ir.OGOTO:
+ // punt if we see even one goto. if we built a control
+ // flow graph we could do more, but this is just a tree walk.
+ ffa.pessimize()
+ case ir.OSELECT:
+ // process selects for "may return" but not "always panics",
+ // the latter case seems very improbable.
+ n := n.(*ir.SelectStmt)
+ if len(n.Cases) != 0 {
+ st = psTop
+ for _, c := range n.Cases {
+ st = branchCombine(ffa.stateForList(c.Body), st)
+ }
+ }
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if len(n.Cases) != 0 {
+ st = psTop
+ for _, c := range n.Cases {
+ st = branchCombine(ffa.stateForList(c.Body), st)
+ }
+ }
+
+ st, fall := psTop, psNoInfo
+ for i := len(n.Cases) - 1; i >= 0; i-- {
+ cas := n.Cases[i]
+ cst := ffa.stateForList(cas.Body)
+ endsInFallthrough := false
+ if len(cas.Body) != 0 {
+ endsInFallthrough = cas.Body[0].Op() == ir.OFALL
+ }
+ if endsInFallthrough {
+ cst = blockCombine(cst, fall)
+ }
+ st = branchCombine(st, cst)
+ fall = cst
+ }
+ case ir.OFALL:
+ // Not important.
+ case ir.ODCLFUNC, ir.ORECOVER, ir.OAS, ir.OAS2, ir.OAS2FUNC, ir.OASOP,
+ ir.OPRINTLN, ir.OPRINT, ir.OLABEL, ir.OCALLINTER, ir.ODEFER,
+ ir.OSEND, ir.ORECV, ir.OSELRECV2, ir.OGO, ir.OAPPEND, ir.OAS2DOTTYPE,
+ ir.OAS2MAPR, ir.OGETG, ir.ODELETE, ir.OINLMARK, ir.OAS2RECV,
+ ir.OMIN, ir.OMAX, ir.OMAKE, ir.ORECOVERFP, ir.OGETCALLERSP:
+ // these should all be benign/uninteresting
+ case ir.OTAILCALL, ir.OJUMPTABLE, ir.OTYPESW:
+ // don't expect to see these at all.
+ base.Fatalf("unexpected op %s in func %s",
+ n.Op().String(), ir.FuncName(ffa.fn))
+ default:
+ base.Fatalf("%v: unhandled op %s in func %v",
+ ir.Line(n), n.Op().String(), ir.FuncName(ffa.fn))
+ }
+ if debugTrace&debugTraceFuncFlags != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: visit n=%s returns %s\n",
+ ir.Line(n), n.Op().String(), st.String())
+ }
+ ffa.setState(n, st)
+}
+
+func (ffa *funcFlagsAnalyzer) nodeVisitPre(n ir.Node) {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go
new file mode 100644
index 0000000..d85d73b
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go
@@ -0,0 +1,355 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "os"
+)
+
+// paramsAnalyzer holds state information for the phase that computes
+// flags for a Go functions parameters, for use in inline heuristics.
+// Note that the params slice below includes entries for blanks.
+type paramsAnalyzer struct {
+ fname string
+ values []ParamPropBits
+ params []*ir.Name
+ top []bool
+ *condLevelTracker
+ *nameFinder
+}
+
+// getParams returns an *ir.Name slice containing all params for the
+// function (plus rcvr as well if applicable).
+func getParams(fn *ir.Func) []*ir.Name {
+ sig := fn.Type()
+ numParams := sig.NumRecvs() + sig.NumParams()
+ return fn.Dcl[:numParams]
+}
+
+// addParamsAnalyzer creates a new paramsAnalyzer helper object for
+// the function fn, appends it to the analyzers list, and returns the
+// new list. If the function in question doesn't have any interesting
+// parameters then the analyzer list is returned unchanged, and the
+// params flags in "fp" are updated accordingly.
+func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, nf *nameFinder) []propAnalyzer {
+ pa, props := makeParamsAnalyzer(fn, nf)
+ if pa != nil {
+ analyzers = append(analyzers, pa)
+ } else {
+ fp.ParamFlags = props
+ }
+ return analyzers
+}
+
+// makeParamAnalyzer creates a new helper object to analyze parameters
+// of function fn. If the function doesn't have any interesting
+// params, a nil helper is returned along with a set of default param
+// flags for the func.
+func makeParamsAnalyzer(fn *ir.Func, nf *nameFinder) (*paramsAnalyzer, []ParamPropBits) {
+ params := getParams(fn) // includes receiver if applicable
+ if len(params) == 0 {
+ return nil, nil
+ }
+ vals := make([]ParamPropBits, len(params))
+ if fn.Inl == nil {
+ return nil, vals
+ }
+ top := make([]bool, len(params))
+ interestingToAnalyze := false
+ for i, pn := range params {
+ if pn == nil {
+ continue
+ }
+ pt := pn.Type()
+ if !pt.IsScalar() && !pt.HasNil() {
+ // existing properties not applicable here (for things
+ // like structs, arrays, slices, etc).
+ continue
+ }
+ // If param is reassigned, skip it.
+ if ir.Reassigned(pn) {
+ continue
+ }
+ top[i] = true
+ interestingToAnalyze = true
+ }
+ if !interestingToAnalyze {
+ return nil, vals
+ }
+
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= param analysis of func %v:\n",
+ fn.Sym().Name)
+ for i := range vals {
+ n := "_"
+ if params[i] != nil {
+ n = params[i].Sym().String()
+ }
+ fmt.Fprintf(os.Stderr, "=-= %d: %q %s top=%v\n",
+ i, n, vals[i].String(), top[i])
+ }
+ }
+ pa := &paramsAnalyzer{
+ fname: fn.Sym().Name,
+ values: vals,
+ params: params,
+ top: top,
+ condLevelTracker: new(condLevelTracker),
+ nameFinder: nf,
+ }
+ return pa, nil
+}
+
+func (pa *paramsAnalyzer) setResults(funcProps *FuncProps) {
+ funcProps.ParamFlags = pa.values
+}
+
+func (pa *paramsAnalyzer) findParamIdx(n *ir.Name) int {
+ if n == nil {
+ panic("bad")
+ }
+ for i := range pa.params {
+ if pa.params[i] == n {
+ return i
+ }
+ }
+ return -1
+}
+
+type testfType func(x ir.Node, param *ir.Name, idx int) (bool, bool)
+
+// paramsAnalyzer invokes function 'testf' on the specified expression
+// 'x' for each parameter, and if the result is TRUE, or's 'flag' into
+// the flags for that param.
+func (pa *paramsAnalyzer) checkParams(x ir.Node, flag ParamPropBits, mayflag ParamPropBits, testf testfType) {
+ for idx, p := range pa.params {
+ if !pa.top[idx] && pa.values[idx] == ParamNoInfo {
+ continue
+ }
+ result, may := testf(x, p, idx)
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= test expr %v param %s result=%v flag=%s\n", x, p.Sym().Name, result, flag.String())
+ }
+ if result {
+ v := flag
+ if pa.condLevel != 0 || may {
+ v = mayflag
+ }
+ pa.values[idx] |= v
+ pa.top[idx] = false
+ }
+ }
+}
+
+// foldCheckParams checks expression 'x' (an 'if' condition or
+// 'switch' stmt expr) to see if the expr would fold away if a
+// specific parameter had a constant value.
+func (pa *paramsAnalyzer) foldCheckParams(x ir.Node) {
+ pa.checkParams(x, ParamFeedsIfOrSwitch, ParamMayFeedIfOrSwitch,
+ func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+ return ShouldFoldIfNameConstant(x, []*ir.Name{p}), false
+ })
+}
+
+// callCheckParams examines the target of call expression 'ce' to see
+// if it is making a call to the value passed in for some parameter.
+func (pa *paramsAnalyzer) callCheckParams(ce *ir.CallExpr) {
+ switch ce.Op() {
+ case ir.OCALLINTER:
+ if ce.Op() != ir.OCALLINTER {
+ return
+ }
+ sel := ce.Fun.(*ir.SelectorExpr)
+ r := pa.staticValue(sel.X)
+ if r.Op() != ir.ONAME {
+ return
+ }
+ name := r.(*ir.Name)
+ if name.Class != ir.PPARAM {
+ return
+ }
+ pa.checkParams(r, ParamFeedsInterfaceMethodCall,
+ ParamMayFeedInterfaceMethodCall,
+ func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+ name := x.(*ir.Name)
+ return name == p, false
+ })
+ case ir.OCALLFUNC:
+ if ce.Fun.Op() != ir.ONAME {
+ return
+ }
+ called := ir.StaticValue(ce.Fun)
+ if called.Op() != ir.ONAME {
+ return
+ }
+ name := called.(*ir.Name)
+ if name.Class == ir.PPARAM {
+ pa.checkParams(called, ParamFeedsIndirectCall,
+ ParamMayFeedIndirectCall,
+ func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+ name := x.(*ir.Name)
+ return name == p, false
+ })
+ } else {
+ cname := pa.funcName(called)
+ if cname != nil {
+ pa.deriveFlagsFromCallee(ce, cname.Func)
+ }
+ }
+ }
+}
+
+// deriveFlagsFromCallee tries to derive flags for the current
+// function based on a call this function makes to some other
+// function. Example:
+//
+// /* Simple */ /* Derived from callee */
+// func foo(f func(int)) { func foo(f func(int)) {
+// f(2) bar(32, f)
+// } }
+// func bar(x int, f func()) {
+// f(x)
+// }
+//
+// Here we can set the "param feeds indirect call" flag for
+// foo's param 'f' since we know that bar has that flag set for
+// its second param, and we're passing that param a function.
+func (pa *paramsAnalyzer) deriveFlagsFromCallee(ce *ir.CallExpr, callee *ir.Func) {
+ calleeProps := propsForFunc(callee)
+ if calleeProps == nil {
+ return
+ }
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= callee props for %v:\n%s",
+ callee.Sym().Name, calleeProps.String())
+ }
+
+ must := []ParamPropBits{ParamFeedsInterfaceMethodCall, ParamFeedsIndirectCall, ParamFeedsIfOrSwitch}
+ may := []ParamPropBits{ParamMayFeedInterfaceMethodCall, ParamMayFeedIndirectCall, ParamMayFeedIfOrSwitch}
+
+ for pidx, arg := range ce.Args {
+ // Does the callee param have any interesting properties?
+ // If not we can skip this one.
+ pflag := calleeProps.ParamFlags[pidx]
+ if pflag == 0 {
+ continue
+ }
+ // See if one of the caller's parameters is flowing unmodified
+ // into this actual expression.
+ r := pa.staticValue(arg)
+ if r.Op() != ir.ONAME {
+ return
+ }
+ name := r.(*ir.Name)
+ if name.Class != ir.PPARAM {
+ return
+ }
+ callerParamIdx := pa.findParamIdx(name)
+ // note that callerParamIdx may return -1 in the case where
+ // the param belongs not to the current closure func we're
+ // analyzing but to an outer enclosing func.
+ if callerParamIdx == -1 {
+ return
+ }
+ if pa.params[callerParamIdx] == nil {
+ panic("something went wrong")
+ }
+ if !pa.top[callerParamIdx] &&
+ pa.values[callerParamIdx] == ParamNoInfo {
+ continue
+ }
+ if debugTrace&debugTraceParams != 0 {
+ fmt.Fprintf(os.Stderr, "=-= pflag for arg %d is %s\n",
+ pidx, pflag.String())
+ }
+ for i := range must {
+ mayv := may[i]
+ mustv := must[i]
+ if pflag&mustv != 0 && pa.condLevel == 0 {
+ pa.values[callerParamIdx] |= mustv
+ } else if pflag&(mustv|mayv) != 0 {
+ pa.values[callerParamIdx] |= mayv
+ }
+ }
+ pa.top[callerParamIdx] = false
+ }
+}
+
+func (pa *paramsAnalyzer) nodeVisitPost(n ir.Node) {
+ if len(pa.values) == 0 {
+ return
+ }
+ pa.condLevelTracker.post(n)
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ ce := n.(*ir.CallExpr)
+ pa.callCheckParams(ce)
+ case ir.OCALLINTER:
+ ce := n.(*ir.CallExpr)
+ pa.callCheckParams(ce)
+ case ir.OIF:
+ ifst := n.(*ir.IfStmt)
+ pa.foldCheckParams(ifst.Cond)
+ case ir.OSWITCH:
+ swst := n.(*ir.SwitchStmt)
+ if swst.Tag != nil {
+ pa.foldCheckParams(swst.Tag)
+ }
+ }
+}
+
+func (pa *paramsAnalyzer) nodeVisitPre(n ir.Node) {
+ if len(pa.values) == 0 {
+ return
+ }
+ pa.condLevelTracker.pre(n)
+}
+
+// condLevelTracker helps keeps track very roughly of "level of conditional
+// nesting", e.g. how many "if" statements you have to go through to
+// get to the point where a given stmt executes. Example:
+//
+// cond nesting level
+// func foo() {
+// G = 1 0
+// if x < 10 { 0
+// if y < 10 { 1
+// G = 0 2
+// }
+// }
+// }
+//
+// The intent here is to provide some sort of very abstract relative
+// hotness metric, e.g. "G = 1" above is expected to be executed more
+// often than "G = 0" (in the aggregate, across large numbers of
+// functions).
+type condLevelTracker struct {
+ condLevel int
+}
+
+func (c *condLevelTracker) pre(n ir.Node) {
+ // Increment level of "conditional testing" if we see
+ // an "if" or switch statement, and decrement if in
+ // a loop.
+ switch n.Op() {
+ case ir.OIF, ir.OSWITCH:
+ c.condLevel++
+ case ir.OFOR, ir.ORANGE:
+ c.condLevel--
+ }
+}
+
+func (c *condLevelTracker) post(n ir.Node) {
+ switch n.Op() {
+ case ir.OFOR, ir.ORANGE:
+ c.condLevel++
+ case ir.OIF:
+ c.condLevel--
+ case ir.OSWITCH:
+ c.condLevel--
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go
new file mode 100644
index 0000000..2aaa68d
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go
@@ -0,0 +1,277 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "os"
+)
+
+// resultsAnalyzer stores state information for the process of
+// computing flags/properties for the return values of a specific Go
+// function, as part of inline heuristics synthesis.
+type resultsAnalyzer struct {
+ fname string
+ props []ResultPropBits
+ values []resultVal
+ inlineMaxBudget int
+ *nameFinder
+}
+
+// resultVal captures information about a specific result returned from
+// the function we're analyzing; we are interested in cases where
+// the func always returns the same constant, or always returns
+// the same function, etc. This container stores info on a the specific
+// scenarios we're looking for.
+type resultVal struct {
+ cval constant.Value
+ fn *ir.Name
+ fnClo bool
+ top bool
+ derived bool // see deriveReturnFlagsFromCallee below
+}
+
+// addResultsAnalyzer creates a new resultsAnalyzer helper object for
+// the function fn, appends it to the analyzers list, and returns the
+// new list. If the function in question doesn't have any returns (or
+// any interesting returns) then the analyzer list is left as is, and
+// the result flags in "fp" are updated accordingly.
+func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, inlineMaxBudget int, nf *nameFinder) []propAnalyzer {
+ ra, props := makeResultsAnalyzer(fn, inlineMaxBudget, nf)
+ if ra != nil {
+ analyzers = append(analyzers, ra)
+ } else {
+ fp.ResultFlags = props
+ }
+ return analyzers
+}
+
+// makeResultsAnalyzer creates a new helper object to analyze results
+// in function fn. If the function doesn't have any interesting
+// results, a nil helper is returned along with a set of default
+// result flags for the func.
+func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*resultsAnalyzer, []ResultPropBits) {
+ results := fn.Type().Results()
+ if len(results) == 0 {
+ return nil, nil
+ }
+ props := make([]ResultPropBits, len(results))
+ if fn.Inl == nil {
+ return nil, props
+ }
+ vals := make([]resultVal, len(results))
+ interestingToAnalyze := false
+ for i := range results {
+ rt := results[i].Type
+ if !rt.IsScalar() && !rt.HasNil() {
+ // existing properties not applicable here (for things
+ // like structs, arrays, slices, etc).
+ continue
+ }
+ // set the "top" flag (as in "top element of data flow lattice")
+ // meaning "we have no info yet, but we might later on".
+ vals[i].top = true
+ interestingToAnalyze = true
+ }
+ if !interestingToAnalyze {
+ return nil, props
+ }
+ ra := &resultsAnalyzer{
+ props: props,
+ values: vals,
+ inlineMaxBudget: inlineMaxBudget,
+ nameFinder: nf,
+ }
+ return ra, nil
+}
+
+// setResults transfers the calculated result properties for this
+// function to 'funcProps'.
+func (ra *resultsAnalyzer) setResults(funcProps *FuncProps) {
+ // Promote ResultAlwaysSameFunc to ResultAlwaysSameInlinableFunc
+ for i := range ra.values {
+ if ra.props[i] == ResultAlwaysSameFunc && !ra.values[i].derived {
+ f := ra.values[i].fn.Func
+ // HACK: in order to allow for call site score
+ // adjustments, we used a relaxed inline budget in
+ // determining inlinability. For the check below, however,
+ // we want to know is whether the func in question is
+ // likely to be inlined, as opposed to whether it might
+ // possibly be inlined if all the right score adjustments
+ // happened, so do a simple check based on the cost.
+ if f.Inl != nil && f.Inl.Cost <= int32(ra.inlineMaxBudget) {
+ ra.props[i] = ResultAlwaysSameInlinableFunc
+ }
+ }
+ }
+ funcProps.ResultFlags = ra.props
+}
+
+func (ra *resultsAnalyzer) pessimize() {
+ for i := range ra.props {
+ ra.props[i] = ResultNoInfo
+ }
+}
+
+func (ra *resultsAnalyzer) nodeVisitPre(n ir.Node) {
+}
+
+func (ra *resultsAnalyzer) nodeVisitPost(n ir.Node) {
+ if len(ra.values) == 0 {
+ return
+ }
+ if n.Op() != ir.ORETURN {
+ return
+ }
+ if debugTrace&debugTraceResults != 0 {
+ fmt.Fprintf(os.Stderr, "=+= returns nodevis %v %s\n",
+ ir.Line(n), n.Op().String())
+ }
+
+ // No support currently for named results, so if we see an empty
+ // "return" stmt, be conservative.
+ rs := n.(*ir.ReturnStmt)
+ if len(rs.Results) != len(ra.values) {
+ ra.pessimize()
+ return
+ }
+ for i, r := range rs.Results {
+ ra.analyzeResult(i, r)
+ }
+}
+
+// analyzeResult examines the expression 'n' being returned as the
+// 'ii'th argument in some return statement to see whether has
+// interesting characteristics (for example, returns a constant), then
+// applies a dataflow "meet" operation to combine this result with any
+// previous result (for the given return slot) that we've already
+// processed.
+func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) {
+ isAllocMem := ra.isAllocatedMem(n)
+ isConcConvItf := ra.isConcreteConvIface(n)
+ constVal := ra.constValue(n)
+ isConst := (constVal != nil)
+ isNil := ra.isNil(n)
+ rfunc := ra.funcName(n)
+ isFunc := (rfunc != nil)
+ isClo := (rfunc != nil && rfunc.Func.OClosure != nil)
+ curp := ra.props[ii]
+ dprops, isDerivedFromCall := ra.deriveReturnFlagsFromCallee(n)
+ newp := ResultNoInfo
+ var newcval constant.Value
+ var newfunc *ir.Name
+
+ if debugTrace&debugTraceResults != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult n=%s ismem=%v isconcconv=%v isconst=%v isnil=%v isfunc=%v isclo=%v\n", ir.Line(n), n.Op().String(), isAllocMem, isConcConvItf, isConst, isNil, isFunc, isClo)
+ }
+
+ if ra.values[ii].top {
+ ra.values[ii].top = false
+ // this is the first return we've seen; record
+ // whatever properties it has.
+ switch {
+ case isAllocMem:
+ newp = ResultIsAllocatedMem
+ case isConcConvItf:
+ newp = ResultIsConcreteTypeConvertedToInterface
+ case isFunc:
+ newp = ResultAlwaysSameFunc
+ newfunc = rfunc
+ case isConst:
+ newp = ResultAlwaysSameConstant
+ newcval = constVal
+ case isNil:
+ newp = ResultAlwaysSameConstant
+ newcval = nil
+ case isDerivedFromCall:
+ newp = dprops
+ ra.values[ii].derived = true
+ }
+ } else {
+ if !ra.values[ii].derived {
+ // this is not the first return we've seen; apply
+ // what amounts of a "meet" operator to combine
+ // the properties we see here with what we saw on
+ // the previous returns.
+ switch curp {
+ case ResultIsAllocatedMem:
+ if isAllocMem {
+ newp = ResultIsAllocatedMem
+ }
+ case ResultIsConcreteTypeConvertedToInterface:
+ if isConcConvItf {
+ newp = ResultIsConcreteTypeConvertedToInterface
+ }
+ case ResultAlwaysSameConstant:
+ if isNil && ra.values[ii].cval == nil {
+ newp = ResultAlwaysSameConstant
+ newcval = nil
+ } else if isConst && constant.Compare(constVal, token.EQL, ra.values[ii].cval) {
+ newp = ResultAlwaysSameConstant
+ newcval = constVal
+ }
+ case ResultAlwaysSameFunc:
+ if isFunc && isSameFuncName(rfunc, ra.values[ii].fn) {
+ newp = ResultAlwaysSameFunc
+ newfunc = rfunc
+ }
+ }
+ }
+ }
+ ra.values[ii].fn = newfunc
+ ra.values[ii].fnClo = isClo
+ ra.values[ii].cval = newcval
+ ra.props[ii] = newp
+
+ if debugTrace&debugTraceResults != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult newp=%s\n",
+ ir.Line(n), newp)
+ }
+}
+
+// deriveReturnFlagsFromCallee tries to set properties for a given
+// return result where we're returning call expression; return value
+// is a return property value and a boolean indicating whether the
+// prop is valid. Examples:
+//
+// func foo() int { return bar() }
+// func bar() int { return 42 }
+// func blix() int { return 43 }
+// func two(y int) int {
+// if y < 0 { return bar() } else { return blix() }
+// }
+//
+// Since "foo" always returns the result of a call to "bar", we can
+// set foo's return property to that of bar. In the case of "two", however,
+// even though each return path returns a constant, we don't know
+// whether the constants are identical, hence we need to be conservative.
+func (ra *resultsAnalyzer) deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) {
+ if n.Op() != ir.OCALLFUNC {
+ return 0, false
+ }
+ ce := n.(*ir.CallExpr)
+ if ce.Fun.Op() != ir.ONAME {
+ return 0, false
+ }
+ called := ir.StaticValue(ce.Fun)
+ if called.Op() != ir.ONAME {
+ return 0, false
+ }
+ cname := ra.funcName(called)
+ if cname == nil {
+ return 0, false
+ }
+ calleeProps := propsForFunc(cname.Func)
+ if calleeProps == nil {
+ return 0, false
+ }
+ if len(calleeProps.ResultFlags) != 1 {
+ return 0, false
+ }
+ return calleeProps.ResultFlags[0], true
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/callsite.go b/src/cmd/compile/internal/inline/inlheur/callsite.go
new file mode 100644
index 0000000..f457dd4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/callsite.go
@@ -0,0 +1,149 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/src"
+ "fmt"
+ "io"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// CallSite records useful information about a potentially inlinable
+// (direct) function call. "Callee" is the target of the call, "Call"
+// is the ir node corresponding to the call itself, "Assign" is
+// the top-level assignment statement containing the call (if the call
+// appears in the form of a top-level statement, e.g. "x := foo()"),
+// "Flags" contains properties of the call that might be useful for
+// making inlining decisions, "Score" is the final score assigned to
+// the site, and "ID" is a numeric ID for the site within its
+// containing function.
+type CallSite struct {
+ Callee *ir.Func
+ Call *ir.CallExpr
+ parent *CallSite
+ Assign ir.Node
+ Flags CSPropBits
+
+ ArgProps []ActualExprPropBits
+ Score int
+ ScoreMask scoreAdjustTyp
+ ID uint
+ aux uint8
+}
+
+// CallSiteTab is a table of call sites, keyed by call expr.
+// Ideally it would be nice to key the table by src.XPos, but
+// this results in collisions for calls on very long lines (the
+// front end saturates column numbers at 255). We also wind up
+// with many calls that share the same auto-generated pos.
+type CallSiteTab map[*ir.CallExpr]*CallSite
+
+// ActualExprPropBits describes a property of an actual expression (value
+// passed to some specific func argument at a call site).
+type ActualExprPropBits uint8
+
+const (
+ ActualExprConstant ActualExprPropBits = 1 << iota
+ ActualExprIsConcreteConvIface
+ ActualExprIsFunc
+ ActualExprIsInlinableFunc
+)
+
+type CSPropBits uint32
+
+const (
+ CallSiteInLoop CSPropBits = 1 << iota
+ CallSiteOnPanicPath
+ CallSiteInInitFunc
+)
+
+type csAuxBits uint8
+
+const (
+ csAuxInlined = 1 << iota
+)
+
+// encodedCallSiteTab is a table keyed by "encoded" callsite
+// (stringified src.XPos plus call site ID) mapping to a value of call
+// property bits and score.
+type encodedCallSiteTab map[string]propsAndScore
+
+type propsAndScore struct {
+ props CSPropBits
+ score int
+ mask scoreAdjustTyp
+}
+
+func (pas propsAndScore) String() string {
+ return fmt.Sprintf("P=%s|S=%d|M=%s", pas.props.String(),
+ pas.score, pas.mask.String())
+}
+
+func (cst CallSiteTab) merge(other CallSiteTab) error {
+ for k, v := range other {
+ if prev, ok := cst[k]; ok {
+ return fmt.Errorf("internal error: collision during call site table merge, fn=%s callsite=%s", prev.Callee.Sym().Name, fmtFullPos(prev.Call.Pos()))
+ }
+ cst[k] = v
+ }
+ return nil
+}
+
+func fmtFullPos(p src.XPos) string {
+ var sb strings.Builder
+ sep := ""
+ base.Ctxt.AllPos(p, func(pos src.Pos) {
+ fmt.Fprintf(&sb, sep)
+ sep = "|"
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col())
+ })
+ return sb.String()
+}
+
+func EncodeCallSiteKey(cs *CallSite) string {
+ var sb strings.Builder
+ // FIXME: maybe rewrite line offsets relative to function start?
+ sb.WriteString(fmtFullPos(cs.Call.Pos()))
+ fmt.Fprintf(&sb, "|%d", cs.ID)
+ return sb.String()
+}
+
+func buildEncodedCallSiteTab(tab CallSiteTab) encodedCallSiteTab {
+ r := make(encodedCallSiteTab)
+ for _, cs := range tab {
+ k := EncodeCallSiteKey(cs)
+ r[k] = propsAndScore{
+ props: cs.Flags,
+ score: cs.Score,
+ mask: cs.ScoreMask,
+ }
+ }
+ return r
+}
+
+// dumpCallSiteComments emits comments into the dump file for the
+// callsites in the function of interest. If "ecst" is non-nil, we use
+// that, otherwise generated a fresh encodedCallSiteTab from "tab".
+func dumpCallSiteComments(w io.Writer, tab CallSiteTab, ecst encodedCallSiteTab) {
+ if ecst == nil {
+ ecst = buildEncodedCallSiteTab(tab)
+ }
+ tags := make([]string, 0, len(ecst))
+ for k := range ecst {
+ tags = append(tags, k)
+ }
+ sort.Strings(tags)
+ for _, s := range tags {
+ v := ecst[s]
+ fmt.Fprintf(w, "// callsite: %s flagstr %q flagval %d score %d mask %d maskstr %q\n", s, v.props.String(), v.props, v.score, v.mask, v.mask.String())
+ }
+ fmt.Fprintf(w, "// %s\n", csDelimiter)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go b/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go
new file mode 100644
index 0000000..216f510
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go
@@ -0,0 +1,56 @@
+// Code generated by "stringer -bitset -type CSPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[CallSiteInLoop-1]
+ _ = x[CallSiteOnPanicPath-2]
+ _ = x[CallSiteInInitFunc-4]
+}
+
+var _CSPropBits_value = [...]uint64{
+ 0x1, /* CallSiteInLoop */
+ 0x2, /* CallSiteOnPanicPath */
+ 0x4, /* CallSiteInInitFunc */
+}
+
+const _CSPropBits_name = "CallSiteInLoopCallSiteOnPanicPathCallSiteInInitFunc"
+
+var _CSPropBits_index = [...]uint8{0, 14, 33, 51}
+
+func (i CSPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _CSPropBits_value {
+ x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "CSPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/debugflags_test.go b/src/cmd/compile/internal/inline/inlheur/debugflags_test.go
new file mode 100644
index 0000000..abf4910
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/debugflags_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "testing"
+)
+
+func TestInlScoreAdjFlagParse(t *testing.T) {
+ scenarios := []struct {
+ value string
+ expok bool
+ }{
+ {
+ value: "returnFeedsConcreteToInterfaceCallAdj:9",
+ expok: true,
+ },
+ {
+ value: "panicPathAdj:-1/initFuncAdj:9",
+ expok: true,
+ },
+ {
+ value: "",
+ expok: false,
+ },
+ {
+ value: "nonsenseAdj:10",
+ expok: false,
+ },
+ {
+ value: "inLoopAdj:",
+ expok: false,
+ },
+ {
+ value: "inLoopAdj:10:10",
+ expok: false,
+ },
+ {
+ value: "inLoopAdj:blah",
+ expok: false,
+ },
+ {
+ value: "/",
+ expok: false,
+ },
+ }
+
+ for _, scenario := range scenarios {
+ err := parseScoreAdj(scenario.value)
+ t.Logf("for value=%q err is %v\n", scenario.value, err)
+ if scenario.expok {
+ if err != nil {
+ t.Errorf("expected parseScoreAdj(%s) ok, got err %v",
+ scenario.value, err)
+ }
+ } else {
+ if err == nil {
+ t.Errorf("expected parseScoreAdj(%s) failure, got success",
+ scenario.value)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go b/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go
new file mode 100644
index 0000000..438b700
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go
@@ -0,0 +1,109 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func TestDumpCallSiteScoreDump(t *testing.T) {
+ td := t.TempDir()
+ testenv.MustHaveGoBuild(t)
+
+ scenarios := []struct {
+ name string
+ promoted int
+ indirectlyPromoted int
+ demoted int
+ unchanged int
+ }{
+ {
+ name: "dumpscores",
+ promoted: 1,
+ indirectlyPromoted: 1,
+ demoted: 1,
+ unchanged: 5,
+ },
+ }
+
+ for _, scen := range scenarios {
+ dumpfile, err := gatherInlCallSitesScoresForFile(t, scen.name, td)
+ if err != nil {
+ t.Fatalf("dumping callsite scores for %q: error %v", scen.name, err)
+ }
+ var lines []string
+ if content, err := os.ReadFile(dumpfile); err != nil {
+ t.Fatalf("reading dump %q: error %v", dumpfile, err)
+ } else {
+ lines = strings.Split(string(content), "\n")
+ }
+ prom, indprom, dem, unch := 0, 0, 0, 0
+ for _, line := range lines {
+ switch {
+ case strings.TrimSpace(line) == "":
+ case !strings.Contains(line, "|"):
+ case strings.HasPrefix(line, "#"):
+ case strings.Contains(line, "PROMOTED"):
+ prom++
+ case strings.Contains(line, "INDPROM"):
+ indprom++
+ case strings.Contains(line, "DEMOTED"):
+ dem++
+ default:
+ unch++
+ }
+ }
+ showout := false
+ if prom != scen.promoted {
+ t.Errorf("testcase %q, got %d promoted want %d promoted",
+ scen.name, prom, scen.promoted)
+ showout = true
+ }
+ if indprom != scen.indirectlyPromoted {
+ t.Errorf("testcase %q, got %d indirectly promoted want %d",
+ scen.name, indprom, scen.indirectlyPromoted)
+ showout = true
+ }
+ if dem != scen.demoted {
+ t.Errorf("testcase %q, got %d demoted want %d demoted",
+ scen.name, dem, scen.demoted)
+ showout = true
+ }
+ if unch != scen.unchanged {
+ t.Errorf("testcase %q, got %d unchanged want %d unchanged",
+ scen.name, unch, scen.unchanged)
+ showout = true
+ }
+ if showout {
+ t.Logf(">> dump output: %s", strings.Join(lines, "\n"))
+ }
+ }
+}
+
+// gatherInlCallSitesScoresForFile builds the specified testcase 'testcase'
+// from testdata/props passing the "-d=dumpinlcallsitescores=1"
+// compiler option, to produce a dump, then returns the path of the
+// newly created file.
+func gatherInlCallSitesScoresForFile(t *testing.T, testcase string, td string) (string, error) {
+ t.Helper()
+ gopath := "testdata/" + testcase + ".go"
+ outpath := filepath.Join(td, testcase+".a")
+ dumpfile := filepath.Join(td, testcase+".callsites.txt")
+ run := []string{testenv.GoToolPath(t), "build",
+ "-gcflags=-d=dumpinlcallsitescores=1", "-o", outpath, gopath}
+ out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+ t.Logf("run: %+v\n", run)
+ if err != nil {
+ return "", err
+ }
+ if err := os.WriteFile(dumpfile, out, 0666); err != nil {
+ return "", err
+ }
+ return dumpfile, err
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/eclassify.go b/src/cmd/compile/internal/inline/inlheur/eclassify.go
new file mode 100644
index 0000000..1e6d1b9
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/eclassify.go
@@ -0,0 +1,247 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "os"
+)
+
+// ShouldFoldIfNameConstant analyzes expression tree 'e' to see
+// whether it contains only combinations of simple references to all
+// of the names in 'names' with selected constants + operators. The
+// intent is to identify expression that could be folded away to a
+// constant if the value of 'n' were available. Return value is TRUE
+// if 'e' does look foldable given the value of 'n', and given that
+// 'e' actually makes reference to 'n'. Some examples where the type
+// of "n" is int64, type of "s" is string, and type of "p" is *byte:
+//
+// Simple? Expr
+// yes n<10
+// yes n*n-100
+// yes (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101)
+// yes s == "foo"
+// yes p == nil
+// no n<foo()
+// no n<1 || n>m
+// no float32(n)<1.0
+// no *p == 1
+// no 1 + 100
+// no 1 / n
+// no 1 + unsafe.Sizeof(n)
+//
+// To avoid complexities (e.g. nan, inf) we stay way from folding and
+// floating point or complex operations (integers, bools, and strings
+// only). We also try to be conservative about avoiding any operation
+// that might result in a panic at runtime, e.g. for "n" with type
+// int64:
+//
+// 1<<(n-9) < 100/(n<<9999)
+//
+// we would return FALSE due to the negative shift count and/or
+// potential divide by zero.
+func ShouldFoldIfNameConstant(n ir.Node, names []*ir.Name) bool {
+ cl := makeExprClassifier(names)
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ ir.DoChildren(n, doNode)
+ cl.Visit(n)
+ return false
+ }
+ doNode(n)
+ if cl.getdisp(n) != exprSimple {
+ return false
+ }
+ for _, v := range cl.names {
+ if !v {
+ return false
+ }
+ }
+ return true
+}
+
+// exprClassifier holds intermediate state about nodes within an
+// expression tree being analyzed by ShouldFoldIfNameConstant. Here
+// "name" is the name node passed in, and "disposition" stores the
+// result of classifying a given IR node.
+type exprClassifier struct {
+ names map[*ir.Name]bool
+ disposition map[ir.Node]disp
+}
+
+type disp int
+
+const (
+ // no info on this expr
+ exprNoInfo disp = iota
+
+ // expr contains only literals
+ exprLiterals
+
+ // expr is legal combination of literals and specified names
+ exprSimple
+)
+
+func (d disp) String() string {
+ switch d {
+ case exprNoInfo:
+ return "noinfo"
+ case exprSimple:
+ return "simple"
+ case exprLiterals:
+ return "literals"
+ default:
+ return fmt.Sprintf("unknown<%d>", d)
+ }
+}
+
+func makeExprClassifier(names []*ir.Name) *exprClassifier {
+ m := make(map[*ir.Name]bool, len(names))
+ for _, n := range names {
+ m[n] = false
+ }
+ return &exprClassifier{
+ names: m,
+ disposition: make(map[ir.Node]disp),
+ }
+}
+
+// Visit sets the classification for 'n' based on the previously
+// calculated classifications for n's children, as part of a bottom-up
+// walk over an expression tree.
+func (ec *exprClassifier) Visit(n ir.Node) {
+
+ ndisp := exprNoInfo
+
+ binparts := func(n ir.Node) (ir.Node, ir.Node) {
+ if lex, ok := n.(*ir.LogicalExpr); ok {
+ return lex.X, lex.Y
+ } else if bex, ok := n.(*ir.BinaryExpr); ok {
+ return bex.X, bex.Y
+ } else {
+ panic("bad")
+ }
+ }
+
+ t := n.Type()
+ if t == nil {
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= *** untyped op=%s\n",
+ n.Op().String())
+ }
+ } else if t.IsInteger() || t.IsString() || t.IsBoolean() || t.HasNil() {
+ switch n.Op() {
+ // FIXME: maybe add support for OADDSTR?
+ case ir.ONIL:
+ ndisp = exprLiterals
+
+ case ir.OLITERAL:
+ if _, ok := n.(*ir.BasicLit); ok {
+ } else {
+ panic("unexpected")
+ }
+ ndisp = exprLiterals
+
+ case ir.ONAME:
+ nn := n.(*ir.Name)
+ if _, ok := ec.names[nn]; ok {
+ ndisp = exprSimple
+ ec.names[nn] = true
+ } else {
+ sv := ir.StaticValue(n)
+ if sv.Op() == ir.ONAME {
+ nn = sv.(*ir.Name)
+ }
+ if _, ok := ec.names[nn]; ok {
+ ndisp = exprSimple
+ ec.names[nn] = true
+ }
+ }
+
+ case ir.ONOT,
+ ir.OPLUS,
+ ir.ONEG:
+ uex := n.(*ir.UnaryExpr)
+ ndisp = ec.getdisp(uex.X)
+
+ case ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OGT,
+ ir.OGE,
+ ir.OLE:
+ // compare ops
+ x, y := binparts(n)
+ ndisp = ec.dispmeet(x, y)
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n",
+ ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y),
+ n.Op().String())
+ }
+ case ir.OLSH,
+ ir.ORSH,
+ ir.ODIV,
+ ir.OMOD:
+ x, y := binparts(n)
+ if ec.getdisp(y) == exprLiterals {
+ ndisp = ec.dispmeet(x, y)
+ }
+
+ case ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OMUL,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.OANDAND,
+ ir.OOROR:
+ x, y := binparts(n)
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n",
+ ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y),
+ n.Op().String())
+ }
+ ndisp = ec.dispmeet(x, y)
+ }
+ }
+
+ if debugTrace&debugTraceExprClassify != 0 {
+ fmt.Fprintf(os.Stderr, "=-= op=%s disp=%v\n", n.Op().String(),
+ ndisp.String())
+ }
+
+ ec.disposition[n] = ndisp
+}
+
+func (ec *exprClassifier) getdisp(x ir.Node) disp {
+ if d, ok := ec.disposition[x]; ok {
+ return d
+ } else {
+ panic("missing node from disp table")
+ }
+}
+
+// dispmeet performs a "meet" operation on the data flow states of
+// node x and y (where the term "meet" is being drawn from traditional
+// lattice-theoretical data flow analysis terminology).
+func (ec *exprClassifier) dispmeet(x, y ir.Node) disp {
+ xd := ec.getdisp(x)
+ if xd == exprNoInfo {
+ return exprNoInfo
+ }
+ yd := ec.getdisp(y)
+ if yd == exprNoInfo {
+ return exprNoInfo
+ }
+ if xd == exprSimple || yd == exprSimple {
+ return exprSimple
+ }
+ if xd != exprLiterals || yd != exprLiterals {
+ panic("unexpected")
+ }
+ return exprLiterals
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcprop_string.go b/src/cmd/compile/internal/inline/inlheur/funcprop_string.go
new file mode 100644
index 0000000..d16e4d3
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcprop_string.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "fmt"
+ "strings"
+)
+
+func (fp *FuncProps) String() string {
+ return fp.ToString("")
+}
+
+func (fp *FuncProps) ToString(prefix string) string {
+ var sb strings.Builder
+ if fp.Flags != 0 {
+ fmt.Fprintf(&sb, "%sFlags %s\n", prefix, fp.Flags)
+ }
+ flagSliceToSB[ParamPropBits](&sb, fp.ParamFlags,
+ prefix, "ParamFlags")
+ flagSliceToSB[ResultPropBits](&sb, fp.ResultFlags,
+ prefix, "ResultFlags")
+ return sb.String()
+}
+
+func flagSliceToSB[T interface {
+ ~uint32
+ String() string
+}](sb *strings.Builder, sl []T, prefix string, tag string) {
+ var sb2 strings.Builder
+ foundnz := false
+ fmt.Fprintf(&sb2, "%s%s\n", prefix, tag)
+ for i, e := range sl {
+ if e != 0 {
+ foundnz = true
+ }
+ fmt.Fprintf(&sb2, "%s %d %s\n", prefix, i, e.String())
+ }
+ if foundnz {
+ sb.WriteString(sb2.String())
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go
new file mode 100644
index 0000000..28de4a9
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go
@@ -0,0 +1,58 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type FuncPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+ "bytes"
+ "strconv"
+)
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[FuncPropNeverReturns-1]
+}
+
+var _FuncPropBits_value = [...]uint64{
+ 0x1, /* FuncPropNeverReturns */
+}
+
+const _FuncPropBits_name = "FuncPropNeverReturns"
+
+var _FuncPropBits_index = [...]uint8{0, 20}
+
+func (i FuncPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _FuncPropBits_value {
+ x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "FuncPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/funcprops_test.go b/src/cmd/compile/internal/inline/inlheur/funcprops_test.go
new file mode 100644
index 0000000..c04e604
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/funcprops_test.go
@@ -0,0 +1,530 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "bufio"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var remasterflag = flag.Bool("update-expected", false, "if true, generate updated golden results in testcases for all props tests")
+
+func TestFuncProperties(t *testing.T) {
+ td := t.TempDir()
+ // td = "/tmp/qqq"
+ // os.RemoveAll(td)
+ // os.Mkdir(td, 0777)
+ testenv.MustHaveGoBuild(t)
+
+ // NOTE: this testpoint has the unfortunate characteristic that it
+ // relies on the installed compiler, meaning that if you make
+ // changes to the inline heuristics code in your working copy and
+ // then run the test, it will test the installed compiler and not
+ // your local modifications. TODO: decide whether to convert this
+ // to building a fresh compiler on the fly, or using some other
+ // scheme.
+
+ testcases := []string{"funcflags", "returns", "params",
+ "acrosscall", "calls", "returns2"}
+ for _, tc := range testcases {
+ dumpfile, err := gatherPropsDumpForFile(t, tc, td)
+ if err != nil {
+ t.Fatalf("dumping func props for %q: error %v", tc, err)
+ }
+ // Read in the newly generated dump.
+ dentries, dcsites, derr := readDump(t, dumpfile)
+ if derr != nil {
+ t.Fatalf("reading func prop dump: %v", derr)
+ }
+ if *remasterflag {
+ updateExpected(t, tc, dentries, dcsites)
+ continue
+ }
+ // Generate expected dump.
+ epath, egerr := genExpected(td, tc)
+ if egerr != nil {
+ t.Fatalf("generating expected func prop dump: %v", egerr)
+ }
+ // Read in the expected result entries.
+ eentries, ecsites, eerr := readDump(t, epath)
+ if eerr != nil {
+ t.Fatalf("reading expected func prop dump: %v", eerr)
+ }
+ // Compare new vs expected.
+ n := len(dentries)
+ eidx := 0
+ for i := 0; i < n; i++ {
+ dentry := dentries[i]
+ dcst := dcsites[i]
+ if !interestingToCompare(dentry.fname) {
+ continue
+ }
+ if eidx >= len(eentries) {
+ t.Errorf("testcase %s missing expected entry for %s, skipping", tc, dentry.fname)
+ continue
+ }
+ eentry := eentries[eidx]
+ ecst := ecsites[eidx]
+ eidx++
+ if dentry.fname != eentry.fname {
+ t.Errorf("got fn %q wanted %q, skipping checks",
+ dentry.fname, eentry.fname)
+ continue
+ }
+ compareEntries(t, tc, &dentry, dcst, &eentry, ecst)
+ }
+ }
+}
+
+func propBitsToString[T interface{ String() string }](sl []T) string {
+ var sb strings.Builder
+ for i, f := range sl {
+ fmt.Fprintf(&sb, "%d: %s\n", i, f.String())
+ }
+ return sb.String()
+}
+
+func compareEntries(t *testing.T, tc string, dentry *fnInlHeur, dcsites encodedCallSiteTab, eentry *fnInlHeur, ecsites encodedCallSiteTab) {
+ dfp := dentry.props
+ efp := eentry.props
+ dfn := dentry.fname
+
+ // Compare function flags.
+ if dfp.Flags != efp.Flags {
+ t.Errorf("testcase %q: Flags mismatch for %q: got %s, wanted %s",
+ tc, dfn, dfp.Flags.String(), efp.Flags.String())
+ }
+ // Compare returns
+ rgot := propBitsToString[ResultPropBits](dfp.ResultFlags)
+ rwant := propBitsToString[ResultPropBits](efp.ResultFlags)
+ if rgot != rwant {
+ t.Errorf("testcase %q: Results mismatch for %q: got:\n%swant:\n%s",
+ tc, dfn, rgot, rwant)
+ }
+ // Compare receiver + params.
+ pgot := propBitsToString[ParamPropBits](dfp.ParamFlags)
+ pwant := propBitsToString[ParamPropBits](efp.ParamFlags)
+ if pgot != pwant {
+ t.Errorf("testcase %q: Params mismatch for %q: got:\n%swant:\n%s",
+ tc, dfn, pgot, pwant)
+ }
+ // Compare call sites.
+ for k, ve := range ecsites {
+ if vd, ok := dcsites[k]; !ok {
+ t.Errorf("testcase %q missing expected callsite %q in func %q", tc, k, dfn)
+ continue
+ } else {
+ if vd != ve {
+ t.Errorf("testcase %q callsite %q in func %q: got %+v want %+v",
+ tc, k, dfn, vd.String(), ve.String())
+ }
+ }
+ }
+ for k := range dcsites {
+ if _, ok := ecsites[k]; !ok {
+ t.Errorf("testcase %q unexpected extra callsite %q in func %q", tc, k, dfn)
+ }
+ }
+}
+
+type dumpReader struct {
+ s *bufio.Scanner
+ t *testing.T
+ p string
+ ln int
+}
+
+// readDump reads in the contents of a dump file produced
+// by the "-d=dumpinlfuncprops=..." command line flag by the Go
+// compiler. It breaks the dump down into separate sections
+// by function, then deserializes each func section into a
+// fnInlHeur object and returns a slice of those objects.
+func readDump(t *testing.T, path string) ([]fnInlHeur, []encodedCallSiteTab, error) {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return nil, nil, err
+ }
+ dr := &dumpReader{
+ s: bufio.NewScanner(strings.NewReader(string(content))),
+ t: t,
+ p: path,
+ ln: 1,
+ }
+ // consume header comment until preamble delimiter.
+ found := false
+ for dr.scan() {
+ if dr.curLine() == preambleDelimiter {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return nil, nil, fmt.Errorf("malformed testcase file %s, missing preamble delimiter", path)
+ }
+ res := []fnInlHeur{}
+ csres := []encodedCallSiteTab{}
+ for {
+ dentry, dcst, err := dr.readEntry()
+ if err != nil {
+ t.Fatalf("reading func prop dump: %v", err)
+ }
+ if dentry.fname == "" {
+ break
+ }
+ res = append(res, dentry)
+ csres = append(csres, dcst)
+ }
+ return res, csres, nil
+}
+
+func (dr *dumpReader) scan() bool {
+ v := dr.s.Scan()
+ if v {
+ dr.ln++
+ }
+ return v
+}
+
+func (dr *dumpReader) curLine() string {
+ res := strings.TrimSpace(dr.s.Text())
+ if !strings.HasPrefix(res, "// ") {
+ dr.t.Fatalf("malformed line %s:%d, no comment: %s", dr.p, dr.ln, res)
+ }
+ return res[3:]
+}
+
+// readObjBlob reads in a series of commented lines until
+// it hits a delimiter, then returns the contents of the comments.
+func (dr *dumpReader) readObjBlob(delim string) (string, error) {
+ var sb strings.Builder
+ foundDelim := false
+ for dr.scan() {
+ line := dr.curLine()
+ if delim == line {
+ foundDelim = true
+ break
+ }
+ sb.WriteString(line + "\n")
+ }
+ if err := dr.s.Err(); err != nil {
+ return "", err
+ }
+ if !foundDelim {
+ return "", fmt.Errorf("malformed input %s, missing delimiter %q",
+ dr.p, delim)
+ }
+ return sb.String(), nil
+}
+
+// readEntry reads a single function's worth of material from
+// a file produced by the "-d=dumpinlfuncprops=..." command line
+// flag. It deserializes the json for the func properties and
+// returns the resulting properties and function name. EOF is
+// signaled by a nil FuncProps return (with no error
+func (dr *dumpReader) readEntry() (fnInlHeur, encodedCallSiteTab, error) {
+ var funcInlHeur fnInlHeur
+ var callsites encodedCallSiteTab
+ if !dr.scan() {
+ return funcInlHeur, callsites, nil
+ }
+ // first line contains info about function: file/name/line
+ info := dr.curLine()
+ chunks := strings.Fields(info)
+ funcInlHeur.file = chunks[0]
+ funcInlHeur.fname = chunks[1]
+ if _, err := fmt.Sscanf(chunks[2], "%d", &funcInlHeur.line); err != nil {
+ return funcInlHeur, callsites, fmt.Errorf("scanning line %q: %v", info, err)
+ }
+ // consume comments until and including delimiter
+ for {
+ if !dr.scan() {
+ break
+ }
+ if dr.curLine() == comDelimiter {
+ break
+ }
+ }
+
+ // Consume JSON for encoded props.
+ dr.scan()
+ line := dr.curLine()
+ fp := &FuncProps{}
+ if err := json.Unmarshal([]byte(line), fp); err != nil {
+ return funcInlHeur, callsites, err
+ }
+ funcInlHeur.props = fp
+
+ // Consume callsites.
+ callsites = make(encodedCallSiteTab)
+ for dr.scan() {
+ line := dr.curLine()
+ if line == csDelimiter {
+ break
+ }
+ // expected format: "// callsite: <expanded pos> flagstr <desc> flagval <flags> score <score> mask <scoremask> maskstr <scoremaskstring>"
+ fields := strings.Fields(line)
+ if len(fields) != 12 {
+ return funcInlHeur, nil, fmt.Errorf("malformed callsite (nf=%d) %s line %d: %s", len(fields), dr.p, dr.ln, line)
+ }
+ if fields[2] != "flagstr" || fields[4] != "flagval" || fields[6] != "score" || fields[8] != "mask" || fields[10] != "maskstr" {
+ return funcInlHeur, nil, fmt.Errorf("malformed callsite %s line %d: %s",
+ dr.p, dr.ln, line)
+ }
+ tag := fields[1]
+ flagstr := fields[5]
+ flags, err := strconv.Atoi(flagstr)
+ if err != nil {
+ return funcInlHeur, nil, fmt.Errorf("bad flags val %s line %d: %q err=%v",
+ dr.p, dr.ln, line, err)
+ }
+ scorestr := fields[7]
+ score, err2 := strconv.Atoi(scorestr)
+ if err2 != nil {
+ return funcInlHeur, nil, fmt.Errorf("bad score val %s line %d: %q err=%v",
+ dr.p, dr.ln, line, err2)
+ }
+ maskstr := fields[9]
+ mask, err3 := strconv.Atoi(maskstr)
+ if err3 != nil {
+ return funcInlHeur, nil, fmt.Errorf("bad mask val %s line %d: %q err=%v",
+ dr.p, dr.ln, line, err3)
+ }
+ callsites[tag] = propsAndScore{
+ props: CSPropBits(flags),
+ score: score,
+ mask: scoreAdjustTyp(mask),
+ }
+ }
+
+ // Consume function delimiter.
+ dr.scan()
+ line = dr.curLine()
+ if line != fnDelimiter {
+ return funcInlHeur, nil, fmt.Errorf("malformed testcase file %q, missing delimiter %q", dr.p, fnDelimiter)
+ }
+
+ return funcInlHeur, callsites, nil
+}
+
+// gatherPropsDumpForFile builds the specified testcase 'testcase' from
+// testdata/props passing the "-d=dumpinlfuncprops=..." compiler option,
+// to produce a properties dump, then returns the path of the newly
+// created file. NB: we can't use "go tool compile" here, since
+// some of the test cases import stdlib packages (such as "os").
+// This means using "go build", which is problematic since the
+// Go command can potentially cache the results of the compile step,
+// causing the test to fail when being run interactively. E.g.
+//
+// $ rm -f dump.txt
+// $ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go
+// $ rm -f dump.txt foo.a
+// $ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go
+// $ ls foo.a dump.txt > /dev/null
+// ls : cannot access 'dump.txt': No such file or directory
+// $
+//
+// For this reason, pick a unique filename for the dump, so as to
+// defeat the caching.
+func gatherPropsDumpForFile(t *testing.T, testcase string, td string) (string, error) {
+ t.Helper()
+ gopath := "testdata/props/" + testcase + ".go"
+ outpath := filepath.Join(td, testcase+".a")
+ salt := fmt.Sprintf(".p%dt%d", os.Getpid(), time.Now().UnixNano())
+ dumpfile := filepath.Join(td, testcase+salt+".dump.txt")
+ run := []string{testenv.GoToolPath(t), "build",
+ "-gcflags=-d=dumpinlfuncprops=" + dumpfile, "-o", outpath, gopath}
+ out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+ if err != nil {
+ t.Logf("compile command: %+v", run)
+ }
+ if strings.TrimSpace(string(out)) != "" {
+ t.Logf("%s", out)
+ }
+ return dumpfile, err
+}
+
+// genExpected reads in a given Go testcase file, strips out all the
+// unindented (column 0) commands, writes them out to a new file, and
+// returns the path of that new file. By picking out just the comments
+// from the Go file we wind up with something that resembles the
+// output from a "-d=dumpinlfuncprops=..." compilation.
+func genExpected(td string, testcase string) (string, error) {
+ epath := filepath.Join(td, testcase+".expected")
+ outf, err := os.OpenFile(epath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return "", err
+ }
+ gopath := "testdata/props/" + testcase + ".go"
+ content, err := os.ReadFile(gopath)
+ if err != nil {
+ return "", err
+ }
+ lines := strings.Split(string(content), "\n")
+ for _, line := range lines[3:] {
+ if !strings.HasPrefix(line, "// ") {
+ continue
+ }
+ fmt.Fprintf(outf, "%s\n", line)
+ }
+ if err := outf.Close(); err != nil {
+ return "", err
+ }
+ return epath, nil
+}
+
+type upexState struct {
+ dentries []fnInlHeur
+ newgolines []string
+ atline map[uint]uint
+}
+
+func mkUpexState(dentries []fnInlHeur) *upexState {
+ atline := make(map[uint]uint)
+ for _, e := range dentries {
+ atline[e.line] = atline[e.line] + 1
+ }
+ return &upexState{
+ dentries: dentries,
+ atline: atline,
+ }
+}
+
+// updateExpected takes a given Go testcase file X.go and writes out a
+// new/updated version of the file to X.go.new, where the column-0
+// "expected" comments have been updated using fresh data from
+// "dentries".
+//
+// Writing of expected results is complicated by closures and by
+// generics, where you can have multiple functions that all share the
+// same starting line. Currently we combine up all the dups and
+// closures into the single pre-func comment.
+func updateExpected(t *testing.T, testcase string, dentries []fnInlHeur, dcsites []encodedCallSiteTab) {
+ nd := len(dentries)
+
+ ues := mkUpexState(dentries)
+
+ gopath := "testdata/props/" + testcase + ".go"
+ newgopath := "testdata/props/" + testcase + ".go.new"
+
+ // Read the existing Go file.
+ content, err := os.ReadFile(gopath)
+ if err != nil {
+ t.Fatalf("opening %s: %v", gopath, err)
+ }
+ golines := strings.Split(string(content), "\n")
+
+ // Preserve copyright.
+ ues.newgolines = append(ues.newgolines, golines[:4]...)
+ if !strings.HasPrefix(golines[0], "// Copyright") {
+ t.Fatalf("missing copyright from existing testcase")
+ }
+ golines = golines[4:]
+
+ clore := regexp.MustCompile(`.+\.func\d+[\.\d]*$`)
+
+ emitFunc := func(e *fnInlHeur, dcsites encodedCallSiteTab,
+ instance, atl uint) {
+ var sb strings.Builder
+ dumpFnPreamble(&sb, e, dcsites, instance, atl)
+ ues.newgolines = append(ues.newgolines,
+ strings.Split(strings.TrimSpace(sb.String()), "\n")...)
+ }
+
+ // Write file preamble with "DO NOT EDIT" message and such.
+ var sb strings.Builder
+ dumpFilePreamble(&sb)
+ ues.newgolines = append(ues.newgolines,
+ strings.Split(strings.TrimSpace(sb.String()), "\n")...)
+
+ // Helper to add a clump of functions to the output file.
+ processClump := func(idx int, emit bool) int {
+ // Process func itself, plus anything else defined
+ // on the same line
+ atl := ues.atline[dentries[idx].line]
+ for k := uint(0); k < atl; k++ {
+ if emit {
+ emitFunc(&dentries[idx], dcsites[idx], k, atl)
+ }
+ idx++
+ }
+ // now process any closures it contains
+ ncl := 0
+ for idx < nd {
+ nfn := dentries[idx].fname
+ if !clore.MatchString(nfn) {
+ break
+ }
+ ncl++
+ if emit {
+ emitFunc(&dentries[idx], dcsites[idx], 0, 1)
+ }
+ idx++
+ }
+ return idx
+ }
+
+ didx := 0
+ for _, line := range golines {
+ if strings.HasPrefix(line, "func ") {
+
+ // We have a function definition.
+ // Pick out the corresponding entry or entries in the dump
+ // and emit if interesting (or skip if not).
+ dentry := dentries[didx]
+ emit := interestingToCompare(dentry.fname)
+ didx = processClump(didx, emit)
+ }
+
+ // Consume all existing comments.
+ if strings.HasPrefix(line, "//") {
+ continue
+ }
+ ues.newgolines = append(ues.newgolines, line)
+ }
+
+ if didx != nd {
+ t.Logf("didx=%d wanted %d", didx, nd)
+ }
+
+ // Open new Go file and write contents.
+ of, err := os.OpenFile(newgopath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ t.Fatalf("opening %s: %v", newgopath, err)
+ }
+ fmt.Fprintf(of, "%s", strings.Join(ues.newgolines, "\n"))
+ if err := of.Close(); err != nil {
+ t.Fatalf("closing %s: %v", newgopath, err)
+ }
+
+ t.Logf("update-expected: emitted updated file %s", newgopath)
+ t.Logf("please compare the two files, then overwrite %s with %s\n",
+ gopath, newgopath)
+}
+
+// interestingToCompare returns TRUE if we want to compare results
+// for function 'fname'.
+func interestingToCompare(fname string) bool {
+ if strings.HasPrefix(fname, "init.") {
+ return true
+ }
+ if strings.HasPrefix(fname, "T_") {
+ return true
+ }
+ f := strings.Split(fname, ".")
+ if len(f) == 2 && strings.HasPrefix(f[1], "T_") {
+ return true
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/function_properties.go b/src/cmd/compile/internal/inline/inlheur/function_properties.go
new file mode 100644
index 0000000..b90abf9
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/function_properties.go
@@ -0,0 +1,98 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+// This file defines a set of Go function "properties" intended to
+// guide inlining heuristics; these properties may apply to the
+// function as a whole, or to one or more function return values or
+// parameters.
+//
+// IMPORTANT: function properties are produced on a "best effort"
+// basis, meaning that the code that computes them doesn't verify that
+// the properties are guaranteed to be true in 100% of cases. For this
+// reason, properties should only be used to drive always-safe
+// optimization decisions (e.g. "should I inline this call", or
+// "should I unroll this loop") as opposed to potentially unsafe IR
+// alterations that could change program semantics (e.g. "can I delete
+// this variable" or "can I move this statement to a new location").
+//
+//----------------------------------------------------------------
+
+// FuncProps describes a set of function or method properties that may
+// be useful for inlining heuristics. Here 'Flags' are properties that
+// we think apply to the entire function; 'RecvrParamFlags' are
+// properties of specific function params (or the receiver), and
+// 'ResultFlags' are things properties we think will apply to values
+// of specific results. Note that 'ParamFlags' includes and entry for
+// the receiver if applicable, and does include etries for blank
+// params; for a function such as "func foo(_ int, b byte, _ float32)"
+// the length of ParamFlags will be 3.
+type FuncProps struct {
+ Flags FuncPropBits
+ ParamFlags []ParamPropBits // slot 0 receiver if applicable
+ ResultFlags []ResultPropBits
+}
+
+type FuncPropBits uint32
+
+const (
+ // Function always panics or invokes os.Exit() or a func that does
+ // likewise.
+ FuncPropNeverReturns FuncPropBits = 1 << iota
+)
+
+type ParamPropBits uint32
+
+const (
+ // No info about this param
+ ParamNoInfo ParamPropBits = 0
+
+ // Parameter value feeds unmodified into a top-level interface
+ // call (this assumes the parameter is of interface type).
+ ParamFeedsInterfaceMethodCall ParamPropBits = 1 << iota
+
+ // Parameter value feeds unmodified into an interface call that
+ // may be conditional/nested and not always executed (this assumes
+ // the parameter is of interface type).
+ ParamMayFeedInterfaceMethodCall ParamPropBits = 1 << iota
+
+ // Parameter value feeds unmodified into a top level indirect
+ // function call (assumes parameter is of function type).
+ ParamFeedsIndirectCall
+
+ // Parameter value feeds unmodified into an indirect function call
+ // that is conditional/nested (not guaranteed to execute). Assumes
+ // parameter is of function type.
+ ParamMayFeedIndirectCall
+
+ // Parameter value feeds unmodified into a top level "switch"
+ // statement or "if" statement simple expressions (see more on
+ // "simple" expression classification below).
+ ParamFeedsIfOrSwitch
+
+ // Parameter value feeds unmodified into a "switch" or "if"
+ // statement simple expressions (see more on "simple" expression
+ // classification below), where the if/switch is
+ // conditional/nested.
+ ParamMayFeedIfOrSwitch
+)
+
+type ResultPropBits uint32
+
+const (
+ // No info about this result
+ ResultNoInfo ResultPropBits = 0
+ // This result always contains allocated memory.
+ ResultIsAllocatedMem ResultPropBits = 1 << iota
+ // This result is always a single concrete type that is
+ // implicitly converted to interface.
+ ResultIsConcreteTypeConvertedToInterface
+ // Result is always the same non-composite compile time constant.
+ ResultAlwaysSameConstant
+ // Result is always the same function or closure.
+ ResultAlwaysSameFunc
+ // Result is always the same (potentially) inlinable function or closure.
+ ResultAlwaysSameInlinableFunc
+)
diff --git a/src/cmd/compile/internal/inline/inlheur/names.go b/src/cmd/compile/internal/inline/inlheur/names.go
new file mode 100644
index 0000000..0223850
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/names.go
@@ -0,0 +1,129 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "go/constant"
+)
+
+// nameFinder provides a set of "isXXX" query methods for clients to
+// ask whether a given AST node corresponds to a function, a constant
+// value, and so on. These methods use an underlying ir.ReassignOracle
+// to return more precise results in cases where an "interesting"
+// value is assigned to a singly-defined local temp. Example:
+//
+// const q = 101
+// fq := func() int { return q }
+// copyOfConstant := q
+// copyOfFunc := f
+// interestingCall(copyOfConstant, copyOfFunc)
+//
+// A name finder query method invoked on the arguments being passed to
+// "interestingCall" will be able detect that 'copyOfConstant' always
+// evaluates to a constant (even though it is in fact a PAUTO local
+// variable). A given nameFinder can also operate without using
+// ir.ReassignOracle (in cases where it is not practical to look
+// at the entire function); in such cases queries will still work
+// for explicit constant values and functions.
+type nameFinder struct {
+ ro *ir.ReassignOracle
+}
+
+// newNameFinder returns a new nameFinder object with a reassignment
+// oracle initialized based on the function fn, or if fn is nil,
+// without an underlying ReassignOracle.
+func newNameFinder(fn *ir.Func) *nameFinder {
+ var ro *ir.ReassignOracle
+ if fn != nil {
+ ro = &ir.ReassignOracle{}
+ ro.Init(fn)
+ }
+ return &nameFinder{ro: ro}
+}
+
+// funcName returns the *ir.Name for the func or method
+// corresponding to node 'n', or nil if n can't be proven
+// to contain a function value.
+func (nf *nameFinder) funcName(n ir.Node) *ir.Name {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ if name := ir.StaticCalleeName(sv); name != nil {
+ return name
+ }
+ return nil
+}
+
+// isAllocatedMem returns true if node n corresponds to a memory
+// allocation expression (make, new, or equivalent).
+func (nf *nameFinder) isAllocatedMem(n ir.Node) bool {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ switch sv.Op() {
+ case ir.OMAKESLICE, ir.ONEW, ir.OPTRLIT, ir.OSLICELIT:
+ return true
+ }
+ return false
+}
+
+// constValue returns the underlying constant.Value for an AST node n
+// if n is itself a constant value/expr, or if n is a singly assigned
+// local containing constant expr/value (or nil not constant).
+func (nf *nameFinder) constValue(n ir.Node) constant.Value {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ if sv.Op() == ir.OLITERAL {
+ return sv.Val()
+ }
+ return nil
+}
+
+// isNil returns whether n is nil (or singly
+// assigned local containing nil).
+func (nf *nameFinder) isNil(n ir.Node) bool {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ return sv.Op() == ir.ONIL
+}
+
+func (nf *nameFinder) staticValue(n ir.Node) ir.Node {
+ if nf.ro == nil {
+ return n
+ }
+ return nf.ro.StaticValue(n)
+}
+
+func (nf *nameFinder) reassigned(n *ir.Name) bool {
+ if nf.ro == nil {
+ return true
+ }
+ return nf.ro.Reassigned(n)
+}
+
+func (nf *nameFinder) isConcreteConvIface(n ir.Node) bool {
+ sv := n
+ if nf.ro != nil {
+ sv = nf.ro.StaticValue(n)
+ }
+ if sv.Op() != ir.OCONVIFACE {
+ return false
+ }
+ return !sv.(*ir.ConvExpr).X.Type().IsInterface()
+}
+
+func isSameFuncName(v1, v2 *ir.Name) bool {
+ // NB: there are a few corner cases where pointer equality
+ // doesn't work here, but this should be good enough for
+ // our purposes here.
+ return v1 == v2
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go b/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go
new file mode 100644
index 0000000..bf4d3ca
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go
@@ -0,0 +1,70 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type ParamPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+ "bytes"
+ "strconv"
+)
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ParamNoInfo-0]
+ _ = x[ParamFeedsInterfaceMethodCall-2]
+ _ = x[ParamMayFeedInterfaceMethodCall-4]
+ _ = x[ParamFeedsIndirectCall-8]
+ _ = x[ParamMayFeedIndirectCall-16]
+ _ = x[ParamFeedsIfOrSwitch-32]
+ _ = x[ParamMayFeedIfOrSwitch-64]
+}
+
+var _ParamPropBits_value = [...]uint64{
+ 0x0, /* ParamNoInfo */
+ 0x2, /* ParamFeedsInterfaceMethodCall */
+ 0x4, /* ParamMayFeedInterfaceMethodCall */
+ 0x8, /* ParamFeedsIndirectCall */
+ 0x10, /* ParamMayFeedIndirectCall */
+ 0x20, /* ParamFeedsIfOrSwitch */
+ 0x40, /* ParamMayFeedIfOrSwitch */
+}
+
+const _ParamPropBits_name = "ParamNoInfoParamFeedsInterfaceMethodCallParamMayFeedInterfaceMethodCallParamFeedsIndirectCallParamMayFeedIndirectCallParamFeedsIfOrSwitchParamMayFeedIfOrSwitch"
+
+var _ParamPropBits_index = [...]uint8{0, 11, 40, 71, 93, 117, 137, 159}
+
+func (i ParamPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _ParamPropBits_value {
+ x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "ParamPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/pstate_string.go b/src/cmd/compile/internal/inline/inlheur/pstate_string.go
new file mode 100644
index 0000000..e6108d1
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/pstate_string.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -type pstate"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[psNoInfo-0]
+ _ = x[psCallsPanic-1]
+ _ = x[psMayReturn-2]
+ _ = x[psTop-3]
+}
+
+const _pstate_name = "psNoInfopsCallsPanicpsMayReturnpsTop"
+
+var _pstate_index = [...]uint8{0, 8, 20, 31, 36}
+
+func (i pstate) String() string {
+ if i < 0 || i >= pstate(len(_pstate_index)-1) {
+ return "pstate(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _pstate_name[_pstate_index[i]:_pstate_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go b/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go
new file mode 100644
index 0000000..888af98
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go
@@ -0,0 +1,68 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "stringer -bitset -type ResultPropBits"; DO NOT EDIT.
+
+package inlheur
+
+import (
+ "bytes"
+ "strconv"
+)
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ResultNoInfo-0]
+ _ = x[ResultIsAllocatedMem-2]
+ _ = x[ResultIsConcreteTypeConvertedToInterface-4]
+ _ = x[ResultAlwaysSameConstant-8]
+ _ = x[ResultAlwaysSameFunc-16]
+ _ = x[ResultAlwaysSameInlinableFunc-32]
+}
+
+var _ResultPropBits_value = [...]uint64{
+ 0x0, /* ResultNoInfo */
+ 0x2, /* ResultIsAllocatedMem */
+ 0x4, /* ResultIsConcreteTypeConvertedToInterface */
+ 0x8, /* ResultAlwaysSameConstant */
+ 0x10, /* ResultAlwaysSameFunc */
+ 0x20, /* ResultAlwaysSameInlinableFunc */
+}
+
+const _ResultPropBits_name = "ResultNoInfoResultIsAllocatedMemResultIsConcreteTypeConvertedToInterfaceResultAlwaysSameConstantResultAlwaysSameFuncResultAlwaysSameInlinableFunc"
+
+var _ResultPropBits_index = [...]uint8{0, 12, 32, 72, 96, 116, 145}
+
+func (i ResultPropBits) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _ResultPropBits_value {
+ x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "ResultPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go
new file mode 100644
index 0000000..b95ea37
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go
@@ -0,0 +1,413 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "fmt"
+ "os"
+)
+
+// This file contains code to re-score callsites based on how the
+// results of the call were used. Example:
+//
+// func foo() {
+// x, fptr := bar()
+// switch x {
+// case 10: fptr = baz()
+// default: blix()
+// }
+// fptr(100)
+// }
+//
+// The initial scoring pass will assign a score to "bar()" based on
+// various criteria, however once the first pass of scoring is done,
+// we look at the flags on the result from bar, and check to see
+// how those results are used. If bar() always returns the same constant
+// for its first result, and if the variable receiving that result
+// isn't redefined, and if that variable feeds into an if/switch
+// condition, then we will try to adjust the score for "bar" (on the
+// theory that if we inlined, we can constant fold / deadcode).
+
+type resultPropAndCS struct {
+ defcs *CallSite
+ props ResultPropBits
+}
+
+type resultUseAnalyzer struct {
+ resultNameTab map[*ir.Name]resultPropAndCS
+ fn *ir.Func
+ cstab CallSiteTab
+ *condLevelTracker
+}
+
+// rescoreBasedOnCallResultUses examines how call results are used,
+// and tries to update the scores of calls based on how their results
+// are used in the function.
+func (csa *callSiteAnalyzer) rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]resultPropAndCS, cstab CallSiteTab) {
+ enableDebugTraceIfEnv()
+ rua := &resultUseAnalyzer{
+ resultNameTab: resultNameTab,
+ fn: fn,
+ cstab: cstab,
+ condLevelTracker: new(condLevelTracker),
+ }
+ var doNode func(ir.Node) bool
+ doNode = func(n ir.Node) bool {
+ rua.nodeVisitPre(n)
+ ir.DoChildren(n, doNode)
+ rua.nodeVisitPost(n)
+ return false
+ }
+ doNode(fn)
+ disableDebugTrace()
+}
+
+func (csa *callSiteAnalyzer) examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS) map[*ir.Name]resultPropAndCS {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= examining call results for %q\n",
+ EncodeCallSiteKey(cs))
+ }
+
+ // Invoke a helper to pick out the specific ir.Name's the results
+ // from this call are assigned into, e.g. "x, y := fooBar()". If
+ // the call is not part of an assignment statement, or if the
+ // variables in question are not newly defined, then we'll receive
+ // an empty list here.
+ //
+ names, autoTemps, props := namesDefined(cs)
+ if len(names) == 0 {
+ return resultNameTab
+ }
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %d names defined\n", len(names))
+ }
+
+ // For each returned value, if the value has interesting
+ // properties (ex: always returns the same constant), and the name
+ // in question is never redefined, then make an entry in the
+ // result table for it.
+ const interesting = (ResultIsConcreteTypeConvertedToInterface |
+ ResultAlwaysSameConstant | ResultAlwaysSameInlinableFunc | ResultAlwaysSameFunc)
+ for idx, n := range names {
+ rprop := props.ResultFlags[idx]
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= props for ret %d %q: %s\n",
+ idx, n.Sym().Name, rprop.String())
+ }
+
+ if rprop&interesting == 0 {
+ continue
+ }
+ if csa.nameFinder.reassigned(n) {
+ continue
+ }
+ if resultNameTab == nil {
+ resultNameTab = make(map[*ir.Name]resultPropAndCS)
+ } else if _, ok := resultNameTab[n]; ok {
+ panic("should never happen")
+ }
+ entry := resultPropAndCS{
+ defcs: cs,
+ props: rprop,
+ }
+ resultNameTab[n] = entry
+ if autoTemps[idx] != nil {
+ resultNameTab[autoTemps[idx]] = entry
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= add resultNameTab table entry n=%v autotemp=%v props=%s\n", n, autoTemps[idx], rprop.String())
+ }
+ }
+ return resultNameTab
+}
+
+// namesDefined returns a list of ir.Name's corresponding to locals
+// that receive the results from the call at site 'cs', plus the
+// properties object for the called function. If a given result
+// isn't cleanly assigned to a newly defined local, the
+// slot for that result in the returned list will be nil. Example:
+//
+// call returned name list
+//
+// x := foo() [ x ]
+// z, y := bar() [ nil, nil ]
+// _, q := baz() [ nil, q ]
+//
+// In the case of a multi-return call, such as "x, y := foo()",
+// the pattern we see from the front end will be a call op
+// assigning to auto-temps, and then an assignment of the auto-temps
+// to the user-level variables. In such cases we return
+// first the user-level variable (in the first func result)
+// and then the auto-temp name in the second result.
+func namesDefined(cs *CallSite) ([]*ir.Name, []*ir.Name, *FuncProps) {
+ // If this call doesn't feed into an assignment (and of course not
+ // all calls do), then we don't have anything to work with here.
+ if cs.Assign == nil {
+ return nil, nil, nil
+ }
+ funcInlHeur, ok := fpmap[cs.Callee]
+ if !ok {
+ // TODO: add an assert/panic here.
+ return nil, nil, nil
+ }
+ if len(funcInlHeur.props.ResultFlags) == 0 {
+ return nil, nil, nil
+ }
+
+ // Single return case.
+ if len(funcInlHeur.props.ResultFlags) == 1 {
+ asgn, ok := cs.Assign.(*ir.AssignStmt)
+ if !ok {
+ return nil, nil, nil
+ }
+ // locate name being assigned
+ aname, ok := asgn.X.(*ir.Name)
+ if !ok {
+ return nil, nil, nil
+ }
+ return []*ir.Name{aname}, []*ir.Name{nil}, funcInlHeur.props
+ }
+
+ // Multi-return case
+ asgn, ok := cs.Assign.(*ir.AssignListStmt)
+ if !ok || !asgn.Def {
+ return nil, nil, nil
+ }
+ userVars := make([]*ir.Name, len(funcInlHeur.props.ResultFlags))
+ autoTemps := make([]*ir.Name, len(funcInlHeur.props.ResultFlags))
+ for idx, x := range asgn.Lhs {
+ if n, ok := x.(*ir.Name); ok {
+ userVars[idx] = n
+ r := asgn.Rhs[idx]
+ if r.Op() == ir.OCONVNOP {
+ r = r.(*ir.ConvExpr).X
+ }
+ if ir.IsAutoTmp(r) {
+ autoTemps[idx] = r.(*ir.Name)
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= multi-ret namedef uv=%v at=%v\n",
+ x, autoTemps[idx])
+ }
+ } else {
+ return nil, nil, nil
+ }
+ }
+ return userVars, autoTemps, funcInlHeur.props
+}
+
+func (rua *resultUseAnalyzer) nodeVisitPost(n ir.Node) {
+ rua.condLevelTracker.post(n)
+}
+
+func (rua *resultUseAnalyzer) nodeVisitPre(n ir.Node) {
+ rua.condLevelTracker.pre(n)
+ switch n.Op() {
+ case ir.OCALLINTER:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= rescore examine iface call %v:\n", n)
+ }
+ rua.callTargetCheckResults(n)
+ case ir.OCALLFUNC:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= rescore examine call %v:\n", n)
+ }
+ rua.callTargetCheckResults(n)
+ case ir.OIF:
+ ifst := n.(*ir.IfStmt)
+ rua.foldCheckResults(ifst.Cond)
+ case ir.OSWITCH:
+ swst := n.(*ir.SwitchStmt)
+ if swst.Tag != nil {
+ rua.foldCheckResults(swst.Tag)
+ }
+
+ }
+}
+
+// callTargetCheckResults examines a given call to see whether the
+// callee expression is potentially an inlinable function returned
+// from a potentially inlinable call. Examples:
+//
+// Scenario 1: named intermediate
+//
+// fn1 := foo() conc := bar()
+// fn1("blah") conc.MyMethod()
+//
+// Scenario 2: returned func or concrete object feeds directly to call
+//
+// foo()("blah") bar().MyMethod()
+//
+// In the second case although at the source level the result of the
+// direct call feeds right into the method call or indirect call,
+// we're relying on the front end having inserted an auto-temp to
+// capture the value.
+func (rua *resultUseAnalyzer) callTargetCheckResults(call ir.Node) {
+ ce := call.(*ir.CallExpr)
+ rname := rua.getCallResultName(ce)
+ if rname == nil {
+ return
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= staticvalue returns %v:\n",
+ rname)
+ }
+ if rname.Class != ir.PAUTO {
+ return
+ }
+ switch call.Op() {
+ case ir.OCALLINTER:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= in %s checking %v for cci prop:\n",
+ rua.fn.Sym().Name, rname)
+ }
+ if cs := rua.returnHasProp(rname, ResultIsConcreteTypeConvertedToInterface); cs != nil {
+
+ adj := returnFeedsConcreteToInterfaceCallAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+ }
+ case ir.OCALLFUNC:
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= in %s checking %v for samefunc props:\n",
+ rua.fn.Sym().Name, rname)
+ v, ok := rua.resultNameTab[rname]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "=-= no entry for %v in rt\n", rname)
+ } else {
+ fmt.Fprintf(os.Stderr, "=-= props for %v: %q\n", rname, v.props.String())
+ }
+ }
+ if cs := rua.returnHasProp(rname, ResultAlwaysSameInlinableFunc); cs != nil {
+ adj := returnFeedsInlinableFuncToIndCallAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+ } else if cs := rua.returnHasProp(rname, ResultAlwaysSameFunc); cs != nil {
+ adj := returnFeedsFuncToIndCallAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+
+ }
+ }
+}
+
+// foldCheckResults examines the specified if/switch condition 'cond'
+// to see if it refers to locals defined by a (potentially inlinable)
+// function call at call site C, and if so, whether 'cond' contains
+// only combinations of simple references to all of the names in
+// 'names' with selected constants + operators. If these criteria are
+// met, then we adjust the score for call site C to reflect the
+// fact that inlining will enable deadcode and/or constant propagation.
+// Note: for this heuristic to kick in, the names in question have to
+// be all from the same callsite. Examples:
+//
+// q, r := baz() x, y := foo()
+// switch q+r { a, b, c := bar()
+// ... if x && y && a && b && c {
+// } ...
+// }
+//
+// For the call to "baz" above we apply a score adjustment, but not
+// for the calls to "foo" or "bar".
+func (rua *resultUseAnalyzer) foldCheckResults(cond ir.Node) {
+ namesUsed := collectNamesUsed(cond)
+ if len(namesUsed) == 0 {
+ return
+ }
+ var cs *CallSite
+ for _, n := range namesUsed {
+ rpcs, found := rua.resultNameTab[n]
+ if !found {
+ return
+ }
+ if cs != nil && rpcs.defcs != cs {
+ return
+ }
+ cs = rpcs.defcs
+ if rpcs.props&ResultAlwaysSameConstant == 0 {
+ return
+ }
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ nls := func(nl []*ir.Name) string {
+ r := ""
+ for _, n := range nl {
+ r += " " + n.Sym().Name
+ }
+ return r
+ }
+ fmt.Fprintf(os.Stderr, "=-= calling ShouldFoldIfNameConstant on names={%s} cond=%v\n", nls(namesUsed), cond)
+ }
+
+ if !ShouldFoldIfNameConstant(cond, namesUsed) {
+ return
+ }
+ adj := returnFeedsConstToIfAdj
+ cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask)
+}
+
+func collectNamesUsed(expr ir.Node) []*ir.Name {
+ res := []*ir.Name{}
+ ir.Visit(expr, func(n ir.Node) {
+ if n.Op() != ir.ONAME {
+ return
+ }
+ nn := n.(*ir.Name)
+ if nn.Class != ir.PAUTO {
+ return
+ }
+ res = append(res, nn)
+ })
+ return res
+}
+
+func (rua *resultUseAnalyzer) returnHasProp(name *ir.Name, prop ResultPropBits) *CallSite {
+ v, ok := rua.resultNameTab[name]
+ if !ok {
+ return nil
+ }
+ if v.props&prop == 0 {
+ return nil
+ }
+ return v.defcs
+}
+
+func (rua *resultUseAnalyzer) getCallResultName(ce *ir.CallExpr) *ir.Name {
+ var callTarg ir.Node
+ if sel, ok := ce.Fun.(*ir.SelectorExpr); ok {
+ // method call
+ callTarg = sel.X
+ } else if ctarg, ok := ce.Fun.(*ir.Name); ok {
+ // regular call
+ callTarg = ctarg
+ } else {
+ return nil
+ }
+ r := ir.StaticValue(callTarg)
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= staticname on %v returns %v:\n",
+ callTarg, r)
+ }
+ if r.Op() == ir.OCALLFUNC {
+ // This corresponds to the "x := foo()" case; here
+ // ir.StaticValue has brought us all the way back to
+ // the call expression itself. We need to back off to
+ // the name defined by the call; do this by looking up
+ // the callsite.
+ ce := r.(*ir.CallExpr)
+ cs, ok := rua.cstab[ce]
+ if !ok {
+ return nil
+ }
+ names, _, _ := namesDefined(cs)
+ if len(names) == 0 {
+ return nil
+ }
+ return names[0]
+ } else if r.Op() == ir.ONAME {
+ return r.(*ir.Name)
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go b/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go
new file mode 100644
index 0000000..f5b8bf6
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go
@@ -0,0 +1,80 @@
+// Code generated by "stringer -bitset -type scoreAdjustTyp"; DO NOT EDIT.
+
+package inlheur
+
+import "strconv"
+import "bytes"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[panicPathAdj-1]
+ _ = x[initFuncAdj-2]
+ _ = x[inLoopAdj-4]
+ _ = x[passConstToIfAdj-8]
+ _ = x[passConstToNestedIfAdj-16]
+ _ = x[passConcreteToItfCallAdj-32]
+ _ = x[passConcreteToNestedItfCallAdj-64]
+ _ = x[passFuncToIndCallAdj-128]
+ _ = x[passFuncToNestedIndCallAdj-256]
+ _ = x[passInlinableFuncToIndCallAdj-512]
+ _ = x[passInlinableFuncToNestedIndCallAdj-1024]
+ _ = x[returnFeedsConstToIfAdj-2048]
+ _ = x[returnFeedsFuncToIndCallAdj-4096]
+ _ = x[returnFeedsInlinableFuncToIndCallAdj-8192]
+ _ = x[returnFeedsConcreteToInterfaceCallAdj-16384]
+}
+
+var _scoreAdjustTyp_value = [...]uint64{
+ 0x1, /* panicPathAdj */
+ 0x2, /* initFuncAdj */
+ 0x4, /* inLoopAdj */
+ 0x8, /* passConstToIfAdj */
+ 0x10, /* passConstToNestedIfAdj */
+ 0x20, /* passConcreteToItfCallAdj */
+ 0x40, /* passConcreteToNestedItfCallAdj */
+ 0x80, /* passFuncToIndCallAdj */
+ 0x100, /* passFuncToNestedIndCallAdj */
+ 0x200, /* passInlinableFuncToIndCallAdj */
+ 0x400, /* passInlinableFuncToNestedIndCallAdj */
+ 0x800, /* returnFeedsConstToIfAdj */
+ 0x1000, /* returnFeedsFuncToIndCallAdj */
+ 0x2000, /* returnFeedsInlinableFuncToIndCallAdj */
+ 0x4000, /* returnFeedsConcreteToInterfaceCallAdj */
+}
+
+const _scoreAdjustTyp_name = "panicPathAdjinitFuncAdjinLoopAdjpassConstToIfAdjpassConstToNestedIfAdjpassConcreteToItfCallAdjpassConcreteToNestedItfCallAdjpassFuncToIndCallAdjpassFuncToNestedIndCallAdjpassInlinableFuncToIndCallAdjpassInlinableFuncToNestedIndCallAdjreturnFeedsConstToIfAdjreturnFeedsFuncToIndCallAdjreturnFeedsInlinableFuncToIndCallAdjreturnFeedsConcreteToInterfaceCallAdj"
+
+var _scoreAdjustTyp_index = [...]uint16{0, 12, 23, 32, 48, 70, 94, 124, 144, 170, 199, 234, 257, 284, 320, 357}
+
+func (i scoreAdjustTyp) String() string {
+ var b bytes.Buffer
+
+ remain := uint64(i)
+ seen := false
+
+ for k, v := range _scoreAdjustTyp_value {
+ x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]]
+ if v == 0 {
+ if i == 0 {
+ b.WriteString(x)
+ return b.String()
+ }
+ continue
+ }
+ if (v & remain) == v {
+ remain &^= v
+ x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]]
+ if seen {
+ b.WriteString("|")
+ }
+ seen = true
+ b.WriteString(x)
+ }
+ }
+ if remain == 0 {
+ return b.String()
+ }
+ return "scoreAdjustTyp(0x" + strconv.FormatInt(int64(i), 16) + ")"
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/scoring.go b/src/cmd/compile/internal/inline/inlheur/scoring.go
new file mode 100644
index 0000000..623ba8a
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/scoring.go
@@ -0,0 +1,751 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/types"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// These constants enumerate the set of possible ways/scenarios
+// in which we'll adjust the score of a given callsite.
+type scoreAdjustTyp uint
+
+// These constants capture the various ways in which the inliner's
+// scoring phase can adjust a callsite score based on heuristics. They
+// fall broadly into three categories:
+//
+// 1) adjustments based solely on the callsite context (ex: call
+// appears on panic path)
+//
+// 2) adjustments that take into account specific interesting values
+// passed at a call site (ex: passing a constant that could result in
+// cprop/deadcode in the caller)
+//
+// 3) adjustments that take into account values returned from the call
+// at a callsite (ex: call always returns the same inlinable function,
+// and return value flows unmodified into an indirect call)
+//
+// For categories 2 and 3 above, each adjustment can have either a
+// "must" version and a "may" version (but not both). Here the idea is
+// that in the "must" version the value flow is unconditional: if the
+// callsite executes, then the condition we're interested in (ex:
+// param feeding call) is guaranteed to happen. For the "may" version,
+// there may be control flow that could cause the benefit to be
+// bypassed.
+const (
+ // Category 1 adjustments (see above)
+ panicPathAdj scoreAdjustTyp = (1 << iota)
+ initFuncAdj
+ inLoopAdj
+
+ // Category 2 adjustments (see above).
+ passConstToIfAdj
+ passConstToNestedIfAdj
+ passConcreteToItfCallAdj
+ passConcreteToNestedItfCallAdj
+ passFuncToIndCallAdj
+ passFuncToNestedIndCallAdj
+ passInlinableFuncToIndCallAdj
+ passInlinableFuncToNestedIndCallAdj
+
+ // Category 3 adjustments.
+ returnFeedsConstToIfAdj
+ returnFeedsFuncToIndCallAdj
+ returnFeedsInlinableFuncToIndCallAdj
+ returnFeedsConcreteToInterfaceCallAdj
+
+ sentinelScoreAdj // sentinel; not a real adjustment
+)
+
+// This table records the specific values we use to adjust call
+// site scores in a given scenario.
+// NOTE: these numbers are chosen very arbitrarily; ideally
+// we will go through some sort of turning process to decide
+// what value for each one produces the best performance.
+
+var adjValues = map[scoreAdjustTyp]int{
+ panicPathAdj: 40,
+ initFuncAdj: 20,
+ inLoopAdj: -5,
+ passConstToIfAdj: -20,
+ passConstToNestedIfAdj: -15,
+ passConcreteToItfCallAdj: -30,
+ passConcreteToNestedItfCallAdj: -25,
+ passFuncToIndCallAdj: -25,
+ passFuncToNestedIndCallAdj: -20,
+ passInlinableFuncToIndCallAdj: -45,
+ passInlinableFuncToNestedIndCallAdj: -40,
+ returnFeedsConstToIfAdj: -15,
+ returnFeedsFuncToIndCallAdj: -25,
+ returnFeedsInlinableFuncToIndCallAdj: -40,
+ returnFeedsConcreteToInterfaceCallAdj: -25,
+}
+
+// SetupScoreAdjustments interprets the value of the -d=inlscoreadj
+// debugging option, if set. The value of this flag is expected to be
+// a series of "/"-separated clauses of the form adj1:value1. Example:
+// -d=inlscoreadj=inLoopAdj=0/passConstToIfAdj=-99
+func SetupScoreAdjustments() {
+ if base.Debug.InlScoreAdj == "" {
+ return
+ }
+ if err := parseScoreAdj(base.Debug.InlScoreAdj); err != nil {
+ base.Fatalf("malformed -d=inlscoreadj argument %q: %v",
+ base.Debug.InlScoreAdj, err)
+ }
+}
+
+func adjStringToVal(s string) (scoreAdjustTyp, bool) {
+ for adj := scoreAdjustTyp(1); adj < sentinelScoreAdj; adj <<= 1 {
+ if adj.String() == s {
+ return adj, true
+ }
+ }
+ return 0, false
+}
+
+func parseScoreAdj(val string) error {
+ clauses := strings.Split(val, "/")
+ if len(clauses) == 0 {
+ return fmt.Errorf("no clauses")
+ }
+ for _, clause := range clauses {
+ elems := strings.Split(clause, ":")
+ if len(elems) < 2 {
+ return fmt.Errorf("clause %q: expected colon", clause)
+ }
+ if len(elems) != 2 {
+ return fmt.Errorf("clause %q has %d elements, wanted 2", clause,
+ len(elems))
+ }
+ adj, ok := adjStringToVal(elems[0])
+ if !ok {
+ return fmt.Errorf("clause %q: unknown adjustment", clause)
+ }
+ val, err := strconv.Atoi(elems[1])
+ if err != nil {
+ return fmt.Errorf("clause %q: malformed value: %v", clause, err)
+ }
+ adjValues[adj] = val
+ }
+ return nil
+}
+
+func adjValue(x scoreAdjustTyp) int {
+ if val, ok := adjValues[x]; ok {
+ return val
+ } else {
+ panic("internal error unregistered adjustment type")
+ }
+}
+
+var mayMustAdj = [...]struct{ may, must scoreAdjustTyp }{
+ {may: passConstToNestedIfAdj, must: passConstToIfAdj},
+ {may: passConcreteToNestedItfCallAdj, must: passConcreteToItfCallAdj},
+ {may: passFuncToNestedIndCallAdj, must: passFuncToNestedIndCallAdj},
+ {may: passInlinableFuncToNestedIndCallAdj, must: passInlinableFuncToIndCallAdj},
+}
+
+func isMay(x scoreAdjustTyp) bool {
+ return mayToMust(x) != 0
+}
+
+func isMust(x scoreAdjustTyp) bool {
+ return mustToMay(x) != 0
+}
+
+func mayToMust(x scoreAdjustTyp) scoreAdjustTyp {
+ for _, v := range mayMustAdj {
+ if x == v.may {
+ return v.must
+ }
+ }
+ return 0
+}
+
+func mustToMay(x scoreAdjustTyp) scoreAdjustTyp {
+ for _, v := range mayMustAdj {
+ if x == v.must {
+ return v.may
+ }
+ }
+ return 0
+}
+
+// computeCallSiteScore takes a given call site whose ir node is
+// 'call' and callee function is 'callee' and with previously computed
+// call site properties 'csflags', then computes a score for the
+// callsite that combines the size cost of the callee with heuristics
+// based on previously computed argument and function properties,
+// then stores the score and the adjustment mask in the appropriate
+// fields in 'cs'
+func (cs *CallSite) computeCallSiteScore(csa *callSiteAnalyzer, calleeProps *FuncProps) {
+ callee := cs.Callee
+ csflags := cs.Flags
+ call := cs.Call
+
+ // Start with the size-based score for the callee.
+ score := int(callee.Inl.Cost)
+ var tmask scoreAdjustTyp
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= scoring call to %s at %s , initial=%d\n",
+ callee.Sym().Name, fmtFullPos(call.Pos()), score)
+ }
+
+ // First some score adjustments to discourage inlining in selected cases.
+ if csflags&CallSiteOnPanicPath != 0 {
+ score, tmask = adjustScore(panicPathAdj, score, tmask)
+ }
+ if csflags&CallSiteInInitFunc != 0 {
+ score, tmask = adjustScore(initFuncAdj, score, tmask)
+ }
+
+ // Then adjustments to encourage inlining in selected cases.
+ if csflags&CallSiteInLoop != 0 {
+ score, tmask = adjustScore(inLoopAdj, score, tmask)
+ }
+
+ // Stop here if no callee props.
+ if calleeProps == nil {
+ cs.Score, cs.ScoreMask = score, tmask
+ return
+ }
+
+ // Walk through the actual expressions being passed at the call.
+ calleeRecvrParms := callee.Type().RecvParams()
+ for idx := range call.Args {
+ // ignore blanks
+ if calleeRecvrParms[idx].Sym == nil ||
+ calleeRecvrParms[idx].Sym.IsBlank() {
+ continue
+ }
+ arg := call.Args[idx]
+ pflag := calleeProps.ParamFlags[idx]
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= arg %d of %d: val %v flags=%s\n",
+ idx, len(call.Args), arg, pflag.String())
+ }
+
+ if len(cs.ArgProps) == 0 {
+ continue
+ }
+ argProps := cs.ArgProps[idx]
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= arg %d props %s value %v\n",
+ idx, argProps.String(), arg)
+ }
+
+ if argProps&ActualExprConstant != 0 {
+ if pflag&ParamMayFeedIfOrSwitch != 0 {
+ score, tmask = adjustScore(passConstToNestedIfAdj, score, tmask)
+ }
+ if pflag&ParamFeedsIfOrSwitch != 0 {
+ score, tmask = adjustScore(passConstToIfAdj, score, tmask)
+ }
+ }
+
+ if argProps&ActualExprIsConcreteConvIface != 0 {
+ // FIXME: ideally here it would be nice to make a
+ // distinction between the inlinable case and the
+ // non-inlinable case, but this is hard to do. Example:
+ //
+ // type I interface { Tiny() int; Giant() }
+ // type Conc struct { x int }
+ // func (c *Conc) Tiny() int { return 42 }
+ // func (c *Conc) Giant() { <huge amounts of code> }
+ //
+ // func passConcToItf(c *Conc) {
+ // makesItfMethodCall(c)
+ // }
+ //
+ // In the code above, function properties will only tell
+ // us that 'makesItfMethodCall' invokes a method on its
+ // interface parameter, but we don't know whether it calls
+ // "Tiny" or "Giant". If we knew if called "Tiny", then in
+ // theory in addition to converting the interface call to
+ // a direct call, we could also inline (in which case
+ // we'd want to decrease the score even more).
+ //
+ // One thing we could do (not yet implemented) is iterate
+ // through all of the methods of "*Conc" that allow it to
+ // satisfy I, and if all are inlinable, then exploit that.
+ if pflag&ParamMayFeedInterfaceMethodCall != 0 {
+ score, tmask = adjustScore(passConcreteToNestedItfCallAdj, score, tmask)
+ }
+ if pflag&ParamFeedsInterfaceMethodCall != 0 {
+ score, tmask = adjustScore(passConcreteToItfCallAdj, score, tmask)
+ }
+ }
+
+ if argProps&(ActualExprIsFunc|ActualExprIsInlinableFunc) != 0 {
+ mayadj := passFuncToNestedIndCallAdj
+ mustadj := passFuncToIndCallAdj
+ if argProps&ActualExprIsInlinableFunc != 0 {
+ mayadj = passInlinableFuncToNestedIndCallAdj
+ mustadj = passInlinableFuncToIndCallAdj
+ }
+ if pflag&ParamMayFeedIndirectCall != 0 {
+ score, tmask = adjustScore(mayadj, score, tmask)
+ }
+ if pflag&ParamFeedsIndirectCall != 0 {
+ score, tmask = adjustScore(mustadj, score, tmask)
+ }
+ }
+ }
+
+ cs.Score, cs.ScoreMask = score, tmask
+}
+
+func adjustScore(typ scoreAdjustTyp, score int, mask scoreAdjustTyp) (int, scoreAdjustTyp) {
+
+ if isMust(typ) {
+ if mask&typ != 0 {
+ return score, mask
+ }
+ may := mustToMay(typ)
+ if mask&may != 0 {
+ // promote may to must, so undo may
+ score -= adjValue(may)
+ mask &^= may
+ }
+ } else if isMay(typ) {
+ must := mayToMust(typ)
+ if mask&(must|typ) != 0 {
+ return score, mask
+ }
+ }
+ if mask&typ == 0 {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= applying adj %d for %s\n",
+ adjValue(typ), typ.String())
+ }
+ score += adjValue(typ)
+ mask |= typ
+ }
+ return score, mask
+}
+
+var resultFlagToPositiveAdj map[ResultPropBits]scoreAdjustTyp
+var paramFlagToPositiveAdj map[ParamPropBits]scoreAdjustTyp
+
+func setupFlagToAdjMaps() {
+ resultFlagToPositiveAdj = map[ResultPropBits]scoreAdjustTyp{
+ ResultIsAllocatedMem: returnFeedsConcreteToInterfaceCallAdj,
+ ResultAlwaysSameFunc: returnFeedsFuncToIndCallAdj,
+ ResultAlwaysSameConstant: returnFeedsConstToIfAdj,
+ }
+ paramFlagToPositiveAdj = map[ParamPropBits]scoreAdjustTyp{
+ ParamMayFeedInterfaceMethodCall: passConcreteToNestedItfCallAdj,
+ ParamFeedsInterfaceMethodCall: passConcreteToItfCallAdj,
+ ParamMayFeedIndirectCall: passInlinableFuncToNestedIndCallAdj,
+ ParamFeedsIndirectCall: passInlinableFuncToIndCallAdj,
+ }
+}
+
+// LargestNegativeScoreAdjustment tries to estimate the largest possible
+// negative score adjustment that could be applied to a call of the
+// function with the specified props. Example:
+//
+// func foo() { func bar(x int, p *int) int {
+// ... if x < 0 { *p = x }
+// } return 99
+// }
+//
+// Function 'foo' above on the left has no interesting properties,
+// thus as a result the most we'll adjust any call to is the value for
+// "call in loop". If the calculated cost of the function is 150, and
+// the in-loop adjustment is 5 (for example), then there is not much
+// point treating it as inlinable. On the other hand "bar" has a param
+// property (parameter "x" feeds unmodified to an "if" statement") and
+// a return property (always returns same constant) meaning that a
+// given call _could_ be rescored down as much as -35 points-- thus if
+// the size of "bar" is 100 (for example) then there is at least a
+// chance that scoring will enable inlining.
+func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int {
+ if resultFlagToPositiveAdj == nil {
+ setupFlagToAdjMaps()
+ }
+ var tmask scoreAdjustTyp
+ score := adjValues[inLoopAdj] // any call can be in a loop
+ for _, pf := range props.ParamFlags {
+ if adj, ok := paramFlagToPositiveAdj[pf]; ok {
+ score, tmask = adjustScore(adj, score, tmask)
+ }
+ }
+ for _, rf := range props.ResultFlags {
+ if adj, ok := resultFlagToPositiveAdj[rf]; ok {
+ score, tmask = adjustScore(adj, score, tmask)
+ }
+ }
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= largestScore(%v) is %d\n",
+ fn, score)
+ }
+
+ return score
+}
+
+// LargestPositiveScoreAdjustment tries to estimate the largest possible
+// positive score adjustment that could be applied to a given callsite.
+// At the moment we don't have very many positive score adjustments, so
+// this is just hard-coded, not table-driven.
+func LargestPositiveScoreAdjustment(fn *ir.Func) int {
+ return adjValues[panicPathAdj] + adjValues[initFuncAdj]
+}
+
+// callSiteTab contains entries for each call in the function
+// currently being processed by InlineCalls; this variable will either
+// be set to 'cstabCache' below (for non-inlinable routines) or to the
+// local 'cstab' entry in the fnInlHeur object for inlinable routines.
+//
+// NOTE: this assumes that inlining operations are happening in a serial,
+// single-threaded fashion,f which is true today but probably won't hold
+// in the future (for example, we might want to score the callsites
+// in multiple functions in parallel); if the inliner evolves in this
+// direction we'll need to come up with a different approach here.
+var callSiteTab CallSiteTab
+
+// scoreCallsCache caches a call site table and call site list between
+// invocations of ScoreCalls so that we can reuse previously allocated
+// storage.
+var scoreCallsCache scoreCallsCacheType
+
+type scoreCallsCacheType struct {
+ tab CallSiteTab
+ csl []*CallSite
+}
+
+// ScoreCalls assigns numeric scores to each of the callsites in
+// function 'fn'; the lower the score, the more helpful we think it
+// will be to inline.
+//
+// Unlike a lot of the other inline heuristics machinery, callsite
+// scoring can't be done as part of the CanInline call for a function,
+// due to fact that we may be working on a non-trivial SCC. So for
+// example with this SCC:
+//
+// func foo(x int) { func bar(x int, f func()) {
+// if x != 0 { f()
+// bar(x, func(){}) foo(x-1)
+// } }
+// }
+//
+// We don't want to perform scoring for the 'foo' call in "bar" until
+// after foo has been analyzed, but it's conceivable that CanInline
+// might visit bar before foo for this SCC.
+func ScoreCalls(fn *ir.Func) {
+ if len(fn.Body) == 0 {
+ return
+ }
+ enableDebugTraceIfEnv()
+
+ nameFinder := newNameFinder(fn)
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= ScoreCalls(%v)\n", ir.FuncName(fn))
+ }
+
+ // If this is an inlinable function, use the precomputed
+ // call site table for it. If the function wasn't an inline
+ // candidate, collect a callsite table for it now.
+ var cstab CallSiteTab
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ cstab = funcInlHeur.cstab
+ } else {
+ if len(scoreCallsCache.tab) != 0 {
+ panic("missing call to ScoreCallsCleanup")
+ }
+ if scoreCallsCache.tab == nil {
+ scoreCallsCache.tab = make(CallSiteTab)
+ }
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= building cstab for non-inl func %s\n",
+ ir.FuncName(fn))
+ }
+ cstab = computeCallSiteTable(fn, fn.Body, scoreCallsCache.tab, nil, 0,
+ nameFinder)
+ }
+
+ csa := makeCallSiteAnalyzer(fn)
+ const doCallResults = true
+ csa.scoreCallsRegion(fn, fn.Body, cstab, doCallResults, nil)
+
+ disableDebugTrace()
+}
+
+// scoreCallsRegion assigns numeric scores to each of the callsites in
+// region 'region' within function 'fn'. This can be called on
+// an entire function, or with 'region' set to a chunk of
+// code corresponding to an inlined call.
+func (csa *callSiteAnalyzer) scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallResults bool, ic *ir.InlinedCallExpr) {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= scoreCallsRegion(%v, %s) len(cstab)=%d\n",
+ ir.FuncName(fn), region[0].Op().String(), len(cstab))
+ }
+
+ // Sort callsites to avoid any surprises with non deterministic
+ // map iteration order (this is probably not needed, but here just
+ // in case).
+ csl := scoreCallsCache.csl[:0]
+ for _, cs := range cstab {
+ csl = append(csl, cs)
+ }
+ scoreCallsCache.csl = csl[:0]
+ sort.Slice(csl, func(i, j int) bool {
+ return csl[i].ID < csl[j].ID
+ })
+
+ // Score each call site.
+ var resultNameTab map[*ir.Name]resultPropAndCS
+ for _, cs := range csl {
+ var cprops *FuncProps
+ fihcprops := false
+ desercprops := false
+ if funcInlHeur, ok := fpmap[cs.Callee]; ok {
+ cprops = funcInlHeur.props
+ fihcprops = true
+ } else if cs.Callee.Inl != nil {
+ cprops = DeserializeFromString(cs.Callee.Inl.Properties)
+ desercprops = true
+ } else {
+ if base.Debug.DumpInlFuncProps != "" {
+ fmt.Fprintf(os.Stderr, "=-= *** unable to score call to %s from %s\n", cs.Callee.Sym().Name, fmtFullPos(cs.Call.Pos()))
+ panic("should never happen")
+ } else {
+ continue
+ }
+ }
+ cs.computeCallSiteScore(csa, cprops)
+
+ if doCallResults {
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= examineCallResults at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
+ }
+ resultNameTab = csa.examineCallResults(cs, resultNameTab)
+ }
+
+ if debugTrace&debugTraceScoring != 0 {
+ fmt.Fprintf(os.Stderr, "=-= scoring call at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
+ }
+ }
+
+ if resultNameTab != nil {
+ csa.rescoreBasedOnCallResultUses(fn, resultNameTab, cstab)
+ }
+
+ disableDebugTrace()
+
+ if ic != nil && callSiteTab != nil {
+ // Integrate the calls from this cstab into the table for the caller.
+ if err := callSiteTab.merge(cstab); err != nil {
+ base.FatalfAt(ic.Pos(), "%v", err)
+ }
+ } else {
+ callSiteTab = cstab
+ }
+}
+
+// ScoreCallsCleanup resets the state of the callsite cache
+// once ScoreCalls is done with a function.
+func ScoreCallsCleanup() {
+ if base.Debug.DumpInlCallSiteScores != 0 {
+ if allCallSites == nil {
+ allCallSites = make(CallSiteTab)
+ }
+ for call, cs := range callSiteTab {
+ allCallSites[call] = cs
+ }
+ }
+ for k := range scoreCallsCache.tab {
+ delete(scoreCallsCache.tab, k)
+ }
+}
+
+// GetCallSiteScore returns the previously calculated score for call
+// within fn.
+func GetCallSiteScore(fn *ir.Func, call *ir.CallExpr) (int, bool) {
+ if funcInlHeur, ok := fpmap[fn]; ok {
+ if cs, ok := funcInlHeur.cstab[call]; ok {
+ return cs.Score, true
+ }
+ }
+ if cs, ok := callSiteTab[call]; ok {
+ return cs.Score, true
+ }
+ return 0, false
+}
+
+// BudgetExpansion returns the amount to relax/expand the base
+// inlining budget when the new inliner is turned on; the inliner
+// will add the returned value to the hairyness budget.
+//
+// Background: with the new inliner, the score for a given callsite
+// can be adjusted down by some amount due to heuristics, however we
+// won't know whether this is going to happen until much later after
+// the CanInline call. This function returns the amount to relax the
+// budget initially (to allow for a large score adjustment); later on
+// in RevisitInlinability we'll look at each individual function to
+// demote it if needed.
+func BudgetExpansion(maxBudget int32) int32 {
+ if base.Debug.InlBudgetSlack != 0 {
+ return int32(base.Debug.InlBudgetSlack)
+ }
+ // In the default case, return maxBudget, which will effectively
+ // double the budget from 80 to 160; this should be good enough
+ // for most cases.
+ return maxBudget
+}
+
+var allCallSites CallSiteTab
+
+// DumpInlCallSiteScores is invoked by the inliner if the debug flag
+// "-d=dumpinlcallsitescores" is set; it dumps out a human-readable
+// summary of all (potentially) inlinable callsites in the package,
+// along with info on call site scoring and the adjustments made to a
+// given score. Here profile is the PGO profile in use (may be
+// nil), budgetCallback is a callback that can be invoked to find out
+// the original pre-adjustment hairyness limit for the function, and
+// inlineHotMaxBudget is the constant of the same name used in the
+// inliner. Sample output lines:
+//
+// Score Adjustment Status Callee CallerPos ScoreFlags
+// 115 40 DEMOTED cmd/compile/internal/abi.(*ABIParamAssignment).Offset expand_calls.go:1679:14|6 panicPathAdj
+// 76 -5n PROMOTED runtime.persistentalloc mcheckmark.go:48:45|3 inLoopAdj
+// 201 0 --- PGO unicode.DecodeRuneInString utf8.go:312:30|1
+// 7 -5 --- PGO internal/abi.Name.DataChecked type.go:625:22|0 inLoopAdj
+//
+// In the dump above, "Score" is the final score calculated for the
+// callsite, "Adjustment" is the amount added to or subtracted from
+// the original hairyness estimate to form the score. "Status" shows
+// whether anything changed with the site -- did the adjustment bump
+// it down just below the threshold ("PROMOTED") or instead bump it
+// above the threshold ("DEMOTED"); this will be blank ("---") if no
+// threshold was crossed as a result of the heuristics. Note that
+// "Status" also shows whether PGO was involved. "Callee" is the name
+// of the function called, "CallerPos" is the position of the
+// callsite, and "ScoreFlags" is a digest of the specific properties
+// we used to make adjustments to callsite score via heuristics.
+func DumpInlCallSiteScores(profile *pgo.Profile, budgetCallback func(fn *ir.Func, profile *pgo.Profile) (int32, bool)) {
+
+ var indirectlyDueToPromotion func(cs *CallSite) bool
+ indirectlyDueToPromotion = func(cs *CallSite) bool {
+ bud, _ := budgetCallback(cs.Callee, profile)
+ hairyval := cs.Callee.Inl.Cost
+ score := int32(cs.Score)
+ if hairyval > bud && score <= bud {
+ return true
+ }
+ if cs.parent != nil {
+ return indirectlyDueToPromotion(cs.parent)
+ }
+ return false
+ }
+
+ genstatus := func(cs *CallSite) string {
+ hairyval := cs.Callee.Inl.Cost
+ bud, isPGO := budgetCallback(cs.Callee, profile)
+ score := int32(cs.Score)
+ st := "---"
+ expinl := false
+ switch {
+ case hairyval <= bud && score <= bud:
+ // "Normal" inlined case: hairy val sufficiently low that
+ // it would have been inlined anyway without heuristics.
+ expinl = true
+ case hairyval > bud && score > bud:
+ // "Normal" not inlined case: hairy val sufficiently high
+ // and scoring didn't lower it.
+ case hairyval > bud && score <= bud:
+ // Promoted: we would not have inlined it before, but
+ // after score adjustment we decided to inline.
+ st = "PROMOTED"
+ expinl = true
+ case hairyval <= bud && score > bud:
+ // Demoted: we would have inlined it before, but after
+ // score adjustment we decided not to inline.
+ st = "DEMOTED"
+ }
+ inlined := cs.aux&csAuxInlined != 0
+ indprom := false
+ if cs.parent != nil {
+ indprom = indirectlyDueToPromotion(cs.parent)
+ }
+ if inlined && indprom {
+ st += "|INDPROM"
+ }
+ if inlined && !expinl {
+ st += "|[NI?]"
+ } else if !inlined && expinl {
+ st += "|[IN?]"
+ }
+ if isPGO {
+ st += "|PGO"
+ }
+ return st
+ }
+
+ if base.Debug.DumpInlCallSiteScores != 0 {
+ var sl []*CallSite
+ for _, cs := range allCallSites {
+ sl = append(sl, cs)
+ }
+ sort.Slice(sl, func(i, j int) bool {
+ if sl[i].Score != sl[j].Score {
+ return sl[i].Score < sl[j].Score
+ }
+ fni := ir.PkgFuncName(sl[i].Callee)
+ fnj := ir.PkgFuncName(sl[j].Callee)
+ if fni != fnj {
+ return fni < fnj
+ }
+ ecsi := EncodeCallSiteKey(sl[i])
+ ecsj := EncodeCallSiteKey(sl[j])
+ return ecsi < ecsj
+ })
+
+ mkname := func(fn *ir.Func) string {
+ var n string
+ if fn == nil || fn.Nname == nil {
+ return "<nil>"
+ }
+ if fn.Sym().Pkg == types.LocalPkg {
+ n = "·" + fn.Sym().Name
+ } else {
+ n = ir.PkgFuncName(fn)
+ }
+ // don't try to print super-long names
+ if len(n) <= 64 {
+ return n
+ }
+ return n[:32] + "..." + n[len(n)-32:len(n)]
+ }
+
+ if len(sl) != 0 {
+ fmt.Fprintf(os.Stdout, "# scores for package %s\n", types.LocalPkg.Path)
+ fmt.Fprintf(os.Stdout, "# Score Adjustment Status Callee CallerPos Flags ScoreFlags\n")
+ }
+ for _, cs := range sl {
+ hairyval := cs.Callee.Inl.Cost
+ adj := int32(cs.Score) - hairyval
+ nm := mkname(cs.Callee)
+ ecc := EncodeCallSiteKey(cs)
+ fmt.Fprintf(os.Stdout, "%d %d\t%s\t%s\t%s\t%s\n",
+ cs.Score, adj, genstatus(cs),
+ nm, ecc,
+ cs.ScoreMask.String())
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/serialize.go b/src/cmd/compile/internal/inline/inlheur/serialize.go
new file mode 100644
index 0000000..d650626
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/serialize.go
@@ -0,0 +1,80 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import "strings"
+
+func (funcProps *FuncProps) SerializeToString() string {
+ if funcProps == nil {
+ return ""
+ }
+ var sb strings.Builder
+ writeUleb128(&sb, uint64(funcProps.Flags))
+ writeUleb128(&sb, uint64(len(funcProps.ParamFlags)))
+ for _, pf := range funcProps.ParamFlags {
+ writeUleb128(&sb, uint64(pf))
+ }
+ writeUleb128(&sb, uint64(len(funcProps.ResultFlags)))
+ for _, rf := range funcProps.ResultFlags {
+ writeUleb128(&sb, uint64(rf))
+ }
+ return sb.String()
+}
+
+func DeserializeFromString(s string) *FuncProps {
+ if len(s) == 0 {
+ return nil
+ }
+ var funcProps FuncProps
+ var v uint64
+ sl := []byte(s)
+ v, sl = readULEB128(sl)
+ funcProps.Flags = FuncPropBits(v)
+ v, sl = readULEB128(sl)
+ funcProps.ParamFlags = make([]ParamPropBits, v)
+ for i := range funcProps.ParamFlags {
+ v, sl = readULEB128(sl)
+ funcProps.ParamFlags[i] = ParamPropBits(v)
+ }
+ v, sl = readULEB128(sl)
+ funcProps.ResultFlags = make([]ResultPropBits, v)
+ for i := range funcProps.ResultFlags {
+ v, sl = readULEB128(sl)
+ funcProps.ResultFlags[i] = ResultPropBits(v)
+ }
+ return &funcProps
+}
+
+func readULEB128(sl []byte) (value uint64, rsl []byte) {
+ var shift uint
+
+ for {
+ b := sl[0]
+ sl = sl[1:]
+ value |= (uint64(b&0x7F) << shift)
+ if b&0x80 == 0 {
+ break
+ }
+ shift += 7
+ }
+ return value, sl
+}
+
+func writeUleb128(sb *strings.Builder, v uint64) {
+ if v < 128 {
+ sb.WriteByte(uint8(v))
+ return
+ }
+ more := true
+ for more {
+ c := uint8(v & 0x7f)
+ v >>= 7
+ more = v != 0
+ if more {
+ c |= 0x80
+ }
+ sb.WriteByte(c)
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go b/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go
new file mode 100644
index 0000000..6f2f760
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dumpscores
+
+var G int
+
+func inlinable(x int, f func(int) int) int {
+ if x != 0 {
+ return 1
+ }
+ G += noninl(x)
+ return f(x)
+}
+
+func inlinable2(x int) int {
+ return noninl(-x)
+}
+
+//go:noinline
+func noninl(x int) int {
+ return x + 1
+}
+
+func tooLargeToInline(x int) int {
+ if x > 101 {
+ // Drive up the cost of inlining this func over the
+ // regular threshold.
+ return big(big(big(big(big(G + x)))))
+ }
+ if x < 100 {
+ // make sure this callsite is scored properly
+ G += inlinable(101, inlinable2)
+ if G == 101 {
+ return 0
+ }
+ panic(inlinable2(3))
+ }
+ return G
+}
+
+func big(q int) int {
+ return noninl(q) + noninl(-q)
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt b/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt
new file mode 100644
index 0000000..af5ebec
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt
@@ -0,0 +1,77 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+Notes on the format of the testcase files in
+cmd/compile/internal/inline/inlheur/testdata/props:
+
+- each (compilable) file contains input Go code and expected results
+ in the form of column-0 comments.
+
+- functions or methods that begin with "T_" are targeted for testing,
+ as well as "init" functions; all other functions are ignored.
+
+- function header comments begin with a line containing
+ the file name, function name, definition line, then index
+ and a count of the number of funcs that share that same
+ definition line (needed to support generics). Example:
+
+ // foo.go T_mumble 35 1 4
+
+ Here "T_mumble" is defined at line 35, and it is func 0
+ out of the 4 funcs that share that same line.
+
+- function property expected results appear as comments in immediately
+ prior to the function. For example, here we have first the function
+ name ("T_feeds_if_simple"), then human-readable dump of the function
+ properties, as well as the JSON for the properties object, each
+ section separated by a "<>" delimiter.
+
+ // params.go T_feeds_if_simple 35 0 1
+ // RecvrParamFlags:
+ // 0: ParamFeedsIfOrSwitch
+ // <endpropsdump>
+ // {"Flags":0,"RecvrParamFlags":[8],"ReturnFlags":[]}
+ // callsite: params.go:34:10|0 "CallSiteOnPanicPath" 2
+ // <endcallsites>
+ // <endfuncpreamble>
+ func T_feeds_if_simple(x int) {
+ if x < 100 {
+ os.Exit(1)
+ }
+ println(x)
+ }
+
+- when the test runs, it will compile the Go source file with an
+ option to dump out function properties, then compare the new dump
+ for each function with the JSON appearing in the header comment for
+ the function (in the example above, the JSON appears between
+ "<endpropsdump>" and "<endfuncpreamble>". The material prior to the
+ dump is simply there for human consumption, so that a developer can
+ easily see that "RecvrParamFlags":[8] means that the first parameter
+ has flag ParamFeedsIfOrSwitch.
+
+- when making changes to the compiler (which can alter the expected
+ results) or edits/additions to the go code in the testcase files,
+ you can remaster the results by running
+
+ go test -v -count=1 .
+
+ In the trace output of this run, you'll see messages of the form
+
+ === RUN TestFuncProperties
+ funcprops_test.go:NNN: update-expected: emitted updated file
+ testdata/props/XYZ.go.new
+ funcprops_test.go:MMM: please compare the two files, then overwrite
+ testdata/props/XYZ.go with testdata/props/XYZ.go.new
+
+ at which point you can compare the old and new files by hand, then
+ overwrite the *.go file with the *.go.new file if you are happy with
+ the diffs.
+
+- note that the remastering process will strip out any existing
+ column-0 (unindented) comments; if you write comments that you
+ want to see preserved, use "/* */" or indent them.
+
+
+
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go
new file mode 100644
index 0000000..a8166fd
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go
@@ -0,0 +1,214 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package params
+
+// acrosscall.go T_feeds_indirect_call_via_call_toplevel 19 0 1
+// ParamFlags
+// 0 ParamFeedsIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[8],"ResultFlags":null}
+// callsite: acrosscall.go:20:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indirect_call_via_call_toplevel(f func(int)) {
+ callsparam(f)
+}
+
+// acrosscall.go T_feeds_indirect_call_via_call_conditional 31 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// callsite: acrosscall.go:33:13|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indirect_call_via_call_conditional(f func(int)) {
+ if G != 101 {
+ callsparam(f)
+ }
+}
+
+// acrosscall.go T_feeds_conditional_indirect_call_via_call_toplevel 45 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// callsite: acrosscall.go:46:23|0 flagstr "" flagval 0 score 64 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_conditional_indirect_call_via_call_toplevel(f func(int)) {
+ callsparamconditional(f)
+}
+
+// acrosscall.go T_feeds_if_via_call 57 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// callsite: acrosscall.go:58:9|0 flagstr "" flagval 0 score 8 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_via_call(x int) {
+ feedsif(x)
+}
+
+// acrosscall.go T_feeds_if_via_call_conditional 69 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64],"ResultFlags":null}
+// callsite: acrosscall.go:71:10|0 flagstr "" flagval 0 score 8 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_via_call_conditional(x int) {
+ if G != 101 {
+ feedsif(x)
+ }
+}
+
+// acrosscall.go T_feeds_conditional_if_via_call 83 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64],"ResultFlags":null}
+// callsite: acrosscall.go:84:20|0 flagstr "" flagval 0 score 12 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_conditional_if_via_call(x int) {
+ feedsifconditional(x)
+}
+
+// acrosscall.go T_multifeeds1 97 0 1
+// ParamFlags
+// 0 ParamFeedsIndirectCall|ParamMayFeedIndirectCall
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[24,0],"ResultFlags":null}
+// callsite: acrosscall.go:98:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr ""
+// callsite: acrosscall.go:99:23|1 flagstr "" flagval 0 score 64 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_multifeeds1(f1, f2 func(int)) {
+ callsparam(f1)
+ callsparamconditional(f1)
+}
+
+// acrosscall.go T_acrosscall_returnsconstant 110 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]}
+// callsite: acrosscall.go:111:24|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnsconstant() int {
+ return returnsconstant()
+}
+
+// acrosscall.go T_acrosscall_returnsmem 122 0 1
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]}
+// callsite: acrosscall.go:123:19|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnsmem() *int {
+ return returnsmem()
+}
+
+// acrosscall.go T_acrosscall_returnscci 134 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[4]}
+// callsite: acrosscall.go:135:19|0 flagstr "" flagval 0 score 7 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_returnscci() I {
+ return returnscci()
+}
+
+// acrosscall.go T_acrosscall_multiret 144 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: acrosscall.go:146:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_multiret(q int) int {
+ if q != G {
+ return returnsconstant()
+ }
+ return 0
+}
+
+// acrosscall.go T_acrosscall_multiret2 158 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: acrosscall.go:160:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: acrosscall.go:162:25|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_acrosscall_multiret2(q int) int {
+ if q == G {
+ return returnsconstant()
+ } else {
+ return returnsconstant()
+ }
+}
+
+func callsparam(f func(int)) {
+ f(2)
+}
+
+func callsparamconditional(f func(int)) {
+ if G != 101 {
+ f(2)
+ }
+}
+
+func feedsif(x int) int {
+ if x != 101 {
+ return 42
+ }
+ return 43
+}
+
+func feedsifconditional(x int) int {
+ if G != 101 {
+ if x != 101 {
+ return 42
+ }
+ }
+ return 43
+}
+
+func returnsconstant() int {
+ return 42
+}
+
+func returnsmem() *int {
+ return new(int)
+}
+
+func returnscci() I {
+ var q Q
+ return q
+}
+
+type I interface {
+ Foo()
+}
+
+type Q int
+
+func (q Q) Foo() {
+}
+
+var G int
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go
new file mode 100644
index 0000000..5cc217b
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go
@@ -0,0 +1,240 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package calls
+
+import "os"
+
+// calls.go T_call_in_panic_arg 19 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: calls.go:21:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_call_in_panic_arg(x int) {
+ if x < G {
+ panic(callee(x))
+ }
+}
+
+// calls.go T_calls_in_loops 32 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:34:9|0 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj"
+// callsite: calls.go:37:9|1 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_in_loops(x int, q []string) {
+ for i := 0; i < x; i++ {
+ callee(i)
+ }
+ for _, s := range q {
+ callee(len(s))
+ }
+}
+
+// calls.go T_calls_in_pseudo_loop 48 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:50:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:54:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_in_pseudo_loop(x int, q []string) {
+ for i := 0; i < x; i++ {
+ callee(i)
+ return
+ }
+ for _, s := range q {
+ callee(len(s))
+ break
+ }
+}
+
+// calls.go T_calls_on_panic_paths 67 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: calls.go:69:9|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:73:9|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:77:12|2 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_on_panic_paths(x int, q []string) {
+ if x+G == 101 {
+ callee(x)
+ panic("ouch")
+ }
+ if x < G-101 {
+ callee(x)
+ if len(q) == 0 {
+ G++
+ }
+ callsexit(x)
+ }
+}
+
+// calls.go T_calls_not_on_panic_paths 93 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch|ParamMayFeedIfOrSwitch
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[96,0],"ResultFlags":null}
+// callsite: calls.go:103:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:112:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:115:9|2 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// callsite: calls.go:119:12|3 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_not_on_panic_paths(x int, q []string) {
+ if x != G {
+ panic("ouch")
+ /* Notes: */
+ /* - we only look for post-dominating panic/exit, so */
+ /* this site will on fact not have a panicpath flag */
+ /* - vet will complain about this site as unreachable */
+ callee(x)
+ }
+ if x != G {
+ callee(x)
+ if x < 100 {
+ panic("ouch")
+ }
+ }
+ if x+G == 101 {
+ if x < 100 {
+ panic("ouch")
+ }
+ callee(x)
+ }
+ if x < -101 {
+ callee(x)
+ if len(q) == 0 {
+ return
+ }
+ callsexit(x)
+ }
+}
+
+// calls.go init.0 129 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: calls.go:130:16|0 flagstr "CallSiteInInitFunc" flagval 4 score 22 mask 2 maskstr "initFuncAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func init() {
+ println(callee(5))
+}
+
+// calls.go T_pass_inlinable_func_to_param_feeding_indirect_call 140 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: calls.go:141:19|0 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj"
+// callsite: calls.go:141:19|calls.go:232:10|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_inlinable_func_to_param_feeding_indirect_call(x int) int {
+ return callsParam(x, callee)
+}
+
+// calls.go T_pass_noninlinable_func_to_param_feeding_indirect_call 150 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: calls.go:153:19|0 flagstr "" flagval 0 score 36 mask 128 maskstr "passFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_noninlinable_func_to_param_feeding_indirect_call(x int) int {
+ // if we inline callsParam we can convert the indirect call
+ // to a direct call, but we can't inline it.
+ return callsParam(x, calleeNoInline)
+}
+
+// calls.go T_pass_inlinable_func_to_param_feeding_nested_indirect_call 165 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// callsite: calls.go:166:25|0 flagstr "" flagval 0 score 27 mask 1024 maskstr "passInlinableFuncToNestedIndCallAdj"
+// callsite: calls.go:166:25|calls.go:237:11|0 flagstr "" flagval 0 score 2 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_inlinable_func_to_param_feeding_nested_indirect_call(x int) int {
+ return callsParamNested(x, callee)
+}
+
+// calls.go T_pass_noninlinable_func_to_param_feeding_nested_indirect_call 177 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// callsite: calls.go:178:25|0 flagstr "" flagval 0 score 47 mask 256 maskstr "passFuncToNestedIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_pass_noninlinable_func_to_param_feeding_nested_indirect_call(x int) int {
+ return callsParamNested(x, calleeNoInline)
+}
+
+// calls.go T_call_scoring_in_noninlinable_func 195 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// callsite: calls.go:209:14|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:210:15|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj"
+// callsite: calls.go:212:19|2 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj"
+// callsite: calls.go:212:19|calls.go:232:10|0 flagstr "" flagval 0 score 4 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+// calls.go T_call_scoring_in_noninlinable_func.func1 212 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_call_scoring_in_noninlinable_func(x int, sl []int) int {
+ if x == 101 {
+ // Drive up the cost of inlining this funcfunc over the
+ // regular threshold.
+ for i := 0; i < 10; i++ {
+ for j := 0; j < i; j++ {
+ sl = append(sl, append(sl, append(sl, append(sl, x)...)...)...)
+ sl = append(sl, sl[0], sl[1], sl[2])
+ x += calleeNoInline(x)
+ }
+ }
+ }
+ if x < 100 {
+ // make sure this callsite is scored properly
+ G += callee(101)
+ panic(callee(x))
+ }
+ return callsParam(x, func(y int) int { return y + x })
+}
+
+var G int
+
+func callee(x int) int {
+ return x
+}
+
+func calleeNoInline(x int) int {
+ defer func() { G++ }()
+ return x
+}
+
+func callsexit(x int) {
+ println(x)
+ os.Exit(x)
+}
+
+func callsParam(x int, f func(int) int) int {
+ return f(x)
+}
+
+func callsParamNested(x int, f func(int) int) int {
+ if x < 0 {
+ return f(x)
+ }
+ return 0
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go
new file mode 100644
index 0000000..f3d7424
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go
@@ -0,0 +1,341 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package funcflags
+
+import "os"
+
+// funcflags.go T_simple 20 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":null,"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_simple() {
+ panic("bad")
+}
+
+// funcflags.go T_nested 32 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_nested(x int) {
+ if x < 10 {
+ panic("bad")
+ } else {
+ panic("good")
+ }
+}
+
+// funcflags.go T_block1 46 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_block1(x int) {
+ panic("bad")
+ if x < 10 {
+ return
+ }
+}
+
+// funcflags.go T_block2 60 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_block2(x int) {
+ if x < 10 {
+ return
+ }
+ panic("bad")
+}
+
+// funcflags.go T_switches1 75 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches1(x int) {
+ switch x {
+ case 1:
+ panic("one")
+ case 2:
+ panic("two")
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_switches1a 92 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches1a(x int) {
+ switch x {
+ case 2:
+ panic("two")
+ }
+}
+
+// funcflags.go T_switches2 106 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches2(x int) {
+ switch x {
+ case 1:
+ panic("one")
+ case 2:
+ panic("two")
+ default:
+ return
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_switches3 123 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches3(x interface{}) {
+ switch x.(type) {
+ case bool:
+ panic("one")
+ case float32:
+ panic("two")
+ }
+}
+
+// funcflags.go T_switches4 138 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_switches4(x int) {
+ switch x {
+ case 1:
+ x++
+ fallthrough
+ case 2:
+ panic("two")
+ fallthrough
+ default:
+ panic("bad")
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_recov 157 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_recov(x int) {
+ if x := recover(); x != nil {
+ panic(x)
+ }
+}
+
+// funcflags.go T_forloops1 169 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops1(x int) {
+ for {
+ panic("wokketa")
+ }
+}
+
+// funcflags.go T_forloops2 180 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops2(x int) {
+ for {
+ println("blah")
+ if true {
+ break
+ }
+ panic("warg")
+ }
+}
+
+// funcflags.go T_forloops3 195 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_forloops3(x int) {
+ for i := 0; i < 101; i++ {
+ println("blah")
+ if true {
+ continue
+ }
+ panic("plark")
+ }
+ for i := range [10]int{} {
+ println(i)
+ panic("plark")
+ }
+ panic("whatev")
+}
+
+// funcflags.go T_hasgotos 215 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_hasgotos(x int, y int) {
+ {
+ xx := x
+ panic("bad")
+ lab1:
+ goto lab2
+ lab2:
+ if false {
+ goto lab1
+ } else {
+ goto lab4
+ }
+ lab4:
+ if xx < y {
+ lab3:
+ if false {
+ goto lab3
+ }
+ }
+ println(9)
+ }
+}
+
+// funcflags.go T_break_with_label 246 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_break_with_label(x int, y int) {
+ // presence of break with label should pessimize this func
+ // (similar to goto).
+ panic("bad")
+lab1:
+ for {
+ println("blah")
+ if x < 0 {
+ break lab1
+ }
+ panic("hubba")
+ }
+}
+
+// funcflags.go T_callsexit 268 0 1
+// Flags FuncPropNeverReturns
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_callsexit(x int) {
+ if x < 0 {
+ os.Exit(1)
+ }
+ os.Exit(2)
+}
+
+// funcflags.go T_exitinexpr 281 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: funcflags.go:286:18|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_exitinexpr(x int) {
+ // This function does indeed unconditionally call exit, since the
+ // first thing it does is invoke exprcallsexit, however from the
+ // perspective of this function, the call is not at the statement
+ // level, so we'll wind up missing it.
+ if exprcallsexit(x) < 0 {
+ println("foo")
+ }
+}
+
+// funcflags.go T_select_noreturn 297 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0,0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_select_noreturn(chi chan int, chf chan float32, p *int) {
+ rv := 0
+ select {
+ case i := <-chi:
+ rv = i
+ case f := <-chf:
+ rv = int(f)
+ }
+ *p = rv
+ panic("bad")
+}
+
+// funcflags.go T_select_mayreturn 314 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_select_mayreturn(chi chan int, chf chan float32, p *int) int {
+ rv := 0
+ select {
+ case i := <-chi:
+ rv = i
+ return i
+ case f := <-chf:
+ rv = int(f)
+ }
+ *p = rv
+ panic("bad")
+}
+
+// funcflags.go T_calls_callsexit 334 0 1
+// Flags FuncPropNeverReturns
+// <endpropsdump>
+// {"Flags":1,"ParamFlags":[0],"ResultFlags":null}
+// callsite: funcflags.go:335:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_calls_callsexit(x int) {
+ exprcallsexit(x)
+}
+
+func exprcallsexit(x int) int {
+ os.Exit(x)
+ return x
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go
new file mode 100644
index 0000000..1a3073c
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go
@@ -0,0 +1,367 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+package params
+
+import "os"
+
+// params.go T_feeds_if_simple 20 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_simple(x int) {
+ if x < 100 {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_nested 35 0 1
+// ParamFlags
+// 0 ParamMayFeedIfOrSwitch
+// 1 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[64,32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_nested(x, y int) {
+ if y != 0 {
+ if x < 100 {
+ os.Exit(1)
+ }
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_pointer 51 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_pointer(xp *int) {
+ if xp != nil {
+ os.Exit(1)
+ }
+ println(xp)
+}
+
+// params.go T.T_feeds_if_simple_method 66 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// 1 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32,32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func (r T) T_feeds_if_simple_method(x int) {
+ if x < 100 {
+ os.Exit(1)
+ }
+ if r != 99 {
+ os.Exit(2)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_blanks 86 0 1
+// ParamFlags
+// 0 ParamNoInfo
+// 1 ParamFeedsIfOrSwitch
+// 2 ParamNoInfo
+// 3 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,32,0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_blanks(_ string, x int, _ bool, _ bool) {
+ // blanks ignored; from a props perspective "x" is param 0
+ if x < 100 {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_with_copy 101 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_with_copy(x int) {
+ // simple copy here -- we get this case
+ xx := x
+ if xx < 100 {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_with_copy_expr 115 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_with_copy_expr(x int) {
+ // this case (copy of expression) currently not handled.
+ xx := x < 100
+ if xx {
+ os.Exit(1)
+ }
+ println(x)
+}
+
+// params.go T_feeds_switch 131 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_switch(x int) {
+ switch x {
+ case 101:
+ println(101)
+ case 202:
+ panic("bad")
+ }
+ println(x)
+}
+
+// params.go T_feeds_if_toocomplex 146 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_toocomplex(x int, y int) {
+ // not handled at the moment; we only look for cases where
+ // an "if" or "switch" can be simplified based on a single
+ // constant param, not a combination of constant params.
+ if x < y {
+ panic("bad")
+ }
+ println(x + y)
+}
+
+// params.go T_feeds_if_redefined 161 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined(x int) {
+ if x < G {
+ x++
+ }
+ if x == 101 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_redefined2 175 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined2(x int) {
+ // this currently classifies "x" as "no info", since the analysis we
+ // use to check for reassignments/redefinitions is not flow-sensitive,
+ // but we could probably catch this case with better analysis or
+ // high-level SSA.
+ if x == 101 {
+ panic("bad")
+ }
+ if x < G {
+ x++
+ }
+}
+
+// params.go T_feeds_multi_if 196 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// 1 ParamNoInfo
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32,0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_multi_if(x int, y int) {
+ // Here we have one "if" that is too complex (x < y) but one that is
+ // simple enough. Currently we enable the heuristic for this. It's
+ // possible to imagine this being a bad thing if the function in
+ // question is sufficiently large, but if it's too large we probably
+ // can't inline it anyhow.
+ if x < y {
+ panic("bad")
+ }
+ if x < 10 {
+ panic("whatev")
+ }
+ println(x + y)
+}
+
+// params.go T_feeds_if_redefined_indirectwrite 216 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined_indirectwrite(x int) {
+ ax := &x
+ if G != 2 {
+ *ax = G
+ }
+ if x == 101 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_redefined_indirectwrite_copy 231 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_redefined_indirectwrite_copy(x int) {
+ // we don't catch this case, "x" is marked as no info,
+ // since we're conservative about redefinitions.
+ ax := &x
+ cx := x
+ if G != 2 {
+ *ax = G
+ }
+ if cx == 101 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_expr1 251 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr1(x int) {
+ if x == 101 || x == 102 || x&0xf == 0 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_expr2 262 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr2(x int) {
+ if (x*x)-(x+x)%x == 101 || x&0xf == 0 {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_expr3 273 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_expr3(x int) {
+ if x-(x&0x1)^378 > (1 - G) {
+ panic("bad")
+ }
+}
+
+// params.go T_feeds_if_shift_may_panic 284 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_shift_may_panic(x int) *int {
+ // here if "x" is a constant like 2, we could simplify the "if",
+ // but if we were to pass in a negative value for "x" we can't
+ // fold the condition due to the need to panic on negative shift.
+ if 1<<x > 1024 {
+ return nil
+ }
+ return &G
+}
+
+// params.go T_feeds_if_maybe_divide_by_zero 299 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_if_maybe_divide_by_zero(x int) {
+ if 99/x == 3 {
+ return
+ }
+ println("blarg")
+}
+
+// params.go T_feeds_indcall 313 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[16],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall(x func()) {
+ if G != 20 {
+ x()
+ }
+}
+
+// params.go T_feeds_indcall_and_if 326 0 1
+// ParamFlags
+// 0 ParamMayFeedIndirectCall|ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[48],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall_and_if(x func()) {
+ if x != nil {
+ x()
+ }
+}
+
+// params.go T_feeds_indcall_with_copy 339 0 1
+// ParamFlags
+// 0 ParamFeedsIndirectCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[8],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_indcall_with_copy(x func()) {
+ xx := x
+ if G < 10 {
+ G--
+ }
+ xx()
+}
+
+// params.go T_feeds_interface_method_call 354 0 1
+// ParamFlags
+// 0 ParamFeedsInterfaceMethodCall
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[2],"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_feeds_interface_method_call(i I) {
+ i.Blarg()
+}
+
+var G int
+
+type T int
+
+type I interface {
+ Blarg()
+}
+
+func (r T) Blarg() {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go
new file mode 100644
index 0000000..51f2bc7
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go
@@ -0,0 +1,370 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package returns1
+
+import "unsafe"
+
+// returns.go T_simple_allocmem 21 0 1
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_simple_allocmem() *Bar {
+ return &Bar{}
+}
+
+// returns.go T_allocmem_two_returns 34 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_allocmem_two_returns(x int) *Bar {
+ // multiple returns
+ if x < 0 {
+ return new(Bar)
+ } else {
+ return &Bar{x: 2}
+ }
+}
+
+// returns.go T_allocmem_three_returns 52 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_allocmem_three_returns(x int) []*Bar {
+ // more multiple returns
+ switch x {
+ case 10, 11, 12:
+ return make([]*Bar, 10)
+ case 13:
+ fallthrough
+ case 15:
+ return []*Bar{&Bar{x: 15}}
+ }
+ return make([]*Bar, 0, 10)
+}
+
+// returns.go T_return_nil 72 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_nil() *Bar {
+ // simple case: no alloc
+ return nil
+}
+
+// returns.go T_multi_return_nil 84 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_nil(x, y bool) *Bar {
+ if x && y {
+ return nil
+ }
+ return nil
+}
+
+// returns.go T_multi_return_nil_anomoly 98 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_nil_anomoly(x, y bool) Itf {
+ if x && y {
+ var qnil *Q
+ return qnil
+ }
+ var barnil *Bar
+ return barnil
+}
+
+// returns.go T_multi_return_some_nil 112 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_some_nil(x, y bool) *Bar {
+ if x && y {
+ return nil
+ } else {
+ return &GB
+ }
+}
+
+// returns.go T_mixed_returns 127 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_mixed_returns(x int) *Bar {
+ // mix of alloc and non-alloc
+ if x < 0 {
+ return new(Bar)
+ } else {
+ return &GB
+ }
+}
+
+// returns.go T_mixed_returns_slice 143 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_mixed_returns_slice(x int) []*Bar {
+ // mix of alloc and non-alloc
+ switch x {
+ case 10, 11, 12:
+ return make([]*Bar, 10)
+ case 13:
+ fallthrough
+ case 15:
+ return []*Bar{&Bar{x: 15}}
+ }
+ ba := [...]*Bar{&GB, &GB}
+ return ba[:]
+}
+
+// returns.go T_maps_and_channels 167 0 1
+// ResultFlags
+// 0 ResultNoInfo
+// 1 ResultNoInfo
+// 2 ResultNoInfo
+// 3 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0,0,0,8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_maps_and_channels(x int, b bool) (bool, map[int]int, chan bool, unsafe.Pointer) {
+ // maps and channels
+ return b, make(map[int]int), make(chan bool), nil
+}
+
+// returns.go T_assignment_to_named_returns 179 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0,0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_assignment_to_named_returns(x int) (r1 *uint64, r2 *uint64) {
+ // assignments to named returns and then "return" not supported
+ r1 = new(uint64)
+ if x < 1 {
+ *r1 = 2
+ }
+ r2 = new(uint64)
+ return
+}
+
+// returns.go T_named_returns_but_return_explicit_values 199 0 1
+// ParamFlags
+// 0 ParamFeedsIfOrSwitch
+// ResultFlags
+// 0 ResultIsAllocatedMem
+// 1 ResultIsAllocatedMem
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2,2]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_named_returns_but_return_explicit_values(x int) (r1 *uint64, r2 *uint64) {
+ // named returns ok if all returns are non-empty
+ rx1 := new(uint64)
+ if x < 1 {
+ *rx1 = 2
+ }
+ rx2 := new(uint64)
+ return rx1, rx2
+}
+
+// returns.go T_return_concrete_type_to_itf 216 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itf(x, y int) Itf {
+ return &Bar{}
+}
+
+// returns.go T_return_concrete_type_to_itfwith_copy 227 0 1
+// ResultFlags
+// 0 ResultIsConcreteTypeConvertedToInterface
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itfwith_copy(x, y int) Itf {
+ b := &Bar{}
+ println("whee")
+ return b
+}
+
+// returns.go T_return_concrete_type_to_itf_mixed 238 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_concrete_type_to_itf_mixed(x, y int) Itf {
+ if x < y {
+ b := &Bar{}
+ return b
+ }
+ return nil
+}
+
+// returns.go T_return_same_func 253 0 1
+// ResultFlags
+// 0 ResultAlwaysSameInlinableFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_same_func() func(int) int {
+ if G < 10 {
+ return foo
+ } else {
+ return foo
+ }
+}
+
+// returns.go T_return_different_funcs 266 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_different_funcs() func(int) int {
+ if G != 10 {
+ return foo
+ } else {
+ return bar
+ }
+}
+
+// returns.go T_return_same_closure 286 0 1
+// ResultFlags
+// 0 ResultAlwaysSameInlinableFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_same_closure.func1 287 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_same_closure() func(int) int {
+ p := func(q int) int { return q }
+ if G < 10 {
+ return p
+ } else {
+ return p
+ }
+}
+
+// returns.go T_return_different_closures 312 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_different_closures.func1 313 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_different_closures.func2 317 0 1
+// ResultFlags
+// 0 ResultAlwaysSameConstant
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[8]}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_different_closures() func(int) int {
+ p := func(q int) int { return q }
+ if G < 10 {
+ return p
+ } else {
+ return func(q int) int { return 101 }
+ }
+}
+
+// returns.go T_return_noninlinable 339 0 1
+// ResultFlags
+// 0 ResultAlwaysSameFunc
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[16]}
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_noninlinable.func1 340 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns.go:343:4|0 flagstr "" flagval 0 score 4 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+// returns.go T_return_noninlinable.func1.1 341 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_noninlinable(x int) func(int) int {
+ noti := func(q int) int {
+ defer func() {
+ println(q + x)
+ }()
+ return q
+ }
+ return noti
+}
+
+type Bar struct {
+ x int
+ y string
+}
+
+func (b *Bar) Plark() {
+}
+
+type Q int
+
+func (q *Q) Plark() {
+}
+
+func foo(x int) int { return x }
+func bar(x int) int { return -x }
+
+var G int
+var GB Bar
+
+type Itf interface {
+ Plark()
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go
new file mode 100644
index 0000000..7200926
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go
@@ -0,0 +1,231 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// DO NOT EDIT (use 'go test -v -update-expected' instead.)
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt
+// for more information on the format of this file.
+// <endfilepreamble>
+
+package returns2
+
+// returns2.go T_return_feeds_iface_call 18 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: returns2.go:19:13|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_feeds_iface_call() {
+ b := newBar(10)
+ b.Plark()
+}
+
+// returns2.go T_multi_return_feeds_iface_call 29 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":null,"ResultFlags":null}
+// callsite: returns2.go:30:20|0 flagstr "" flagval 0 score 3 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_iface_call() {
+ _, b, _ := newBar2(10)
+ b.Plark()
+}
+
+// returns2.go T_returned_inlinable_func_feeds_indirect_call 41 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:42:18|0 flagstr "" flagval 0 score -51 mask 8200 maskstr "passConstToIfAdj|returnFeedsInlinableFuncToIndCallAdj"
+// callsite: returns2.go:44:20|1 flagstr "" flagval 0 score -23 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_returned_inlinable_func_feeds_indirect_call(q int) {
+ f := returnsFunc(10)
+ f(q)
+ f2 := returnsFunc2()
+ f2(q)
+}
+
+// returns2.go T_returned_noninlineable_func_feeds_indirect_call 54 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:55:30|0 flagstr "" flagval 0 score -23 mask 4096 maskstr "returnFeedsFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_returned_noninlineable_func_feeds_indirect_call(q int) {
+ f := returnsNonInlinableFunc()
+ f(q)
+}
+
+// returns2.go T_multi_return_feeds_indirect_call 65 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":null}
+// callsite: returns2.go:66:29|0 flagstr "" flagval 0 score -21 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_indirect_call(q int) {
+ _, f, _ := multiReturnsFunc()
+ f(q)
+}
+
+// returns2.go T_return_feeds_ifswitch 76 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:77:14|0 flagstr "" flagval 0 score 10 mask 2048 maskstr "returnFeedsConstToIfAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_return_feeds_ifswitch(q int) int {
+ x := meaning(q)
+ if x < 42 {
+ switch x {
+ case 42:
+ return 1
+ }
+ }
+ return 0
+}
+
+// returns2.go T_multi_return_feeds_ifswitch 93 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:94:21|0 flagstr "" flagval 0 score 9 mask 2048 maskstr "returnFeedsConstToIfAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_multi_return_feeds_ifswitch(q int) int {
+ x, y, z := meanings(q)
+ if x < y {
+ switch x {
+ case 42:
+ return z
+ }
+ }
+ return 0
+}
+
+// returns2.go T_two_calls_feed_ifswitch 111 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]}
+// callsite: returns2.go:115:14|0 flagstr "" flagval 0 score 25 mask 0 maskstr ""
+// callsite: returns2.go:116:14|1 flagstr "" flagval 0 score 25 mask 0 maskstr ""
+// <endcallsites>
+// <endfuncpreamble>
+func T_two_calls_feed_ifswitch(q int) int {
+ // This case we don't handle; for the heuristic to kick in,
+ // all names in a given if/switch cond have to come from the
+ // same callsite
+ x := meaning(q)
+ y := meaning(-q)
+ if x < y {
+ switch x + y {
+ case 42:
+ return 1
+ }
+ }
+ return 0
+}
+
+// returns2.go T_chained_indirect_call 132 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: returns2.go:135:18|0 flagstr "" flagval 0 score -31 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_chained_indirect_call(x, y int) {
+ // Here 'returnsFunc' returns an inlinable func that feeds
+ // directly into a call (no named intermediate).
+ G += returnsFunc(x - y)(x + y)
+}
+
+// returns2.go T_chained_conc_iface_call 144 0 1
+// <endpropsdump>
+// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null}
+// callsite: returns2.go:148:8|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj"
+// <endcallsites>
+// <endfuncpreamble>
+func T_chained_conc_iface_call(x, y int) {
+ // Similar to the case above, return from call returning concrete type
+ // feeds directly into interface call. Note that only the first
+ // iface call is interesting here.
+ newBar(10).Plark().Plark()
+}
+
+func returnsFunc(x int) func(int) int {
+ if x < 0 {
+ G++
+ }
+ return adder
+}
+
+func returnsFunc2() func(int) int {
+ return func(x int) int {
+ return adder(x)
+ }
+}
+
+func returnsNonInlinableFunc() func(int) int {
+ return adderNoInline
+}
+
+func multiReturnsFunc() (int, func(int) int, int) {
+ return 42, func(x int) int { G++; return 1 }, -42
+}
+
+func adder(x int) int {
+ G += 1
+ return G
+}
+
+func adderNoInline(x int) int {
+ defer func() { G += x }()
+ G += 1
+ return G
+}
+
+func meaning(q int) int {
+ r := 0
+ for i := 0; i < 42; i++ {
+ r += q
+ }
+ G += r
+ return 42
+}
+
+func meanings(q int) (int, int, int) {
+ r := 0
+ for i := 0; i < 42; i++ {
+ r += q
+ }
+ return 42, 43, r
+}
+
+type Bar struct {
+ x int
+ y string
+}
+
+func (b *Bar) Plark() Itf {
+ return b
+}
+
+type Itf interface {
+ Plark() Itf
+}
+
+func newBar(x int) Itf {
+ s := 0
+ for i := 0; i < x; i++ {
+ s += i
+ }
+ return &Bar{
+ x: s,
+ }
+}
+
+func newBar2(x int) (int, Itf, bool) {
+ s := 0
+ for i := 0; i < x; i++ {
+ s += i
+ }
+ return 0, &Bar{x: s}, false
+}
+
+var G int
diff --git a/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
new file mode 100644
index 0000000..587eab0
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go
@@ -0,0 +1,217 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "go/constant"
+ "testing"
+)
+
+var pos src.XPos
+var local *types.Pkg
+var f *ir.Func
+
+func init() {
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+ typecheck.InitUniverse()
+ local = types.NewPkg("", "")
+ fsym := &types.Sym{
+ Pkg: types.NewPkg("my/import/path", "path"),
+ Name: "function",
+ }
+ f = ir.NewFunc(src.NoXPos, src.NoXPos, fsym, nil)
+}
+
+type state struct {
+ ntab map[string]*ir.Name
+}
+
+func mkstate() *state {
+ return &state{
+ ntab: make(map[string]*ir.Name),
+ }
+}
+
+func bin(x ir.Node, op ir.Op, y ir.Node) ir.Node {
+ return ir.NewBinaryExpr(pos, op, x, y)
+}
+
+func conv(x ir.Node, t *types.Type) ir.Node {
+ return ir.NewConvExpr(pos, ir.OCONV, t, x)
+}
+
+func logical(x ir.Node, op ir.Op, y ir.Node) ir.Node {
+ return ir.NewLogicalExpr(pos, op, x, y)
+}
+
+func un(op ir.Op, x ir.Node) ir.Node {
+ return ir.NewUnaryExpr(pos, op, x)
+}
+
+func liti(i int64) ir.Node {
+ return ir.NewBasicLit(pos, types.Types[types.TINT64], constant.MakeInt64(i))
+}
+
+func lits(s string) ir.Node {
+ return ir.NewBasicLit(pos, types.Types[types.TSTRING], constant.MakeString(s))
+}
+
+func (s *state) nm(name string, t *types.Type) *ir.Name {
+ if n, ok := s.ntab[name]; ok {
+ if n.Type() != t {
+ panic("bad")
+ }
+ return n
+ }
+ sym := local.Lookup(name)
+ nn := ir.NewNameAt(pos, sym, t)
+ s.ntab[name] = nn
+ return nn
+}
+
+func (s *state) nmi64(name string) *ir.Name {
+ return s.nm(name, types.Types[types.TINT64])
+}
+
+func (s *state) nms(name string) *ir.Name {
+ return s.nm(name, types.Types[types.TSTRING])
+}
+
+func TestClassifyIntegerCompare(t *testing.T) {
+
+ // (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101)
+ s := mkstate()
+ nn := s.nmi64("n")
+ nlt10 := bin(nn, ir.OLT, liti(10)) // n < 10
+ ngt100 := bin(nn, ir.OGT, liti(100)) // n > 100
+ nge12 := bin(nn, ir.OGE, liti(12)) // n >= 12
+ nle99 := bin(nn, ir.OLE, liti(99)) // n < 10
+ nne101 := bin(nn, ir.ONE, liti(101)) // n != 101
+ noror1 := logical(nlt10, ir.OOROR, ngt100) // n < 10 || n > 100
+ noror2 := logical(nge12, ir.OOROR, nle99) // n >= 12 || n <= 99
+ noror3 := logical(noror2, ir.OOROR, nne101)
+ nandand := typecheck.Expr(logical(noror1, ir.OANDAND, noror3))
+
+ wantv := true
+ v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v)
+ }
+}
+
+func TestClassifyStringCompare(t *testing.T) {
+
+ // s != "foo" && s < "ooblek" && s > "plarkish"
+ s := mkstate()
+ nn := s.nms("s")
+ snefoo := bin(nn, ir.ONE, lits("foo")) // s != "foo"
+ sltoob := bin(nn, ir.OLT, lits("ooblek")) // s < "ooblek"
+ sgtpk := bin(nn, ir.OGT, lits("plarkish")) // s > "plarkish"
+ nandand := logical(snefoo, ir.OANDAND, sltoob)
+ top := typecheck.Expr(logical(nandand, ir.OANDAND, sgtpk))
+
+ wantv := true
+ v := ShouldFoldIfNameConstant(top, []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v)
+ }
+}
+
+func TestClassifyIntegerArith(t *testing.T) {
+ // n+1 ^ n-3 * n/2 + n<<9 + n>>2 - n&^7
+
+ s := mkstate()
+ nn := s.nmi64("n")
+ np1 := bin(nn, ir.OADD, liti(1)) // n+1
+ nm3 := bin(nn, ir.OSUB, liti(3)) // n-3
+ nd2 := bin(nn, ir.ODIV, liti(2)) // n/2
+ nls9 := bin(nn, ir.OLSH, liti(9)) // n<<9
+ nrs2 := bin(nn, ir.ORSH, liti(2)) // n>>2
+ nan7 := bin(nn, ir.OANDNOT, liti(7)) // n&^7
+ c1xor := bin(np1, ir.OXOR, nm3)
+ c2mul := bin(c1xor, ir.OMUL, nd2)
+ c3add := bin(c2mul, ir.OADD, nls9)
+ c4add := bin(c3add, ir.OADD, nrs2)
+ c5sub := bin(c4add, ir.OSUB, nan7)
+ top := typecheck.Expr(c5sub)
+
+ wantv := true
+ v := ShouldFoldIfNameConstant(top, []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v)
+ }
+}
+
+func TestClassifyAssortedShifts(t *testing.T) {
+
+ s := mkstate()
+ nn := s.nmi64("n")
+ badcases := []ir.Node{
+ bin(liti(3), ir.OLSH, nn), // 3<<n
+ bin(liti(7), ir.ORSH, nn), // 7>>n
+ }
+ for _, bc := range badcases {
+ wantv := false
+ v := ShouldFoldIfNameConstant(typecheck.Expr(bc), []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", bc, wantv, v)
+ }
+ }
+}
+
+func TestClassifyFloat(t *testing.T) {
+ // float32(n) + float32(10)
+ s := mkstate()
+ nn := s.nm("n", types.Types[types.TUINT32])
+ f1 := conv(nn, types.Types[types.TFLOAT32])
+ f2 := conv(liti(10), types.Types[types.TFLOAT32])
+ add := bin(f1, ir.OADD, f2)
+
+ wantv := false
+ v := ShouldFoldIfNameConstant(typecheck.Expr(add), []*ir.Name{nn})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", add, wantv, v)
+ }
+}
+
+func TestMultipleNamesAllUsed(t *testing.T) {
+ // n != 101 && m < 2
+ s := mkstate()
+ nn := s.nmi64("n")
+ nm := s.nmi64("m")
+ nne101 := bin(nn, ir.ONE, liti(101)) // n != 101
+ mlt2 := bin(nm, ir.OLT, liti(2)) // m < 2
+ nandand := typecheck.Expr(logical(nne101, ir.OANDAND, mlt2))
+
+ // all names used
+ wantv := true
+ v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn, nm})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v)
+ }
+
+ // not all names used
+ wantv = false
+ v = ShouldFoldIfNameConstant(nne101, []*ir.Name{nn, nm})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", nne101, wantv, v)
+ }
+
+ // other names used.
+ np := s.nmi64("p")
+ pne0 := bin(np, ir.ONE, liti(101)) // p != 0
+ noror := logical(nandand, ir.OOROR, pne0)
+ wantv = false
+ v = ShouldFoldIfNameConstant(noror, []*ir.Name{nn, nm})
+ if v != wantv {
+ t.Errorf("wanted shouldfold(%v) %v, got %v", noror, wantv, v)
+ }
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/trace_off.go b/src/cmd/compile/internal/inline/inlheur/trace_off.go
new file mode 100644
index 0000000..9eea7fa
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/trace_off.go
@@ -0,0 +1,18 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !debugtrace
+
+package inlheur
+
+const debugTrace = 0
+
+func enableDebugTrace(x int) {
+}
+
+func enableDebugTraceIfEnv() {
+}
+
+func disableDebugTrace() {
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/trace_on.go b/src/cmd/compile/internal/inline/inlheur/trace_on.go
new file mode 100644
index 0000000..1608429
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/trace_on.go
@@ -0,0 +1,40 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build debugtrace
+
+package inlheur
+
+import (
+ "os"
+ "strconv"
+)
+
+var debugTrace = 0
+
+func enableDebugTrace(x int) {
+ debugTrace = x
+}
+
+func enableDebugTraceIfEnv() {
+ v := os.Getenv("DEBUG_TRACE_INLHEUR")
+ if v == "" {
+ return
+ }
+ if v[0] == '*' {
+ if !UnitTesting() {
+ return
+ }
+ v = v[1:]
+ }
+ i, err := strconv.Atoi(v)
+ if err != nil {
+ return
+ }
+ debugTrace = i
+}
+
+func disableDebugTrace() {
+ debugTrace = 0
+}
diff --git a/src/cmd/compile/internal/inline/inlheur/tserial_test.go b/src/cmd/compile/internal/inline/inlheur/tserial_test.go
new file mode 100644
index 0000000..def12f5
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inlheur/tserial_test.go
@@ -0,0 +1,65 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inlheur
+
+import "testing"
+
+func fpeq(fp1, fp2 FuncProps) bool {
+ if fp1.Flags != fp2.Flags {
+ return false
+ }
+ if len(fp1.ParamFlags) != len(fp2.ParamFlags) {
+ return false
+ }
+ for i := range fp1.ParamFlags {
+ if fp1.ParamFlags[i] != fp2.ParamFlags[i] {
+ return false
+ }
+ }
+ if len(fp1.ResultFlags) != len(fp2.ResultFlags) {
+ return false
+ }
+ for i := range fp1.ResultFlags {
+ if fp1.ResultFlags[i] != fp2.ResultFlags[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestSerDeser(t *testing.T) {
+ testcases := []FuncProps{
+ FuncProps{},
+ FuncProps{
+ Flags: 0xfffff,
+ },
+ FuncProps{
+ Flags: 1,
+ ResultFlags: []ResultPropBits{ResultAlwaysSameConstant},
+ },
+ FuncProps{
+ Flags: 1,
+ ParamFlags: []ParamPropBits{0x99, 0xaa, 0xfffff},
+ ResultFlags: []ResultPropBits{0xfeedface},
+ },
+ }
+
+ for k, tc := range testcases {
+ s := tc.SerializeToString()
+ fp := DeserializeFromString(s)
+ got := fp.String()
+ want := tc.String()
+ if !fpeq(*fp, tc) {
+ t.Errorf("eq check failed for test %d: got:\n%s\nwant:\n%s\n", k, got, want)
+ }
+ }
+
+ var nilt *FuncProps
+ ns := nilt.SerializeToString()
+ nfp := DeserializeFromString(ns)
+ if len(ns) != 0 || nfp != nil {
+ t.Errorf("nil serialize/deserialize failed")
+ }
+}
diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go
new file mode 100644
index 0000000..a6f19d4
--- /dev/null
+++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go
@@ -0,0 +1,132 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package interleaved implements the interleaved devirtualization and
+// inlining pass.
+package interleaved
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/devirtualize"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/inline/inlheur"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "fmt"
+)
+
+// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on
+// all functions within pkg.
+func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgo.Profile) {
+ if profile != nil && base.Debug.PGODevirtualize > 0 {
+ // TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below.
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+ for _, fn := range list {
+ devirtualize.ProfileGuided(fn, profile)
+ }
+ })
+ ir.CurFunc = nil
+ }
+
+ if base.Flag.LowerL != 0 {
+ inlheur.SetupScoreAdjustments()
+ }
+
+ var inlProfile *pgo.Profile // copy of profile for inlining
+ if base.Debug.PGOInline != 0 {
+ inlProfile = profile
+ }
+ if inlProfile != nil {
+ inline.PGOInlinePrologue(inlProfile, pkg.Funcs)
+ }
+
+ ir.VisitFuncsBottomUp(pkg.Funcs, func(funcs []*ir.Func, recursive bool) {
+ // We visit functions within an SCC in fairly arbitrary order,
+ // so by computing inlinability for all functions in the SCC
+ // before performing any inlining, the results are less
+ // sensitive to the order within the SCC (see #58905 for an
+ // example).
+
+ // First compute inlinability for all functions in the SCC ...
+ inline.CanInlineSCC(funcs, recursive, inlProfile)
+
+ // ... then make a second pass to do devirtualization and inlining
+ // of calls.
+ for _, fn := range funcs {
+ DevirtualizeAndInlineFunc(fn, inlProfile)
+ }
+ })
+
+ if base.Flag.LowerL != 0 {
+ // Perform a garbage collection of hidden closures functions that
+ // are no longer reachable from top-level functions following
+ // inlining. See #59404 and #59638 for more context.
+ inline.GarbageCollectUnreferencedHiddenClosures()
+
+ if base.Debug.DumpInlFuncProps != "" {
+ inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps)
+ }
+ if inlheur.Enabled() {
+ inline.PostProcessCallSites(inlProfile)
+ inlheur.TearDown()
+ }
+ }
+}
+
+// DevirtualizeAndInlineFunc interleaves devirtualization and inlining
+// on a single function.
+func DevirtualizeAndInlineFunc(fn *ir.Func, profile *pgo.Profile) {
+ ir.WithFunc(fn, func() {
+ if base.Flag.LowerL != 0 {
+ if inlheur.Enabled() && !fn.Wrapper() {
+ inlheur.ScoreCalls(fn)
+ defer inlheur.ScoreCallsCleanup()
+ }
+ if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() {
+ inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps)
+ }
+ }
+
+ bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn)
+ if bigCaller && base.Flag.LowerM > 1 {
+ fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn)
+ }
+
+ // Walk fn's body and apply devirtualization and inlining.
+ var inlCalls []*ir.InlinedCallExpr
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ switch n := n.(type) {
+ case *ir.TailCallStmt:
+ n.Call.NoInline = true // can't inline yet
+ }
+
+ ir.EditChildren(n, edit)
+
+ if call, ok := n.(*ir.CallExpr); ok {
+ devirtualize.StaticCall(call)
+
+ if inlCall := inline.TryInlineCall(fn, call, bigCaller, profile); inlCall != nil {
+ inlCalls = append(inlCalls, inlCall)
+ n = inlCall
+ }
+ }
+
+ return n
+ }
+ ir.EditChildren(fn, edit)
+
+ // If we inlined any calls, we want to recursively visit their
+ // bodies for further devirtualization and inlining. However, we
+ // need to wait until *after* the original function body has been
+ // expanded, or else inlCallee can have false positives (e.g.,
+ // #54632).
+ for len(inlCalls) > 0 {
+ call := inlCalls[0]
+ inlCalls = inlCalls[1:]
+ ir.EditChildren(call, edit)
+ }
+ })
+}
diff --git a/src/cmd/compile/internal/ir/abi.go b/src/cmd/compile/internal/ir/abi.go
new file mode 100644
index 0000000..ebe0fbf
--- /dev/null
+++ b/src/cmd/compile/internal/ir/abi.go
@@ -0,0 +1,78 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/obj"
+)
+
+// InitLSym defines f's obj.LSym and initializes it based on the
+// properties of f. This includes setting the symbol flags and ABI and
+// creating and initializing related DWARF symbols.
+//
+// InitLSym must be called exactly once per function and must be
+// called for both functions with bodies and functions without bodies.
+// For body-less functions, we only create the LSym; for functions
+// with bodies call a helper to setup up / populate the LSym.
+func InitLSym(f *Func, hasBody bool) {
+ if f.LSym != nil {
+ base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f)
+ }
+
+ if nam := f.Nname; !IsBlank(nam) {
+ f.LSym = nam.LinksymABI(f.ABI)
+ if f.Pragma&Systemstack != 0 {
+ f.LSym.Set(obj.AttrCFunc, true)
+ }
+ }
+ if hasBody {
+ setupTextLSym(f, 0)
+ }
+}
+
+// setupTextLSym initializes the LSym for a with-body text symbol.
+func setupTextLSym(f *Func, flag int) {
+ if f.Dupok() {
+ flag |= obj.DUPOK
+ }
+ if f.Wrapper() {
+ flag |= obj.WRAPPER
+ }
+ if f.ABIWrapper() {
+ flag |= obj.ABIWRAPPER
+ }
+ if f.Needctxt() {
+ flag |= obj.NEEDCTXT
+ }
+ if f.Pragma&Nosplit != 0 {
+ flag |= obj.NOSPLIT
+ }
+ if f.IsPackageInit() {
+ flag |= obj.PKGINIT
+ }
+
+ // Clumsy but important.
+ // For functions that could be on the path of invoking a deferred
+ // function that can recover (runtime.reflectcall, reflect.callReflect,
+ // and reflect.callMethod), we want the panic+recover special handling.
+ // See test/recover.go for test cases and src/reflect/value.go
+ // for the actual functions being considered.
+ //
+ // runtime.reflectcall is an assembly function which tailcalls
+ // WRAPPER functions (runtime.callNN). Its ABI wrapper needs WRAPPER
+ // flag as well.
+ fnname := f.Sym().Name
+ if base.Ctxt.Pkgpath == "runtime" && fnname == "reflectcall" {
+ flag |= obj.WRAPPER
+ } else if base.Ctxt.Pkgpath == "reflect" {
+ switch fnname {
+ case "callReflect", "callMethod":
+ flag |= obj.WRAPPER
+ }
+ }
+
+ base.Ctxt.InitTextSym(f.LSym, flag, f.Pos())
+}
diff --git a/src/cmd/compile/internal/ir/bitset.go b/src/cmd/compile/internal/ir/bitset.go
new file mode 100644
index 0000000..bae4005
--- /dev/null
+++ b/src/cmd/compile/internal/ir/bitset.go
@@ -0,0 +1,37 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+type bitset8 uint8
+
+func (f *bitset8) set(mask uint8, b bool) {
+ if b {
+ *(*uint8)(f) |= mask
+ } else {
+ *(*uint8)(f) &^= mask
+ }
+}
+
+func (f bitset8) get2(shift uint8) uint8 {
+ return uint8(f>>shift) & 3
+}
+
+// set2 sets two bits in f using the bottom two bits of b.
+func (f *bitset8) set2(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint8)(f) &^= 3 << shift
+ // Set new bits.
+ *(*uint8)(f) |= uint8(b&3) << shift
+}
+
+type bitset16 uint16
+
+func (f *bitset16) set(mask uint16, b bool) {
+ if b {
+ *(*uint16)(f) |= mask
+ } else {
+ *(*uint16)(f) &^= mask
+ }
+}
diff --git a/src/cmd/compile/internal/ir/cfg.go b/src/cmd/compile/internal/ir/cfg.go
new file mode 100644
index 0000000..49e1ed3
--- /dev/null
+++ b/src/cmd/compile/internal/ir/cfg.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+var (
+ // MaxStackVarSize is the maximum size variable which we will allocate on the stack.
+ // This limit is for explicit variable declarations like "var x T" or "x := ...".
+ // Note: the flag smallframes can update this value.
+ MaxStackVarSize = int64(10 * 1024 * 1024)
+
+ // MaxImplicitStackVarSize is the maximum size of implicit variables that we will allocate on the stack.
+ // p := new(T) allocating T on the stack
+ // p := &T{} allocating T on the stack
+ // s := make([]T, n) allocating [n]T on the stack
+ // s := []byte("...") allocating [n]byte on the stack
+ // Note: the flag smallframes can update this value.
+ MaxImplicitStackVarSize = int64(64 * 1024)
+
+ // MaxSmallArraySize is the maximum size of an array which is considered small.
+ // Small arrays will be initialized directly with a sequence of constant stores.
+ // Large arrays will be initialized by copying from a static temp.
+ // 256 bytes was chosen to minimize generated code + statictmp size.
+ MaxSmallArraySize = int64(256)
+)
diff --git a/src/cmd/compile/internal/ir/check_reassign_no.go b/src/cmd/compile/internal/ir/check_reassign_no.go
new file mode 100644
index 0000000..8290a7d
--- /dev/null
+++ b/src/cmd/compile/internal/ir/check_reassign_no.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !checknewoldreassignment
+
+package ir
+
+const consistencyCheckEnabled = false
diff --git a/src/cmd/compile/internal/ir/check_reassign_yes.go b/src/cmd/compile/internal/ir/check_reassign_yes.go
new file mode 100644
index 0000000..30876cc
--- /dev/null
+++ b/src/cmd/compile/internal/ir/check_reassign_yes.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build checknewoldreassignment
+
+package ir
+
+const consistencyCheckEnabled = true
diff --git a/src/cmd/compile/internal/ir/class_string.go b/src/cmd/compile/internal/ir/class_string.go
new file mode 100644
index 0000000..11a94c0
--- /dev/null
+++ b/src/cmd/compile/internal/ir/class_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type=Class name.go"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Pxxx-0]
+ _ = x[PEXTERN-1]
+ _ = x[PAUTO-2]
+ _ = x[PAUTOHEAP-3]
+ _ = x[PPARAM-4]
+ _ = x[PPARAMOUT-5]
+ _ = x[PTYPEPARAM-6]
+ _ = x[PFUNC-7]
+}
+
+const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPTYPEPARAMPFUNC"
+
+var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 50, 55}
+
+func (i Class) String() string {
+ if i >= Class(len(_Class_index)-1) {
+ return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Class_name[_Class_index[i]:_Class_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
new file mode 100644
index 0000000..0efd113
--- /dev/null
+++ b/src/cmd/compile/internal/ir/const.go
@@ -0,0 +1,161 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+ "math"
+ "math/big"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// NewBool returns an OLITERAL representing b as an untyped boolean.
+func NewBool(pos src.XPos, b bool) Node {
+ return NewBasicLit(pos, types.UntypedBool, constant.MakeBool(b))
+}
+
+// NewInt returns an OLITERAL representing v as an untyped integer.
+func NewInt(pos src.XPos, v int64) Node {
+ return NewBasicLit(pos, types.UntypedInt, constant.MakeInt64(v))
+}
+
+// NewString returns an OLITERAL representing s as an untyped string.
+func NewString(pos src.XPos, s string) Node {
+ return NewBasicLit(pos, types.UntypedString, constant.MakeString(s))
+}
+
+// NewUintptr returns an OLITERAL representing v as a uintptr.
+func NewUintptr(pos src.XPos, v int64) Node {
+ return NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(v))
+}
+
+// NewZero returns a zero value of the given type.
+func NewZero(pos src.XPos, typ *types.Type) Node {
+ switch {
+ case typ.HasNil():
+ return NewNilExpr(pos, typ)
+ case typ.IsInteger():
+ return NewBasicLit(pos, typ, intZero)
+ case typ.IsFloat():
+ return NewBasicLit(pos, typ, floatZero)
+ case typ.IsComplex():
+ return NewBasicLit(pos, typ, complexZero)
+ case typ.IsBoolean():
+ return NewBasicLit(pos, typ, constant.MakeBool(false))
+ case typ.IsString():
+ return NewBasicLit(pos, typ, constant.MakeString(""))
+ case typ.IsArray() || typ.IsStruct():
+ // TODO(mdempsky): Return a typechecked expression instead.
+ return NewCompLitExpr(pos, OCOMPLIT, typ, nil)
+ }
+
+ base.FatalfAt(pos, "unexpected type: %v", typ)
+ panic("unreachable")
+}
+
+var (
+ intZero = constant.MakeInt64(0)
+ floatZero = constant.ToFloat(intZero)
+ complexZero = constant.ToComplex(intZero)
+)
+
+// NewOne returns an OLITERAL representing 1 with the given type.
+func NewOne(pos src.XPos, typ *types.Type) Node {
+ var val constant.Value
+ switch {
+ case typ.IsInteger():
+ val = intOne
+ case typ.IsFloat():
+ val = floatOne
+ case typ.IsComplex():
+ val = complexOne
+ default:
+ base.FatalfAt(pos, "%v cannot represent 1", typ)
+ }
+
+ return NewBasicLit(pos, typ, val)
+}
+
+var (
+ intOne = constant.MakeInt64(1)
+ floatOne = constant.ToFloat(intOne)
+ complexOne = constant.ToComplex(intOne)
+)
+
+const (
+ // Maximum size in bits for big.Ints before signaling
+ // overflow and also mantissa precision for big.Floats.
+ ConstPrec = 512
+)
+
+func BigFloat(v constant.Value) *big.Float {
+ f := new(big.Float)
+ f.SetPrec(ConstPrec)
+ switch u := constant.Val(v).(type) {
+ case int64:
+ f.SetInt64(u)
+ case *big.Int:
+ f.SetInt(u)
+ case *big.Float:
+ f.Set(u)
+ case *big.Rat:
+ f.SetRat(u)
+ default:
+ base.Fatalf("unexpected: %v", u)
+ }
+ return f
+}
+
+// ConstOverflow reports whether constant value v is too large
+// to represent with type t.
+func ConstOverflow(v constant.Value, t *types.Type) bool {
+ switch {
+ case t.IsInteger():
+ bits := uint(8 * t.Size())
+ if t.IsUnsigned() {
+ x, ok := constant.Uint64Val(v)
+ return !ok || x>>bits != 0
+ }
+ x, ok := constant.Int64Val(v)
+ if x < 0 {
+ x = ^x
+ }
+ return !ok || x>>(bits-1) != 0
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return math.IsInf(float64(f), 0)
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return math.IsInf(f, 0)
+ }
+ case t.IsComplex():
+ ft := types.FloatForComplex(t)
+ return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
+ }
+ base.Fatalf("ConstOverflow: %v, %v", v, t)
+ panic("unreachable")
+}
+
+// IsConstNode reports whether n is a Go language constant (as opposed to a
+// compile-time constant).
+//
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+func IsConstNode(n Node) bool {
+ return n.Op() == OLITERAL
+}
+
+func IsSmallIntConst(n Node) bool {
+ if n.Op() == OLITERAL {
+ v, ok := constant.Int64Val(n.Val())
+ return ok && int64(int32(v)) == v
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
new file mode 100644
index 0000000..d30f7bc
--- /dev/null
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -0,0 +1,43 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/internal/src"
+)
+
+// Copy returns a shallow copy of n.
+func Copy(n Node) Node {
+ return n.copy()
+}
+
+// DeepCopy returns a “deep” copy of n, with its entire structure copied
+// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE).
+// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos.
+func DeepCopy(pos src.XPos, n Node) Node {
+ var edit func(Node) Node
+ edit = func(x Node) Node {
+ switch x.Op() {
+ case ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
+ return x
+ }
+ x = Copy(x)
+ if pos.IsKnown() {
+ x.SetPos(pos)
+ }
+ EditChildren(x, edit)
+ return x
+ }
+ return edit(n)
+}
+
+// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
+func DeepCopyList(pos src.XPos, list []Node) []Node {
+ var out []Node
+ for _, n := range list {
+ out = append(out, DeepCopy(pos, n))
+ }
+ return out
+}
diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go
new file mode 100644
index 0000000..4c21868
--- /dev/null
+++ b/src/cmd/compile/internal/ir/dump.go
@@ -0,0 +1,256 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements textual dumping of arbitrary data structures
+// for debugging purposes. The code is customized for Node graphs
+// and may be used for an alternative view of the node structure.
+
+package ir
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// DumpAny is like FDumpAny but prints to stderr.
+func DumpAny(root interface{}, filter string, depth int) {
+ FDumpAny(os.Stderr, root, filter, depth)
+}
+
+// FDumpAny prints the structure of a rooted data structure
+// to w by depth-first traversal of the data structure.
+//
+// The filter parameter is a regular expression. If it is
+// non-empty, only struct fields whose names match filter
+// are printed.
+//
+// The depth parameter controls how deep traversal recurses
+// before it returns (higher value means greater depth).
+// If an empty field filter is given, a good depth default value
+// is 4. A negative depth means no depth limit, which may be fine
+// for small data structures or if there is a non-empty filter.
+//
+// In the output, Node structs are identified by their Op name
+// rather than their type; struct fields with zero values or
+// non-matching field names are omitted, and "…" means recursion
+// depth has been reached or struct fields have been omitted.
+func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
+ if root == nil {
+ fmt.Fprintln(w, "nil")
+ return
+ }
+
+ if filter == "" {
+ filter = ".*" // default
+ }
+
+ p := dumper{
+ output: w,
+ fieldrx: regexp.MustCompile(filter),
+ ptrmap: make(map[uintptr]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ p.dump(reflect.ValueOf(root), depth)
+ p.printf("\n")
+}
+
+type dumper struct {
+ output io.Writer
+ fieldrx *regexp.Regexp // field name filter
+ ptrmap map[uintptr]int // ptr -> dump line number
+ lastadr string // last address string printed (for shortening)
+
+ // output
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// printf is a convenience wrapper.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(err)
+ }
+}
+
+// addr returns the (hexadecimal) address string of the object
+// represented by x (or "?" if x is not addressable), with the
+// common prefix between this and the prior address replaced by
+// "0x…" to make it easier to visually match addresses.
+func (p *dumper) addr(x reflect.Value) string {
+ if !x.CanAddr() {
+ return "?"
+ }
+ adr := fmt.Sprintf("%p", x.Addr().Interface())
+ s := adr
+ if i := commonPrefixLen(p.lastadr, adr); i > 0 {
+ s = "0x…" + adr[i:]
+ }
+ p.lastadr = adr
+ return s
+}
+
+// dump prints the contents of x.
+func (p *dumper) dump(x reflect.Value, depth int) {
+ if depth == 0 {
+ p.printf("…")
+ return
+ }
+
+ if pos, ok := x.Interface().(src.XPos); ok {
+ p.printf("%s", base.FmtPos(pos))
+ return
+ }
+
+ switch x.Kind() {
+ case reflect.String:
+ p.printf("%q", x.Interface()) // print strings in quotes
+
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), depth-1)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ p.printf("*")
+ ptr := x.Pointer()
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(@%d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ p.dump(x.Elem(), depth) // don't count pointer indirection towards depth
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), depth-1)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ isNode := false
+ if n, ok := x.Interface().(Node); ok {
+ isNode = true
+ p.printf("%s %s {", n.Op().String(), p.addr(x))
+ } else {
+ p.printf("%s {", typ)
+ }
+ p.indent++
+
+ first := true
+ omitted := false
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; types.IsExported(name) {
+ if !p.fieldrx.MatchString(name) {
+ omitted = true
+ continue // field name not selected by filter
+ }
+
+ // special cases
+ if isNode && name == "Op" {
+ omitted = true
+ continue // Op field already printed for Nodes
+ }
+ x := x.Field(i)
+ if x.IsZero() {
+ omitted = true
+ continue // exclude zero-valued fields
+ }
+ if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
+ omitted = true
+ continue // exclude empty Nodes slices
+ }
+
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x, depth-1)
+ p.printf("\n")
+ }
+ }
+ if omitted {
+ p.printf("…\n")
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ p.printf("%v", x.Interface())
+ }
+}
+
+func commonPrefixLen(a, b string) (i int) {
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
new file mode 100644
index 0000000..da5b437
--- /dev/null
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -0,0 +1,1256 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+ "go/token"
+)
+
+// An Expr is a Node that can appear as an expression.
+type Expr interface {
+ Node
+ isExpr()
+}
+
+// A miniExpr is a miniNode with extra fields common to expressions.
+// TODO(rsc): Once we are sure about the contents, compact the bools
+// into a bit field and leave extra bits available for implementations
+// embedding miniExpr. Right now there are ~60 unused bits sitting here.
+type miniExpr struct {
+ miniNode
+ typ *types.Type
+ init Nodes // TODO(rsc): Don't require every Node to have an init
+ flags bitset8
+}
+
+const (
+ miniExprNonNil = 1 << iota
+ miniExprTransient
+ miniExprBounded
+ miniExprImplicit // for use by implementations; not supported by every Expr
+ miniExprCheckPtr
+)
+
+func (*miniExpr) isExpr() {}
+
+func (n *miniExpr) Type() *types.Type { return n.typ }
+func (n *miniExpr) SetType(x *types.Type) { n.typ = x }
+func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 }
+func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil }
+func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 }
+func (n *miniExpr) SetTransient(b bool) { n.flags.set(miniExprTransient, b) }
+func (n *miniExpr) Bounded() bool { return n.flags&miniExprBounded != 0 }
+func (n *miniExpr) SetBounded(b bool) { n.flags.set(miniExprBounded, b) }
+func (n *miniExpr) Init() Nodes { return n.init }
+func (n *miniExpr) PtrInit() *Nodes { return &n.init }
+func (n *miniExpr) SetInit(x Nodes) { n.init = x }
+
+// An AddStringExpr is a string concatenation List[0] + List[1] + ... + List[len(List)-1].
+type AddStringExpr struct {
+ miniExpr
+ List Nodes
+ Prealloc *Name
+}
+
+func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr {
+ n := &AddStringExpr{}
+ n.pos = pos
+ n.op = OADDSTR
+ n.List = list
+ return n
+}
+
+// An AddrExpr is an address-of expression &X.
+// It may end up being a normal address-of or an allocation of a composite literal.
+type AddrExpr struct {
+ miniExpr
+ X Node
+ Prealloc *Name // preallocated storage if any
+}
+
+func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
+ if x == nil || x.Typecheck() != 1 {
+ base.FatalfAt(pos, "missed typecheck: %L", x)
+ }
+ n := &AddrExpr{X: x}
+ n.pos = pos
+
+ switch x.Op() {
+ case OARRAYLIT, OMAPLIT, OSLICELIT, OSTRUCTLIT:
+ n.op = OPTRLIT
+
+ default:
+ n.op = OADDR
+ if r, ok := OuterValue(x).(*Name); ok && r.Op() == ONAME {
+ r.SetAddrtaken(true)
+
+ // If r is a closure variable, we need to mark its canonical
+ // variable as addrtaken too, so that closure conversion
+ // captures it by reference.
+ //
+ // Exception: if we've already marked the variable as
+ // capture-by-value, then that means this variable isn't
+ // logically modified, and we must be taking its address to pass
+ // to a runtime function that won't mutate it. In that case, we
+ // only need to make sure our own copy is addressable.
+ if r.IsClosureVar() && !r.Byval() {
+ r.Canonical().SetAddrtaken(true)
+ }
+ }
+ }
+
+ n.SetType(types.NewPtr(x.Type()))
+ n.SetTypecheck(1)
+
+ return n
+}
+
+func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (n *AddrExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OADDR, OPTRLIT:
+ n.op = op
+ }
+}
+
+// A BasicLit is a literal of basic type.
+type BasicLit struct {
+ miniExpr
+ val constant.Value
+}
+
+// NewBasicLit returns an OLITERAL representing val with the given type.
+func NewBasicLit(pos src.XPos, typ *types.Type, val constant.Value) Node {
+ AssertValidTypeForConst(typ, val)
+
+ n := &BasicLit{val: val}
+ n.op = OLITERAL
+ n.pos = pos
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ return n
+}
+
+func (n *BasicLit) Val() constant.Value { return n.val }
+func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
+
+// NewConstExpr returns an OLITERAL representing val, copying the
+// position and type from orig.
+func NewConstExpr(val constant.Value, orig Node) Node {
+ return NewBasicLit(orig.Pos(), orig.Type(), val)
+}
+
+// A BinaryExpr is a binary expression X Op Y,
+// or Op(X, Y) for builtin functions that do not become calls.
+type BinaryExpr struct {
+ miniExpr
+ X Node
+ Y Node
+ RType Node `mknode:"-"` // see reflectdata/helpers.go
+}
+
+func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr {
+ n := &BinaryExpr{X: x, Y: y}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *BinaryExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
+ OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
+ OCOPY, OCOMPLEX, OUNSAFEADD, OUNSAFESLICE, OUNSAFESTRING,
+ OMAKEFACE:
+ n.op = op
+ }
+}
+
+// A CallExpr is a function call Fun(Args).
+type CallExpr struct {
+ miniExpr
+ Fun Node
+ Args Nodes
+ DeferAt Node
+ RType Node `mknode:"-"` // see reflectdata/helpers.go
+ KeepAlive []*Name // vars to be kept alive until call returns
+ IsDDD bool
+ GoDefer bool // whether this call is part of a go or defer statement
+ NoInline bool // whether this call must not be inlined
+}
+
+func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
+ n := &CallExpr{Fun: fun}
+ n.pos = pos
+ n.SetOp(op)
+ n.Args = args
+ return n
+}
+
+func (*CallExpr) isStmt() {}
+
+func (n *CallExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OAPPEND,
+ OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+ ODELETE,
+ OGETG, OGETCALLERPC, OGETCALLERSP,
+ OMAKE, OMAX, OMIN, OPRINT, OPRINTLN,
+ ORECOVER, ORECOVERFP:
+ n.op = op
+ }
+}
+
+// A ClosureExpr is a function literal expression.
+type ClosureExpr struct {
+ miniExpr
+ Func *Func `mknode:"-"`
+ Prealloc *Name
+ IsGoWrap bool // whether this is wrapper closure of a go statement
+}
+
+// A CompLitExpr is a composite literal Type{Vals}.
+// Before type-checking, the type is Ntype.
+type CompLitExpr struct {
+ miniExpr
+ List Nodes // initialized values
+ RType Node `mknode:"-"` // *runtime._type for OMAPLIT map types
+ Prealloc *Name
+ // For OSLICELIT, Len is the backing array length.
+ // For OMAPLIT, Len is the number of entries that we've removed from List and
+ // generated explicit mapassign calls for. This is used to inform the map alloc hint.
+ Len int64
+}
+
+func NewCompLitExpr(pos src.XPos, op Op, typ *types.Type, list []Node) *CompLitExpr {
+ n := &CompLitExpr{List: list}
+ n.pos = pos
+ n.SetOp(op)
+ if typ != nil {
+ n.SetType(typ)
+ }
+ return n
+}
+
+func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (n *CompLitExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
+ n.op = op
+ }
+}
+
+// A ConvExpr is a conversion Type(X).
+// It may end up being a value or a type.
+type ConvExpr struct {
+ miniExpr
+ X Node
+
+ // For implementing OCONVIFACE expressions.
+ //
+ // TypeWord is an expression yielding a *runtime._type or
+ // *runtime.itab value to go in the type word of the iface/eface
+ // result. See reflectdata.ConvIfaceTypeWord for further details.
+ //
+ // SrcRType is an expression yielding a *runtime._type value for X,
+ // if it's not pointer-shaped and needs to be heap allocated.
+ TypeWord Node `mknode:"-"`
+ SrcRType Node `mknode:"-"`
+
+ // For -d=checkptr instrumentation of conversions from
+ // unsafe.Pointer to *Elem or *[Len]Elem.
+ //
+ // TODO(mdempsky): We only ever need one of these, but currently we
+ // don't decide which one until walk. Longer term, it probably makes
+ // sense to have a dedicated IR op for `(*[Len]Elem)(ptr)[:n:m]`
+ // expressions.
+ ElemRType Node `mknode:"-"`
+ ElemElemRType Node `mknode:"-"`
+}
+
+func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
+ n := &ConvExpr{X: x}
+ n.pos = pos
+ n.typ = typ
+ n.SetOp(op)
+ return n
+}
+
+func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *ConvExpr) CheckPtr() bool { return n.flags&miniExprCheckPtr != 0 }
+func (n *ConvExpr) SetCheckPtr(b bool) { n.flags.set(miniExprCheckPtr, b) }
+
+func (n *ConvExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OCONV, OCONVIFACE, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARR, OSLICE2ARRPTR:
+ n.op = op
+ }
+}
+
+// An IndexExpr is an index expression X[Index].
+type IndexExpr struct {
+ miniExpr
+ X Node
+ Index Node
+ RType Node `mknode:"-"` // see reflectdata/helpers.go
+ Assigned bool
+}
+
+func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr {
+ n := &IndexExpr{X: x, Index: index}
+ n.pos = pos
+ n.op = OINDEX
+ return n
+}
+
+func (n *IndexExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OINDEX, OINDEXMAP:
+ n.op = op
+ }
+}
+
+// A KeyExpr is a Key: Value composite literal key.
+type KeyExpr struct {
+ miniExpr
+ Key Node
+ Value Node
+}
+
+func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
+ n := &KeyExpr{Key: key, Value: value}
+ n.pos = pos
+ n.op = OKEY
+ return n
+}
+
+// A StructKeyExpr is a Field: Value composite literal key.
+type StructKeyExpr struct {
+ miniExpr
+ Field *types.Field
+ Value Node
+}
+
+func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
+ n := &StructKeyExpr{Field: field, Value: value}
+ n.pos = pos
+ n.op = OSTRUCTKEY
+ return n
+}
+
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
+
+// An InlinedCallExpr is an inlined function call.
+type InlinedCallExpr struct {
+ miniExpr
+ Body Nodes
+ ReturnVars Nodes // must be side-effect free
+}
+
+func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
+ n := &InlinedCallExpr{}
+ n.pos = pos
+ n.op = OINLCALL
+ n.Body = body
+ n.ReturnVars = retvars
+ return n
+}
+
+func (n *InlinedCallExpr) SingleResult() Node {
+ if have := len(n.ReturnVars); have != 1 {
+ base.FatalfAt(n.Pos(), "inlined call has %v results, expected 1", have)
+ }
+ if !n.Type().HasShape() && n.ReturnVars[0].Type().HasShape() {
+ // If the type of the call is not a shape, but the type of the return value
+ // is a shape, we need to do an implicit conversion, so the real type
+ // of n is maintained.
+ r := NewConvExpr(n.Pos(), OCONVNOP, n.Type(), n.ReturnVars[0])
+ r.SetTypecheck(1)
+ return r
+ }
+ return n.ReturnVars[0]
+}
+
+// A LogicalExpr is an expression X Op Y where Op is && or ||.
+// It is separate from BinaryExpr to make room for statements
+// that must be executed before Y but after X.
+type LogicalExpr struct {
+ miniExpr
+ X Node
+ Y Node
+}
+
+func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
+ n := &LogicalExpr{X: x, Y: y}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *LogicalExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OANDAND, OOROR:
+ n.op = op
+ }
+}
+
+// A MakeExpr is a make expression: make(Type[, Len[, Cap]]).
+// Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY,
+// but *not* OMAKE (that's a pre-typechecking CallExpr).
+type MakeExpr struct {
+ miniExpr
+ RType Node `mknode:"-"` // see reflectdata/helpers.go
+ Len Node
+ Cap Node
+}
+
+func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
+ n := &MakeExpr{Len: len, Cap: cap}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *MakeExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
+ n.op = op
+ }
+}
+
+// A NilExpr represents the predefined untyped constant nil.
+type NilExpr struct {
+ miniExpr
+}
+
+func NewNilExpr(pos src.XPos, typ *types.Type) *NilExpr {
+ if typ == nil {
+ base.FatalfAt(pos, "missing type")
+ }
+ n := &NilExpr{}
+ n.pos = pos
+ n.op = ONIL
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ return n
+}
+
+// A ParenExpr is a parenthesized expression (X).
+// It may end up being a value or a type.
+type ParenExpr struct {
+ miniExpr
+ X Node
+}
+
+func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
+ n := &ParenExpr{X: x}
+ n.op = OPAREN
+ n.pos = pos
+ return n
+}
+
+func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// A ResultExpr represents a direct access to a result.
+type ResultExpr struct {
+ miniExpr
+ Index int64 // index of the result expr.
+}
+
+func NewResultExpr(pos src.XPos, typ *types.Type, index int64) *ResultExpr {
+ n := &ResultExpr{Index: index}
+ n.pos = pos
+ n.op = ORESULT
+ n.typ = typ
+ return n
+}
+
+// A LinksymOffsetExpr refers to an offset within a global variable.
+// It is like a SelectorExpr but without the field name.
+type LinksymOffsetExpr struct {
+ miniExpr
+ Linksym *obj.LSym
+ Offset_ int64
+}
+
+func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ if typ == nil {
+ base.FatalfAt(pos, "nil type")
+ }
+ n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
+ n.typ = typ
+ n.op = OLINKSYMOFFSET
+ n.SetTypecheck(1)
+ return n
+}
+
+// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0.
+func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr {
+ return NewLinksymOffsetExpr(pos, lsym, 0, typ)
+}
+
+// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name
+// representing a global variable instead of an *obj.LSym directly.
+func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) {
+ base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name)
+ }
+ return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ)
+}
+
+// A SelectorExpr is a selector expression X.Sel.
+type SelectorExpr struct {
+ miniExpr
+ X Node
+ // Sel is the name of the field or method being selected, without (in the
+ // case of methods) any preceding type specifier. If the field/method is
+ // exported, than the Sym uses the local package regardless of the package
+ // of the containing type.
+ Sel *types.Sym
+ // The actual selected field - may not be filled in until typechecking.
+ Selection *types.Field
+ Prealloc *Name // preallocated storage for OMETHVALUE, if any
+}
+
+func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
+ n := &SelectorExpr{X: x, Sel: sel}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *SelectorExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR:
+ n.op = op
+ }
+}
+
+func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
+func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *SelectorExpr) Offset() int64 { return n.Selection.Offset }
+
+func (n *SelectorExpr) FuncName() *Name {
+ if n.Op() != OMETHEXPR {
+ panic(n.no("FuncName"))
+ }
+ fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel), n.Type())
+ fn.Class = PFUNC
+ if n.Selection.Nname != nil {
+ // TODO(austin): Nname is nil for interface method
+ // expressions (I.M), so we can't attach a Func to
+ // those here.
+ fn.Func = n.Selection.Nname.(*Name).Func
+ }
+ return fn
+}
+
+// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max].
+type SliceExpr struct {
+ miniExpr
+ X Node
+ Low Node
+ High Node
+ Max Node
+}
+
+func NewSliceExpr(pos src.XPos, op Op, x, low, high, max Node) *SliceExpr {
+ n := &SliceExpr{X: x, Low: low, High: high, Max: max}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *SliceExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ n.op = op
+ }
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+ switch o {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ return false
+ case OSLICE3, OSLICE3ARR:
+ return true
+ }
+ base.Fatalf("IsSlice3 op %v", o)
+ return false
+}
+
+// A SliceHeader expression constructs a slice header from its parts.
+type SliceHeaderExpr struct {
+ miniExpr
+ Ptr Node
+ Len Node
+ Cap Node
+}
+
+func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
+ n := &SliceHeaderExpr{Ptr: ptr, Len: len, Cap: cap}
+ n.pos = pos
+ n.op = OSLICEHEADER
+ n.typ = typ
+ return n
+}
+
+// A StringHeaderExpr expression constructs a string header from its parts.
+type StringHeaderExpr struct {
+ miniExpr
+ Ptr Node
+ Len Node
+}
+
+func NewStringHeaderExpr(pos src.XPos, ptr, len Node) *StringHeaderExpr {
+ n := &StringHeaderExpr{Ptr: ptr, Len: len}
+ n.pos = pos
+ n.op = OSTRINGHEADER
+ n.typ = types.Types[types.TSTRING]
+ return n
+}
+
+// A StarExpr is a dereference expression *X.
+// It may end up being a value or a type.
+type StarExpr struct {
+ miniExpr
+ X Node
+}
+
+func NewStarExpr(pos src.XPos, x Node) *StarExpr {
+ n := &StarExpr{X: x}
+ n.op = ODEREF
+ n.pos = pos
+ return n
+}
+
+func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// A TypeAssertionExpr is a selector expression X.(Type).
+// Before type-checking, the type is Ntype.
+type TypeAssertExpr struct {
+ miniExpr
+ X Node
+
+ // Runtime type information provided by walkDotType for
+ // assertions from non-empty interface to concrete type.
+ ITab Node `mknode:"-"` // *runtime.itab for Type implementing X's type
+
+ // An internal/abi.TypeAssert descriptor to pass to the runtime.
+ Descriptor *obj.LSym
+}
+
+func NewTypeAssertExpr(pos src.XPos, x Node, typ *types.Type) *TypeAssertExpr {
+ n := &TypeAssertExpr{X: x}
+ n.pos = pos
+ n.op = ODOTTYPE
+ if typ != nil {
+ n.SetType(typ)
+ }
+ return n
+}
+
+func (n *TypeAssertExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODOTTYPE, ODOTTYPE2:
+ n.op = op
+ }
+}
+
+// A DynamicTypeAssertExpr asserts that X is of dynamic type RType.
+type DynamicTypeAssertExpr struct {
+ miniExpr
+ X Node
+
+ // SrcRType is an expression that yields a *runtime._type value
+ // representing X's type. It's used in failed assertion panic
+ // messages.
+ SrcRType Node
+
+ // RType is an expression that yields a *runtime._type value
+ // representing the asserted type.
+ //
+ // BUG(mdempsky): If ITab is non-nil, RType may be nil.
+ RType Node
+
+ // ITab is an expression that yields a *runtime.itab value
+ // representing the asserted type within the assertee expression's
+ // original interface type.
+ //
+ // ITab is only used for assertions from non-empty interface type to
+ // a concrete (i.e., non-interface) type. For all other assertions,
+ // ITab is nil.
+ ITab Node
+}
+
+func NewDynamicTypeAssertExpr(pos src.XPos, op Op, x, rtype Node) *DynamicTypeAssertExpr {
+ n := &DynamicTypeAssertExpr{X: x, RType: rtype}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *DynamicTypeAssertExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODYNAMICDOTTYPE, ODYNAMICDOTTYPE2:
+ n.op = op
+ }
+}
+
+// A UnaryExpr is a unary expression Op X,
+// or Op(X) for a builtin function that does not end up being a call.
+type UnaryExpr struct {
+ miniExpr
+ X Node
+}
+
+func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr {
+ n := &UnaryExpr{X: x}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *UnaryExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
+ OCAP, OCLEAR, OCLOSE, OIMAG, OLEN, ONEW, OPANIC, OREAL,
+ OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR,
+ OUNSAFESTRINGDATA, OUNSAFESLICEDATA:
+ n.op = op
+ }
+}
+
+func IsZero(n Node) bool {
+ switch n.Op() {
+ case ONIL:
+ return true
+
+ case OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.String:
+ return constant.StringVal(u) == ""
+ case constant.Bool:
+ return !constant.BoolVal(u)
+ default:
+ return constant.Sign(u) == 0
+ }
+
+ case OARRAYLIT:
+ n := n.(*CompLitExpr)
+ for _, n1 := range n.List {
+ if n1.Op() == OKEY {
+ n1 = n1.(*KeyExpr).Value
+ }
+ if !IsZero(n1) {
+ return false
+ }
+ }
+ return true
+
+ case OSTRUCTLIT:
+ n := n.(*CompLitExpr)
+ for _, n1 := range n.List {
+ n1 := n1.(*StructKeyExpr)
+ if !IsZero(n1.Value) {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// lvalue etc
+func IsAddressable(n Node) bool {
+ switch n.Op() {
+ case OINDEX:
+ n := n.(*IndexExpr)
+ if n.X.Type() != nil && n.X.Type().IsArray() {
+ return IsAddressable(n.X)
+ }
+ if n.X.Type() != nil && n.X.Type().IsString() {
+ return false
+ }
+ fallthrough
+ case ODEREF, ODOTPTR:
+ return true
+
+ case ODOT:
+ n := n.(*SelectorExpr)
+ return IsAddressable(n.X)
+
+ case ONAME:
+ n := n.(*Name)
+ if n.Class == PFUNC {
+ return false
+ }
+ return true
+
+ case OLINKSYMOFFSET:
+ return true
+ }
+
+ return false
+}
+
+// StaticValue analyzes n to find the earliest expression that always
+// evaluates to the same value as n, which might be from an enclosing
+// function.
+//
+// For example, given:
+//
+// var x int = g()
+// func() {
+// y := x
+// *p = int(y)
+// }
+//
+// calling StaticValue on the "int(y)" expression returns the outer
+// "g()" expression.
+func StaticValue(n Node) Node {
+ for {
+ if n.Op() == OCONVNOP {
+ n = n.(*ConvExpr).X
+ continue
+ }
+
+ if n.Op() == OINLCALL {
+ n = n.(*InlinedCallExpr).SingleResult()
+ continue
+ }
+
+ n1 := staticValue1(n)
+ if n1 == nil {
+ return n
+ }
+ n = n1
+ }
+}
+
+func staticValue1(nn Node) Node {
+ if nn.Op() != ONAME {
+ return nil
+ }
+ n := nn.(*Name).Canonical()
+ if n.Class != PAUTO {
+ return nil
+ }
+
+ defn := n.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs Node
+FindRHS:
+ switch defn.Op() {
+ case OAS:
+ defn := defn.(*AssignStmt)
+ rhs = defn.Y
+ case OAS2:
+ defn := defn.(*AssignListStmt)
+ for i, lhs := range defn.Lhs {
+ if lhs == n {
+ rhs = defn.Rhs[i]
+ break FindRHS
+ }
+ }
+ base.Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ base.Fatalf("RHS is nil: %v", defn)
+ }
+
+ if Reassigned(n) {
+ return nil
+ }
+
+ return rhs
+}
+
+// Reassigned takes an ONAME node, walks the function in which it is
+// defined, and returns a boolean indicating whether the name has any
+// assignments other than its declaration.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and
+// followed by a single assignment?
+// NOTE: any changes made here should also be made in the corresponding
+// code in the ReassignOracle.Init method.
+func Reassigned(name *Name) bool {
+ if name.Op() != ONAME {
+ base.Fatalf("reassigned %v", name)
+ }
+ // no way to reliably check for no-reassignment of globals, assume it can be
+ if name.Curfn == nil {
+ return true
+ }
+
+ if name.Addrtaken() {
+ return true // conservatively assume it's reassigned indirectly
+ }
+
+ // TODO(mdempsky): This is inefficient and becoming increasingly
+ // unwieldy. Figure out a way to generalize escape analysis's
+ // reassignment detection for use by inlining and devirtualization.
+
+ // isName reports whether n is a reference to name.
+ isName := func(x Node) bool {
+ if x == nil {
+ return false
+ }
+ n, ok := OuterValue(x).(*Name)
+ return ok && n.Canonical() == name
+ }
+
+ var do func(n Node) bool
+ do = func(n Node) bool {
+ switch n.Op() {
+ case OAS:
+ n := n.(*AssignStmt)
+ if isName(n.X) && n != name.Defn {
+ return true
+ }
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+ n := n.(*AssignListStmt)
+ for _, p := range n.Lhs {
+ if isName(p) && n != name.Defn {
+ return true
+ }
+ }
+ case OASOP:
+ n := n.(*AssignOpStmt)
+ if isName(n.X) {
+ return true
+ }
+ case OADDR:
+ n := n.(*AddrExpr)
+ if isName(n.X) {
+ base.FatalfAt(n.Pos(), "%v not marked addrtaken", name)
+ }
+ case ORANGE:
+ n := n.(*RangeStmt)
+ if isName(n.Key) || isName(n.Value) {
+ return true
+ }
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ if Any(n.Func, do) {
+ return true
+ }
+ }
+ return false
+ }
+ return Any(name.Curfn, do)
+}
+
+// StaticCalleeName returns the ONAME/PFUNC for n, if known.
+func StaticCalleeName(n Node) *Name {
+ switch n.Op() {
+ case OMETHEXPR:
+ n := n.(*SelectorExpr)
+ return MethodExprName(n)
+ case ONAME:
+ n := n.(*Name)
+ if n.Class == PFUNC {
+ return n
+ }
+ case OCLOSURE:
+ return n.(*ClosureExpr).Func.Nname
+ }
+ return nil
+}
+
+// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
+var IsIntrinsicCall = func(*CallExpr) bool { return false }
+
+// SameSafeExpr checks whether it is safe to reuse one of l and r
+// instead of computing both. SameSafeExpr assumes that l and r are
+// used in the same statement or expression. In order for it to be
+// safe to reuse l or r, they must:
+// - be the same expression
+// - not have side-effects (no function calls, no channel ops);
+// however, panics are ok
+// - not cause inappropriate aliasing; e.g. two string to []byte
+// conversions, must result in two distinct slices
+//
+// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
+// as an lvalue (map assignment) and an rvalue (map access). This is
+// currently OK, since the only place SameSafeExpr gets used on an
+// lvalue expression is for OSLICE and OAPPEND optimizations, and it
+// is correct in those settings.
+func SameSafeExpr(l Node, r Node) bool {
+ for l.Op() == OCONVNOP {
+ l = l.(*ConvExpr).X
+ }
+ for r.Op() == OCONVNOP {
+ r = r.(*ConvExpr).X
+ }
+ if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
+ return false
+ }
+
+ switch l.Op() {
+ case ONAME:
+ return l == r
+
+ case ODOT, ODOTPTR:
+ l := l.(*SelectorExpr)
+ r := r.(*SelectorExpr)
+ return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X)
+
+ case ODEREF:
+ l := l.(*StarExpr)
+ r := r.(*StarExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case ONOT, OBITNOT, OPLUS, ONEG:
+ l := l.(*UnaryExpr)
+ r := r.(*UnaryExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case OCONV:
+ l := l.(*ConvExpr)
+ r := r.(*ConvExpr)
+ // Some conversions can't be reused, such as []byte(str).
+ // Allow only numeric-ish types. This is a bit conservative.
+ return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X)
+
+ case OINDEX, OINDEXMAP:
+ l := l.(*IndexExpr)
+ r := r.(*IndexExpr)
+ return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index)
+
+ case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+ l := l.(*BinaryExpr)
+ r := r.(*BinaryExpr)
+ return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y)
+
+ case OLITERAL:
+ return constant.Compare(l.Val(), token.EQL, r.Val())
+
+ case ONIL:
+ return true
+ }
+
+ return false
+}
+
+// ShouldCheckPtr reports whether pointer checking should be enabled for
+// function fn at a given level. See debugHelpFooter for defined
+// levels.
+func ShouldCheckPtr(fn *Func, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0
+}
+
+// ShouldAsanCheckPtr reports whether pointer checking should be enabled for
+// function fn when -asan is enabled.
+func ShouldAsanCheckPtr(fn *Func) bool {
+ return base.Flag.ASan && fn.Pragma&NoCheckPtr == 0
+}
+
+// IsReflectHeaderDataField reports whether l is an expression p.Data
+// where p has type reflect.SliceHeader or reflect.StringHeader.
+func IsReflectHeaderDataField(l Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
+ return false
+ }
+
+ var tsym *types.Sym
+ switch l.Op() {
+ case ODOT:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Sym()
+ case ODOTPTR:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Elem().Sym()
+ default:
+ return false
+ }
+
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
+ return false
+ }
+ return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
+func ParamNames(ft *types.Type) []Node {
+ args := make([]Node, ft.NumParams())
+ for i, f := range ft.Params() {
+ args[i] = f.Nname.(*Name)
+ }
+ return args
+}
+
+// MethodSym returns the method symbol representing a method name
+// associated with a specific receiver type.
+//
+// Method symbols can be used to distinguish the same method appearing
+// in different method sets. For example, T.M and (*T).M have distinct
+// method symbols.
+//
+// The returned symbol will be marked as a function.
+func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
+ sym := MethodSymSuffix(recv, msym, "")
+ sym.SetFunc(true)
+ return sym
+}
+
+// MethodSymSuffix is like MethodSym, but allows attaching a
+// distinguisher suffix. To avoid collisions, the suffix must not
+// start with a letter, number, or period.
+func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
+ if msym.IsBlank() {
+ base.Fatalf("blank method name")
+ }
+
+ rsym := recv.Sym()
+ if recv.IsPtr() {
+ if rsym != nil {
+ base.Fatalf("declared pointer receiver type: %v", recv)
+ }
+ rsym = recv.Elem().Sym()
+ }
+
+ // Find the package the receiver type appeared in. For
+ // anonymous receiver types (i.e., anonymous structs with
+ // embedded fields), use the "go" pseudo-package instead.
+ rpkg := Pkgs.Go
+ if rsym != nil {
+ rpkg = rsym.Pkg
+ }
+
+ var b bytes.Buffer
+ if recv.IsPtr() {
+ // The parentheses aren't really necessary, but
+ // they're pretty traditional at this point.
+ fmt.Fprintf(&b, "(%-S)", recv)
+ } else {
+ fmt.Fprintf(&b, "%-S", recv)
+ }
+
+ // A particular receiver type may have multiple non-exported
+ // methods with the same name. To disambiguate them, include a
+ // package qualifier for names that came from a different
+ // package than the receiver type.
+ if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
+ b.WriteString(".")
+ b.WriteString(msym.Pkg.Prefix)
+ }
+
+ b.WriteString(".")
+ b.WriteString(msym.Name)
+ b.WriteString(suffix)
+ return rpkg.LookupBytes(b.Bytes())
+}
+
+// LookupMethodSelector returns the types.Sym of the selector for a method
+// named in local symbol name, as well as the types.Sym of the receiver.
+//
+// TODO(prattmic): this does not attempt to handle method suffixes (wrappers).
+func LookupMethodSelector(pkg *types.Pkg, name string) (typ, meth *types.Sym, err error) {
+ typeName, methName := splitType(name)
+ if typeName == "" {
+ return nil, nil, fmt.Errorf("%s doesn't contain type split", name)
+ }
+
+ if len(typeName) > 3 && typeName[:2] == "(*" && typeName[len(typeName)-1] == ')' {
+ // Symbol name is for a pointer receiver method. We just want
+ // the base type name.
+ typeName = typeName[2 : len(typeName)-1]
+ }
+
+ typ = pkg.Lookup(typeName)
+ meth = pkg.Selector(methName)
+ return typ, meth, nil
+}
+
+// splitType splits a local symbol name into type and method (fn). If this a
+// free function, typ == "".
+//
+// N.B. closures and methods can be ambiguous (e.g., bar.func1). These cases
+// are returned as methods.
+func splitType(name string) (typ, fn string) {
+ // Types are split on the first dot, ignoring everything inside
+ // brackets (instantiation of type parameter, usually including
+ // "go.shape").
+ bracket := 0
+ for i, r := range name {
+ if r == '.' && bracket == 0 {
+ return name[:i], name[i+1:]
+ }
+ if r == '[' {
+ bracket++
+ }
+ if r == ']' {
+ bracket--
+ }
+ }
+ return "", name
+}
+
+// MethodExprName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func MethodExprName(n Node) *Name {
+ name, _ := MethodExprFunc(n).Nname.(*Name)
+ return name
+}
+
+// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
+func MethodExprFunc(n Node) *types.Field {
+ switch n.Op() {
+ case ODOTMETH, OMETHEXPR, OMETHVALUE:
+ return n.(*SelectorExpr).Selection
+ }
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+ panic("unreachable")
+}
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
new file mode 100644
index 0000000..31c6103
--- /dev/null
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -0,0 +1,1208 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Op
+
+var OpNames = []string{
+ OADDR: "&",
+ OADD: "+",
+ OADDSTR: "+",
+ OANDAND: "&&",
+ OANDNOT: "&^",
+ OAND: "&",
+ OAPPEND: "append",
+ OAS: "=",
+ OAS2: "=",
+ OBREAK: "break",
+ OCALL: "function call", // not actual syntax
+ OCAP: "cap",
+ OCASE: "case",
+ OCLEAR: "clear",
+ OCLOSE: "close",
+ OCOMPLEX: "complex",
+ OBITNOT: "^",
+ OCONTINUE: "continue",
+ OCOPY: "copy",
+ ODELETE: "delete",
+ ODEFER: "defer",
+ ODIV: "/",
+ OEQ: "==",
+ OFALL: "fallthrough",
+ OFOR: "for",
+ OGE: ">=",
+ OGOTO: "goto",
+ OGT: ">",
+ OIF: "if",
+ OIMAG: "imag",
+ OINLMARK: "inlmark",
+ ODEREF: "*",
+ OLEN: "len",
+ OLE: "<=",
+ OLSH: "<<",
+ OLT: "<",
+ OMAKE: "make",
+ ONEG: "-",
+ OMAX: "max",
+ OMIN: "min",
+ OMOD: "%",
+ OMUL: "*",
+ ONEW: "new",
+ ONE: "!=",
+ ONOT: "!",
+ OOROR: "||",
+ OOR: "|",
+ OPANIC: "panic",
+ OPLUS: "+",
+ OPRINTLN: "println",
+ OPRINT: "print",
+ ORANGE: "range",
+ OREAL: "real",
+ ORECV: "<-",
+ ORECOVER: "recover",
+ ORETURN: "return",
+ ORSH: ">>",
+ OSELECT: "select",
+ OSEND: "<-",
+ OSUB: "-",
+ OSWITCH: "switch",
+ OUNSAFEADD: "unsafe.Add",
+ OUNSAFESLICE: "unsafe.Slice",
+ OUNSAFESLICEDATA: "unsafe.SliceData",
+ OUNSAFESTRING: "unsafe.String",
+ OUNSAFESTRINGDATA: "unsafe.StringData",
+ OXOR: "^",
+}
+
+// GoString returns the Go syntax for the Op, or else its name.
+func (o Op) GoString() string {
+ if int(o) < len(OpNames) && OpNames[o] != "" {
+ return OpNames[o]
+ }
+ return o.String()
+}
+
+// Format implements formatting for an Op.
+// The valid formats are:
+//
+// %v Go syntax ("+", "<-", "print")
+// %+v Debug syntax ("ADD", "RECV", "PRINT")
+func (o Op) Format(s fmt.State, verb rune) {
+ switch verb {
+ default:
+ fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
+ case 'v':
+ if s.Flag('+') {
+ // %+v is OMUL instead of "*"
+ io.WriteString(s, o.String())
+ return
+ }
+ io.WriteString(s, o.GoString())
+ }
+}
+
+// Node
+
+// fmtNode implements formatting for a Node n.
+// Every Node implementation must define a Format method that calls fmtNode.
+// The valid formats are:
+//
+// %v Go syntax
+// %L Go syntax followed by " (type T)" if type is known.
+// %+v Debug syntax, as in Dump.
+func fmtNode(n Node, s fmt.State, verb rune) {
+ // %+v prints Dump.
+ // Otherwise we print Go syntax.
+ if s.Flag('+') && verb == 'v' {
+ dumpNode(s, n, 1)
+ return
+ }
+
+ if verb != 'v' && verb != 'S' && verb != 'L' {
+ fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
+ return
+ }
+
+ if n == nil {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ t := n.Type()
+ if verb == 'L' && t != nil {
+ if t.Kind() == types.TNIL {
+ fmt.Fprint(s, "nil")
+ } else if n.Op() == ONAME && n.Name().AutoTemp() {
+ fmt.Fprintf(s, "%v value", t)
+ } else {
+ fmt.Fprintf(s, "%v (type %v)", n, t)
+ }
+ return
+ }
+
+ // TODO inlining produces expressions with ninits. we can't print these yet.
+
+ if OpPrec[n.Op()] < 0 {
+ stmtFmt(n, s)
+ return
+ }
+
+ exprFmt(n, s, 0)
+}
+
+var OpPrec = []int{
+ OAPPEND: 8,
+ OBYTES2STR: 8,
+ OARRAYLIT: 8,
+ OSLICELIT: 8,
+ ORUNES2STR: 8,
+ OCALLFUNC: 8,
+ OCALLINTER: 8,
+ OCALLMETH: 8,
+ OCALL: 8,
+ OCAP: 8,
+ OCLEAR: 8,
+ OCLOSE: 8,
+ OCOMPLIT: 8,
+ OCONVIFACE: 8,
+ OCONVNOP: 8,
+ OCONV: 8,
+ OCOPY: 8,
+ ODELETE: 8,
+ OGETG: 8,
+ OLEN: 8,
+ OLITERAL: 8,
+ OMAKESLICE: 8,
+ OMAKESLICECOPY: 8,
+ OMAKE: 8,
+ OMAPLIT: 8,
+ OMAX: 8,
+ OMIN: 8,
+ ONAME: 8,
+ ONEW: 8,
+ ONIL: 8,
+ ONONAME: 8,
+ OPANIC: 8,
+ OPAREN: 8,
+ OPRINTLN: 8,
+ OPRINT: 8,
+ ORUNESTR: 8,
+ OSLICE2ARR: 8,
+ OSLICE2ARRPTR: 8,
+ OSTR2BYTES: 8,
+ OSTR2RUNES: 8,
+ OSTRUCTLIT: 8,
+ OTYPE: 8,
+ OUNSAFEADD: 8,
+ OUNSAFESLICE: 8,
+ OUNSAFESLICEDATA: 8,
+ OUNSAFESTRING: 8,
+ OUNSAFESTRINGDATA: 8,
+ OINDEXMAP: 8,
+ OINDEX: 8,
+ OSLICE: 8,
+ OSLICESTR: 8,
+ OSLICEARR: 8,
+ OSLICE3: 8,
+ OSLICE3ARR: 8,
+ OSLICEHEADER: 8,
+ OSTRINGHEADER: 8,
+ ODOTINTER: 8,
+ ODOTMETH: 8,
+ ODOTPTR: 8,
+ ODOTTYPE2: 8,
+ ODOTTYPE: 8,
+ ODOT: 8,
+ OXDOT: 8,
+ OMETHVALUE: 8,
+ OMETHEXPR: 8,
+ OPLUS: 7,
+ ONOT: 7,
+ OBITNOT: 7,
+ ONEG: 7,
+ OADDR: 7,
+ ODEREF: 7,
+ ORECV: 7,
+ OMUL: 6,
+ ODIV: 6,
+ OMOD: 6,
+ OLSH: 6,
+ ORSH: 6,
+ OAND: 6,
+ OANDNOT: 6,
+ OADD: 5,
+ OSUB: 5,
+ OOR: 5,
+ OXOR: 5,
+ OEQ: 4,
+ OLT: 4,
+ OLE: 4,
+ OGE: 4,
+ OGT: 4,
+ ONE: 4,
+ OSEND: 3,
+ OANDAND: 2,
+ OOROR: 1,
+
+ // Statements handled by stmtfmt
+ OAS: -1,
+ OAS2: -1,
+ OAS2DOTTYPE: -1,
+ OAS2FUNC: -1,
+ OAS2MAPR: -1,
+ OAS2RECV: -1,
+ OASOP: -1,
+ OBLOCK: -1,
+ OBREAK: -1,
+ OCASE: -1,
+ OCONTINUE: -1,
+ ODCL: -1,
+ ODEFER: -1,
+ OFALL: -1,
+ OFOR: -1,
+ OGOTO: -1,
+ OIF: -1,
+ OLABEL: -1,
+ OGO: -1,
+ ORANGE: -1,
+ ORETURN: -1,
+ OSELECT: -1,
+ OSWITCH: -1,
+
+ OEND: 0,
+}
+
+// StmtWithInit reports whether op is a statement with an explicit init list.
+func StmtWithInit(op Op) bool {
+ switch op {
+ case OIF, OFOR, OSWITCH:
+ return true
+ }
+ return false
+}
+
+func stmtFmt(n Node, s fmt.State) {
+ // NOTE(rsc): This code used to support the text-based
+ // which was more aggressive about printing full Go syntax
+ // (for example, an actual loop instead of "for loop").
+ // The code is preserved for now in case we want to expand
+ // any of those shortenings later. Or maybe we will delete
+ // the code. But for now, keep it.
+ const exportFormat = false
+
+ // some statements allow for an init, but at most one,
+ // but we may have an arbitrary number added, eg by typecheck
+ // and inlining. If it doesn't fit the syntax, emit an enclosing
+ // block starting with the init statements.
+
+ // if we can just say "for" n->ninit; ... then do so
+ simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op())
+
+ // otherwise, print the inits as separate statements
+ complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat
+
+ // but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+ extrablock := complexinit && StmtWithInit(n.Op())
+
+ if extrablock {
+ fmt.Fprint(s, "{")
+ }
+
+ if complexinit {
+ fmt.Fprintf(s, " %v; ", n.Init())
+ }
+
+ switch n.Op() {
+ case ODCL:
+ n := n.(*Decl)
+ fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type())
+
+ // Don't export "v = <N>" initializing statements, hope they're always
+ // preceded by the DCL which will be re-parsed and typechecked to reproduce
+ // the "v = <N>" again.
+ case OAS:
+ n := n.(*AssignStmt)
+ if n.Def && !complexinit {
+ fmt.Fprintf(s, "%v := %v", n.X, n.Y)
+ } else {
+ fmt.Fprintf(s, "%v = %v", n.X, n.Y)
+ }
+
+ case OASOP:
+ n := n.(*AssignOpStmt)
+ if n.IncDec {
+ if n.AsOp == OADD {
+ fmt.Fprintf(s, "%v++", n.X)
+ } else {
+ fmt.Fprintf(s, "%v--", n.X)
+ }
+ break
+ }
+
+ fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y)
+
+ case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ n := n.(*AssignListStmt)
+ if n.Def && !complexinit {
+ fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs)
+ } else {
+ fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs)
+ }
+
+ case OBLOCK:
+ n := n.(*BlockStmt)
+ if len(n.List) != 0 {
+ fmt.Fprintf(s, "%v", n.List)
+ }
+
+ case ORETURN:
+ n := n.(*ReturnStmt)
+ fmt.Fprintf(s, "return %.v", n.Results)
+
+ case OTAILCALL:
+ n := n.(*TailCallStmt)
+ fmt.Fprintf(s, "tailcall %v", n.Call)
+
+ case OINLMARK:
+ n := n.(*InlineMarkStmt)
+ fmt.Fprintf(s, "inlmark %d", n.Index)
+
+ case OGO:
+ n := n.(*GoDeferStmt)
+ fmt.Fprintf(s, "go %v", n.Call)
+
+ case ODEFER:
+ n := n.(*GoDeferStmt)
+ fmt.Fprintf(s, "defer %v", n.Call)
+
+ case OIF:
+ n := n.(*IfStmt)
+ if simpleinit {
+ fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body)
+ } else {
+ fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body)
+ }
+ if len(n.Else) != 0 {
+ fmt.Fprintf(s, " else { %v }", n.Else)
+ }
+
+ case OFOR:
+ n := n.(*ForStmt)
+ if !exportFormat { // TODO maybe only if FmtShort, same below
+ fmt.Fprintf(s, "for loop")
+ break
+ }
+
+ fmt.Fprint(s, "for")
+ if n.DistinctVars {
+ fmt.Fprint(s, " /* distinct */")
+ }
+ if simpleinit {
+ fmt.Fprintf(s, " %v;", n.Init()[0])
+ } else if n.Post != nil {
+ fmt.Fprint(s, " ;")
+ }
+
+ if n.Cond != nil {
+ fmt.Fprintf(s, " %v", n.Cond)
+ }
+
+ if n.Post != nil {
+ fmt.Fprintf(s, "; %v", n.Post)
+ } else if simpleinit {
+ fmt.Fprint(s, ";")
+ }
+
+ fmt.Fprintf(s, " { %v }", n.Body)
+
+ case ORANGE:
+ n := n.(*RangeStmt)
+ if !exportFormat {
+ fmt.Fprint(s, "for loop")
+ break
+ }
+
+ fmt.Fprint(s, "for")
+ if n.Key != nil {
+ fmt.Fprintf(s, " %v", n.Key)
+ if n.Value != nil {
+ fmt.Fprintf(s, ", %v", n.Value)
+ }
+ fmt.Fprint(s, " =")
+ }
+ fmt.Fprintf(s, " range %v { %v }", n.X, n.Body)
+ if n.DistinctVars {
+ fmt.Fprint(s, " /* distinct vars */")
+ }
+
+ case OSELECT:
+ n := n.(*SelectStmt)
+ if !exportFormat {
+ fmt.Fprintf(s, "%v statement", n.Op())
+ break
+ }
+ fmt.Fprintf(s, "select { %v }", n.Cases)
+
+ case OSWITCH:
+ n := n.(*SwitchStmt)
+ if !exportFormat {
+ fmt.Fprintf(s, "%v statement", n.Op())
+ break
+ }
+ fmt.Fprintf(s, "switch")
+ if simpleinit {
+ fmt.Fprintf(s, " %v;", n.Init()[0])
+ }
+ if n.Tag != nil {
+ fmt.Fprintf(s, " %v ", n.Tag)
+ }
+ fmt.Fprintf(s, " { %v }", n.Cases)
+
+ case OCASE:
+ n := n.(*CaseClause)
+ if len(n.List) != 0 {
+ fmt.Fprintf(s, "case %.v", n.List)
+ } else {
+ fmt.Fprint(s, "default")
+ }
+ fmt.Fprintf(s, ": %v", n.Body)
+
+ case OBREAK, OCONTINUE, OGOTO, OFALL:
+ n := n.(*BranchStmt)
+ if n.Label != nil {
+ fmt.Fprintf(s, "%v %v", n.Op(), n.Label)
+ } else {
+ fmt.Fprintf(s, "%v", n.Op())
+ }
+
+ case OLABEL:
+ n := n.(*LabelStmt)
+ fmt.Fprintf(s, "%v: ", n.Label)
+ }
+
+ if extrablock {
+ fmt.Fprint(s, "}")
+ }
+}
+
+func exprFmt(n Node, s fmt.State, prec int) {
+ // NOTE(rsc): This code used to support the text-based
+ // which was more aggressive about printing full Go syntax
+ // (for example, an actual loop instead of "for loop").
+ // The code is preserved for now in case we want to expand
+ // any of those shortenings later. Or maybe we will delete
+ // the code. But for now, keep it.
+ const exportFormat = false
+
+ for {
+ if n == nil {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ // Skip implicit operations introduced during typechecking.
+ switch nn := n; nn.Op() {
+ case OADDR:
+ nn := nn.(*AddrExpr)
+ if nn.Implicit() {
+ n = nn.X
+ continue
+ }
+ case ODEREF:
+ nn := nn.(*StarExpr)
+ if nn.Implicit() {
+ n = nn.X
+ continue
+ }
+ case OCONV, OCONVNOP, OCONVIFACE:
+ nn := nn.(*ConvExpr)
+ if nn.Implicit() {
+ n = nn.X
+ continue
+ }
+ }
+
+ break
+ }
+
+ nprec := OpPrec[n.Op()]
+ if n.Op() == OTYPE && n.Type() != nil && n.Type().IsPtr() {
+ nprec = OpPrec[ODEREF]
+ }
+
+ if prec > nprec {
+ fmt.Fprintf(s, "(%v)", n)
+ return
+ }
+
+ switch n.Op() {
+ case OPAREN:
+ n := n.(*ParenExpr)
+ fmt.Fprintf(s, "(%v)", n.X)
+
+ case ONIL:
+ fmt.Fprint(s, "nil")
+
+ case OLITERAL:
+ if n.Sym() != nil {
+ fmt.Fprint(s, n.Sym())
+ return
+ }
+
+ typ := n.Type()
+ val := n.Val()
+
+ // Special case for rune constants.
+ if typ == types.RuneType || typ == types.UntypedRune {
+ if x, ok := constant.Uint64Val(val); ok && x <= utf8.MaxRune {
+ fmt.Fprintf(s, "%q", x)
+ return
+ }
+ }
+
+ // Only include typ if it's neither the default nor untyped type
+ // for the constant value.
+ if k := val.Kind(); typ == types.Types[types.DefaultKinds[k]] || typ == types.UntypedTypes[k] {
+ fmt.Fprint(s, val)
+ } else {
+ fmt.Fprintf(s, "%v(%v)", typ, val)
+ }
+
+ case ODCLFUNC:
+ n := n.(*Func)
+ if sym := n.Sym(); sym != nil {
+ fmt.Fprint(s, sym)
+ return
+ }
+ fmt.Fprintf(s, "<unnamed Func>")
+
+ case ONAME:
+ n := n.(*Name)
+ // Special case: name used as local variable in export.
+ // _ becomes ~b%d internally; print as _ for export
+ if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
+ fmt.Fprint(s, "_")
+ return
+ }
+ fallthrough
+ case ONONAME:
+ fmt.Fprint(s, n.Sym())
+
+ case OLINKSYMOFFSET:
+ n := n.(*LinksymOffsetExpr)
+ fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_)
+
+ case OTYPE:
+ if n.Type() == nil && n.Sym() != nil {
+ fmt.Fprint(s, n.Sym())
+ return
+ }
+ fmt.Fprintf(s, "%v", n.Type())
+
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ if !exportFormat {
+ fmt.Fprint(s, "func literal")
+ return
+ }
+ fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
+
+ case OPTRLIT:
+ n := n.(*AddrExpr)
+ fmt.Fprintf(s, "&%v", n.X)
+
+ case OCOMPLIT, OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+ n := n.(*CompLitExpr)
+ if n.Implicit() {
+ fmt.Fprintf(s, "... argument")
+ return
+ }
+ fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
+
+ case OKEY:
+ n := n.(*KeyExpr)
+ if n.Key != nil && n.Value != nil {
+ fmt.Fprintf(s, "%v:%v", n.Key, n.Value)
+ return
+ }
+
+ if n.Key == nil && n.Value != nil {
+ fmt.Fprintf(s, ":%v", n.Value)
+ return
+ }
+ if n.Key != nil && n.Value == nil {
+ fmt.Fprintf(s, "%v:", n.Key)
+ return
+ }
+ fmt.Fprint(s, ":")
+
+ case OSTRUCTKEY:
+ n := n.(*StructKeyExpr)
+ fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
+
+ case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR:
+ n := n.(*SelectorExpr)
+ exprFmt(n.X, s, nprec)
+ if n.Sel == nil {
+ fmt.Fprint(s, ".<nil>")
+ return
+ }
+ fmt.Fprintf(s, ".%s", n.Sel.Name)
+
+ case ODOTTYPE, ODOTTYPE2:
+ n := n.(*TypeAssertExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, ".(%v)", n.Type())
+
+ case OINDEX, OINDEXMAP:
+ n := n.(*IndexExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, "[%v]", n.Index)
+
+ case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+ n := n.(*SliceExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprint(s, "[")
+ if n.Low != nil {
+ fmt.Fprint(s, n.Low)
+ }
+ fmt.Fprint(s, ":")
+ if n.High != nil {
+ fmt.Fprint(s, n.High)
+ }
+ if n.Op().IsSlice3() {
+ fmt.Fprint(s, ":")
+ if n.Max != nil {
+ fmt.Fprint(s, n.Max)
+ }
+ }
+ fmt.Fprint(s, "]")
+
+ case OSLICEHEADER:
+ n := n.(*SliceHeaderExpr)
+ fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.Len, n.Cap)
+
+ case OCOMPLEX, OCOPY, OUNSAFEADD, OUNSAFESLICE:
+ n := n.(*BinaryExpr)
+ fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y)
+
+ case OCONV,
+ OCONVIFACE,
+ OCONVNOP,
+ OBYTES2STR,
+ ORUNES2STR,
+ OSTR2BYTES,
+ OSTR2RUNES,
+ ORUNESTR,
+ OSLICE2ARR,
+ OSLICE2ARRPTR:
+ n := n.(*ConvExpr)
+ if n.Type() == nil || n.Type().Sym() == nil {
+ fmt.Fprintf(s, "(%v)", n.Type())
+ } else {
+ fmt.Fprintf(s, "%v", n.Type())
+ }
+ fmt.Fprintf(s, "(%v)", n.X)
+
+ case OREAL,
+ OIMAG,
+ OCAP,
+ OCLEAR,
+ OCLOSE,
+ OLEN,
+ ONEW,
+ OPANIC:
+ n := n.(*UnaryExpr)
+ fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
+
+ case OAPPEND,
+ ODELETE,
+ OMAKE,
+ OMAX,
+ OMIN,
+ ORECOVER,
+ OPRINT,
+ OPRINTLN:
+ n := n.(*CallExpr)
+ if n.IsDDD {
+ fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
+ return
+ }
+ fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args)
+
+ case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
+ n := n.(*CallExpr)
+ exprFmt(n.Fun, s, nprec)
+ if n.IsDDD {
+ fmt.Fprintf(s, "(%.v...)", n.Args)
+ return
+ }
+ fmt.Fprintf(s, "(%.v)", n.Args)
+
+ case OINLCALL:
+ n := n.(*InlinedCallExpr)
+ // TODO(mdempsky): Print Init and/or Body?
+ if len(n.ReturnVars) == 1 {
+ fmt.Fprintf(s, "%v", n.ReturnVars[0])
+ return
+ }
+ fmt.Fprintf(s, "(.%v)", n.ReturnVars)
+
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ n := n.(*MakeExpr)
+ if n.Cap != nil {
+ fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap)
+ return
+ }
+ if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) {
+ fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len)
+ return
+ }
+ fmt.Fprintf(s, "make(%v)", n.Type())
+
+ case OMAKESLICECOPY:
+ n := n.(*MakeExpr)
+ fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap)
+
+ case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
+ // Unary
+ n := n.(*UnaryExpr)
+ fmt.Fprintf(s, "%v", n.Op())
+ if n.X != nil && n.X.Op() == n.Op() {
+ fmt.Fprint(s, " ")
+ }
+ exprFmt(n.X, s, nprec+1)
+
+ case OADDR:
+ n := n.(*AddrExpr)
+ fmt.Fprintf(s, "%v", n.Op())
+ if n.X != nil && n.X.Op() == n.Op() {
+ fmt.Fprint(s, " ")
+ }
+ exprFmt(n.X, s, nprec+1)
+
+ case ODEREF:
+ n := n.(*StarExpr)
+ fmt.Fprintf(s, "%v", n.Op())
+ exprFmt(n.X, s, nprec+1)
+
+ // Binary
+ case OADD,
+ OAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ ORSH,
+ OSUB,
+ OXOR:
+ n := n.(*BinaryExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, " %v ", n.Op())
+ exprFmt(n.Y, s, nprec+1)
+
+ case OANDAND,
+ OOROR:
+ n := n.(*LogicalExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, " %v ", n.Op())
+ exprFmt(n.Y, s, nprec+1)
+
+ case OSEND:
+ n := n.(*SendStmt)
+ exprFmt(n.Chan, s, nprec)
+ fmt.Fprintf(s, " <- ")
+ exprFmt(n.Value, s, nprec+1)
+
+ case OADDSTR:
+ n := n.(*AddStringExpr)
+ for i, n1 := range n.List {
+ if i != 0 {
+ fmt.Fprint(s, " + ")
+ }
+ exprFmt(n1, s, nprec)
+ }
+ default:
+ fmt.Fprintf(s, "<node %v>", n.Op())
+ }
+}
+
+func ellipsisIf(b bool) string {
+ if b {
+ return "..."
+ }
+ return ""
+}
+
+// Nodes
+
+// Format implements formatting for a Nodes.
+// The valid formats are:
+//
+// %v Go syntax, semicolon-separated
+// %.v Go syntax, comma-separated
+// %+v Debug syntax, as in DumpList.
+func (l Nodes) Format(s fmt.State, verb rune) {
+ if s.Flag('+') && verb == 'v' {
+ // %+v is DumpList output
+ dumpNodes(s, l, 1)
+ return
+ }
+
+ if verb != 'v' {
+ fmt.Fprintf(s, "%%!%c(Nodes)", verb)
+ return
+ }
+
+ sep := "; "
+ if _, ok := s.Precision(); ok { // %.v is expr list
+ sep = ", "
+ }
+
+ for i, n := range l {
+ fmt.Fprint(s, n)
+ if i+1 < len(l) {
+ fmt.Fprint(s, sep)
+ }
+ }
+}
+
+// Dump
+
+// Dump prints the message s followed by a debug dump of n.
+func Dump(s string, n Node) {
+ fmt.Printf("%s%+v\n", s, n)
+}
+
+// DumpList prints the message s followed by a debug dump of each node in the list.
+func DumpList(s string, list Nodes) {
+ var buf bytes.Buffer
+ FDumpList(&buf, s, list)
+ os.Stdout.Write(buf.Bytes())
+}
+
+// FDumpList prints to w the message s followed by a debug dump of each node in the list.
+func FDumpList(w io.Writer, s string, list Nodes) {
+ io.WriteString(w, s)
+ dumpNodes(w, list, 1)
+ io.WriteString(w, "\n")
+}
+
+// indent prints indentation to w.
+func indent(w io.Writer, depth int) {
+ fmt.Fprint(w, "\n")
+ for i := 0; i < depth; i++ {
+ fmt.Fprint(w, ". ")
+ }
+}
+
+// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
+var EscFmt func(n Node) string
+
+// dumpNodeHeader prints the debug-format node header line to w.
+func dumpNodeHeader(w io.Writer, n Node) {
+ // Useful to see which nodes in an AST printout are actually identical
+ if base.Debug.DumpPtrs != 0 {
+ fmt.Fprintf(w, " p(%p)", n)
+ }
+
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil {
+ // Useful to see where Defn is set and what node it points to
+ fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
+ }
+
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil {
+ // Useful to see where Defn is set and what node it points to
+ fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn)
+ }
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil {
+ // Useful to see where Defn is set and what node it points to
+ fmt.Fprintf(w, " outer(%p)", n.Name().Outer)
+ }
+
+ if EscFmt != nil {
+ if esc := EscFmt(n); esc != "" {
+ fmt.Fprintf(w, " %s", esc)
+ }
+ }
+
+ if n.Sym() != nil && n.Op() != ONAME && n.Op() != ONONAME && n.Op() != OTYPE {
+ fmt.Fprintf(w, " %+v", n.Sym())
+ }
+
+ // Print Node-specific fields of basic type in header line.
+ v := reflect.ValueOf(n).Elem()
+ t := v.Type()
+ nf := t.NumField()
+ for i := 0; i < nf; i++ {
+ tf := t.Field(i)
+ if tf.PkgPath != "" {
+ // skip unexported field - Interface will fail
+ continue
+ }
+ k := tf.Type.Kind()
+ if reflect.Bool <= k && k <= reflect.Complex128 {
+ name := strings.TrimSuffix(tf.Name, "_")
+ vf := v.Field(i)
+ vfi := vf.Interface()
+ if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && vf.IsZero() {
+ continue
+ }
+ if vfi == true {
+ fmt.Fprintf(w, " %s", name)
+ } else {
+ fmt.Fprintf(w, " %s:%+v", name, vf.Interface())
+ }
+ }
+ }
+
+ // Print Node-specific booleans by looking for methods.
+ // Different v, t from above - want *Struct not Struct, for methods.
+ v = reflect.ValueOf(n)
+ t = v.Type()
+ nm := t.NumMethod()
+ for i := 0; i < nm; i++ {
+ tm := t.Method(i)
+ if tm.PkgPath != "" {
+ // skip unexported method - call will fail
+ continue
+ }
+ m := v.Method(i)
+ mt := m.Type()
+ if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool {
+ // TODO(rsc): Remove the func/defer/recover wrapping,
+ // which is guarding against panics in miniExpr,
+ // once we get down to the simpler state in which
+ // nodes have no getter methods that aren't allowed to be called.
+ func() {
+ defer func() { recover() }()
+ if m.Call(nil)[0].Bool() {
+ name := strings.TrimSuffix(tm.Name, "_")
+ fmt.Fprintf(w, " %s", name)
+ }
+ }()
+ }
+ }
+
+ if n.Op() == OCLOSURE {
+ n := n.(*ClosureExpr)
+ if fn := n.Func; fn != nil && fn.Nname.Sym() != nil {
+ fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
+ }
+ }
+
+ if n.Type() != nil {
+ if n.Op() == OTYPE {
+ fmt.Fprintf(w, " type")
+ }
+ fmt.Fprintf(w, " %+v", n.Type())
+ }
+ if n.Typecheck() != 0 {
+ fmt.Fprintf(w, " tc(%d)", n.Typecheck())
+ }
+
+ if n.Pos().IsKnown() {
+ fmt.Fprint(w, " # ")
+ switch n.Pos().IsStmt() {
+ case src.PosNotStmt:
+ fmt.Fprint(w, "_") // "-" would be confusing
+ case src.PosIsStmt:
+ fmt.Fprint(w, "+")
+ }
+ sep := ""
+ base.Ctxt.AllPos(n.Pos(), func(pos src.Pos) {
+ fmt.Fprint(w, sep)
+ sep = " "
+ // TODO(mdempsky): Print line pragma details too.
+ file := filepath.Base(pos.Filename())
+ // Note: this output will be parsed by ssa/html.go:(*HTMLWriter).WriteAST. Keep in sync.
+ fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
+ })
+ }
+}
+
+func dumpNode(w io.Writer, n Node, depth int) {
+ indent(w, depth)
+ if depth > 40 {
+ fmt.Fprint(w, "...")
+ return
+ }
+
+ if n == nil {
+ fmt.Fprint(w, "NilIrNode")
+ return
+ }
+
+ if len(n.Init()) != 0 {
+ fmt.Fprintf(w, "%+v-init", n.Op())
+ dumpNodes(w, n.Init(), depth+1)
+ indent(w, depth)
+ }
+
+ switch n.Op() {
+ default:
+ fmt.Fprintf(w, "%+v", n.Op())
+ dumpNodeHeader(w, n)
+
+ case OLITERAL:
+ fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val())
+ dumpNodeHeader(w, n)
+ return
+
+ case ONAME, ONONAME:
+ if n.Sym() != nil {
+ fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym())
+ } else {
+ fmt.Fprintf(w, "%+v", n.Op())
+ }
+ dumpNodeHeader(w, n)
+ return
+
+ case OLINKSYMOFFSET:
+ n := n.(*LinksymOffsetExpr)
+ fmt.Fprintf(w, "%+v-%v", n.Op(), n.Linksym)
+ // Offset is almost always 0, so only print when it's interesting.
+ if n.Offset_ != 0 {
+ fmt.Fprintf(w, "%+v", n.Offset_)
+ }
+ dumpNodeHeader(w, n)
+
+ case OASOP:
+ n := n.(*AssignOpStmt)
+ fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp)
+ dumpNodeHeader(w, n)
+
+ case OTYPE:
+ fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym())
+ dumpNodeHeader(w, n)
+ return
+
+ case OCLOSURE:
+ fmt.Fprintf(w, "%+v", n.Op())
+ dumpNodeHeader(w, n)
+
+ case ODCLFUNC:
+ // Func has many fields we don't want to print.
+ // Bypass reflection and just print what we want.
+ n := n.(*Func)
+ fmt.Fprintf(w, "%+v", n.Op())
+ dumpNodeHeader(w, n)
+ fn := n
+ if len(fn.Dcl) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-Dcl", n.Op())
+ for _, dcl := range n.Dcl {
+ dumpNode(w, dcl, depth+1)
+ }
+ }
+ if len(fn.ClosureVars) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-ClosureVars", n.Op())
+ for _, cv := range fn.ClosureVars {
+ dumpNode(w, cv, depth+1)
+ }
+ }
+ if len(fn.Body) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-body", n.Op())
+ dumpNodes(w, fn.Body, depth+1)
+ }
+ return
+ }
+
+ v := reflect.ValueOf(n).Elem()
+ t := reflect.TypeOf(n).Elem()
+ nf := t.NumField()
+ for i := 0; i < nf; i++ {
+ tf := t.Field(i)
+ vf := v.Field(i)
+ if tf.PkgPath != "" {
+ // skip unexported field - Interface will fail
+ continue
+ }
+ switch tf.Type.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Slice:
+ if vf.IsNil() {
+ continue
+ }
+ }
+ name := strings.TrimSuffix(tf.Name, "_")
+ // Do not bother with field name header lines for the
+ // most common positional arguments: unary, binary expr,
+ // index expr, send stmt, go and defer call expression.
+ switch name {
+ case "X", "Y", "Index", "Chan", "Value", "Call":
+ name = ""
+ }
+ switch val := vf.Interface().(type) {
+ case Node:
+ if name != "" {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+ }
+ dumpNode(w, val, depth+1)
+ case Nodes:
+ if len(val) == 0 {
+ continue
+ }
+ if name != "" {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+ }
+ dumpNodes(w, val, depth+1)
+ default:
+ if vf.Kind() == reflect.Slice && vf.Type().Elem().Implements(nodeType) {
+ if vf.Len() == 0 {
+ continue
+ }
+ if name != "" {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+ }
+ for i, n := 0, vf.Len(); i < n; i++ {
+ dumpNode(w, vf.Index(i).Interface().(Node), depth+1)
+ }
+ }
+ }
+ }
+}
+
+var nodeType = reflect.TypeOf((*Node)(nil)).Elem()
+
+func dumpNodes(w io.Writer, list Nodes, depth int) {
+ if len(list) == 0 {
+ fmt.Fprintf(w, " <nil>")
+ return
+ }
+
+ for _, n := range list {
+ dumpNode(w, n, depth)
+ }
+}
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
new file mode 100644
index 0000000..303c5e4
--- /dev/null
+++ b/src/cmd/compile/internal/ir/func.go
@@ -0,0 +1,598 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Func corresponds to a single function in a Go program
+// (and vice versa: each function is denoted by exactly one *Func).
+//
+// There are multiple nodes that represent a Func in the IR.
+//
+// The ONAME node (Func.Nname) is used for plain references to it.
+// The ODCLFUNC node (the Func itself) is used for its declaration code.
+// The OCLOSURE node (Func.OClosure) is used for a reference to a
+// function literal.
+//
+// An imported function will have an ONAME node which points to a Func
+// with an empty body.
+// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME.
+// A function literal is represented directly by an OCLOSURE, but it also
+// has an ODCLFUNC (and a matching ONAME) representing the compiled
+// underlying form of the closure, which accesses the captured variables
+// using a special data structure passed in a register.
+//
+// A method declaration is represented like functions, except f.Sym
+// will be the qualified method name (e.g., "T.m").
+//
+// A method expression (T.M) is represented as an OMETHEXPR node,
+// in which n.Left and n.Right point to the type and method, respectively.
+// Each distinct mention of a method expression in the source code
+// constructs a fresh node.
+//
+// A method value (t.M) is represented by ODOTMETH/ODOTINTER
+// when it is called directly and by OMETHVALUE otherwise.
+// These are like method expressions, except that for ODOTMETH/ODOTINTER,
+// the method name is stored in Sym instead of Right.
+// Each OMETHVALUE ends up being implemented as a new
+// function, a bit like a closure, with its own ODCLFUNC.
+// The OMETHVALUE uses n.Func to record the linkage to
+// the generated ODCLFUNC, but there is no
+// pointer from the Func back to the OMETHVALUE.
+type Func struct {
+ miniNode
+ Body Nodes
+
+ Nname *Name // ONAME node
+ OClosure *ClosureExpr // OCLOSURE node
+
+ // ONAME nodes for all params/locals for this func/closure, does NOT
+ // include closurevars until transforming closures during walk.
+ // Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
+ // with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
+ // Anonymous and blank params are declared as ~pNN (for PPARAMs) and ~rNN (for PPARAMOUTs).
+ Dcl []*Name
+
+ // ClosureVars lists the free variables that are used within a
+ // function literal, but formally declared in an enclosing
+ // function. The variables in this slice are the closure function's
+ // own copy of the variables, which are used within its function
+ // body. They will also each have IsClosureVar set, and will have
+ // Byval set if they're captured by value.
+ ClosureVars []*Name
+
+ // Enclosed functions that need to be compiled.
+ // Populated during walk.
+ Closures []*Func
+
+ // Parents records the parent scope of each scope within a
+ // function. The root scope (0) has no parent, so the i'th
+ // scope's parent is stored at Parents[i-1].
+ Parents []ScopeID
+
+ // Marks records scope boundary changes.
+ Marks []Mark
+
+ FieldTrack map[*obj.LSym]struct{}
+ DebugInfo interface{}
+ LSym *obj.LSym // Linker object in this function's native ABI (Func.ABI)
+
+ Inl *Inline
+
+ // funcLitGen and goDeferGen track how many closures have been
+ // created in this function for function literals and go/defer
+ // wrappers, respectively. Used by closureName for creating unique
+ // function names.
+ //
+ // Tracking goDeferGen separately avoids wrappers throwing off
+ // function literal numbering (e.g., runtime/trace_test.TestTraceSymbolize.func11).
+ funcLitGen int32
+ goDeferGen int32
+
+ Label int32 // largest auto-generated label in this function
+
+ Endlineno src.XPos
+ WBPos src.XPos // position of first write barrier; see SetWBPos
+
+ Pragma PragmaFlag // go:xxx function annotations
+
+ flags bitset16
+
+ // ABI is a function's "definition" ABI. This is the ABI that
+ // this function's generated code is expecting to be called by.
+ //
+ // For most functions, this will be obj.ABIInternal. It may be
+ // a different ABI for functions defined in assembly or ABI wrappers.
+ //
+ // This is included in the export data and tracked across packages.
+ ABI obj.ABI
+ // ABIRefs is the set of ABIs by which this function is referenced.
+ // For ABIs other than this function's definition ABI, the
+ // compiler generates ABI wrapper functions. This is only tracked
+ // within a package.
+ ABIRefs obj.ABISet
+
+ NumDefers int32 // number of defer calls in the function
+ NumReturns int32 // number of explicit returns in the function
+
+ // NWBRCalls records the LSyms of functions called by this
+ // function for go:nowritebarrierrec analysis. Only filled in
+ // if nowritebarrierrecCheck != nil.
+ NWBRCalls *[]SymAndPos
+
+ // For wrapper functions, WrappedFunc point to the original Func.
+ // Currently only used for go/defer wrappers.
+ WrappedFunc *Func
+
+ // WasmImport is used by the //go:wasmimport directive to store info about
+ // a WebAssembly function import.
+ WasmImport *WasmImport
+}
+
+// WasmImport stores metadata associated with the //go:wasmimport pragma.
+type WasmImport struct {
+ Module string
+ Name string
+}
+
+// NewFunc returns a new Func with the given name and type.
+//
+// fpos is the position of the "func" token, and npos is the position
+// of the name identifier.
+//
+// TODO(mdempsky): I suspect there's no need for separate fpos and
+// npos.
+func NewFunc(fpos, npos src.XPos, sym *types.Sym, typ *types.Type) *Func {
+ name := NewNameAt(npos, sym, typ)
+ name.Class = PFUNC
+ sym.SetFunc(true)
+
+ fn := &Func{Nname: name}
+ fn.pos = fpos
+ fn.op = ODCLFUNC
+ // Most functions are ABIInternal. The importer or symabis
+ // pass may override this.
+ fn.ABI = obj.ABIInternal
+ fn.SetTypecheck(1)
+
+ name.Func = fn
+
+ return fn
+}
+
+func (f *Func) isStmt() {}
+
+func (n *Func) copy() Node { panic(n.no("copy")) }
+func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) }
+func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) }
+func (n *Func) editChildrenWithHidden(edit func(Node) Node) { editNodes(n.Body, edit) }
+
+func (f *Func) Type() *types.Type { return f.Nname.Type() }
+func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
+func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
+func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) }
+
+// An Inline holds fields used for function bodies that can be inlined.
+type Inline struct {
+ Cost int32 // heuristic cost of inlining this function
+
+ // Copy of Func.Dcl for use during inlining. This copy is needed
+ // because the function's Dcl may change from later compiler
+ // transformations. This field is also populated when a function
+ // from another package is imported and inlined.
+ Dcl []*Name
+ HaveDcl bool // whether we've loaded Dcl
+
+ // Function properties, encoded as a string (these are used for
+ // making inlining decisions). See cmd/compile/internal/inline/inlheur.
+ Properties string
+
+ // CanDelayResults reports whether it's safe for the inliner to delay
+ // initializing the result parameters until immediately before the
+ // "return" statement.
+ CanDelayResults bool
+}
+
+// A Mark represents a scope boundary.
+type Mark struct {
+ // Pos is the position of the token that marks the scope
+ // change.
+ Pos src.XPos
+
+ // Scope identifies the innermost scope to the right of Pos.
+ Scope ScopeID
+}
+
+// A ScopeID represents a lexical scope within a function.
+type ScopeID int32
+
+const (
+ funcDupok = 1 << iota // duplicate definitions ok
+ funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover())
+ funcABIWrapper // is an ABI wrapper (also set flagWrapper)
+ funcNeedctxt // function uses context register (has closure variables)
+ // true if closure inside a function; false if a simple function or a
+ // closure in a global variable initialization
+ funcIsHiddenClosure
+ funcIsDeadcodeClosure // true if closure is deadcode
+ funcHasDefer // contains a defer statement
+ funcNilCheckDisabled // disable nil checks when compiling this function
+ funcInlinabilityChecked // inliner has already determined whether the function is inlinable
+ funcNeverReturns // function never returns (in most cases calls panic(), os.Exit(), or equivalent)
+ funcOpenCodedDeferDisallowed // can't do open-coded defers
+ funcClosureResultsLost // closure is called indirectly and we lost track of its results; used by escape analysis
+ funcPackageInit // compiler emitted .init func for package
+)
+
+type SymAndPos struct {
+ Sym *obj.LSym // LSym of callee
+ Pos src.XPos // line of call
+}
+
+func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
+func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
+func (f *Func) ABIWrapper() bool { return f.flags&funcABIWrapper != 0 }
+func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
+func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
+func (f *Func) IsDeadcodeClosure() bool { return f.flags&funcIsDeadcodeClosure != 0 }
+func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
+func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
+func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
+func (f *Func) NeverReturns() bool { return f.flags&funcNeverReturns != 0 }
+func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
+func (f *Func) ClosureResultsLost() bool { return f.flags&funcClosureResultsLost != 0 }
+func (f *Func) IsPackageInit() bool { return f.flags&funcPackageInit != 0 }
+
+func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
+func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
+func (f *Func) SetABIWrapper(b bool) { f.flags.set(funcABIWrapper, b) }
+func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
+func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
+func (f *Func) SetIsDeadcodeClosure(b bool) { f.flags.set(funcIsDeadcodeClosure, b) }
+func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
+func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
+func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
+func (f *Func) SetNeverReturns(b bool) { f.flags.set(funcNeverReturns, b) }
+func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
+func (f *Func) SetClosureResultsLost(b bool) { f.flags.set(funcClosureResultsLost, b) }
+func (f *Func) SetIsPackageInit(b bool) { f.flags.set(funcPackageInit, b) }
+
+func (f *Func) SetWBPos(pos src.XPos) {
+ if base.Debug.WB != 0 {
+ base.WarnfAt(pos, "write barrier")
+ }
+ if !f.WBPos.IsKnown() {
+ f.WBPos = pos
+ }
+}
+
+// FuncName returns the name (without the package) of the function f.
+func FuncName(f *Func) string {
+ if f == nil || f.Nname == nil {
+ return "<nil>"
+ }
+ return f.Sym().Name
+}
+
+// PkgFuncName returns the name of the function referenced by f, with package
+// prepended.
+//
+// This differs from the compiler's internal convention where local functions
+// lack a package. This is primarily useful when the ultimate consumer of this
+// is a human looking at message.
+func PkgFuncName(f *Func) string {
+ if f == nil || f.Nname == nil {
+ return "<nil>"
+ }
+ s := f.Sym()
+ pkg := s.Pkg
+
+ return pkg.Path + "." + s.Name
+}
+
+// LinkFuncName returns the name of the function f, as it will appear in the
+// symbol table of the final linked binary.
+func LinkFuncName(f *Func) string {
+ if f == nil || f.Nname == nil {
+ return "<nil>"
+ }
+ s := f.Sym()
+ pkg := s.Pkg
+
+ return objabi.PathToPrefix(pkg.Path) + "." + s.Name
+}
+
+// ParseLinkFuncName parsers a symbol name (as returned from LinkFuncName) back
+// to the package path and local symbol name.
+func ParseLinkFuncName(name string) (pkg, sym string, err error) {
+ pkg, sym = splitPkg(name)
+ if pkg == "" {
+ return "", "", fmt.Errorf("no package path in name")
+ }
+
+ pkg, err = objabi.PrefixToPath(pkg) // unescape
+ if err != nil {
+ return "", "", fmt.Errorf("malformed package path: %v", err)
+ }
+
+ return pkg, sym, nil
+}
+
+// Borrowed from x/mod.
+func modPathOK(r rune) bool {
+ if r < utf8.RuneSelf {
+ return r == '-' || r == '.' || r == '_' || r == '~' ||
+ '0' <= r && r <= '9' ||
+ 'A' <= r && r <= 'Z' ||
+ 'a' <= r && r <= 'z'
+ }
+ return false
+}
+
+func escapedImportPathOK(r rune) bool {
+ return modPathOK(r) || r == '+' || r == '/' || r == '%'
+}
+
+// splitPkg splits the full linker symbol name into package and local symbol
+// name.
+func splitPkg(name string) (pkgpath, sym string) {
+ // package-sym split is at first dot after last the / that comes before
+ // any characters illegal in a package path.
+
+ lastSlashIdx := 0
+ for i, r := range name {
+ // Catches cases like:
+ // * example.foo[sync/atomic.Uint64].
+ // * example%2ecom.foo[sync/atomic.Uint64].
+ //
+ // Note that name is still escaped; unescape occurs after splitPkg.
+ if !escapedImportPathOK(r) {
+ break
+ }
+ if r == '/' {
+ lastSlashIdx = i
+ }
+ }
+ for i := lastSlashIdx; i < len(name); i++ {
+ r := name[i]
+ if r == '.' {
+ return name[:i], name[i+1:]
+ }
+ }
+
+ return "", name
+}
+
+var CurFunc *Func
+
+// WithFunc invokes do with CurFunc and base.Pos set to curfn and
+// curfn.Pos(), respectively, and then restores their previous values
+// before returning.
+func WithFunc(curfn *Func, do func()) {
+ oldfn, oldpos := CurFunc, base.Pos
+ defer func() { CurFunc, base.Pos = oldfn, oldpos }()
+
+ CurFunc, base.Pos = curfn, curfn.Pos()
+ do()
+}
+
+func FuncSymName(s *types.Sym) string {
+ return s.Name + "·f"
+}
+
+// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
+// and compiling runtime.
+func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
+ if base.Debug.Closure > 0 {
+ if clo.Esc() == EscHeap {
+ base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
+ } else {
+ base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
+ }
+ }
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
+ base.ErrorfAt(clo.Pos(), 0, "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
+ }
+}
+
+// IsTrivialClosure reports whether closure clo has an
+// empty list of captured vars.
+func IsTrivialClosure(clo *ClosureExpr) bool {
+ return len(clo.Func.ClosureVars) == 0
+}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int32
+
+// closureName generates a new unique name for a closure within outerfn at pos.
+func closureName(outerfn *Func, pos src.XPos, why Op) *types.Sym {
+ pkg := types.LocalPkg
+ outer := "glob."
+ var prefix string
+ switch why {
+ default:
+ base.FatalfAt(pos, "closureName: bad Op: %v", why)
+ case OCLOSURE:
+ if outerfn == nil || outerfn.OClosure == nil {
+ prefix = "func"
+ }
+ case OGO:
+ prefix = "gowrap"
+ case ODEFER:
+ prefix = "deferwrap"
+ }
+ gen := &globClosgen
+
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if outerfn != nil && !IsBlank(outerfn.Nname) {
+ pkg = outerfn.Sym().Pkg
+ outer = FuncName(outerfn)
+
+ if why == OCLOSURE {
+ gen = &outerfn.funcLitGen
+ } else {
+ gen = &outerfn.goDeferGen
+ }
+ }
+
+ // If this closure was created due to inlining, then incorporate any
+ // inlined functions' names into the closure's linker symbol name
+ // too (#60324).
+ if inlIndex := base.Ctxt.InnermostPos(pos).Base().InliningIndex(); inlIndex >= 0 {
+ names := []string{outer}
+ base.Ctxt.InlTree.AllParents(inlIndex, func(call obj.InlinedCall) {
+ names = append(names, call.Name)
+ })
+ outer = strings.Join(names, ".")
+ }
+
+ *gen++
+ return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// NewClosureFunc creates a new Func to represent a function literal
+// with the given type.
+//
+// fpos the position used for the underlying ODCLFUNC and ONAME,
+// whereas cpos is the position used for the OCLOSURE. They're
+// separate because in the presence of inlining, the OCLOSURE node
+// should have an inline-adjusted position, whereas the ODCLFUNC and
+// ONAME must not.
+//
+// outerfn is the enclosing function, if any. The returned function is
+// appending to pkg.Funcs.
+//
+// why is the reason we're generating this Func. It can be OCLOSURE
+// (for a normal function literal) or OGO or ODEFER (for wrapping a
+// call expression that has parameters or results).
+func NewClosureFunc(fpos, cpos src.XPos, why Op, typ *types.Type, outerfn *Func, pkg *Package) *Func {
+ fn := NewFunc(fpos, fpos, closureName(outerfn, cpos, why), typ)
+ fn.SetIsHiddenClosure(outerfn != nil)
+
+ clo := &ClosureExpr{Func: fn}
+ clo.op = OCLOSURE
+ clo.pos = cpos
+ clo.SetType(typ)
+ clo.SetTypecheck(1)
+ fn.OClosure = clo
+
+ fn.Nname.Defn = fn
+ pkg.Funcs = append(pkg.Funcs, fn)
+
+ return fn
+}
+
+// IsFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
+func IsFuncPCIntrinsic(n *CallExpr) bool {
+ if n.Op() != OCALLFUNC || n.Fun.Op() != ONAME {
+ return false
+ }
+ fn := n.Fun.(*Name).Sym()
+ return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
+ fn.Pkg.Path == "internal/abi"
+}
+
+// IsIfaceOfFunc inspects whether n is an interface conversion from a direct
+// reference of a func. If so, it returns referenced Func; otherwise nil.
+//
+// This is only usable before walk.walkConvertInterface, which converts to an
+// OMAKEFACE.
+func IsIfaceOfFunc(n Node) *Func {
+ if n, ok := n.(*ConvExpr); ok && n.Op() == OCONVIFACE {
+ if name, ok := n.X.(*Name); ok && name.Op() == ONAME && name.Class == PFUNC {
+ return name.Func
+ }
+ }
+ return nil
+}
+
+// FuncPC returns a uintptr-typed expression that evaluates to the PC of a
+// function as uintptr, as returned by internal/abi.FuncPC{ABI0,ABIInternal}.
+//
+// n should be a Node of an interface type, as is passed to
+// internal/abi.FuncPC{ABI0,ABIInternal}.
+//
+// TODO(prattmic): Since n is simply an interface{} there is no assertion that
+// it is actually a function at all. Perhaps we should emit a runtime type
+// assertion?
+func FuncPC(pos src.XPos, n Node, wantABI obj.ABI) Node {
+ if !n.Type().IsInterface() {
+ base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an interface value, got %v", wantABI, n.Type())
+ }
+
+ if fn := IsIfaceOfFunc(n); fn != nil {
+ name := fn.Nname
+ abi := fn.ABI
+ if abi != wantABI {
+ base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an %v function, %s is defined as %v", wantABI, wantABI, name.Sym().Name, abi)
+ }
+ var e Node = NewLinksymExpr(pos, name.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])
+ e = NewAddrExpr(pos, e)
+ e.SetType(types.Types[types.TUINTPTR].PtrTo())
+ e = NewConvExpr(pos, OCONVNOP, types.Types[types.TUINTPTR], e)
+ e.SetTypecheck(1)
+ return e
+ }
+ // fn is not a defined function. It must be ABIInternal.
+ // Read the address from func value, i.e. *(*uintptr)(idata(fn)).
+ if wantABI != obj.ABIInternal {
+ base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s does not accept func expression, which is ABIInternal", wantABI)
+ }
+ var e Node = NewUnaryExpr(pos, OIDATA, n)
+ e.SetType(types.Types[types.TUINTPTR].PtrTo())
+ e.SetTypecheck(1)
+ e = NewStarExpr(pos, e)
+ e.SetType(types.Types[types.TUINTPTR])
+ e.SetTypecheck(1)
+ return e
+}
+
+// DeclareParams creates Names for all of the parameters in fn's
+// signature and adds them to fn.Dcl.
+//
+// If setNname is true, then it also sets types.Field.Nname for each
+// parameter.
+func (fn *Func) DeclareParams(setNname bool) {
+ if fn.Dcl != nil {
+ base.FatalfAt(fn.Pos(), "%v already has Dcl", fn)
+ }
+
+ declareParams := func(params []*types.Field, ctxt Class, prefix string, offset int) {
+ for i, param := range params {
+ sym := param.Sym
+ if sym == nil || sym.IsBlank() {
+ sym = fn.Sym().Pkg.LookupNum(prefix, i)
+ }
+
+ name := NewNameAt(param.Pos, sym, param.Type)
+ name.Class = ctxt
+ name.Curfn = fn
+ fn.Dcl[offset+i] = name
+
+ if setNname {
+ param.Nname = name
+ }
+ }
+ }
+
+ sig := fn.Type()
+ params := sig.RecvParams()
+ results := sig.Results()
+
+ fn.Dcl = make([]*Name, len(params)+len(results))
+ declareParams(params, PPARAM, "~p", 0)
+ declareParams(results, PPARAMOUT, "~r", len(params))
+}
diff --git a/src/cmd/compile/internal/ir/func_test.go b/src/cmd/compile/internal/ir/func_test.go
new file mode 100644
index 0000000..5b40c02
--- /dev/null
+++ b/src/cmd/compile/internal/ir/func_test.go
@@ -0,0 +1,82 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "testing"
+)
+
+func TestSplitPkg(t *testing.T) {
+ tests := []struct {
+ in string
+ pkg string
+ sym string
+ }{
+ {
+ in: "foo.Bar",
+ pkg: "foo",
+ sym: "Bar",
+ },
+ {
+ in: "foo/bar.Baz",
+ pkg: "foo/bar",
+ sym: "Baz",
+ },
+ {
+ in: "memeqbody",
+ pkg: "",
+ sym: "memeqbody",
+ },
+ {
+ in: `example%2ecom.Bar`,
+ pkg: `example%2ecom`,
+ sym: "Bar",
+ },
+ {
+ // Not a real generated symbol name, but easier to catch the general parameter form.
+ in: `foo.Bar[sync/atomic.Uint64]`,
+ pkg: `foo`,
+ sym: "Bar[sync/atomic.Uint64]",
+ },
+ {
+ in: `example%2ecom.Bar[sync/atomic.Uint64]`,
+ pkg: `example%2ecom`,
+ sym: "Bar[sync/atomic.Uint64]",
+ },
+ {
+ in: `gopkg.in/yaml%2ev3.Bar[sync/atomic.Uint64]`,
+ pkg: `gopkg.in/yaml%2ev3`,
+ sym: "Bar[sync/atomic.Uint64]",
+ },
+ {
+ // This one is a real symbol name.
+ in: `foo.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+ pkg: `foo`,
+ sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+ },
+ {
+ in: `example%2ecom.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+ pkg: `example%2ecom`,
+ sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+ },
+ {
+ in: `gopkg.in/yaml%2ev3.Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]`,
+ pkg: `gopkg.in/yaml%2ev3`,
+ sym: "Bar[go.shape.struct { sync/atomic._ sync/atomic.noCopy; sync/atomic._ sync/atomic.align64; sync/atomic.v uint64 }]",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.in, func(t *testing.T) {
+ pkg, sym := splitPkg(tc.in)
+ if pkg != tc.pkg {
+ t.Errorf("splitPkg(%q) got pkg %q want %q", tc.in, pkg, tc.pkg)
+ }
+ if sym != tc.sym {
+ t.Errorf("splitPkg(%q) got sym %q want %q", tc.in, sym, tc.sym)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ir/ir.go b/src/cmd/compile/internal/ir/ir.go
new file mode 100644
index 0000000..82224ca
--- /dev/null
+++ b/src/cmd/compile/internal/ir/ir.go
@@ -0,0 +1,5 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
new file mode 100644
index 0000000..52c622d
--- /dev/null
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -0,0 +1,86 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mknode.go
+
+package ir
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+)
+
+// A miniNode is a minimal node implementation,
+// meant to be embedded as the first field in a larger node implementation,
+// at a cost of 8 bytes.
+//
+// A miniNode is NOT a valid Node by itself: the embedding struct
+// must at the least provide:
+//
+// func (n *MyNode) String() string { return fmt.Sprint(n) }
+// func (n *MyNode) rawCopy() Node { c := *n; return &c }
+// func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+//
+// The embedding struct should also fill in n.op in its constructor,
+// for more useful panic messages when invalid methods are called,
+// instead of implementing Op itself.
+type miniNode struct {
+ pos src.XPos // uint32
+ op Op // uint8
+ bits bitset8
+ esc uint16
+}
+
+// posOr returns pos if known, or else n.pos.
+// For use in DeepCopy.
+func (n *miniNode) posOr(pos src.XPos) src.XPos {
+ if pos.IsKnown() {
+ return pos
+ }
+ return n.pos
+}
+
+// op can be read, but not written.
+// An embedding implementation can provide a SetOp if desired.
+// (The panicking SetOp is with the other panics below.)
+func (n *miniNode) Op() Op { return n.op }
+func (n *miniNode) Pos() src.XPos { return n.pos }
+func (n *miniNode) SetPos(x src.XPos) { n.pos = x }
+func (n *miniNode) Esc() uint16 { return n.esc }
+func (n *miniNode) SetEsc(x uint16) { n.esc = x }
+
+const (
+ miniTypecheckShift = 0
+ miniWalked = 1 << 2 // to prevent/catch re-walking
+)
+
+func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
+func (n *miniNode) SetTypecheck(x uint8) {
+ if x > 2 {
+ panic(fmt.Sprintf("cannot SetTypecheck %d", x))
+ }
+ n.bits.set2(miniTypecheckShift, x)
+}
+
+func (n *miniNode) Walked() bool { return n.bits&miniWalked != 0 }
+func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
+
+// Empty, immutable graph structure.
+
+func (n *miniNode) Init() Nodes { return Nodes{} }
+
+// Additional functionality unavailable.
+
+func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
+
+func (n *miniNode) Type() *types.Type { return nil }
+func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
+func (n *miniNode) Name() *Name { return nil }
+func (n *miniNode) Sym() *types.Sym { return nil }
+func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
+func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
+func (n *miniNode) NonNil() bool { return false }
+func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }
diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go
new file mode 100644
index 0000000..ca78a03
--- /dev/null
+++ b/src/cmd/compile/internal/ir/mknode.go
@@ -0,0 +1,366 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// Note: this program must be run in this directory.
+// go run mknode.go
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io/fs"
+ "log"
+ "os"
+ "sort"
+ "strings"
+)
+
+var fset = token.NewFileSet()
+
+var buf bytes.Buffer
+
+// concreteNodes contains all concrete types in the package that implement Node
+// (except for the mini* types).
+var concreteNodes []*ast.TypeSpec
+
+// interfaceNodes contains all interface types in the package that implement Node.
+var interfaceNodes []*ast.TypeSpec
+
+// mini contains the embeddable mini types (miniNode, miniExpr, and miniStmt).
+var mini = map[string]*ast.TypeSpec{}
+
+// implementsNode reports whether the type t is one which represents a Node
+// in the AST.
+func implementsNode(t ast.Expr) bool {
+ id, ok := t.(*ast.Ident)
+ if !ok {
+ return false // only named types
+ }
+ for _, ts := range interfaceNodes {
+ if ts.Name.Name == id.Name {
+ return true
+ }
+ }
+ for _, ts := range concreteNodes {
+ if ts.Name.Name == id.Name {
+ return true
+ }
+ }
+ return false
+}
+
+func isMini(t ast.Expr) bool {
+ id, ok := t.(*ast.Ident)
+ return ok && mini[id.Name] != nil
+}
+
+func isNamedType(t ast.Expr, name string) bool {
+ if id, ok := t.(*ast.Ident); ok {
+ if id.Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+func main() {
+ fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "package ir")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, `import "fmt"`)
+
+ filter := func(file fs.FileInfo) bool {
+ return !strings.HasPrefix(file.Name(), "mknode")
+ }
+ pkgs, err := parser.ParseDir(fset, ".", filter, 0)
+ if err != nil {
+ panic(err)
+ }
+ pkg := pkgs["ir"]
+
+ // Find all the mini types. These let us determine which
+ // concrete types implement Node, so we need to find them first.
+ for _, f := range pkg.Files {
+ for _, d := range f.Decls {
+ g, ok := d.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, s := range g.Specs {
+ t, ok := s.(*ast.TypeSpec)
+ if !ok {
+ continue
+ }
+ if strings.HasPrefix(t.Name.Name, "mini") {
+ mini[t.Name.Name] = t
+ // Double-check that it is or embeds miniNode.
+ if t.Name.Name != "miniNode" {
+ s := t.Type.(*ast.StructType)
+ if !isNamedType(s.Fields.List[0].Type, "miniNode") {
+ panic(fmt.Sprintf("can't find miniNode in %s", t.Name.Name))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Find all the declarations of concrete types that implement Node.
+ for _, f := range pkg.Files {
+ for _, d := range f.Decls {
+ g, ok := d.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, s := range g.Specs {
+ t, ok := s.(*ast.TypeSpec)
+ if !ok {
+ continue
+ }
+ if strings.HasPrefix(t.Name.Name, "mini") {
+ // We don't treat the mini types as
+ // concrete implementations of Node
+ // (even though they are) because
+ // we only use them by embedding them.
+ continue
+ }
+ if isConcreteNode(t) {
+ concreteNodes = append(concreteNodes, t)
+ }
+ if isInterfaceNode(t) {
+ interfaceNodes = append(interfaceNodes, t)
+ }
+ }
+ }
+ }
+ // Sort for deterministic output.
+ sort.Slice(concreteNodes, func(i, j int) bool {
+ return concreteNodes[i].Name.Name < concreteNodes[j].Name.Name
+ })
+ // Generate code for each concrete type.
+ for _, t := range concreteNodes {
+ processType(t)
+ }
+ // Add some helpers.
+ generateHelpers()
+
+ // Format and write output.
+ out, err := format.Source(buf.Bytes())
+ if err != nil {
+ // write out mangled source so we can see the bug.
+ out = buf.Bytes()
+ }
+ err = os.WriteFile("node_gen.go", out, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// isConcreteNode reports whether the type t is a concrete type
+// implementing Node.
+func isConcreteNode(t *ast.TypeSpec) bool {
+ s, ok := t.Type.(*ast.StructType)
+ if !ok {
+ return false
+ }
+ for _, f := range s.Fields.List {
+ if isMini(f.Type) {
+ return true
+ }
+ }
+ return false
+}
+
+// isInterfaceNode reports whether the type t is an interface type
+// implementing Node (including Node itself).
+func isInterfaceNode(t *ast.TypeSpec) bool {
+ s, ok := t.Type.(*ast.InterfaceType)
+ if !ok {
+ return false
+ }
+ if t.Name.Name == "Node" {
+ return true
+ }
+ if t.Name.Name == "OrigNode" || t.Name.Name == "InitNode" {
+ // These we exempt from consideration (fields of
+ // this type don't need to be walked or copied).
+ return false
+ }
+
+ // Look for embedded Node type.
+ // Note that this doesn't handle multi-level embedding, but
+ // we have none of that at the moment.
+ for _, f := range s.Methods.List {
+ if len(f.Names) != 0 {
+ continue
+ }
+ if isNamedType(f.Type, "Node") {
+ return true
+ }
+ }
+ return false
+}
+
+func processType(t *ast.TypeSpec) {
+ name := t.Name.Name
+ fmt.Fprintf(&buf, "\n")
+ fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name)
+
+ switch name {
+ case "Name", "Func":
+ // Too specialized to automate.
+ return
+ }
+
+ s := t.Type.(*ast.StructType)
+ fields := s.Fields.List
+
+ // Expand any embedded fields.
+ for i := 0; i < len(fields); i++ {
+ f := fields[i]
+ if len(f.Names) != 0 {
+ continue // not embedded
+ }
+ if isMini(f.Type) {
+ // Insert the fields of the embedded type into the main type.
+ // (It would be easier just to append, but inserting in place
+ // matches the old mknode behavior.)
+ ss := mini[f.Type.(*ast.Ident).Name].Type.(*ast.StructType)
+ var f2 []*ast.Field
+ f2 = append(f2, fields[:i]...)
+ f2 = append(f2, ss.Fields.List...)
+ f2 = append(f2, fields[i+1:]...)
+ fields = f2
+ i--
+ continue
+ } else if isNamedType(f.Type, "origNode") {
+ // Ignore this field
+ copy(fields[i:], fields[i+1:])
+ fields = fields[:len(fields)-1]
+ i--
+ continue
+ } else {
+ panic("unknown embedded field " + fmt.Sprintf("%v", f.Type))
+ }
+ }
+ // Process fields.
+ var copyBody strings.Builder
+ var doChildrenBody strings.Builder
+ var editChildrenBody strings.Builder
+ var editChildrenWithHiddenBody strings.Builder
+ for _, f := range fields {
+ names := f.Names
+ ft := f.Type
+ hidden := false
+ if f.Tag != nil {
+ tag := f.Tag.Value[1 : len(f.Tag.Value)-1]
+ if strings.HasPrefix(tag, "mknode:") {
+ if tag[7:] == "\"-\"" {
+ if !isNamedType(ft, "Node") {
+ continue
+ }
+ hidden = true
+ } else {
+ panic(fmt.Sprintf("unexpected tag value: %s", tag))
+ }
+ }
+ }
+ if isNamedType(ft, "Nodes") {
+ // Nodes == []Node
+ ft = &ast.ArrayType{Elt: &ast.Ident{Name: "Node"}}
+ }
+ isSlice := false
+ if a, ok := ft.(*ast.ArrayType); ok && a.Len == nil {
+ isSlice = true
+ ft = a.Elt
+ }
+ isPtr := false
+ if p, ok := ft.(*ast.StarExpr); ok {
+ isPtr = true
+ ft = p.X
+ }
+ if !implementsNode(ft) {
+ continue
+ }
+ for _, name := range names {
+ ptr := ""
+ if isPtr {
+ ptr = "*"
+ }
+ if isSlice {
+ fmt.Fprintf(&editChildrenWithHiddenBody,
+ "edit%ss(n.%s, edit)\n", ft, name)
+ } else {
+ fmt.Fprintf(&editChildrenWithHiddenBody,
+ "if n.%s != nil {\nn.%s = edit(n.%s).(%s%s)\n}\n", name, name, name, ptr, ft)
+ }
+ if hidden {
+ continue
+ }
+ if isSlice {
+ fmt.Fprintf(&copyBody, "c.%s = copy%ss(c.%s)\n", name, ft, name)
+ fmt.Fprintf(&doChildrenBody,
+ "if do%ss(n.%s, do) {\nreturn true\n}\n", ft, name)
+ fmt.Fprintf(&editChildrenBody,
+ "edit%ss(n.%s, edit)\n", ft, name)
+ } else {
+ fmt.Fprintf(&doChildrenBody,
+ "if n.%s != nil && do(n.%s) {\nreturn true\n}\n", name, name)
+ fmt.Fprintf(&editChildrenBody,
+ "if n.%s != nil {\nn.%s = edit(n.%s).(%s%s)\n}\n", name, name, name, ptr, ft)
+ }
+ }
+ }
+ fmt.Fprintf(&buf, "func (n *%s) copy() Node {\nc := *n\n", name)
+ buf.WriteString(copyBody.String())
+ fmt.Fprintf(&buf, "return &c\n}\n")
+ fmt.Fprintf(&buf, "func (n *%s) doChildren(do func(Node) bool) bool {\n", name)
+ buf.WriteString(doChildrenBody.String())
+ fmt.Fprintf(&buf, "return false\n}\n")
+ fmt.Fprintf(&buf, "func (n *%s) editChildren(edit func(Node) Node) {\n", name)
+ buf.WriteString(editChildrenBody.String())
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "func (n *%s) editChildrenWithHidden(edit func(Node) Node) {\n", name)
+ buf.WriteString(editChildrenWithHiddenBody.String())
+ fmt.Fprintf(&buf, "}\n")
+}
+
+func generateHelpers() {
+ for _, typ := range []string{"CaseClause", "CommClause", "Name", "Node"} {
+ ptr := "*"
+ if typ == "Node" {
+ ptr = "" // interfaces don't need *
+ }
+ fmt.Fprintf(&buf, "\n")
+ fmt.Fprintf(&buf, "func copy%ss(list []%s%s) []%s%s {\n", typ, ptr, typ, ptr, typ)
+ fmt.Fprintf(&buf, "if list == nil { return nil }\n")
+ fmt.Fprintf(&buf, "c := make([]%s%s, len(list))\n", ptr, typ)
+ fmt.Fprintf(&buf, "copy(c, list)\n")
+ fmt.Fprintf(&buf, "return c\n")
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "func do%ss(list []%s%s, do func(Node) bool) bool {\n", typ, ptr, typ)
+ fmt.Fprintf(&buf, "for _, x := range list {\n")
+ fmt.Fprintf(&buf, "if x != nil && do(x) {\n")
+ fmt.Fprintf(&buf, "return true\n")
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "return false\n")
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "func edit%ss(list []%s%s, edit func(Node) Node) {\n", typ, ptr, typ)
+ fmt.Fprintf(&buf, "for i, x := range list {\n")
+ fmt.Fprintf(&buf, "if x != nil {\n")
+ fmt.Fprintf(&buf, "list[i] = edit(x).(%s%s)\n", ptr, typ)
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "}\n")
+ fmt.Fprintf(&buf, "}\n")
+ }
+}
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
new file mode 100644
index 0000000..2844c0b
--- /dev/null
+++ b/src/cmd/compile/internal/ir/name.go
@@ -0,0 +1,399 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+
+ "go/constant"
+)
+
+// An Ident is an identifier, possibly qualified.
+type Ident struct {
+ miniExpr
+ sym *types.Sym
+}
+
+func NewIdent(pos src.XPos, sym *types.Sym) *Ident {
+ n := new(Ident)
+ n.op = ONONAME
+ n.pos = pos
+ n.sym = sym
+ return n
+}
+
+func (n *Ident) Sym() *types.Sym { return n.sym }
+
+// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL).
+type Name struct {
+ miniExpr
+ BuiltinOp Op // uint8
+ Class Class // uint8
+ pragma PragmaFlag // int16
+ flags bitset16
+ DictIndex uint16 // index of the dictionary entry describing the type of this variable declaration plus 1
+ sym *types.Sym
+ Func *Func // TODO(austin): nil for I.M
+ Offset_ int64
+ val constant.Value
+ Opt interface{} // for use by escape analysis
+ Embed *[]Embed // list of embedded files, for ONAME var
+
+ // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
+ // For a closure var, the ONAME node of the original (outermost) captured variable.
+ // For the case-local variables of a type switch, the type switch guard (OTYPESW).
+ // For a range variable, the range statement (ORANGE)
+ // For a recv variable in a case of a select statement, the receive assignment (OSELRECV2)
+ // For the name of a function, points to corresponding Func node.
+ Defn Node
+
+ // The function, method, or closure in which local variable or param is declared.
+ Curfn *Func
+
+ Heapaddr *Name // temp holding heap address of param
+
+ // Outer points to the immediately enclosing function's copy of this
+ // closure variable. If not a closure variable, then Outer is nil.
+ Outer *Name
+}
+
+func (n *Name) isExpr() {}
+
+func (n *Name) copy() Node { panic(n.no("copy")) }
+func (n *Name) doChildren(do func(Node) bool) bool { return false }
+func (n *Name) editChildren(edit func(Node) Node) {}
+func (n *Name) editChildrenWithHidden(edit func(Node) Node) {}
+
+// RecordFrameOffset records the frame offset for the name.
+// It is used by package types when laying out function arguments.
+func (n *Name) RecordFrameOffset(offset int64) {
+ n.SetFrameOffset(offset)
+}
+
+// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+func NewNameAt(pos src.XPos, sym *types.Sym, typ *types.Type) *Name {
+ if sym == nil {
+ base.Fatalf("NewNameAt nil")
+ }
+ n := newNameAt(pos, ONAME, sym)
+ if typ != nil {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ }
+ return n
+}
+
+// NewBuiltin returns a new Name representing a builtin function,
+// either predeclared or from package unsafe.
+func NewBuiltin(sym *types.Sym, op Op) *Name {
+ n := newNameAt(src.NoXPos, ONAME, sym)
+ n.BuiltinOp = op
+ n.SetTypecheck(1)
+ sym.Def = n
+ return n
+}
+
+// NewLocal returns a new function-local variable with the given name and type.
+func (fn *Func) NewLocal(pos src.XPos, sym *types.Sym, typ *types.Type) *Name {
+ if fn.Dcl == nil {
+ base.FatalfAt(pos, "must call DeclParams on %v first", fn)
+ }
+
+ n := NewNameAt(pos, sym, typ)
+ n.Class = PAUTO
+ n.Curfn = fn
+ fn.Dcl = append(fn.Dcl, n)
+ return n
+}
+
+// NewDeclNameAt returns a new Name associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+ if sym == nil {
+ base.Fatalf("NewDeclNameAt nil")
+ }
+ switch op {
+ case ONAME, OTYPE, OLITERAL:
+ // ok
+ default:
+ base.Fatalf("NewDeclNameAt op %v", op)
+ }
+ return newNameAt(pos, op, sym)
+}
+
+// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos.
+func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name {
+ if sym == nil {
+ base.Fatalf("NewConstAt nil")
+ }
+ n := newNameAt(pos, OLITERAL, sym)
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ n.SetVal(val)
+ return n
+}
+
+// newNameAt is like NewNameAt but allows sym == nil.
+func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+ n := new(Name)
+ n.op = op
+ n.pos = pos
+ n.sym = sym
+ return n
+}
+
+func (n *Name) Name() *Name { return n }
+func (n *Name) Sym() *types.Sym { return n.sym }
+func (n *Name) SetSym(x *types.Sym) { n.sym = x }
+func (n *Name) SubOp() Op { return n.BuiltinOp }
+func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
+func (n *Name) SetFunc(x *Func) { n.Func = x }
+func (n *Name) FrameOffset() int64 { return n.Offset_ }
+func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
+
+func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
+func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) }
+
+func (*Name) CanBeNtype() {}
+func (*Name) CanBeAnSSASym() {}
+func (*Name) CanBeAnSSAAux() {}
+
+// Pragma returns the PragmaFlag for p, which must be for an OTYPE.
+func (n *Name) Pragma() PragmaFlag { return n.pragma }
+
+// SetPragma sets the PragmaFlag for p, which must be for an OTYPE.
+func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag }
+
+// Alias reports whether p, which must be for an OTYPE, is a type alias.
+func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
+
+// SetAlias sets whether p, which must be for an OTYPE, is a type alias.
+func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
+
+const (
+ nameReadonly = 1 << iota
+ nameByval // is the variable captured by value or by reference
+ nameNeedzero // if it contains pointers, needs to be zeroed on function entry
+ nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
+ nameUsed // for variable declared and not used error
+ nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn
+ nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
+ nameIsOutputParamInRegisters // output parameter in registers spills as an auto
+ nameAddrtaken // address taken, even if not moved to heap
+ nameInlFormal // PAUTO created by inliner, derived from callee formal
+ nameInlLocal // PAUTO created by inliner, derived from callee local
+ nameOpenDeferSlot // if temporary var storing info for open-coded defers
+ nameLibfuzzer8BitCounter // if PEXTERN should be assigned to __sancov_cntrs section
+ nameCoverageCounter // instrumentation counter var for cmd/cover
+ nameCoverageAuxVar // instrumentation pkg ID variable cmd/cover
+ nameAlias // is type name an alias
+)
+
+func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
+func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
+func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
+func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
+func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
+func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
+func (n *Name) IsOutputParamInRegisters() bool { return n.flags&nameIsOutputParamInRegisters != 0 }
+func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
+func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
+func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
+func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
+func (n *Name) Libfuzzer8BitCounter() bool { return n.flags&nameLibfuzzer8BitCounter != 0 }
+func (n *Name) CoverageCounter() bool { return n.flags&nameCoverageCounter != 0 }
+func (n *Name) CoverageAuxVar() bool { return n.flags&nameCoverageAuxVar != 0 }
+
+func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) }
+func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
+func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
+func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
+func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
+func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
+func (n *Name) SetIsOutputParamInRegisters(b bool) { n.flags.set(nameIsOutputParamInRegisters, b) }
+func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
+func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
+func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
+func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
+func (n *Name) SetLibfuzzer8BitCounter(b bool) { n.flags.set(nameLibfuzzer8BitCounter, b) }
+func (n *Name) SetCoverageCounter(b bool) { n.flags.set(nameCoverageCounter, b) }
+func (n *Name) SetCoverageAuxVar(b bool) { n.flags.set(nameCoverageAuxVar, b) }
+
+// OnStack reports whether variable n may reside on the stack.
+func (n *Name) OnStack() bool {
+ if n.Op() == ONAME {
+ switch n.Class {
+ case PPARAM, PPARAMOUT, PAUTO:
+ return n.Esc() != EscHeap
+ case PEXTERN, PAUTOHEAP:
+ return false
+ }
+ }
+ // Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed
+ // methods, but it can only recover from panics, not Fatalf.
+ panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n))
+}
+
+// MarkReadonly indicates that n is an ONAME with readonly contents.
+func (n *Name) MarkReadonly() {
+ if n.Op() != ONAME {
+ base.Fatalf("Node.MarkReadonly %v", n.Op())
+ }
+ n.setReadonly(true)
+ // Mark the linksym as readonly immediately
+ // so that the SSA backend can use this information.
+ // It will be overridden later during dumpglobls.
+ n.Linksym().Type = objabi.SRODATA
+}
+
+// Val returns the constant.Value for the node.
+func (n *Name) Val() constant.Value {
+ if n.val == nil {
+ return constant.MakeUnknown()
+ }
+ return n.val
+}
+
+// SetVal sets the constant.Value for the node.
+func (n *Name) SetVal(v constant.Value) {
+ if n.op != OLITERAL {
+ panic(n.no("SetVal"))
+ }
+ AssertValidTypeForConst(n.Type(), v)
+ n.val = v
+}
+
+// Canonical returns the logical declaration that n represents. If n
+// is a closure variable, then Canonical returns the original Name as
+// it appears in the function that immediately contains the
+// declaration. Otherwise, Canonical simply returns n itself.
+func (n *Name) Canonical() *Name {
+ if n.IsClosureVar() && n.Defn != nil {
+ n = n.Defn.(*Name)
+ }
+ return n
+}
+
+func (n *Name) SetByval(b bool) {
+ if n.Canonical() != n {
+ base.Fatalf("SetByval called on non-canonical variable: %v", n)
+ }
+ n.flags.set(nameByval, b)
+}
+
+func (n *Name) Byval() bool {
+ // We require byval to be set on the canonical variable, but we
+ // allow it to be accessed from any instance.
+ return n.Canonical().flags&nameByval != 0
+}
+
+// NewClosureVar returns a new closure variable for fn to refer to
+// outer variable n.
+func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
+ switch n.Class {
+ case PAUTO, PPARAM, PPARAMOUT, PAUTOHEAP:
+ // ok
+ default:
+ // Prevent mistaken capture of global variables.
+ base.Fatalf("NewClosureVar: %+v", n)
+ }
+
+ c := NewNameAt(pos, n.Sym(), n.Type())
+ c.Curfn = fn
+ c.Class = PAUTOHEAP
+ c.SetIsClosureVar(true)
+ c.Defn = n.Canonical()
+ c.Outer = n
+
+ fn.ClosureVars = append(fn.ClosureVars, c)
+
+ return c
+}
+
+// NewHiddenParam returns a new hidden parameter for fn with the given
+// name and type.
+func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Name {
+ if fn.OClosure != nil {
+ base.FatalfAt(fn.Pos(), "cannot add hidden parameters to closures")
+ }
+
+ fn.SetNeedctxt(true)
+
+ // Create a fake parameter, disassociated from any real function, to
+ // pretend to capture.
+ fake := NewNameAt(pos, sym, typ)
+ fake.Class = PPARAM
+ fake.SetByval(true)
+
+ return NewClosureVar(pos, fn, fake)
+}
+
+// SameSource reports whether two nodes refer to the same source
+// element.
+//
+// It exists to help incrementally migrate the compiler towards
+// allowing the introduction of IdentExpr (#42990). Once we have
+// IdentExpr, it will no longer be safe to directly compare Node
+// values to tell if they refer to the same Name. Instead, code will
+// need to explicitly get references to the underlying Name object(s),
+// and compare those instead.
+//
+// It will still be safe to compare Nodes directly for checking if two
+// nodes are syntactically the same. The SameSource function exists to
+// indicate code that intentionally compares Nodes for syntactic
+// equality as opposed to code that has yet to be updated in
+// preparation for IdentExpr.
+func SameSource(n1, n2 Node) bool {
+ return n1 == n2
+}
+
+// Uses reports whether expression x is a (direct) use of the given
+// variable.
+func Uses(x Node, v *Name) bool {
+ if v == nil || v.Op() != ONAME {
+ base.Fatalf("RefersTo bad Name: %v", v)
+ }
+ return x.Op() == ONAME && x.Name() == v
+}
+
+// DeclaredBy reports whether expression x refers (directly) to a
+// variable that was declared by the given statement.
+func DeclaredBy(x, stmt Node) bool {
+ if stmt == nil {
+ base.Fatalf("DeclaredBy nil")
+ }
+ return x.Op() == ONAME && SameSource(x.Name().Defn, stmt)
+}
+
+// The Class of a variable/function describes the "storage class"
+// of a variable or function. During parsing, storage classes are
+// called declaration contexts.
+type Class uint8
+
+//go:generate stringer -type=Class name.go
+const (
+ Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
+ PEXTERN // global variables
+ PAUTO // local variables
+ PAUTOHEAP // local variables or parameters moved to heap
+ PPARAM // input arguments
+ PPARAMOUT // output results
+ PTYPEPARAM // type params
+ PFUNC // global functions
+
+ // Careful: Class is stored in three bits in Node.flags.
+ _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
+)
+
+type Embed struct {
+ Pos src.XPos
+ Patterns []string
+}
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
new file mode 100644
index 0000000..6513386
--- /dev/null
+++ b/src/cmd/compile/internal/ir/node.go
@@ -0,0 +1,586 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// “Abstract” syntax representation.
+
+package ir
+
+import (
+ "fmt"
+ "go/constant"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// A Node is the abstract interface to an IR node.
+type Node interface {
+ // Formatting
+ Format(s fmt.State, verb rune)
+
+ // Source position.
+ Pos() src.XPos
+ SetPos(x src.XPos)
+
+ // For making copies. For Copy and SepCopy.
+ copy() Node
+
+ doChildren(func(Node) bool) bool
+ editChildren(func(Node) Node)
+ editChildrenWithHidden(func(Node) Node)
+
+ // Abstract graph structure, for generic traversals.
+ Op() Op
+ Init() Nodes
+
+ // Fields specific to certain Ops only.
+ Type() *types.Type
+ SetType(t *types.Type)
+ Name() *Name
+ Sym() *types.Sym
+ Val() constant.Value
+ SetVal(v constant.Value)
+
+ // Storage for analysis passes.
+ Esc() uint16
+ SetEsc(x uint16)
+
+ // Typecheck values:
+ // 0 means the node is not typechecked
+ // 1 means the node is completely typechecked
+ // 2 means typechecking of the node is in progress
+ Typecheck() uint8
+ SetTypecheck(x uint8)
+ NonNil() bool
+ MarkNonNil()
+}
+
+// Line returns n's position as a string. If n has been inlined,
+// it uses the outermost position where n has been inlined.
+func Line(n Node) string {
+ return base.FmtPos(n.Pos())
+}
+
+func IsSynthetic(n Node) bool {
+ name := n.Sym().Name
+ return name[0] == '.' || name[0] == '~'
+}
+
+// IsAutoTmp indicates if n was created by the compiler as a temporary,
+// based on the setting of the .AutoTemp flag in n's Name.
+func IsAutoTmp(n Node) bool {
+ if n == nil || n.Op() != ONAME {
+ return false
+ }
+ return n.Name().AutoTemp()
+}
+
+// MayBeShared reports whether n may occur in multiple places in the AST.
+// Extra care must be taken when mutating such a node.
+func MayBeShared(n Node) bool {
+ switch n.Op() {
+ case ONAME, OLITERAL, ONIL, OTYPE:
+ return true
+ }
+ return false
+}
+
+type InitNode interface {
+ Node
+ PtrInit() *Nodes
+ SetInit(x Nodes)
+}
+
+func TakeInit(n Node) Nodes {
+ init := n.Init()
+ if len(init) != 0 {
+ n.(InitNode).SetInit(nil)
+ }
+ return init
+}
+
+//go:generate stringer -type=Op -trimprefix=O node.go
+
+type Op uint8
+
+// Node ops.
+const (
+ OXXX Op = iota
+
+ // names
+ ONAME // var or func name
+ // Unnamed arg or return value: f(int, string) (int, error) { etc }
+ // Also used for a qualified package identifier that hasn't been resolved yet.
+ ONONAME
+ OTYPE // type name
+ OLITERAL // literal
+ ONIL // nil
+
+ // expressions
+ OADD // X + Y
+ OSUB // X - Y
+ OOR // X | Y
+ OXOR // X ^ Y
+ OADDSTR // +{List} (string addition, list elements are strings)
+ OADDR // &X
+ OANDAND // X && Y
+ OAPPEND // append(Args); after walk, X may contain elem type descriptor
+ OBYTES2STR // Type(X) (Type is string, X is a []byte)
+ OBYTES2STRTMP // Type(X) (Type is string, X is a []byte, ephemeral)
+ ORUNES2STR // Type(X) (Type is string, X is a []rune)
+ OSTR2BYTES // Type(X) (Type is []byte, X is a string)
+ OSTR2BYTESTMP // Type(X) (Type is []byte, X is a string, ephemeral)
+ OSTR2RUNES // Type(X) (Type is []rune, X is a string)
+ OSLICE2ARR // Type(X) (Type is [N]T, X is a []T)
+ OSLICE2ARRPTR // Type(X) (Type is *[N]T, X is a []T)
+ // X = Y or (if Def=true) X := Y
+ // If Def, then Init includes a DCL node for X.
+ OAS
+ // Lhs = Rhs (x, y, z = a, b, c) or (if Def=true) Lhs := Rhs
+ // If Def, then Init includes DCL nodes for Lhs
+ OAS2
+ OAS2DOTTYPE // Lhs = Rhs (x, ok = I.(int))
+ OAS2FUNC // Lhs = Rhs (x, y = f())
+ OAS2MAPR // Lhs = Rhs (x, ok = m["foo"])
+ OAS2RECV // Lhs = Rhs (x, ok = <-c)
+ OASOP // X AsOp= Y (x += y)
+ OCALL // X(Args) (function call, method call or type conversion)
+
+ // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+ // Prior to walk, they are: X(Args), where Args is all regular arguments.
+ // After walk, if any argument whose evaluation might requires temporary variable,
+ // that temporary variable will be pushed to Init, Args will contains an updated
+ // set of arguments.
+ OCALLFUNC // X(Args) (function call f(args))
+ OCALLMETH // X(Args) (direct method call x.Method(args))
+ OCALLINTER // X(Args) (interface method call x.Method(args))
+ OCAP // cap(X)
+ OCLEAR // clear(X)
+ OCLOSE // close(X)
+ OCLOSURE // func Type { Func.Closure.Body } (func literal)
+ OCOMPLIT // Type{List} (composite literal, not yet lowered to specific form)
+ OMAPLIT // Type{List} (composite literal, Type is map)
+ OSTRUCTLIT // Type{List} (composite literal, Type is struct)
+ OARRAYLIT // Type{List} (composite literal, Type is array)
+ OSLICELIT // Type{List} (composite literal, Type is slice), Len is slice length.
+ OPTRLIT // &X (X is composite literal)
+ OCONV // Type(X) (type conversion)
+ OCONVIFACE // Type(X) (type conversion, to interface)
+ OCONVNOP // Type(X) (type conversion, no effect)
+ OCOPY // copy(X, Y)
+ ODCL // var X (declares X of type X.Type)
+
+ // Used during parsing but don't last.
+ ODCLFUNC // func f() or func (r) f()
+
+ ODELETE // delete(Args)
+ ODOT // X.Sel (X is of struct type)
+ ODOTPTR // X.Sel (X is of pointer to struct type)
+ ODOTMETH // X.Sel (X is non-interface, Sel is method name)
+ ODOTINTER // X.Sel (X is interface, Sel is method name)
+ OXDOT // X.Sel (before rewrite to one of the preceding)
+ ODOTTYPE // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved); after walk, Itab contains address of interface type descriptor and Itab.X contains address of concrete type descriptor
+ ODOTTYPE2 // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, Itab contains address of interface type descriptor
+ OEQ // X == Y
+ ONE // X != Y
+ OLT // X < Y
+ OLE // X <= Y
+ OGE // X >= Y
+ OGT // X > Y
+ ODEREF // *X
+ OINDEX // X[Index] (index of array or slice)
+ OINDEXMAP // X[Index] (index of map)
+ OKEY // Key:Value (key:value in struct/array/map literal)
+ OSTRUCTKEY // Field:Value (key:value in struct literal, after type checking)
+ OLEN // len(X)
+ OMAKE // make(Args) (before type checking converts to one of the following)
+ OMAKECHAN // make(Type[, Len]) (type is chan)
+ OMAKEMAP // make(Type[, Len]) (type is map)
+ OMAKESLICE // make(Type[, Len[, Cap]]) (type is slice)
+ OMAKESLICECOPY // makeslicecopy(Type, Len, Cap) (type is slice; Len is length and Cap is the copied from slice)
+ // OMAKESLICECOPY is created by the order pass and corresponds to:
+ // s = make(Type, Len); copy(s, Cap)
+ //
+ // Bounded can be set on the node when Len == len(Cap) is known at compile time.
+ //
+ // This node is created so the walk pass can optimize this pattern which would
+ // otherwise be hard to detect after the order pass.
+ OMUL // X * Y
+ ODIV // X / Y
+ OMOD // X % Y
+ OLSH // X << Y
+ ORSH // X >> Y
+ OAND // X & Y
+ OANDNOT // X &^ Y
+ ONEW // new(X); corresponds to calls to new in source code
+ ONOT // !X
+ OBITNOT // ^X
+ OPLUS // +X
+ ONEG // -X
+ OOROR // X || Y
+ OPANIC // panic(X)
+ OPRINT // print(List)
+ OPRINTLN // println(List)
+ OPAREN // (X)
+ OSEND // Chan <- Value
+ OSLICE // X[Low : High] (X is untypechecked or slice)
+ OSLICEARR // X[Low : High] (X is pointer to array)
+ OSLICESTR // X[Low : High] (X is string)
+ OSLICE3 // X[Low : High : Max] (X is untypedchecked or slice)
+ OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
+ OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
+ OSTRINGHEADER // stringheader{Ptr, Len} (Ptr is unsafe.Pointer, Len is length)
+ ORECOVER // recover()
+ ORECOVERFP // recover(Args) w/ explicit FP argument
+ ORECV // <-X
+ ORUNESTR // Type(X) (Type is string, X is rune)
+ OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
+ OMIN // min(List)
+ OMAX // max(List)
+ OREAL // real(X)
+ OIMAG // imag(X)
+ OCOMPLEX // complex(X, Y)
+ OUNSAFEADD // unsafe.Add(X, Y)
+ OUNSAFESLICE // unsafe.Slice(X, Y)
+ OUNSAFESLICEDATA // unsafe.SliceData(X)
+ OUNSAFESTRING // unsafe.String(X, Y)
+ OUNSAFESTRINGDATA // unsafe.StringData(X)
+ OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver)
+ OMETHVALUE // X.Sel (method expression t.Method, not called)
+
+ // statements
+ OBLOCK // { List } (block of code)
+ OBREAK // break [Label]
+ // OCASE: case List: Body (List==nil means default)
+ // For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
+ // for nil) or an ODYNAMICTYPE indicating a runtime type for generics.
+ // If a type-switch variable is specified, Var is an
+ // ONAME for the version of the type-switch variable with the specified
+ // type.
+ OCASE
+ OCONTINUE // continue [Label]
+ ODEFER // defer Call
+ OFALL // fallthrough
+ OFOR // for Init; Cond; Post { Body }
+ OGOTO // goto Label
+ OIF // if Init; Cond { Then } else { Else }
+ OLABEL // Label:
+ OGO // go Call
+ ORANGE // for Key, Value = range X { Body }
+ ORETURN // return Results
+ OSELECT // select { Cases }
+ OSWITCH // switch Init; Expr { Cases }
+ // OTYPESW: X := Y.(type) (appears as .Tag of OSWITCH)
+ // X is nil if there is no type-switch variable
+ OTYPESW
+
+ // misc
+ // intermediate representation of an inlined call. Uses Init (assignments
+ // for the captured variables, parameters, retvars, & INLMARK op),
+ // Body (body of the inlined function), and ReturnVars (list of
+ // return values)
+ OINLCALL // intermediary representation of an inlined call.
+ OMAKEFACE // construct an interface value from rtype/itab and data pointers
+ OITAB // rtype/itab pointer of an interface value
+ OIDATA // data pointer of an interface value
+ OSPTR // base pointer of a slice or string. Bounded==1 means known non-nil.
+ OCFUNC // reference to c function pointer (not go func value)
+ OCHECKNIL // emit code to ensure pointer/interface not nil
+ ORESULT // result of a function call; Xoffset is stack offset
+ OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+ OLINKSYMOFFSET // offset within a name
+ OJUMPTABLE // A jump table structure for implementing dense expression switches
+ OINTERFACESWITCH // A type switch with interface cases
+
+ // opcodes for generics
+ ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
+ ODYNAMICDOTTYPE2 // x, ok = i.(T) where T is a type parameter (or derived from a type parameter)
+ ODYNAMICTYPE // a type node for type switches (represents a dynamic target type for a type switch)
+
+ // arch-specific opcodes
+ OTAILCALL // tail call to another function
+ OGETG // runtime.getg() (read g pointer)
+ OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame)
+ OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame)
+
+ OEND
+)
+
+// IsCmp reports whether op is a comparison operation (==, !=, <, <=,
+// >, or >=).
+func (op Op) IsCmp() bool {
+ switch op {
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
+ return true
+ }
+ return false
+}
+
+// Nodes is a slice of Node.
+type Nodes []Node
+
+// ToNodes returns s as a slice of Nodes.
+func ToNodes[T Node](s []T) Nodes {
+ res := make(Nodes, len(s))
+ for i, n := range s {
+ res[i] = n
+ }
+ return res
+}
+
+// Append appends entries to Nodes.
+func (n *Nodes) Append(a ...Node) {
+ if len(a) == 0 {
+ return
+ }
+ *n = append(*n, a...)
+}
+
+// Prepend prepends entries to Nodes.
+// If a slice is passed in, this will take ownership of it.
+func (n *Nodes) Prepend(a ...Node) {
+ if len(a) == 0 {
+ return
+ }
+ *n = append(a, *n...)
+}
+
+// Take clears n, returning its former contents.
+func (n *Nodes) Take() []Node {
+ ret := *n
+ *n = nil
+ return ret
+}
+
+// Copy returns a copy of the content of the slice.
+func (n Nodes) Copy() Nodes {
+ if n == nil {
+ return nil
+ }
+ c := make(Nodes, len(n))
+ copy(c, n)
+ return c
+}
+
+// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is
+// a ready-to-use empty queue.
+type NameQueue struct {
+ ring []*Name
+ head, tail int
+}
+
+// Empty reports whether q contains no Names.
+func (q *NameQueue) Empty() bool {
+ return q.head == q.tail
+}
+
+// PushRight appends n to the right of the queue.
+func (q *NameQueue) PushRight(n *Name) {
+ if len(q.ring) == 0 {
+ q.ring = make([]*Name, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]*Name, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = n
+ q.tail++
+}
+
+// PopLeft pops a Name from the left of the queue. It panics if q is
+// empty.
+func (q *NameQueue) PopLeft() *Name {
+ if q.Empty() {
+ panic("dequeue empty")
+ }
+ n := q.ring[q.head%len(q.ring)]
+ q.head++
+ return n
+}
+
+// NameSet is a set of Names.
+type NameSet map[*Name]struct{}
+
+// Has reports whether s contains n.
+func (s NameSet) Has(n *Name) bool {
+ _, isPresent := s[n]
+ return isPresent
+}
+
+// Add adds n to s.
+func (s *NameSet) Add(n *Name) {
+ if *s == nil {
+ *s = make(map[*Name]struct{})
+ }
+ (*s)[n] = struct{}{}
+}
+
+// Sorted returns s sorted according to less.
+func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
+ var res []*Name
+ for n := range s {
+ res = append(res, n)
+ }
+ sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
+ return res
+}
+
+type PragmaFlag uint16
+
+const (
+ // Func pragmas.
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrKeepAlive // pointers converted to uintptr must be kept alive
+ UintptrEscapes // pointers converted to uintptr escape
+
+ // Runtime-only func pragmas.
+ // See ../../../../runtime/HACKING.md for detailed descriptions.
+ Systemstack // func must run on system stack
+ Nowritebarrier // emit compiler error instead of write barrier
+ Nowritebarrierrec // error on write barrier in this or recursive callees
+ Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
+
+ // Go command pragmas
+ GoBuildPragma
+
+ RegisterParams // TODO(register args) remove after register abi is working
+
+)
+
+var BlankNode *Name
+
+func IsConst(n Node, ct constant.Kind) bool {
+ return ConstType(n) == ct
+}
+
+// IsNil reports whether n represents the universal untyped zero value "nil".
+func IsNil(n Node) bool {
+ return n != nil && n.Op() == ONIL
+}
+
+func IsBlank(n Node) bool {
+ if n == nil {
+ return false
+ }
+ return n.Sym().IsBlank()
+}
+
+// IsMethod reports whether n is a method.
+// n must be a function or a method.
+func IsMethod(n Node) bool {
+ return n.Type().Recv() != nil
+}
+
+// HasUniquePos reports whether n has a unique position that can be
+// used for reporting error messages.
+//
+// It's primarily used to distinguish references to named objects,
+// whose Pos will point back to their declaration position rather than
+// their usage position.
+func HasUniquePos(n Node) bool {
+ switch n.Op() {
+ case ONAME:
+ return false
+ case OLITERAL, ONIL, OTYPE:
+ if n.Sym() != nil {
+ return false
+ }
+ }
+
+ if !n.Pos().IsKnown() {
+ if base.Flag.K != 0 {
+ base.Warn("setlineno: unknown position (line 0)")
+ }
+ return false
+ }
+
+ return true
+}
+
+func SetPos(n Node) src.XPos {
+ lno := base.Pos
+ if n != nil && HasUniquePos(n) {
+ base.Pos = n.Pos()
+ }
+ return lno
+}
+
+// The result of InitExpr MUST be assigned back to n, e.g.
+//
+// n.X = InitExpr(init, n.X)
+func InitExpr(init []Node, expr Node) Node {
+ if len(init) == 0 {
+ return expr
+ }
+
+ n, ok := expr.(InitNode)
+ if !ok || MayBeShared(n) {
+ // Introduce OCONVNOP to hold init list.
+ n = NewConvExpr(base.Pos, OCONVNOP, nil, expr)
+ n.SetType(expr.Type())
+ n.SetTypecheck(1)
+ }
+
+ n.PtrInit().Prepend(init...)
+ return n
+}
+
+// what's the outer value that a write to n affects?
+// outer value means containing struct or array.
+func OuterValue(n Node) Node {
+ for {
+ switch nn := n; nn.Op() {
+ case OXDOT:
+ base.FatalfAt(n.Pos(), "OXDOT in OuterValue: %v", n)
+ case ODOT:
+ nn := nn.(*SelectorExpr)
+ n = nn.X
+ continue
+ case OPAREN:
+ nn := nn.(*ParenExpr)
+ n = nn.X
+ continue
+ case OCONVNOP:
+ nn := nn.(*ConvExpr)
+ n = nn.X
+ continue
+ case OINDEX:
+ nn := nn.(*IndexExpr)
+ if nn.X.Type() == nil {
+ base.Fatalf("OuterValue needs type for %v", nn.X)
+ }
+ if nn.X.Type().IsArray() {
+ n = nn.X
+ continue
+ }
+ }
+
+ return n
+ }
+}
+
+const (
+ EscUnknown = iota
+ EscNone // Does not escape to heap, result, or parameters.
+ EscHeap // Reachable from the heap
+ EscNever // By construction will not escape.
+)
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
new file mode 100644
index 0000000..fc28067
--- /dev/null
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -0,0 +1,1809 @@
+// Code generated by mknode.go. DO NOT EDIT.
+
+package ir
+
+import "fmt"
+
+func (n *AddStringExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AddStringExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ return &c
+}
+func (n *AddStringExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *AddStringExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+func (n *AddStringExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *AddrExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AddrExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *AddrExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *AddrExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+func (n *AddrExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignListStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Lhs = copyNodes(c.Lhs)
+ c.Rhs = copyNodes(c.Rhs)
+ return &c
+}
+func (n *AssignListStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.Lhs, do) {
+ return true
+ }
+ if doNodes(n.Rhs, do) {
+ return true
+ }
+ return false
+}
+func (n *AssignListStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Lhs, edit)
+ editNodes(n.Rhs, edit)
+}
+func (n *AssignListStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Lhs, edit)
+ editNodes(n.Rhs, edit)
+}
+
+func (n *AssignOpStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignOpStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *AssignOpStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *AssignOpStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+func (n *AssignOpStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *AssignStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *AssignStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *AssignStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+func (n *AssignStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *BasicLit) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BasicLit) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *BasicLit) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *BasicLit) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *BasicLit) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *BinaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BinaryExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *BinaryExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *BinaryExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+func (n *BinaryExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+}
+
+func (n *BlockStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BlockStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ return &c
+}
+func (n *BlockStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ return false
+}
+func (n *BlockStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+}
+func (n *BlockStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+}
+
+func (n *BranchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BranchStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *BranchStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *BranchStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *BranchStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *CallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CallExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Args = copyNodes(c.Args)
+ c.KeepAlive = copyNames(c.KeepAlive)
+ return &c
+}
+func (n *CallExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Fun != nil && do(n.Fun) {
+ return true
+ }
+ if doNodes(n.Args, do) {
+ return true
+ }
+ if doNames(n.KeepAlive, do) {
+ return true
+ }
+ return false
+}
+func (n *CallExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Fun != nil {
+ n.Fun = edit(n.Fun).(Node)
+ }
+ editNodes(n.Args, edit)
+ editNames(n.KeepAlive, edit)
+}
+func (n *CallExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Fun != nil {
+ n.Fun = edit(n.Fun).(Node)
+ }
+ editNodes(n.Args, edit)
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ editNames(n.KeepAlive, edit)
+}
+
+func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CaseClause) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ c.RTypes = copyNodes(c.RTypes)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *CaseClause) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Var != nil && do(n.Var) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ if doNodes(n.RTypes, do) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ return false
+}
+func (n *CaseClause) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Var != nil {
+ n.Var = edit(n.Var).(*Name)
+ }
+ editNodes(n.List, edit)
+ editNodes(n.RTypes, edit)
+ editNodes(n.Body, edit)
+}
+func (n *CaseClause) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Var != nil {
+ n.Var = edit(n.Var).(*Name)
+ }
+ editNodes(n.List, edit)
+ editNodes(n.RTypes, edit)
+ editNodes(n.Body, edit)
+}
+
+func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ClosureExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ClosureExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *ClosureExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+func (n *ClosureExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CommClause) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *CommClause) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Comm != nil && do(n.Comm) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ return false
+}
+func (n *CommClause) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Comm != nil {
+ n.Comm = edit(n.Comm).(Node)
+ }
+ editNodes(n.Body, edit)
+}
+func (n *CommClause) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Comm != nil {
+ n.Comm = edit(n.Comm).(Node)
+ }
+ editNodes(n.Body, edit)
+}
+
+func (n *CompLitExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CompLitExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ return &c
+}
+func (n *CompLitExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *CompLitExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+func (n *CompLitExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ConvExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ConvExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *ConvExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+func (n *ConvExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.TypeWord != nil {
+ n.TypeWord = edit(n.TypeWord).(Node)
+ }
+ if n.SrcRType != nil {
+ n.SrcRType = edit(n.SrcRType).(Node)
+ }
+ if n.ElemRType != nil {
+ n.ElemRType = edit(n.ElemRType).(Node)
+ }
+ if n.ElemElemRType != nil {
+ n.ElemElemRType = edit(n.ElemElemRType).(Node)
+ }
+}
+
+func (n *Decl) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *Decl) copy() Node {
+ c := *n
+ return &c
+}
+func (n *Decl) doChildren(do func(Node) bool) bool {
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *Decl) editChildren(edit func(Node) Node) {
+ if n.X != nil {
+ n.X = edit(n.X).(*Name)
+ }
+}
+func (n *Decl) editChildrenWithHidden(edit func(Node) Node) {
+ if n.X != nil {
+ n.X = edit(n.X).(*Name)
+ }
+}
+
+func (n *DynamicType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *DynamicType) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *DynamicType) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.RType != nil && do(n.RType) {
+ return true
+ }
+ if n.ITab != nil && do(n.ITab) {
+ return true
+ }
+ return false
+}
+func (n *DynamicType) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+func (n *DynamicType) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+
+func (n *DynamicTypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *DynamicTypeAssertExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *DynamicTypeAssertExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.SrcRType != nil && do(n.SrcRType) {
+ return true
+ }
+ if n.RType != nil && do(n.RType) {
+ return true
+ }
+ if n.ITab != nil && do(n.ITab) {
+ return true
+ }
+ return false
+}
+func (n *DynamicTypeAssertExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.SrcRType != nil {
+ n.SrcRType = edit(n.SrcRType).(Node)
+ }
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+func (n *DynamicTypeAssertExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.SrcRType != nil {
+ n.SrcRType = edit(n.SrcRType).(Node)
+ }
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+
+func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ForStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *ForStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Cond != nil && do(n.Cond) {
+ return true
+ }
+ if n.Post != nil && do(n.Post) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ return false
+}
+func (n *ForStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Cond != nil {
+ n.Cond = edit(n.Cond).(Node)
+ }
+ if n.Post != nil {
+ n.Post = edit(n.Post).(Node)
+ }
+ editNodes(n.Body, edit)
+}
+func (n *ForStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Cond != nil {
+ n.Cond = edit(n.Cond).(Node)
+ }
+ if n.Post != nil {
+ n.Post = edit(n.Post).(Node)
+ }
+ editNodes(n.Body, edit)
+}
+
+func (n *Func) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+
+func (n *GoDeferStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *GoDeferStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *GoDeferStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Call != nil && do(n.Call) {
+ return true
+ }
+ return false
+}
+func (n *GoDeferStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(Node)
+ }
+}
+func (n *GoDeferStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(Node)
+ }
+}
+
+func (n *Ident) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *Ident) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *Ident) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *Ident) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *Ident) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *IfStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *IfStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ c.Else = copyNodes(c.Else)
+ return &c
+}
+func (n *IfStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Cond != nil && do(n.Cond) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ if doNodes(n.Else, do) {
+ return true
+ }
+ return false
+}
+func (n *IfStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Cond != nil {
+ n.Cond = edit(n.Cond).(Node)
+ }
+ editNodes(n.Body, edit)
+ editNodes(n.Else, edit)
+}
+func (n *IfStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Cond != nil {
+ n.Cond = edit(n.Cond).(Node)
+ }
+ editNodes(n.Body, edit)
+ editNodes(n.Else, edit)
+}
+
+func (n *IndexExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *IndexExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *IndexExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Index != nil && do(n.Index) {
+ return true
+ }
+ return false
+}
+func (n *IndexExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Index != nil {
+ n.Index = edit(n.Index).(Node)
+ }
+}
+func (n *IndexExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Index != nil {
+ n.Index = edit(n.Index).(Node)
+ }
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+}
+
+func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InlineMarkStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *InlineMarkStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *InlineMarkStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *InlineMarkStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InlinedCallExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ c.ReturnVars = copyNodes(c.ReturnVars)
+ return &c
+}
+func (n *InlinedCallExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ if doNodes(n.ReturnVars, do) {
+ return true
+ }
+ return false
+}
+func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Body, edit)
+ editNodes(n.ReturnVars, edit)
+}
+func (n *InlinedCallExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Body, edit)
+ editNodes(n.ReturnVars, edit)
+}
+
+func (n *InterfaceSwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InterfaceSwitchStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *InterfaceSwitchStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Case != nil && do(n.Case) {
+ return true
+ }
+ if n.Itab != nil && do(n.Itab) {
+ return true
+ }
+ if n.RuntimeType != nil && do(n.RuntimeType) {
+ return true
+ }
+ return false
+}
+func (n *InterfaceSwitchStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Case != nil {
+ n.Case = edit(n.Case).(Node)
+ }
+ if n.Itab != nil {
+ n.Itab = edit(n.Itab).(Node)
+ }
+ if n.RuntimeType != nil {
+ n.RuntimeType = edit(n.RuntimeType).(Node)
+ }
+}
+func (n *InterfaceSwitchStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Case != nil {
+ n.Case = edit(n.Case).(Node)
+ }
+ if n.Itab != nil {
+ n.Itab = edit(n.Itab).(Node)
+ }
+ if n.RuntimeType != nil {
+ n.RuntimeType = edit(n.RuntimeType).(Node)
+ }
+}
+
+func (n *JumpTableStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *JumpTableStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *JumpTableStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Idx != nil && do(n.Idx) {
+ return true
+ }
+ return false
+}
+func (n *JumpTableStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Idx != nil {
+ n.Idx = edit(n.Idx).(Node)
+ }
+}
+func (n *JumpTableStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Idx != nil {
+ n.Idx = edit(n.Idx).(Node)
+ }
+}
+
+func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *KeyExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *KeyExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Key != nil && do(n.Key) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ return false
+}
+func (n *KeyExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+func (n *KeyExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+
+func (n *LabelStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LabelStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LabelStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *LabelStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *LabelStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LinksymOffsetExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *LinksymOffsetExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LogicalExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LogicalExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *LogicalExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+func (n *LogicalExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *MakeExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *MakeExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *MakeExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Len != nil && do(n.Len) {
+ return true
+ }
+ if n.Cap != nil && do(n.Cap) {
+ return true
+ }
+ return false
+}
+func (n *MakeExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Cap != nil {
+ n.Cap = edit(n.Cap).(Node)
+ }
+}
+func (n *MakeExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Cap != nil {
+ n.Cap = edit(n.Cap).(Node)
+ }
+}
+
+func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+
+func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *NilExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *NilExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *NilExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *NilExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ParenExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ParenExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *ParenExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+func (n *ParenExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RangeStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *RangeStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Key != nil && do(n.Key) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *RangeStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+ editNodes(n.Body, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+func (n *RangeStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.RType != nil {
+ n.RType = edit(n.RType).(Node)
+ }
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+ editNodes(n.Body, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+ if n.KeyTypeWord != nil {
+ n.KeyTypeWord = edit(n.KeyTypeWord).(Node)
+ }
+ if n.KeySrcRType != nil {
+ n.KeySrcRType = edit(n.KeySrcRType).(Node)
+ }
+ if n.ValueTypeWord != nil {
+ n.ValueTypeWord = edit(n.ValueTypeWord).(Node)
+ }
+ if n.ValueSrcRType != nil {
+ n.ValueSrcRType = edit(n.ValueSrcRType).(Node)
+ }
+}
+
+func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ResultExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ResultExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *ResultExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+func (n *ResultExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *ReturnStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ReturnStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Results = copyNodes(c.Results)
+ return &c
+}
+func (n *ReturnStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.Results, do) {
+ return true
+ }
+ return false
+}
+func (n *ReturnStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Results, edit)
+}
+func (n *ReturnStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Results, edit)
+}
+
+func (n *SelectStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SelectStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Cases = copyCommClauses(c.Cases)
+ c.Compiled = copyNodes(c.Compiled)
+ return &c
+}
+func (n *SelectStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doCommClauses(n.Cases, do) {
+ return true
+ }
+ if doNodes(n.Compiled, do) {
+ return true
+ }
+ return false
+}
+func (n *SelectStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editCommClauses(n.Cases, edit)
+ editNodes(n.Compiled, edit)
+}
+func (n *SelectStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editCommClauses(n.Cases, edit)
+ editNodes(n.Compiled, edit)
+}
+
+func (n *SelectorExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SelectorExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SelectorExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *SelectorExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+func (n *SelectorExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *SendStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SendStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SendStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Chan != nil && do(n.Chan) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ return false
+}
+func (n *SendStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Chan != nil {
+ n.Chan = edit(n.Chan).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+func (n *SendStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Chan != nil {
+ n.Chan = edit(n.Chan).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+
+func (n *SliceExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SliceExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Low != nil && do(n.Low) {
+ return true
+ }
+ if n.High != nil && do(n.High) {
+ return true
+ }
+ if n.Max != nil && do(n.Max) {
+ return true
+ }
+ return false
+}
+func (n *SliceExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Low != nil {
+ n.Low = edit(n.Low).(Node)
+ }
+ if n.High != nil {
+ n.High = edit(n.High).(Node)
+ }
+ if n.Max != nil {
+ n.Max = edit(n.Max).(Node)
+ }
+}
+func (n *SliceExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Low != nil {
+ n.Low = edit(n.Low).(Node)
+ }
+ if n.High != nil {
+ n.High = edit(n.High).(Node)
+ }
+ if n.Max != nil {
+ n.Max = edit(n.Max).(Node)
+ }
+}
+
+func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceHeaderExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SliceHeaderExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Ptr != nil && do(n.Ptr) {
+ return true
+ }
+ if n.Len != nil && do(n.Len) {
+ return true
+ }
+ if n.Cap != nil && do(n.Cap) {
+ return true
+ }
+ return false
+}
+func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Ptr != nil {
+ n.Ptr = edit(n.Ptr).(Node)
+ }
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Cap != nil {
+ n.Cap = edit(n.Cap).(Node)
+ }
+}
+func (n *SliceHeaderExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Ptr != nil {
+ n.Ptr = edit(n.Ptr).(Node)
+ }
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Cap != nil {
+ n.Cap = edit(n.Cap).(Node)
+ }
+}
+
+func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StarExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *StarExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *StarExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+func (n *StarExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *StringHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StringHeaderExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *StringHeaderExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Ptr != nil && do(n.Ptr) {
+ return true
+ }
+ if n.Len != nil && do(n.Len) {
+ return true
+ }
+ return false
+}
+func (n *StringHeaderExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Ptr != nil {
+ n.Ptr = edit(n.Ptr).(Node)
+ }
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+}
+func (n *StringHeaderExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Ptr != nil {
+ n.Ptr = edit(n.Ptr).(Node)
+ }
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+}
+
+func (n *StructKeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StructKeyExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *StructKeyExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ return false
+}
+func (n *StructKeyExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+func (n *StructKeyExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+
+func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SwitchStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Cases = copyCaseClauses(c.Cases)
+ c.Compiled = copyNodes(c.Compiled)
+ return &c
+}
+func (n *SwitchStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Tag != nil && do(n.Tag) {
+ return true
+ }
+ if doCaseClauses(n.Cases, do) {
+ return true
+ }
+ if doNodes(n.Compiled, do) {
+ return true
+ }
+ return false
+}
+func (n *SwitchStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Tag != nil {
+ n.Tag = edit(n.Tag).(Node)
+ }
+ editCaseClauses(n.Cases, edit)
+ editNodes(n.Compiled, edit)
+}
+func (n *SwitchStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Tag != nil {
+ n.Tag = edit(n.Tag).(Node)
+ }
+ editCaseClauses(n.Cases, edit)
+ editNodes(n.Compiled, edit)
+}
+
+func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TailCallStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *TailCallStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Call != nil && do(n.Call) {
+ return true
+ }
+ return false
+}
+func (n *TailCallStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(*CallExpr)
+ }
+}
+func (n *TailCallStmt) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(*CallExpr)
+ }
+}
+
+func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TypeAssertExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *TypeAssertExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *TypeAssertExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+func (n *TypeAssertExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+
+func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TypeSwitchGuard) copy() Node {
+ c := *n
+ return &c
+}
+func (n *TypeSwitchGuard) doChildren(do func(Node) bool) bool {
+ if n.Tag != nil && do(n.Tag) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
+ if n.Tag != nil {
+ n.Tag = edit(n.Tag).(*Ident)
+ }
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+func (n *TypeSwitchGuard) editChildrenWithHidden(edit func(Node) Node) {
+ if n.Tag != nil {
+ n.Tag = edit(n.Tag).(*Ident)
+ }
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *UnaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *UnaryExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *UnaryExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *UnaryExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+func (n *UnaryExpr) editChildrenWithHidden(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *typeNode) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *typeNode) copy() Node {
+ c := *n
+ return &c
+}
+func (n *typeNode) doChildren(do func(Node) bool) bool {
+ return false
+}
+func (n *typeNode) editChildren(edit func(Node) Node) {
+}
+func (n *typeNode) editChildrenWithHidden(edit func(Node) Node) {
+}
+
+func copyCaseClauses(list []*CaseClause) []*CaseClause {
+ if list == nil {
+ return nil
+ }
+ c := make([]*CaseClause, len(list))
+ copy(c, list)
+ return c
+}
+func doCaseClauses(list []*CaseClause, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editCaseClauses(list []*CaseClause, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(*CaseClause)
+ }
+ }
+}
+
+func copyCommClauses(list []*CommClause) []*CommClause {
+ if list == nil {
+ return nil
+ }
+ c := make([]*CommClause, len(list))
+ copy(c, list)
+ return c
+}
+func doCommClauses(list []*CommClause, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editCommClauses(list []*CommClause, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(*CommClause)
+ }
+ }
+}
+
+func copyNames(list []*Name) []*Name {
+ if list == nil {
+ return nil
+ }
+ c := make([]*Name, len(list))
+ copy(c, list)
+ return c
+}
+func doNames(list []*Name, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editNames(list []*Name, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(*Name)
+ }
+ }
+}
+
+func copyNodes(list []Node) []Node {
+ if list == nil {
+ return nil
+ }
+ c := make([]Node, len(list))
+ copy(c, list)
+ return c
+}
+func doNodes(list []Node, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editNodes(list []Node, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(Node)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
new file mode 100644
index 0000000..fb97ac6
--- /dev/null
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -0,0 +1,174 @@
+// Code generated by "stringer -type=Op -trimprefix=O node.go"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[OXXX-0]
+ _ = x[ONAME-1]
+ _ = x[ONONAME-2]
+ _ = x[OTYPE-3]
+ _ = x[OLITERAL-4]
+ _ = x[ONIL-5]
+ _ = x[OADD-6]
+ _ = x[OSUB-7]
+ _ = x[OOR-8]
+ _ = x[OXOR-9]
+ _ = x[OADDSTR-10]
+ _ = x[OADDR-11]
+ _ = x[OANDAND-12]
+ _ = x[OAPPEND-13]
+ _ = x[OBYTES2STR-14]
+ _ = x[OBYTES2STRTMP-15]
+ _ = x[ORUNES2STR-16]
+ _ = x[OSTR2BYTES-17]
+ _ = x[OSTR2BYTESTMP-18]
+ _ = x[OSTR2RUNES-19]
+ _ = x[OSLICE2ARR-20]
+ _ = x[OSLICE2ARRPTR-21]
+ _ = x[OAS-22]
+ _ = x[OAS2-23]
+ _ = x[OAS2DOTTYPE-24]
+ _ = x[OAS2FUNC-25]
+ _ = x[OAS2MAPR-26]
+ _ = x[OAS2RECV-27]
+ _ = x[OASOP-28]
+ _ = x[OCALL-29]
+ _ = x[OCALLFUNC-30]
+ _ = x[OCALLMETH-31]
+ _ = x[OCALLINTER-32]
+ _ = x[OCAP-33]
+ _ = x[OCLEAR-34]
+ _ = x[OCLOSE-35]
+ _ = x[OCLOSURE-36]
+ _ = x[OCOMPLIT-37]
+ _ = x[OMAPLIT-38]
+ _ = x[OSTRUCTLIT-39]
+ _ = x[OARRAYLIT-40]
+ _ = x[OSLICELIT-41]
+ _ = x[OPTRLIT-42]
+ _ = x[OCONV-43]
+ _ = x[OCONVIFACE-44]
+ _ = x[OCONVNOP-45]
+ _ = x[OCOPY-46]
+ _ = x[ODCL-47]
+ _ = x[ODCLFUNC-48]
+ _ = x[ODELETE-49]
+ _ = x[ODOT-50]
+ _ = x[ODOTPTR-51]
+ _ = x[ODOTMETH-52]
+ _ = x[ODOTINTER-53]
+ _ = x[OXDOT-54]
+ _ = x[ODOTTYPE-55]
+ _ = x[ODOTTYPE2-56]
+ _ = x[OEQ-57]
+ _ = x[ONE-58]
+ _ = x[OLT-59]
+ _ = x[OLE-60]
+ _ = x[OGE-61]
+ _ = x[OGT-62]
+ _ = x[ODEREF-63]
+ _ = x[OINDEX-64]
+ _ = x[OINDEXMAP-65]
+ _ = x[OKEY-66]
+ _ = x[OSTRUCTKEY-67]
+ _ = x[OLEN-68]
+ _ = x[OMAKE-69]
+ _ = x[OMAKECHAN-70]
+ _ = x[OMAKEMAP-71]
+ _ = x[OMAKESLICE-72]
+ _ = x[OMAKESLICECOPY-73]
+ _ = x[OMUL-74]
+ _ = x[ODIV-75]
+ _ = x[OMOD-76]
+ _ = x[OLSH-77]
+ _ = x[ORSH-78]
+ _ = x[OAND-79]
+ _ = x[OANDNOT-80]
+ _ = x[ONEW-81]
+ _ = x[ONOT-82]
+ _ = x[OBITNOT-83]
+ _ = x[OPLUS-84]
+ _ = x[ONEG-85]
+ _ = x[OOROR-86]
+ _ = x[OPANIC-87]
+ _ = x[OPRINT-88]
+ _ = x[OPRINTLN-89]
+ _ = x[OPAREN-90]
+ _ = x[OSEND-91]
+ _ = x[OSLICE-92]
+ _ = x[OSLICEARR-93]
+ _ = x[OSLICESTR-94]
+ _ = x[OSLICE3-95]
+ _ = x[OSLICE3ARR-96]
+ _ = x[OSLICEHEADER-97]
+ _ = x[OSTRINGHEADER-98]
+ _ = x[ORECOVER-99]
+ _ = x[ORECOVERFP-100]
+ _ = x[ORECV-101]
+ _ = x[ORUNESTR-102]
+ _ = x[OSELRECV2-103]
+ _ = x[OMIN-104]
+ _ = x[OMAX-105]
+ _ = x[OREAL-106]
+ _ = x[OIMAG-107]
+ _ = x[OCOMPLEX-108]
+ _ = x[OUNSAFEADD-109]
+ _ = x[OUNSAFESLICE-110]
+ _ = x[OUNSAFESLICEDATA-111]
+ _ = x[OUNSAFESTRING-112]
+ _ = x[OUNSAFESTRINGDATA-113]
+ _ = x[OMETHEXPR-114]
+ _ = x[OMETHVALUE-115]
+ _ = x[OBLOCK-116]
+ _ = x[OBREAK-117]
+ _ = x[OCASE-118]
+ _ = x[OCONTINUE-119]
+ _ = x[ODEFER-120]
+ _ = x[OFALL-121]
+ _ = x[OFOR-122]
+ _ = x[OGOTO-123]
+ _ = x[OIF-124]
+ _ = x[OLABEL-125]
+ _ = x[OGO-126]
+ _ = x[ORANGE-127]
+ _ = x[ORETURN-128]
+ _ = x[OSELECT-129]
+ _ = x[OSWITCH-130]
+ _ = x[OTYPESW-131]
+ _ = x[OINLCALL-132]
+ _ = x[OMAKEFACE-133]
+ _ = x[OITAB-134]
+ _ = x[OIDATA-135]
+ _ = x[OSPTR-136]
+ _ = x[OCFUNC-137]
+ _ = x[OCHECKNIL-138]
+ _ = x[ORESULT-139]
+ _ = x[OINLMARK-140]
+ _ = x[OLINKSYMOFFSET-141]
+ _ = x[OJUMPTABLE-142]
+ _ = x[OINTERFACESWITCH-143]
+ _ = x[ODYNAMICDOTTYPE-144]
+ _ = x[ODYNAMICDOTTYPE2-145]
+ _ = x[ODYNAMICTYPE-146]
+ _ = x[OTAILCALL-147]
+ _ = x[OGETG-148]
+ _ = x[OGETCALLERPC-149]
+ _ = x[OGETCALLERSP-150]
+ _ = x[OEND-151]
+}
+
+const _Op_name = "XXXNAMENONAMETYPELITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLEARCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERSTRINGHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2MINMAXREALIMAGCOMPLEXUNSAFEADDUNSAFESLICEUNSAFESLICEDATAUNSAFESTRINGUNSAFESTRINGDATAMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWINLCALLMAKEFACEITABIDATASPTRCFUNCCHECKNILRESULTINLMARKLINKSYMOFFSETJUMPTABLEINTERFACESWITCHDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 24, 27, 30, 33, 35, 38, 44, 48, 54, 60, 69, 81, 90, 99, 111, 120, 129, 141, 143, 146, 156, 163, 170, 177, 181, 185, 193, 201, 210, 213, 218, 223, 230, 237, 243, 252, 260, 268, 274, 278, 287, 294, 298, 301, 308, 314, 317, 323, 330, 338, 342, 349, 357, 359, 361, 363, 365, 367, 369, 374, 379, 387, 390, 399, 402, 406, 414, 421, 430, 443, 446, 449, 452, 455, 458, 461, 467, 470, 473, 479, 483, 486, 490, 495, 500, 506, 511, 515, 520, 528, 536, 542, 551, 562, 574, 581, 590, 594, 601, 609, 612, 615, 619, 623, 630, 639, 650, 665, 677, 693, 701, 710, 715, 720, 724, 732, 737, 741, 744, 748, 750, 755, 757, 762, 768, 774, 780, 786, 793, 801, 805, 810, 814, 819, 827, 833, 840, 853, 862, 877, 891, 906, 917, 925, 929, 940, 951, 954}
+
+func (i Op) String() string {
+ if i >= Op(len(_Op_index)-1) {
+ return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go
new file mode 100644
index 0000000..3b70a92
--- /dev/null
+++ b/src/cmd/compile/internal/ir/package.go
@@ -0,0 +1,42 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import "cmd/compile/internal/types"
+
+// A Package holds information about the package being compiled.
+type Package struct {
+ // Imports, listed in source order.
+ // See golang.org/issue/31636.
+ Imports []*types.Pkg
+
+ // Init functions, listed in source order.
+ Inits []*Func
+
+ // Funcs contains all (instantiated) functions, methods, and
+ // function literals to be compiled.
+ Funcs []*Func
+
+ // Externs holds constants, (non-generic) types, and variables
+ // declared at package scope.
+ Externs []*Name
+
+ // AsmHdrDecls holds declared constants and struct types that should
+ // be included in -asmhdr output. It's only populated when -asmhdr
+ // is set.
+ AsmHdrDecls []*Name
+
+ // Cgo directives.
+ CgoPragmas [][]string
+
+ // Variables with //go:embed lines.
+ Embeds []*Name
+
+ // PluginExports holds exported functions and variables that are
+ // accessible through the package plugin API. It's only populated
+ // for -buildmode=plugin (i.e., compiling package main and -dynlink
+ // is set).
+ PluginExports []*Name
+}
diff --git a/src/cmd/compile/internal/ir/reassign_consistency_check.go b/src/cmd/compile/internal/ir/reassign_consistency_check.go
new file mode 100644
index 0000000..e4d928d
--- /dev/null
+++ b/src/cmd/compile/internal/ir/reassign_consistency_check.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// checkStaticValueResult compares the result from ReassignOracle.StaticValue
+// with the corresponding result from ir.StaticValue to make sure they agree.
+// This method is called only when turned on via build tag.
+func checkStaticValueResult(n Node, newres Node) {
+ oldres := StaticValue(n)
+ if oldres != newres {
+ base.Fatalf("%s: new/old static value disagreement on %v:\nnew=%v\nold=%v", fmtFullPos(n.Pos()), n, newres, oldres)
+ }
+}
+
+// checkStaticValueResult compares the result from ReassignOracle.Reassigned
+// with the corresponding result from ir.Reassigned to make sure they agree.
+// This method is called only when turned on via build tag.
+func checkReassignedResult(n *Name, newres bool) {
+ origres := Reassigned(n)
+ if newres != origres {
+ base.Fatalf("%s: new/old reassigned disagreement on %v (class %s) newres=%v oldres=%v", fmtFullPos(n.Pos()), n, n.Class.String(), newres, origres)
+ }
+}
+
+// fmtFullPos returns a verbose dump for pos p, including inlines.
+func fmtFullPos(p src.XPos) string {
+ var sb strings.Builder
+ sep := ""
+ base.Ctxt.AllPos(p, func(pos src.Pos) {
+ fmt.Fprintf(&sb, sep)
+ sep = "|"
+ file := filepath.Base(pos.Filename())
+ fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col())
+ })
+ return sb.String()
+}
diff --git a/src/cmd/compile/internal/ir/reassignment.go b/src/cmd/compile/internal/ir/reassignment.go
new file mode 100644
index 0000000..9974292
--- /dev/null
+++ b/src/cmd/compile/internal/ir/reassignment.go
@@ -0,0 +1,205 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+)
+
+// A ReassignOracle efficiently answers queries about whether local
+// variables are reassigned. This helper works by looking for function
+// params and short variable declarations (e.g.
+// https://go.dev/ref/spec#Short_variable_declarations) that are
+// neither address taken nor subsequently re-assigned. It is intended
+// to operate much like "ir.StaticValue" and "ir.Reassigned", but in a
+// way that does just a single walk of the containing function (as
+// opposed to a new walk on every call).
+type ReassignOracle struct {
+ fn *Func
+ // maps candidate name to its defining assignment (or for
+ // for params, defining func).
+ singleDef map[*Name]Node
+}
+
+// Init initializes the oracle based on the IR in function fn, laying
+// the groundwork for future calls to the StaticValue and Reassigned
+// methods. If the fn's IR is subsequently modified, Init must be
+// called again.
+func (ro *ReassignOracle) Init(fn *Func) {
+ ro.fn = fn
+
+ // Collect candidate map. Start by adding function parameters
+ // explicitly.
+ ro.singleDef = make(map[*Name]Node)
+ sig := fn.Type()
+ numParams := sig.NumRecvs() + sig.NumParams()
+ for _, param := range fn.Dcl[:numParams] {
+ if IsBlank(param) {
+ continue
+ }
+ // For params, use func itself as defining node.
+ ro.singleDef[param] = fn
+ }
+
+ // Walk the function body to discover any locals assigned
+ // via ":=" syntax (e.g. "a := <expr>").
+ var findLocals func(n Node) bool
+ findLocals = func(n Node) bool {
+ if nn, ok := n.(*Name); ok {
+ if nn.Defn != nil && !nn.Addrtaken() && nn.Class == PAUTO {
+ ro.singleDef[nn] = nn.Defn
+ }
+ } else if nn, ok := n.(*ClosureExpr); ok {
+ Any(nn.Func, findLocals)
+ }
+ return false
+ }
+ Any(fn, findLocals)
+
+ outerName := func(x Node) *Name {
+ if x == nil {
+ return nil
+ }
+ n, ok := OuterValue(x).(*Name)
+ if ok {
+ return n.Canonical()
+ }
+ return nil
+ }
+
+ // pruneIfNeeded examines node nn appearing on the left hand side
+ // of assignment statement asn to see if it contains a reassignment
+ // to any nodes in our candidate map ro.singleDef; if a reassignment
+ // is found, the corresponding name is deleted from singleDef.
+ pruneIfNeeded := func(nn Node, asn Node) {
+ oname := outerName(nn)
+ if oname == nil {
+ return
+ }
+ defn, ok := ro.singleDef[oname]
+ if !ok {
+ return
+ }
+ // any assignment to a param invalidates the entry.
+ paramAssigned := oname.Class == PPARAM
+ // assignment to local ok iff assignment is its orig def.
+ localAssigned := (oname.Class == PAUTO && asn != defn)
+ if paramAssigned || localAssigned {
+ // We found an assignment to name N that doesn't
+ // correspond to its original definition; remove
+ // from candidates.
+ delete(ro.singleDef, oname)
+ }
+ }
+
+ // Prune away anything that looks assigned. This code modeled after
+ // similar code in ir.Reassigned; any changes there should be made
+ // here as well.
+ var do func(n Node) bool
+ do = func(n Node) bool {
+ switch n.Op() {
+ case OAS:
+ asn := n.(*AssignStmt)
+ pruneIfNeeded(asn.X, n)
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+ asn := n.(*AssignListStmt)
+ for _, p := range asn.Lhs {
+ pruneIfNeeded(p, n)
+ }
+ case OASOP:
+ asn := n.(*AssignOpStmt)
+ pruneIfNeeded(asn.X, n)
+ case ORANGE:
+ rs := n.(*RangeStmt)
+ pruneIfNeeded(rs.Key, n)
+ pruneIfNeeded(rs.Value, n)
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ Any(n.Func, do)
+ }
+ return false
+ }
+ Any(fn, do)
+}
+
+// StaticValue method has the same semantics as the ir package function
+// of the same name; see comments on [StaticValue].
+func (ro *ReassignOracle) StaticValue(n Node) Node {
+ arg := n
+ for {
+ if n.Op() == OCONVNOP {
+ n = n.(*ConvExpr).X
+ continue
+ }
+
+ if n.Op() == OINLCALL {
+ n = n.(*InlinedCallExpr).SingleResult()
+ continue
+ }
+
+ n1 := ro.staticValue1(n)
+ if n1 == nil {
+ if consistencyCheckEnabled {
+ checkStaticValueResult(arg, n)
+ }
+ return n
+ }
+ n = n1
+ }
+}
+
+func (ro *ReassignOracle) staticValue1(nn Node) Node {
+ if nn.Op() != ONAME {
+ return nil
+ }
+ n := nn.(*Name).Canonical()
+ if n.Class != PAUTO {
+ return nil
+ }
+
+ defn := n.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs Node
+FindRHS:
+ switch defn.Op() {
+ case OAS:
+ defn := defn.(*AssignStmt)
+ rhs = defn.Y
+ case OAS2:
+ defn := defn.(*AssignListStmt)
+ for i, lhs := range defn.Lhs {
+ if lhs == n {
+ rhs = defn.Rhs[i]
+ break FindRHS
+ }
+ }
+ base.Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ base.Fatalf("RHS is nil: %v", defn)
+ }
+
+ if _, ok := ro.singleDef[n]; !ok {
+ return nil
+ }
+
+ return rhs
+}
+
+// Reassigned method has the same semantics as the ir package function
+// of the same name; see comments on [Reassigned] for more info.
+func (ro *ReassignOracle) Reassigned(n *Name) bool {
+ _, ok := ro.singleDef[n]
+ result := !ok
+ if consistencyCheckEnabled {
+ checkReassignedResult(n, result)
+ }
+ return result
+}
diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go
new file mode 100644
index 0000000..a640f4f
--- /dev/null
+++ b/src/cmd/compile/internal/ir/scc.go
@@ -0,0 +1,125 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+// Strongly connected components.
+//
+// Run analysis on minimal sets of mutually recursive functions
+// or single non-recursive functions, bottom up.
+//
+// Finding these sets is finding strongly connected components
+// by reverse topological order in the static call graph.
+// The algorithm (known as Tarjan's algorithm) for doing that is taken from
+// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
+//
+// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
+// root of a connected component. Refusing to use it as a root
+// forces it into the component of the function in which it appears.
+// This is more convenient for escape analysis.
+//
+// Second, each function becomes two virtual nodes in the graph,
+// with numbers n and n+1. We record the function's node number as n
+// but search from node n+1. If the search tells us that the component
+// number (min) is n+1, we know that this is a trivial component: one function
+// plus its closures. If the search tells us that the component number is
+// n, then there was a path from node n+1 back to node n, meaning that
+// the function set is mutually recursive. The escape analysis can be
+// more precise when analyzing a single non-recursive function than
+// when analyzing a set of mutually recursive functions.
+
+type bottomUpVisitor struct {
+ analyze func([]*Func, bool)
+ visitgen uint32
+ nodeID map[*Func]uint32
+ stack []*Func
+}
+
+// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
+// It calls analyze with successive groups of functions, working from
+// the bottom of the call graph upward. Each time analyze is called with
+// a list of functions, every function on that list only calls other functions
+// on the list or functions that have been passed in previous invocations of
+// analyze. Closures appear in the same list as their outer functions.
+// The lists are as short as possible while preserving those requirements.
+// (In a typical program, many invocations of analyze will be passed just
+// a single function.) The boolean argument 'recursive' passed to analyze
+// specifies whether the functions on the list are mutually recursive.
+// If recursive is false, the list consists of only a single function and its closures.
+// If recursive is true, the list may still contain only a single function,
+// if that function is itself recursive.
+func VisitFuncsBottomUp(list []*Func, analyze func(list []*Func, recursive bool)) {
+ var v bottomUpVisitor
+ v.analyze = analyze
+ v.nodeID = make(map[*Func]uint32)
+ for _, n := range list {
+ if !n.IsHiddenClosure() {
+ v.visit(n)
+ }
+ }
+}
+
+func (v *bottomUpVisitor) visit(n *Func) uint32 {
+ if id := v.nodeID[n]; id > 0 {
+ // already visited
+ return id
+ }
+
+ v.visitgen++
+ id := v.visitgen
+ v.nodeID[n] = id
+ v.visitgen++
+ min := v.visitgen
+ v.stack = append(v.stack, n)
+
+ do := func(defn Node) {
+ if defn != nil {
+ if m := v.visit(defn.(*Func)); m < min {
+ min = m
+ }
+ }
+ }
+
+ Visit(n, func(n Node) {
+ switch n.Op() {
+ case ONAME:
+ if n := n.(*Name); n.Class == PFUNC {
+ do(n.Defn)
+ }
+ case ODOTMETH, OMETHVALUE, OMETHEXPR:
+ if fn := MethodExprName(n); fn != nil {
+ do(fn.Defn)
+ }
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ do(n.Func)
+ }
+ })
+
+ if (min == id || min == id+1) && !n.IsHiddenClosure() {
+ // This node is the root of a strongly connected component.
+
+ // The original min was id+1. If the bottomUpVisitor found its way
+ // back to id, then this block is a set of mutually recursive functions.
+ // Otherwise, it's just a lone function that does not recurse.
+ recursive := min == id
+
+ // Remove connected component from stack and mark v.nodeID so that future
+ // visits return a large number, which will not affect the caller's min.
+ var i int
+ for i = len(v.stack) - 1; i >= 0; i-- {
+ x := v.stack[i]
+ v.nodeID[x] = ^uint32(0)
+ if x == n {
+ break
+ }
+ }
+ block := v.stack[i:]
+ // Call analyze on this set of functions.
+ v.stack = v.stack[:i]
+ v.analyze(block, recursive)
+ }
+
+ return min
+}
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
new file mode 100644
index 0000000..3b68238
--- /dev/null
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the size of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Func{}, 168, 288},
+ {Name{}, 96, 168},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
new file mode 100644
index 0000000..0801ecd
--- /dev/null
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -0,0 +1,505 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "go/constant"
+)
+
+// A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
+type Decl struct {
+ miniNode
+ X *Name // the thing being declared
+}
+
+func NewDecl(pos src.XPos, op Op, x *Name) *Decl {
+ n := &Decl{X: x}
+ n.pos = pos
+ switch op {
+ default:
+ panic("invalid Decl op " + op.String())
+ case ODCL:
+ n.op = op
+ }
+ return n
+}
+
+func (*Decl) isStmt() {}
+
+// A Stmt is a Node that can appear as a statement.
+// This includes statement-like expressions such as f().
+//
+// (It's possible it should include <-c, but that would require
+// splitting ORECV out of UnaryExpr, which hasn't yet been
+// necessary. Maybe instead we will introduce ExprStmt at
+// some point.)
+type Stmt interface {
+ Node
+ isStmt()
+}
+
+// A miniStmt is a miniNode with extra fields common to statements.
+type miniStmt struct {
+ miniNode
+ init Nodes
+}
+
+func (*miniStmt) isStmt() {}
+
+func (n *miniStmt) Init() Nodes { return n.init }
+func (n *miniStmt) SetInit(x Nodes) { n.init = x }
+func (n *miniStmt) PtrInit() *Nodes { return &n.init }
+
+// An AssignListStmt is an assignment statement with
+// more than one item on at least one side: Lhs = Rhs.
+// If Def is true, the assignment is a :=.
+type AssignListStmt struct {
+ miniStmt
+ Lhs Nodes
+ Def bool
+ Rhs Nodes
+}
+
+func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
+ n := &AssignListStmt{}
+ n.pos = pos
+ n.SetOp(op)
+ n.Lhs = lhs
+ n.Rhs = rhs
+ return n
+}
+
+func (n *AssignListStmt) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
+ n.op = op
+ }
+}
+
+// An AssignStmt is a simple assignment statement: X = Y.
+// If Def is true, the assignment is a :=.
+type AssignStmt struct {
+ miniStmt
+ X Node
+ Def bool
+ Y Node
+}
+
+func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
+ n := &AssignStmt{X: x, Y: y}
+ n.pos = pos
+ n.op = OAS
+ return n
+}
+
+func (n *AssignStmt) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OAS:
+ n.op = op
+ }
+}
+
+// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y.
+type AssignOpStmt struct {
+ miniStmt
+ X Node
+ AsOp Op // OADD etc
+ Y Node
+ IncDec bool // actually ++ or --
+}
+
+func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
+ n := &AssignOpStmt{AsOp: asOp, X: x, Y: y}
+ n.pos = pos
+ n.op = OASOP
+ return n
+}
+
+// A BlockStmt is a block: { List }.
+type BlockStmt struct {
+ miniStmt
+ List Nodes
+}
+
+func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
+ n := &BlockStmt{}
+ n.pos = pos
+ if !pos.IsKnown() {
+ n.pos = base.Pos
+ if len(list) > 0 {
+ n.pos = list[0].Pos()
+ }
+ }
+ n.op = OBLOCK
+ n.List = list
+ return n
+}
+
+// A BranchStmt is a break, continue, fallthrough, or goto statement.
+type BranchStmt struct {
+ miniStmt
+ Label *types.Sym // label if present
+}
+
+func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
+ switch op {
+ case OBREAK, OCONTINUE, OFALL, OGOTO:
+ // ok
+ default:
+ panic("NewBranch " + op.String())
+ }
+ n := &BranchStmt{Label: label}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *BranchStmt) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OBREAK, OCONTINUE, OFALL, OGOTO:
+ n.op = op
+ }
+}
+
+func (n *BranchStmt) Sym() *types.Sym { return n.Label }
+
+// A CaseClause is a case statement in a switch or select: case List: Body.
+type CaseClause struct {
+ miniStmt
+ Var *Name // declared variable for this case in type switch
+ List Nodes // list of expressions for switch, early select
+
+ // RTypes is a list of RType expressions, which are copied to the
+ // corresponding OEQ nodes that are emitted when switch statements
+ // are desugared. RTypes[i] must be non-nil if the emitted
+ // comparison for List[i] will be a mixed interface/concrete
+ // comparison; see reflectdata.CompareRType for details.
+ //
+ // Because mixed interface/concrete switch cases are rare, we allow
+ // len(RTypes) < len(List). Missing entries are implicitly nil.
+ RTypes Nodes
+
+ Body Nodes
+}
+
+func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause {
+ n := &CaseClause{List: list, Body: body}
+ n.pos = pos
+ n.op = OCASE
+ return n
+}
+
+type CommClause struct {
+ miniStmt
+ Comm Node // communication case
+ Body Nodes
+}
+
+func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause {
+ n := &CommClause{Comm: comm, Body: body}
+ n.pos = pos
+ n.op = OCASE
+ return n
+}
+
+// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
+type ForStmt struct {
+ miniStmt
+ Label *types.Sym
+ Cond Node
+ Post Node
+ Body Nodes
+ DistinctVars bool
+}
+
+func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node, distinctVars bool) *ForStmt {
+ n := &ForStmt{Cond: cond, Post: post}
+ n.pos = pos
+ n.op = OFOR
+ if init != nil {
+ n.init = []Node{init}
+ }
+ n.Body = body
+ n.DistinctVars = distinctVars
+ return n
+}
+
+// A GoDeferStmt is a go or defer statement: go Call / defer Call.
+//
+// The two opcodes use a single syntax because the implementations
+// are very similar: both are concerned with saving Call and running it
+// in a different context (a separate goroutine or a later time).
+type GoDeferStmt struct {
+ miniStmt
+ Call Node
+ DeferAt Expr
+}
+
+func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
+ n := &GoDeferStmt{Call: call}
+ n.pos = pos
+ switch op {
+ case ODEFER, OGO:
+ n.op = op
+ default:
+ panic("NewGoDeferStmt " + op.String())
+ }
+ return n
+}
+
+// An IfStmt is a return statement: if Init; Cond { Body } else { Else }.
+type IfStmt struct {
+ miniStmt
+ Cond Node
+ Body Nodes
+ Else Nodes
+ Likely bool // code layout hint
+}
+
+func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
+ n := &IfStmt{Cond: cond}
+ n.pos = pos
+ n.op = OIF
+ n.Body = body
+ n.Else = els
+ return n
+}
+
+// A JumpTableStmt is used to implement switches. Its semantics are:
+//
+// tmp := jt.Idx
+// if tmp == Cases[0] goto Targets[0]
+// if tmp == Cases[1] goto Targets[1]
+// ...
+// if tmp == Cases[n] goto Targets[n]
+//
+// Note that a JumpTableStmt is more like a multiway-goto than
+// a multiway-if. In particular, the case bodies are just
+// labels to jump to, not full Nodes lists.
+type JumpTableStmt struct {
+ miniStmt
+
+ // Value used to index the jump table.
+ // We support only integer types that
+ // are at most the size of a uintptr.
+ Idx Node
+
+ // If Idx is equal to Cases[i], jump to Targets[i].
+ // Cases entries must be distinct and in increasing order.
+ // The length of Cases and Targets must be equal.
+ Cases []constant.Value
+ Targets []*types.Sym
+}
+
+func NewJumpTableStmt(pos src.XPos, idx Node) *JumpTableStmt {
+ n := &JumpTableStmt{Idx: idx}
+ n.pos = pos
+ n.op = OJUMPTABLE
+ return n
+}
+
+// An InterfaceSwitchStmt is used to implement type switches.
+// Its semantics are:
+//
+// if RuntimeType implements Descriptor.Cases[0] {
+// Case, Itab = 0, itab<RuntimeType, Descriptor.Cases[0]>
+// } else if RuntimeType implements Descriptor.Cases[1] {
+// Case, Itab = 1, itab<RuntimeType, Descriptor.Cases[1]>
+// ...
+// } else if RuntimeType implements Descriptor.Cases[N-1] {
+// Case, Itab = N-1, itab<RuntimeType, Descriptor.Cases[N-1]>
+// } else {
+// Case, Itab = len(cases), nil
+// }
+//
+// RuntimeType must be a non-nil *runtime._type.
+// Hash must be the hash field of RuntimeType (or its copy loaded from an itab).
+// Descriptor must represent an abi.InterfaceSwitch global variable.
+type InterfaceSwitchStmt struct {
+ miniStmt
+
+ Case Node
+ Itab Node
+ RuntimeType Node
+ Hash Node
+ Descriptor *obj.LSym
+}
+
+func NewInterfaceSwitchStmt(pos src.XPos, case_, itab, runtimeType, hash Node, descriptor *obj.LSym) *InterfaceSwitchStmt {
+ n := &InterfaceSwitchStmt{
+ Case: case_,
+ Itab: itab,
+ RuntimeType: runtimeType,
+ Hash: hash,
+ Descriptor: descriptor,
+ }
+ n.pos = pos
+ n.op = OINTERFACESWITCH
+ return n
+}
+
+// An InlineMarkStmt is a marker placed just before an inlined body.
+type InlineMarkStmt struct {
+ miniStmt
+ Index int64
+}
+
+func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
+ n := &InlineMarkStmt{Index: index}
+ n.pos = pos
+ n.op = OINLMARK
+ return n
+}
+
+func (n *InlineMarkStmt) Offset() int64 { return n.Index }
+func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
+
+// A LabelStmt is a label statement (just the label, not including the statement it labels).
+type LabelStmt struct {
+ miniStmt
+ Label *types.Sym // "Label:"
+}
+
+func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
+ n := &LabelStmt{Label: label}
+ n.pos = pos
+ n.op = OLABEL
+ return n
+}
+
+func (n *LabelStmt) Sym() *types.Sym { return n.Label }
+
+// A RangeStmt is a range loop: for Key, Value = range X { Body }
+type RangeStmt struct {
+ miniStmt
+ Label *types.Sym
+ Def bool
+ X Node
+ RType Node `mknode:"-"` // see reflectdata/helpers.go
+ Key Node
+ Value Node
+ Body Nodes
+ DistinctVars bool
+ Prealloc *Name
+
+ // When desugaring the RangeStmt during walk, the assignments to Key
+ // and Value may require OCONVIFACE operations. If so, these fields
+ // will be copied to their respective ConvExpr fields.
+ KeyTypeWord Node `mknode:"-"`
+ KeySrcRType Node `mknode:"-"`
+ ValueTypeWord Node `mknode:"-"`
+ ValueSrcRType Node `mknode:"-"`
+}
+
+func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node, distinctVars bool) *RangeStmt {
+ n := &RangeStmt{X: x, Key: key, Value: value}
+ n.pos = pos
+ n.op = ORANGE
+ n.Body = body
+ n.DistinctVars = distinctVars
+ return n
+}
+
+// A ReturnStmt is a return statement.
+type ReturnStmt struct {
+ miniStmt
+ Results Nodes // return list
+}
+
+func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
+ n := &ReturnStmt{}
+ n.pos = pos
+ n.op = ORETURN
+ n.Results = results
+ return n
+}
+
+// A SelectStmt is a block: { Cases }.
+type SelectStmt struct {
+ miniStmt
+ Label *types.Sym
+ Cases []*CommClause
+
+ // TODO(rsc): Instead of recording here, replace with a block?
+ Compiled Nodes // compiled form, after walkSelect
+}
+
+func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt {
+ n := &SelectStmt{Cases: cases}
+ n.pos = pos
+ n.op = OSELECT
+ return n
+}
+
+// A SendStmt is a send statement: X <- Y.
+type SendStmt struct {
+ miniStmt
+ Chan Node
+ Value Node
+}
+
+func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
+ n := &SendStmt{Chan: ch, Value: value}
+ n.pos = pos
+ n.op = OSEND
+ return n
+}
+
+// A SwitchStmt is a switch statement: switch Init; Tag { Cases }.
+type SwitchStmt struct {
+ miniStmt
+ Tag Node
+ Cases []*CaseClause
+ Label *types.Sym
+
+ // TODO(rsc): Instead of recording here, replace with a block?
+ Compiled Nodes // compiled form, after walkSwitch
+}
+
+func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
+ n := &SwitchStmt{Tag: tag, Cases: cases}
+ n.pos = pos
+ n.op = OSWITCH
+ return n
+}
+
+// A TailCallStmt is a tail call statement, which is used for back-end
+// code generation to jump directly to another function entirely.
+type TailCallStmt struct {
+ miniStmt
+ Call *CallExpr // the underlying call
+}
+
+func NewTailCallStmt(pos src.XPos, call *CallExpr) *TailCallStmt {
+ n := &TailCallStmt{Call: call}
+ n.pos = pos
+ n.op = OTAILCALL
+ return n
+}
+
+// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
+type TypeSwitchGuard struct {
+ miniNode
+ Tag *Ident
+ X Node
+ Used bool
+}
+
+func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard {
+ n := &TypeSwitchGuard{Tag: tag, X: x}
+ n.pos = pos
+ n.op = OTYPESW
+ return n
+}
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
new file mode 100644
index 0000000..202c494
--- /dev/null
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -0,0 +1,82 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// Syms holds known symbols.
+var Syms symsStruct
+
+type symsStruct struct {
+ AssertE2I *obj.LSym
+ AssertE2I2 *obj.LSym
+ AssertI2I *obj.LSym
+ AssertI2I2 *obj.LSym
+ Asanread *obj.LSym
+ Asanwrite *obj.LSym
+ CgoCheckMemmove *obj.LSym
+ CgoCheckPtrWrite *obj.LSym
+ CheckPtrAlignment *obj.LSym
+ Deferproc *obj.LSym
+ Deferprocat *obj.LSym
+ DeferprocStack *obj.LSym
+ Deferreturn *obj.LSym
+ Duffcopy *obj.LSym
+ Duffzero *obj.LSym
+ GCWriteBarrier [8]*obj.LSym
+ Goschedguarded *obj.LSym
+ Growslice *obj.LSym
+ InterfaceSwitch *obj.LSym
+ Memmove *obj.LSym
+ Msanread *obj.LSym
+ Msanwrite *obj.LSym
+ Msanmove *obj.LSym
+ Newobject *obj.LSym
+ Newproc *obj.LSym
+ Panicdivide *obj.LSym
+ Panicshift *obj.LSym
+ PanicdottypeE *obj.LSym
+ PanicdottypeI *obj.LSym
+ Panicnildottype *obj.LSym
+ Panicoverflow *obj.LSym
+ Racefuncenter *obj.LSym
+ Racefuncexit *obj.LSym
+ Raceread *obj.LSym
+ Racereadrange *obj.LSym
+ Racewrite *obj.LSym
+ Racewriterange *obj.LSym
+ TypeAssert *obj.LSym
+ WBZero *obj.LSym
+ WBMove *obj.LSym
+ // Wasm
+ SigPanic *obj.LSym
+ Staticuint64s *obj.LSym
+ Typedmemmove *obj.LSym
+ Udiv *obj.LSym
+ WriteBarrier *obj.LSym
+ Zerobase *obj.LSym
+ ARM64HasATOMICS *obj.LSym
+ ARMHasVFPv4 *obj.LSym
+ X86HasFMA *obj.LSym
+ X86HasPOPCNT *obj.LSym
+ X86HasSSE41 *obj.LSym
+ // Wasm
+ WasmDiv *obj.LSym
+ // Wasm
+ WasmTruncS *obj.LSym
+ // Wasm
+ WasmTruncU *obj.LSym
+}
+
+// Pkgs holds known packages.
+var Pkgs struct {
+ Go *types.Pkg
+ Itab *types.Pkg
+ Runtime *types.Pkg
+ Coverage *types.Pkg
+}
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
new file mode 100644
index 0000000..7db76c1
--- /dev/null
+++ b/src/cmd/compile/internal/ir/type.go
@@ -0,0 +1,69 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Calling TypeNode converts a *types.Type to a Node shell.
+
+// A typeNode is a Node wrapper for type t.
+type typeNode struct {
+ miniNode
+ typ *types.Type
+}
+
+func newTypeNode(typ *types.Type) *typeNode {
+ n := &typeNode{typ: typ}
+ n.pos = src.NoXPos
+ n.op = OTYPE
+ n.SetTypecheck(1)
+ return n
+}
+
+func (n *typeNode) Type() *types.Type { return n.typ }
+func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
+
+// TypeNode returns the Node representing the type t.
+func TypeNode(t *types.Type) Node {
+ if n := t.Obj(); n != nil {
+ if n.Type() != t {
+ base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
+ }
+ return n.(*Name)
+ }
+ return newTypeNode(t)
+}
+
+// A DynamicType represents a type expression whose exact type must be
+// computed dynamically.
+type DynamicType struct {
+ miniExpr
+
+ // RType is an expression that yields a *runtime._type value
+ // representing the asserted type.
+ //
+ // BUG(mdempsky): If ITab is non-nil, RType may be nil.
+ RType Node
+
+ // ITab is an expression that yields a *runtime.itab value
+ // representing the asserted type within the assertee expression's
+ // original interface type.
+ //
+ // ITab is only used for assertions (including type switches) from
+ // non-empty interface type to a concrete (i.e., non-interface)
+ // type. For all other assertions, ITab is nil.
+ ITab Node
+}
+
+func NewDynamicType(pos src.XPos, rtype Node) *DynamicType {
+ n := &DynamicType{RType: rtype}
+ n.pos = pos
+ n.op = ODYNAMICTYPE
+ return n
+}
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
new file mode 100644
index 0000000..16c8a08
--- /dev/null
+++ b/src/cmd/compile/internal/ir/val.go
@@ -0,0 +1,107 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+)
+
+func ConstType(n Node) constant.Kind {
+ if n == nil || n.Op() != OLITERAL {
+ return constant.Unknown
+ }
+ return n.Val().Kind()
+}
+
+// IntVal returns v converted to int64.
+// Note: if t is uint64, very large values will be converted to negative int64.
+func IntVal(t *types.Type, v constant.Value) int64 {
+ if t.IsUnsigned() {
+ if x, ok := constant.Uint64Val(v); ok {
+ return int64(x)
+ }
+ } else {
+ if x, ok := constant.Int64Val(v); ok {
+ return x
+ }
+ }
+ base.Fatalf("%v out of range for %v", v, t)
+ panic("unreachable")
+}
+
+func AssertValidTypeForConst(t *types.Type, v constant.Value) {
+ if !ValidTypeForConst(t, v) {
+ base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
+ }
+}
+
+func ValidTypeForConst(t *types.Type, v constant.Value) bool {
+ switch v.Kind() {
+ case constant.Unknown:
+ return OKForConst[t.Kind()]
+ case constant.Bool:
+ return t.IsBoolean()
+ case constant.String:
+ return t.IsString()
+ case constant.Int:
+ return t.IsInteger()
+ case constant.Float:
+ return t.IsFloat()
+ case constant.Complex:
+ return t.IsComplex()
+ }
+
+ base.Fatalf("unexpected constant kind: %v", v)
+ panic("unreachable")
+}
+
+var OKForConst [types.NTYPE]bool
+
+// Int64Val returns n as an int64.
+// n must be an integer or rune constant.
+func Int64Val(n Node) int64 {
+ if !IsConst(n, constant.Int) {
+ base.Fatalf("Int64Val(%v)", n)
+ }
+ x, ok := constant.Int64Val(n.Val())
+ if !ok {
+ base.Fatalf("Int64Val(%v)", n)
+ }
+ return x
+}
+
+// Uint64Val returns n as a uint64.
+// n must be an integer or rune constant.
+func Uint64Val(n Node) uint64 {
+ if !IsConst(n, constant.Int) {
+ base.Fatalf("Uint64Val(%v)", n)
+ }
+ x, ok := constant.Uint64Val(n.Val())
+ if !ok {
+ base.Fatalf("Uint64Val(%v)", n)
+ }
+ return x
+}
+
+// BoolVal returns n as a bool.
+// n must be a boolean constant.
+func BoolVal(n Node) bool {
+ if !IsConst(n, constant.Bool) {
+ base.Fatalf("BoolVal(%v)", n)
+ }
+ return constant.BoolVal(n.Val())
+}
+
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func StringVal(n Node) string {
+ if !IsConst(n, constant.String) {
+ base.Fatalf("StringVal(%v)", n)
+ }
+ return constant.StringVal(n.Val())
+}
diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go
new file mode 100644
index 0000000..73ec1de
--- /dev/null
+++ b/src/cmd/compile/internal/ir/visit.go
@@ -0,0 +1,209 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IR visitors for walking the IR tree.
+//
+// The lowest level helpers are DoChildren and EditChildren, which
+// nodes help implement and provide control over whether and when
+// recursion happens during the walk of the IR.
+//
+// Although these are both useful directly, two simpler patterns
+// are fairly common and also provided: Visit and Any.
+
+package ir
+
+// DoChildren calls do(x) on each of n's non-nil child nodes x.
+// If any call returns true, DoChildren stops and returns true.
+// Otherwise, DoChildren returns false.
+//
+// Note that DoChildren(n, do) only calls do(x) for n's immediate children.
+// If x's children should be processed, then do(x) must call DoChildren(x, do).
+//
+// DoChildren allows constructing general traversals of the IR graph
+// that can stop early if needed. The most general usage is:
+//
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// ... processing BEFORE visiting children ...
+// if ... should visit children ... {
+// ir.DoChildren(x, do)
+// ... processing AFTER visiting children ...
+// }
+// if ... should stop parent DoChildren call from visiting siblings ... {
+// return true
+// }
+// return false
+// }
+// do(root)
+//
+// Since DoChildren does not return true itself, if the do function
+// never wants to stop the traversal, it can assume that DoChildren
+// itself will always return false, simplifying to:
+//
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// ... processing BEFORE visiting children ...
+// if ... should visit children ... {
+// ir.DoChildren(x, do)
+// }
+// ... processing AFTER visiting children ...
+// return false
+// }
+// do(root)
+//
+// The Visit function illustrates a further simplification of the pattern,
+// only processing before visiting children and never stopping:
+//
+// func Visit(n ir.Node, visit func(ir.Node)) {
+// if n == nil {
+// return
+// }
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// visit(x)
+// return ir.DoChildren(x, do)
+// }
+// do(n)
+// }
+//
+// The Any function illustrates a different simplification of the pattern,
+// visiting each node and then its children, recursively, until finding
+// a node x for which cond(x) returns true, at which point the entire
+// traversal stops and returns true.
+//
+// func Any(n ir.Node, cond(ir.Node) bool) bool {
+// if n == nil {
+// return false
+// }
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// return cond(x) || ir.DoChildren(x, do)
+// }
+// return do(n)
+// }
+//
+// Visit and Any are presented above as examples of how to use
+// DoChildren effectively, but of course, usage that fits within the
+// simplifications captured by Visit or Any will be best served
+// by directly calling the ones provided by this package.
+func DoChildren(n Node, do func(Node) bool) bool {
+ if n == nil {
+ return false
+ }
+ return n.doChildren(do)
+}
+
+// Visit visits each non-nil node x in the IR tree rooted at n
+// in a depth-first preorder traversal, calling visit on each node visited.
+func Visit(n Node, visit func(Node)) {
+ if n == nil {
+ return
+ }
+ var do func(Node) bool
+ do = func(x Node) bool {
+ visit(x)
+ return DoChildren(x, do)
+ }
+ do(n)
+}
+
+// VisitList calls Visit(x, visit) for each node x in the list.
+func VisitList(list Nodes, visit func(Node)) {
+ for _, x := range list {
+ Visit(x, visit)
+ }
+}
+
+// VisitFuncAndClosures calls visit on each non-nil node in fn.Body,
+// including any nested closure bodies.
+func VisitFuncAndClosures(fn *Func, visit func(n Node)) {
+ VisitList(fn.Body, func(n Node) {
+ visit(n)
+ if n, ok := n.(*ClosureExpr); ok && n.Op() == OCLOSURE {
+ VisitFuncAndClosures(n.Func, visit)
+ }
+ })
+}
+
+// Any looks for a non-nil node x in the IR tree rooted at n
+// for which cond(x) returns true.
+// Any considers nodes in a depth-first, preorder traversal.
+// When Any finds a node x such that cond(x) is true,
+// Any ends the traversal and returns true immediately.
+// Otherwise Any returns false after completing the entire traversal.
+func Any(n Node, cond func(Node) bool) bool {
+ if n == nil {
+ return false
+ }
+ var do func(Node) bool
+ do = func(x Node) bool {
+ return cond(x) || DoChildren(x, do)
+ }
+ return do(n)
+}
+
+// AnyList calls Any(x, cond) for each node x in the list, in order.
+// If any call returns true, AnyList stops and returns true.
+// Otherwise, AnyList returns false after calling Any(x, cond)
+// for every x in the list.
+func AnyList(list Nodes, cond func(Node) bool) bool {
+ for _, x := range list {
+ if Any(x, cond) {
+ return true
+ }
+ }
+ return false
+}
+
+// EditChildren edits the child nodes of n, replacing each child x with edit(x).
+//
+// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
+// If x's children should be processed, then edit(x) must call EditChildren(x, edit).
+//
+// EditChildren allows constructing general editing passes of the IR graph.
+// The most general usage is:
+//
+// var edit func(ir.Node) ir.Node
+// edit = func(x ir.Node) ir.Node {
+// ... processing BEFORE editing children ...
+// if ... should edit children ... {
+// EditChildren(x, edit)
+// ... processing AFTER editing children ...
+// }
+// ... return x ...
+// }
+// n = edit(n)
+//
+// EditChildren edits the node in place. To edit a copy, call Copy first.
+// As an example, a simple deep copy implementation would be:
+//
+// func deepCopy(n ir.Node) ir.Node {
+// var edit func(ir.Node) ir.Node
+// edit = func(x ir.Node) ir.Node {
+// x = ir.Copy(x)
+// ir.EditChildren(x, edit)
+// return x
+// }
+// return edit(n)
+// }
+//
+// Of course, in this case it is better to call ir.DeepCopy than to build one anew.
+func EditChildren(n Node, edit func(Node) Node) {
+ if n == nil {
+ return
+ }
+ n.editChildren(edit)
+}
+
+// EditChildrenWithHidden is like EditChildren, but also edits
+// Node-typed fields tagged with `mknode:"-"`.
+//
+// TODO(mdempsky): Remove the `mknode:"-"` tags so this function can
+// go away.
+func EditChildrenWithHidden(n Node, edit func(Node) Node) {
+ if n == nil {
+ return
+ }
+ n.editChildrenWithHidden(edit)
+}
diff --git a/src/cmd/compile/internal/liveness/arg.go b/src/cmd/compile/internal/liveness/arg.go
new file mode 100644
index 0000000..e1269a1
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/arg.go
@@ -0,0 +1,339 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package liveness
+
+import (
+ "fmt"
+ "internal/abi"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+)
+
+// Argument liveness tracking.
+//
+// For arguments passed in registers, this file tracks if their spill slots
+// are live for runtime traceback. An argument spill slot is live at a PC
+// if we know that an actual value has stored into it at or before this point.
+//
+// Stack args are always live and not tracked in this code. Stack args are
+// laid out before register spill slots, so we emit the smallest offset that
+// needs tracking. Slots before that offset are always live. That offset is
+// usually the offset of the first spill slot. But if the first spill slot is
+// always live (e.g. if it is address-taken), it will be the offset of a later
+// one.
+//
+// The liveness information is emitted as a FUNCDATA and a PCDATA.
+//
+// FUNCDATA format:
+// - start (smallest) offset that needs tracking (1 byte)
+// - a list of bitmaps.
+// In a bitmap bit i is set if the i-th spill slot is live.
+//
+// At a PC where the liveness info changes, a PCDATA indicates the
+// byte offset of the liveness map in the FUNCDATA. PCDATA -1 is a
+// special case indicating all slots are live (for binary size
+// saving).
+
+const allLiveIdx = -1
+
+// name and offset
+type nameOff struct {
+ n *ir.Name
+ off int64
+}
+
+func (a nameOff) FrameOffset() int64 { return a.n.FrameOffset() + a.off }
+func (a nameOff) String() string { return fmt.Sprintf("%v+%d", a.n, a.off) }
+
+type blockArgEffects struct {
+ livein bitvec.BitVec // variables live at block entry
+ liveout bitvec.BitVec // variables live at block exit
+}
+
+type argLiveness struct {
+ fn *ir.Func
+ f *ssa.Func
+ args []nameOff // name and offset of spill slots
+ idx map[nameOff]int32 // index in args
+
+ be []blockArgEffects // indexed by block ID
+
+ bvset bvecSet // Set of liveness bitmaps, used for uniquifying.
+
+ // Liveness map indices at each Value (where it changes) and Block entry.
+ // During the computation the indices are temporarily index to bvset.
+ // At the end they will be index (offset) to the output funcdata (changed
+ // in (*argLiveness).emit).
+ blockIdx map[ssa.ID]int
+ valueIdx map[ssa.ID]int
+}
+
+// ArgLiveness computes the liveness information of register argument spill slots.
+// An argument's spill slot is "live" if we know it contains a meaningful value,
+// that is, we have stored the register value to it.
+// Returns the liveness map indices at each Block entry and at each Value (where
+// it changes).
+func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx map[ssa.ID]int) {
+ if f.OwnAux.ABIInfo().InRegistersUsed() == 0 || base.Flag.N != 0 {
+ // No register args. Nothing to emit.
+ // Or if -N is used we spill everything upfront so it is always live.
+ return nil, nil
+ }
+
+ lv := &argLiveness{
+ fn: fn,
+ f: f,
+ idx: make(map[nameOff]int32),
+ be: make([]blockArgEffects, f.NumBlocks()),
+ blockIdx: make(map[ssa.ID]int),
+ valueIdx: make(map[ssa.ID]int),
+ }
+ // Gather all register arg spill slots.
+ for _, a := range f.OwnAux.ABIInfo().InParams() {
+ n := a.Name
+ if n == nil || len(a.Registers) == 0 {
+ continue
+ }
+ _, offs := a.RegisterTypesAndOffsets()
+ for _, off := range offs {
+ if n.FrameOffset()+off > 0xff {
+ // We only print a limited number of args, with stack
+ // offsets no larger than 255.
+ continue
+ }
+ lv.args = append(lv.args, nameOff{n, off})
+ }
+ }
+ if len(lv.args) > 10 {
+ lv.args = lv.args[:10] // We print no more than 10 args.
+ }
+
+ // We spill address-taken or non-SSA-able value upfront, so they are always live.
+ alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !ssa.CanSSA(n.Type()) }
+
+ // We'll emit the smallest offset for the slots that need liveness info.
+ // No need to include a slot with a lower offset if it is always live.
+ for len(lv.args) > 0 && alwaysLive(lv.args[0].n) {
+ lv.args = lv.args[1:]
+ }
+ if len(lv.args) == 0 {
+ return // everything is always live
+ }
+
+ for i, a := range lv.args {
+ lv.idx[a] = int32(i)
+ }
+
+ nargs := int32(len(lv.args))
+ bulk := bitvec.NewBulk(nargs, int32(len(f.Blocks)*2))
+ for _, b := range f.Blocks {
+ be := &lv.be[b.ID]
+ be.livein = bulk.Next()
+ be.liveout = bulk.Next()
+
+ // initialize to all 1s, so we can AND them
+ be.livein.Not()
+ be.liveout.Not()
+ }
+
+ entrybe := &lv.be[f.Entry.ID]
+ entrybe.livein.Clear()
+ for i, a := range lv.args {
+ if alwaysLive(a.n) {
+ entrybe.livein.Set(int32(i))
+ }
+ }
+
+ // Visit blocks in reverse-postorder, compute block effects.
+ po := f.Postorder()
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+ be := &lv.be[b.ID]
+
+ // A slot is live at block entry if it is live in all predecessors.
+ for _, pred := range b.Preds {
+ pb := pred.Block()
+ be.livein.And(be.livein, lv.be[pb.ID].liveout)
+ }
+
+ be.liveout.Copy(be.livein)
+ for _, v := range b.Values {
+ lv.valueEffect(v, be.liveout)
+ }
+ }
+
+ // Coalesce identical live vectors. Compute liveness indices at each PC
+ // where it changes.
+ live := bitvec.New(nargs)
+ addToSet := func(bv bitvec.BitVec) (int, bool) {
+ if bv.Count() == int(nargs) { // special case for all live
+ return allLiveIdx, false
+ }
+ return lv.bvset.add(bv)
+ }
+ for _, b := range lv.f.Blocks {
+ be := &lv.be[b.ID]
+ lv.blockIdx[b.ID], _ = addToSet(be.livein)
+
+ live.Copy(be.livein)
+ var lastv *ssa.Value
+ for i, v := range b.Values {
+ if lv.valueEffect(v, live) {
+ // Record that liveness changes but not emit a map now.
+ // For a sequence of StoreRegs we only need to emit one
+ // at last.
+ lastv = v
+ }
+ if lastv != nil && (mayFault(v) || i == len(b.Values)-1) {
+ // Emit the liveness map if it may fault or at the end of
+ // the block. We may need a traceback if the instruction
+ // may cause a panic.
+ var added bool
+ lv.valueIdx[lastv.ID], added = addToSet(live)
+ if added {
+ // live is added to bvset and we cannot modify it now.
+ // Make a copy.
+ t := live
+ live = bitvec.New(nargs)
+ live.Copy(t)
+ }
+ lastv = nil
+ }
+ }
+
+ // Sanity check.
+ if !live.Eq(be.liveout) {
+ panic("wrong arg liveness map at block end")
+ }
+ }
+
+ // Emit funcdata symbol, update indices to offsets in the symbol data.
+ lsym := lv.emit()
+ fn.LSym.Func().ArgLiveInfo = lsym
+
+ //lv.print()
+
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(abi.FUNCDATA_ArgLiveInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = lsym
+
+ return lv.blockIdx, lv.valueIdx
+}
+
+// valueEffect applies the effect of v to live, return whether it is changed.
+func (lv *argLiveness) valueEffect(v *ssa.Value, live bitvec.BitVec) bool {
+ if v.Op != ssa.OpStoreReg { // TODO: include other store instructions?
+ return false
+ }
+ n, off := ssa.AutoVar(v)
+ if n.Class != ir.PPARAM {
+ return false
+ }
+ i, ok := lv.idx[nameOff{n, off}]
+ if !ok || live.Get(i) {
+ return false
+ }
+ live.Set(i)
+ return true
+}
+
+func mayFault(v *ssa.Value) bool {
+ switch v.Op {
+ case ssa.OpLoadReg, ssa.OpStoreReg, ssa.OpCopy, ssa.OpPhi,
+ ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive,
+ ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult,
+ ssa.OpConvert, ssa.OpInlMark, ssa.OpGetG:
+ return false
+ }
+ if len(v.Args) == 0 {
+ return false // assume constant op cannot fault
+ }
+ return true // conservatively assume all other ops could fault
+}
+
+func (lv *argLiveness) print() {
+ fmt.Println("argument liveness:", lv.f.Name)
+ live := bitvec.New(int32(len(lv.args)))
+ for _, b := range lv.f.Blocks {
+ be := &lv.be[b.ID]
+
+ fmt.Printf("%v: live in: ", b)
+ lv.printLivenessVec(be.livein)
+ if idx, ok := lv.blockIdx[b.ID]; ok {
+ fmt.Printf(" #%d", idx)
+ }
+ fmt.Println()
+
+ for _, v := range b.Values {
+ if lv.valueEffect(v, live) {
+ fmt.Printf(" %v: ", v)
+ lv.printLivenessVec(live)
+ if idx, ok := lv.valueIdx[v.ID]; ok {
+ fmt.Printf(" #%d", idx)
+ }
+ fmt.Println()
+ }
+ }
+
+ fmt.Printf("%v: live out: ", b)
+ lv.printLivenessVec(be.liveout)
+ fmt.Println()
+ }
+ fmt.Println("liveness maps data:", lv.fn.LSym.Func().ArgLiveInfo.P)
+}
+
+func (lv *argLiveness) printLivenessVec(bv bitvec.BitVec) {
+ for i, a := range lv.args {
+ if bv.Get(int32(i)) {
+ fmt.Printf("%v ", a)
+ }
+ }
+}
+
+func (lv *argLiveness) emit() *obj.LSym {
+ livenessMaps := lv.bvset.extractUnique()
+
+ // stack offsets of register arg spill slots
+ argOffsets := make([]uint8, len(lv.args))
+ for i, a := range lv.args {
+ off := a.FrameOffset()
+ if off > 0xff {
+ panic("offset too large")
+ }
+ argOffsets[i] = uint8(off)
+ }
+
+ idx2off := make([]int, len(livenessMaps))
+
+ lsym := base.Ctxt.Lookup(lv.fn.LSym.Name + ".argliveinfo")
+ lsym.Set(obj.AttrContentAddressable, true)
+
+ off := objw.Uint8(lsym, 0, argOffsets[0]) // smallest offset that needs liveness info.
+ for idx, live := range livenessMaps {
+ idx2off[idx] = off
+ off = objw.BitVec(lsym, off, live)
+ }
+
+ // Update liveness indices to offsets.
+ for i, x := range lv.blockIdx {
+ if x != allLiveIdx {
+ lv.blockIdx[i] = idx2off[x]
+ }
+ }
+ for i, x := range lv.valueIdx {
+ if x != allLiveIdx {
+ lv.valueIdx[i] = idx2off[x]
+ }
+ }
+
+ return lsym
+}
diff --git a/src/cmd/compile/internal/liveness/bvset.go b/src/cmd/compile/internal/liveness/bvset.go
new file mode 100644
index 0000000..60b2593
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/bvset.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package liveness
+
+import "cmd/compile/internal/bitvec"
+
+// FNV-1 hash function constants.
+const (
+ h0 = 2166136261
+ hp = 16777619
+)
+
+// bvecSet is a set of bvecs, in initial insertion order.
+type bvecSet struct {
+ index []int // hash -> uniq index. -1 indicates empty slot.
+ uniq []bitvec.BitVec // unique bvecs, in insertion order
+}
+
+func (m *bvecSet) grow() {
+ // Allocate new index.
+ n := len(m.index) * 2
+ if n == 0 {
+ n = 32
+ }
+ newIndex := make([]int, n)
+ for i := range newIndex {
+ newIndex[i] = -1
+ }
+
+ // Rehash into newIndex.
+ for i, bv := range m.uniq {
+ h := hashbitmap(h0, bv) % uint32(len(newIndex))
+ for {
+ j := newIndex[h]
+ if j < 0 {
+ newIndex[h] = i
+ break
+ }
+ h++
+ if h == uint32(len(newIndex)) {
+ h = 0
+ }
+ }
+ }
+ m.index = newIndex
+}
+
+// add adds bv to the set and returns its index in m.extractUnique,
+// and whether it is newly added.
+// If it is newly added, the caller must not modify bv after this.
+func (m *bvecSet) add(bv bitvec.BitVec) (int, bool) {
+ if len(m.uniq)*4 >= len(m.index) {
+ m.grow()
+ }
+
+ index := m.index
+ h := hashbitmap(h0, bv) % uint32(len(index))
+ for {
+ j := index[h]
+ if j < 0 {
+ // New bvec.
+ index[h] = len(m.uniq)
+ m.uniq = append(m.uniq, bv)
+ return len(m.uniq) - 1, true
+ }
+ jlive := m.uniq[j]
+ if bv.Eq(jlive) {
+ // Existing bvec.
+ return j, false
+ }
+
+ h++
+ if h == uint32(len(index)) {
+ h = 0
+ }
+ }
+}
+
+// extractUnique returns this slice of unique bit vectors in m, as
+// indexed by the result of bvecSet.add.
+func (m *bvecSet) extractUnique() []bitvec.BitVec {
+ return m.uniq
+}
+
+func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
+ n := int((bv.N + 31) / 32)
+ for i := 0; i < n; i++ {
+ w := bv.B[i]
+ h = (h * hp) ^ (w & 0xff)
+ h = (h * hp) ^ ((w >> 8) & 0xff)
+ h = (h * hp) ^ ((w >> 16) & 0xff)
+ h = (h * hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
new file mode 100644
index 0000000..e4dbfa9
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -0,0 +1,1548 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector liveness bitmap generation.
+
+// The command line flag -live causes this code to print debug information.
+// The levels are:
+//
+// -live (aka -live=1): print liveness lists as code warnings at safe points
+// -live=2: print an assembly listing with liveness annotations
+//
+// Each level includes the earlier output as well.
+
+package liveness
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/typebits"
+ "cmd/compile/internal/types"
+ "cmd/internal/notsha256"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+
+ rtabi "internal/abi"
+)
+
+// OpVarDef is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easy, OpVarDef is only needed for multi-word
+// variables satisfying isfat(n.Type). For simplicity though, buildssa
+// emits OpVarDef regardless of variable width.
+//
+// An 'OpVarDef x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The OpVarDef must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+// x = x[1:]
+//
+// it is important to generate code like:
+//
+// base, len, cap = pieces of x[1:]
+// OpVarDef x
+// x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+// OpVarDef x
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+// OpVarDef x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the OpVarDef appears to have "overwritten" it.
+//
+// OpVarDef is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+
+// blockEffects summarizes the liveness effects on an SSA block.
+type blockEffects struct {
+ // Computed during Liveness.prologue using only the content of
+ // individual blocks:
+ //
+ // uevar: upward exposed variables (used before set in block)
+ // varkill: killed variables (set in block)
+ uevar bitvec.BitVec
+ varkill bitvec.BitVec
+
+ // Computed during Liveness.solve using control flow information:
+ //
+ // livein: variables live at block entry
+ // liveout: variables live at block exit
+ livein bitvec.BitVec
+ liveout bitvec.BitVec
+}
+
+// A collection of global state used by liveness analysis.
+type liveness struct {
+ fn *ir.Func
+ f *ssa.Func
+ vars []*ir.Name
+ idx map[*ir.Name]int32
+ stkptrsize int64
+
+ be []blockEffects
+
+ // allUnsafe indicates that all points in this function are
+ // unsafe-points.
+ allUnsafe bool
+ // unsafePoints bit i is set if Value ID i is an unsafe-point
+ // (preemption is not allowed). Only valid if !allUnsafe.
+ unsafePoints bitvec.BitVec
+ // unsafeBlocks bit i is set if Block ID i is an unsafe-point
+ // (preemption is not allowed on any end-of-block
+ // instructions). Only valid if !allUnsafe.
+ unsafeBlocks bitvec.BitVec
+
+ // An array with a bit vector for each safe point in the
+ // current Block during liveness.epilogue. Indexed in Value
+ // order for that block. Additionally, for the entry block
+ // livevars[0] is the entry bitmap. liveness.compact moves
+ // these to stackMaps.
+ livevars []bitvec.BitVec
+
+ // livenessMap maps from safe points (i.e., CALLs) to their
+ // liveness map indexes.
+ livenessMap Map
+ stackMapSet bvecSet
+ stackMaps []bitvec.BitVec
+
+ cache progeffectscache
+
+ // partLiveArgs includes input arguments (PPARAM) that may
+ // be partially live. That is, it is considered live because
+ // a part of it is used, but we may not initialize all parts.
+ partLiveArgs map[*ir.Name]bool
+
+ doClobber bool // Whether to clobber dead stack slots in this function.
+ noClobberArgs bool // Do not clobber function arguments
+}
+
+// Map maps from *ssa.Value to StackMapIndex.
+// Also keeps track of unsafe ssa.Values and ssa.Blocks.
+// (unsafe = can't be interrupted during GC.)
+type Map struct {
+ Vals map[ssa.ID]objw.StackMapIndex
+ UnsafeVals map[ssa.ID]bool
+ UnsafeBlocks map[ssa.ID]bool
+ // The set of live, pointer-containing variables at the DeferReturn
+ // call (only set when open-coded defers are used).
+ DeferReturn objw.StackMapIndex
+}
+
+func (m *Map) reset() {
+ if m.Vals == nil {
+ m.Vals = make(map[ssa.ID]objw.StackMapIndex)
+ m.UnsafeVals = make(map[ssa.ID]bool)
+ m.UnsafeBlocks = make(map[ssa.ID]bool)
+ } else {
+ for k := range m.Vals {
+ delete(m.Vals, k)
+ }
+ for k := range m.UnsafeVals {
+ delete(m.UnsafeVals, k)
+ }
+ for k := range m.UnsafeBlocks {
+ delete(m.UnsafeBlocks, k)
+ }
+ }
+ m.DeferReturn = objw.StackMapDontCare
+}
+
+func (m *Map) set(v *ssa.Value, i objw.StackMapIndex) {
+ m.Vals[v.ID] = i
+}
+func (m *Map) setUnsafeVal(v *ssa.Value) {
+ m.UnsafeVals[v.ID] = true
+}
+func (m *Map) setUnsafeBlock(b *ssa.Block) {
+ m.UnsafeBlocks[b.ID] = true
+}
+
+func (m Map) Get(v *ssa.Value) objw.StackMapIndex {
+ // If v isn't in the map, then it's a "don't care".
+ if idx, ok := m.Vals[v.ID]; ok {
+ return idx
+ }
+ return objw.StackMapDontCare
+}
+func (m Map) GetUnsafe(v *ssa.Value) bool {
+ // default is safe
+ return m.UnsafeVals[v.ID]
+}
+func (m Map) GetUnsafeBlock(b *ssa.Block) bool {
+ // default is safe
+ return m.UnsafeBlocks[b.ID]
+}
+
+type progeffectscache struct {
+ retuevar []int32
+ tailuevar []int32
+ initialized bool
+}
+
+// shouldTrack reports whether the liveness analysis
+// should track the variable n.
+// We don't care about variables that have no pointers,
+// nor do we care about non-local variables,
+// nor do we care about empty structs (handled by the pointer check),
+// nor do we care about the fake PAUTOHEAP variables.
+func shouldTrack(n *ir.Name) bool {
+ return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
+}
+
+// getvariables returns the list of on-stack variables that we need to track
+// and a map for looking up indices by *Node.
+func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
+ var vars []*ir.Name
+ for _, n := range fn.Dcl {
+ if shouldTrack(n) {
+ vars = append(vars, n)
+ }
+ }
+ idx := make(map[*ir.Name]int32, len(vars))
+ for i, n := range vars {
+ idx[n] = int32(i)
+ }
+ return vars, idx
+}
+
+func (lv *liveness) initcache() {
+ if lv.cache.initialized {
+ base.Fatalf("liveness cache initialized twice")
+ return
+ }
+ lv.cache.initialized = true
+
+ for i, node := range lv.vars {
+ switch node.Class {
+ case ir.PPARAM:
+ // A return instruction with a p.to is a tail return, which brings
+ // the stack pointer back up (if it ever went down) and then jumps
+ // to a new function entirely. That form of instruction must read
+ // all the parameters for correctness, and similarly it must not
+ // read the out arguments - they won't be set until the new
+ // function runs.
+ lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
+
+ case ir.PPARAMOUT:
+ // All results are live at every return point.
+ // Note that this point is after escaping return values
+ // are copied back to the stack using their PAUTOHEAP references.
+ lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
+ }
+ }
+}
+
+// A liveEffect is a set of flags that describe an instruction's
+// liveness effects on a variable.
+//
+// The possible flags are:
+//
+// uevar - used by the instruction
+// varkill - killed by the instruction (set)
+//
+// A kill happens after the use (for an instruction that updates a value, for example).
+type liveEffect int
+
+const (
+ uevar liveEffect = 1 << iota
+ varkill
+)
+
+// valueEffects returns the index of a variable in lv.vars and the
+// liveness effects v has on that variable.
+// If v does not affect any tracked variables, it returns -1, 0.
+func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
+ n, e := affectedVar(v)
+ if e == 0 || n == nil { // cheapest checks first
+ return -1, 0
+ }
+ // AllocFrame has dropped unused variables from
+ // lv.fn.Func.Dcl, but they might still be referenced by
+ // OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
+ // variable" ICEs (issue 19632).
+ switch v.Op {
+ case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
+ if !n.Used() {
+ return -1, 0
+ }
+ }
+
+ if n.Class == ir.PPARAM && !n.Addrtaken() && n.Type().Size() > int64(types.PtrSize) {
+ // Only aggregate-typed arguments that are not address-taken can be
+ // partially live.
+ lv.partLiveArgs[n] = true
+ }
+
+ var effect liveEffect
+ // Read is a read, obviously.
+ //
+ // Addr is a read also, as any subsequent holder of the pointer must be able
+ // to see all the values (including initialization) written so far.
+ // This also prevents a variable from "coming back from the dead" and presenting
+ // stale pointers to the garbage collector. See issue 28445.
+ if e&(ssa.SymRead|ssa.SymAddr) != 0 {
+ effect |= uevar
+ }
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
+ effect |= varkill
+ }
+
+ if effect == 0 {
+ return -1, 0
+ }
+
+ if pos, ok := lv.idx[n]; ok {
+ return pos, effect
+ }
+ return -1, 0
+}
+
+// affectedVar returns the *ir.Name node affected by v.
+func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) {
+ // Special cases.
+ switch v.Op {
+ case ssa.OpLoadReg:
+ n, _ := ssa.AutoVar(v.Args[0])
+ return n, ssa.SymRead
+ case ssa.OpStoreReg:
+ n, _ := ssa.AutoVar(v)
+ return n, ssa.SymWrite
+
+ case ssa.OpArgIntReg:
+ // This forces the spill slot for the register to be live at function entry.
+ // one of the following holds for a function F with pointer-valued register arg X:
+ // 0. No GC (so an uninitialized spill slot is okay)
+ // 1. GC at entry of F. GC is precise, but the spills around morestack initialize X's spill slot
+ // 2. Stack growth at entry of F. Same as GC.
+ // 3. GC occurs within F itself. This has to be from preemption, and thus GC is conservative.
+ // a. X is in a register -- then X is seen, and the spill slot is also scanned conservatively.
+ // b. X is spilled -- the spill slot is initialized, and scanned conservatively
+ // c. X is not live -- the spill slot is scanned conservatively, and it may contain X from an earlier spill.
+ // 4. GC within G, transitively called from F
+ // a. X is live at call site, therefore is spilled, to its spill slot (which is live because of subsequent LoadReg).
+ // b. X is not live at call site -- but neither is its spill slot.
+ n, _ := ssa.AutoVar(v)
+ return n, ssa.SymRead
+
+ case ssa.OpVarLive:
+ return v.Aux.(*ir.Name), ssa.SymRead
+ case ssa.OpVarDef:
+ return v.Aux.(*ir.Name), ssa.SymWrite
+ case ssa.OpKeepAlive:
+ n, _ := ssa.AutoVar(v.Args[0])
+ return n, ssa.SymRead
+ }
+
+ e := v.Op.SymEffect()
+ if e == 0 {
+ return nil, 0
+ }
+
+ switch a := v.Aux.(type) {
+ case nil, *obj.LSym:
+ // ok, but no node
+ return nil, e
+ case *ir.Name:
+ return a, e
+ default:
+ base.Fatalf("weird aux: %s", v.LongString())
+ return nil, e
+ }
+}
+
+type livenessFuncCache struct {
+ be []blockEffects
+ livenessMap Map
+}
+
+// Constructs a new liveness structure used to hold the global state of the
+// liveness computation. The cfg argument is a slice of *BasicBlocks and the
+// vars argument is a slice of *Nodes.
+func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness {
+ lv := &liveness{
+ fn: fn,
+ f: f,
+ vars: vars,
+ idx: idx,
+ stkptrsize: stkptrsize,
+ }
+
+ // Significant sources of allocation are kept in the ssa.Cache
+ // and reused. Surprisingly, the bit vectors themselves aren't
+ // a major source of allocation, but the liveness maps are.
+ if lc, _ := f.Cache.Liveness.(*livenessFuncCache); lc == nil {
+ // Prep the cache so liveness can fill it later.
+ f.Cache.Liveness = new(livenessFuncCache)
+ } else {
+ if cap(lc.be) >= f.NumBlocks() {
+ lv.be = lc.be[:f.NumBlocks()]
+ }
+ lv.livenessMap = Map{
+ Vals: lc.livenessMap.Vals,
+ UnsafeVals: lc.livenessMap.UnsafeVals,
+ UnsafeBlocks: lc.livenessMap.UnsafeBlocks,
+ DeferReturn: objw.StackMapDontCare,
+ }
+ lc.livenessMap.Vals = nil
+ lc.livenessMap.UnsafeVals = nil
+ lc.livenessMap.UnsafeBlocks = nil
+ }
+ if lv.be == nil {
+ lv.be = make([]blockEffects, f.NumBlocks())
+ }
+
+ nblocks := int32(len(f.Blocks))
+ nvars := int32(len(vars))
+ bulk := bitvec.NewBulk(nvars, nblocks*7)
+ for _, b := range f.Blocks {
+ be := lv.blockEffects(b)
+
+ be.uevar = bulk.Next()
+ be.varkill = bulk.Next()
+ be.livein = bulk.Next()
+ be.liveout = bulk.Next()
+ }
+ lv.livenessMap.reset()
+
+ lv.markUnsafePoints()
+
+ lv.partLiveArgs = make(map[*ir.Name]bool)
+
+ lv.enableClobber()
+
+ return lv
+}
+
+func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects {
+ return &lv.be[b.ID]
+}
+
+// Generates live pointer value maps for arguments and local variables. The
+// this argument and the in arguments are always assumed live. The vars
+// argument is a slice of *Nodes.
+func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
+ for i := int32(0); ; i++ {
+ i = liveout.Next(i)
+ if i < 0 {
+ break
+ }
+ node := vars[i]
+ switch node.Class {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if !node.IsOutputParamInRegisters() {
+ if node.FrameOffset() < 0 {
+ lv.f.Fatalf("Node %v has frameoffset %d\n", node.Sym().Name, node.FrameOffset())
+ }
+ typebits.SetNoCheck(node.Type(), node.FrameOffset(), args)
+ break
+ }
+ fallthrough // PPARAMOUT in registers acts memory-allocates like an AUTO
+ case ir.PAUTO:
+ typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
+ }
+ }
+}
+
+// IsUnsafe indicates that all points in this function are
+// unsafe-points.
+func IsUnsafe(f *ssa.Func) bool {
+ // The runtime assumes the only safe-points are function
+ // prologues (because that's how it used to be). We could and
+ // should improve that, but for now keep consider all points
+ // in the runtime unsafe. obj will add prologues and their
+ // safe-points.
+ //
+ // go:nosplit functions are similar. Since safe points used to
+ // be coupled with stack checks, go:nosplit often actually
+ // means "no safe points in this function".
+ return base.Flag.CompilingRuntime || f.NoSplit
+}
+
+// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
+func (lv *liveness) markUnsafePoints() {
+ if IsUnsafe(lv.f) {
+ // No complex analysis necessary.
+ lv.allUnsafe = true
+ return
+ }
+
+ lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
+ lv.unsafeBlocks = bitvec.New(int32(lv.f.NumBlocks()))
+
+ // Mark architecture-specific unsafe points.
+ for _, b := range lv.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.UnsafePoint() {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+ }
+
+ for _, b := range lv.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpWBend {
+ continue
+ }
+ // WBend appears at the start of a block, like this:
+ // ...
+ // if wbEnabled: goto C else D
+ // C:
+ // ... some write barrier enabled code ...
+ // goto B
+ // D:
+ // ... some write barrier disabled code ...
+ // goto B
+ // B:
+ // m1 = Phi mem_C mem_D
+ // m2 = store operation ... m1
+ // m3 = store operation ... m2
+ // m4 = WBend m3
+
+ // Find first memory op in the block, which should be a Phi.
+ m := v
+ for {
+ m = m.MemoryArg()
+ if m.Block != b {
+ lv.f.Fatalf("can't find Phi before write barrier end mark %v", v)
+ }
+ if m.Op == ssa.OpPhi {
+ break
+ }
+ }
+ // Find the two predecessor blocks (write barrier on and write barrier off)
+ if len(m.Args) != 2 {
+ lv.f.Fatalf("phi before write barrier end mark has %d args, want 2", len(m.Args))
+ }
+ c := b.Preds[0].Block()
+ d := b.Preds[1].Block()
+
+ // Find their common predecessor block (the one that branches based on wb on/off).
+ // It might be a diamond pattern, or one of the blocks in the diamond pattern might
+ // be missing.
+ var decisionBlock *ssa.Block
+ if len(c.Preds) == 1 && c.Preds[0].Block() == d {
+ decisionBlock = d
+ } else if len(d.Preds) == 1 && d.Preds[0].Block() == c {
+ decisionBlock = c
+ } else if len(c.Preds) == 1 && len(d.Preds) == 1 && c.Preds[0].Block() == d.Preds[0].Block() {
+ decisionBlock = c.Preds[0].Block()
+ } else {
+ lv.f.Fatalf("can't find write barrier pattern %v", v)
+ }
+ if len(decisionBlock.Succs) != 2 {
+ lv.f.Fatalf("common predecessor block the wrong type %s", decisionBlock.Kind)
+ }
+
+ // Flow backwards from the control value to find the
+ // flag load. We don't know what lowered ops we're
+ // looking for, but all current arches produce a
+ // single op that does the memory load from the flag
+ // address, so we look for that.
+ var load *ssa.Value
+ v := decisionBlock.Controls[0]
+ for {
+ if v.MemoryArg() != nil {
+ // Single instruction to load (and maybe compare) the write barrier flag.
+ if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+ load = v
+ break
+ }
+ // Some architectures have to materialize the address separate from
+ // the load.
+ if sym, ok := v.Args[0].Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+ load = v
+ break
+ }
+ v.Fatalf("load of write barrier flag not from correct global: %s", v.LongString())
+ }
+ // Common case: just flow backwards.
+ if len(v.Args) == 1 || len(v.Args) == 2 && v.Args[0] == v.Args[1] {
+ // Note: 386 lowers Neq32 to (TESTL cond cond),
+ v = v.Args[0]
+ continue
+ }
+ v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
+ }
+
+ // Mark everything after the load unsafe.
+ found := false
+ for _, v := range decisionBlock.Values {
+ if found {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ found = found || v == load
+ }
+ lv.unsafeBlocks.Set(int32(decisionBlock.ID))
+
+ // Mark the write barrier on/off blocks as unsafe.
+ for _, e := range decisionBlock.Succs {
+ x := e.Block()
+ if x == b {
+ continue
+ }
+ for _, v := range x.Values {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ lv.unsafeBlocks.Set(int32(x.ID))
+ }
+
+ // Mark from the join point up to the WBend as unsafe.
+ for _, v := range b.Values {
+ if v.Op == ssa.OpWBend {
+ break
+ }
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+ }
+}
+
+// Returns true for instructions that must have a stack map.
+//
+// This does not necessarily mean the instruction is a safe-point. In
+// particular, call Values can have a stack map in case the callee
+// grows the stack, but not themselves be a safe-point.
+func (lv *liveness) hasStackMap(v *ssa.Value) bool {
+ if !v.Op.IsCall() {
+ return false
+ }
+ // wbZero and wbCopy are write barriers and
+ // deeply non-preemptible. They are unsafe points and
+ // hence should not have liveness maps.
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.WBZero || sym.Fn == ir.Syms.WBMove) {
+ return false
+ }
+ return true
+}
+
+// Initializes the sets for solving the live variables. Visits all the
+// instructions in each basic block to summarizes the information at each basic
+// block
+func (lv *liveness) prologue() {
+ lv.initcache()
+
+ for _, b := range lv.f.Blocks {
+ be := lv.blockEffects(b)
+
+ // Walk the block instructions backward and update the block
+ // effects with the each prog effects.
+ for j := len(b.Values) - 1; j >= 0; j-- {
+ pos, e := lv.valueEffects(b.Values[j])
+ if e&varkill != 0 {
+ be.varkill.Set(pos)
+ be.uevar.Unset(pos)
+ }
+ if e&uevar != 0 {
+ be.uevar.Set(pos)
+ }
+ }
+ }
+}
+
+// Solve the liveness dataflow equations.
+func (lv *liveness) solve() {
+ // These temporary bitvectors exist to avoid successive allocations and
+ // frees within the loop.
+ nvars := int32(len(lv.vars))
+ newlivein := bitvec.New(nvars)
+ newliveout := bitvec.New(nvars)
+
+ // Walk blocks in postorder ordering. This improves convergence.
+ po := lv.f.Postorder()
+
+ // Iterate through the blocks in reverse round-robin fashion. A work
+ // queue might be slightly faster. As is, the number of iterations is
+ // so low that it hardly seems to be worth the complexity.
+
+ for change := true; change; {
+ change = false
+ for _, b := range po {
+ be := lv.blockEffects(b)
+
+ newliveout.Clear()
+ switch b.Kind {
+ case ssa.BlockRet:
+ for _, pos := range lv.cache.retuevar {
+ newliveout.Set(pos)
+ }
+ case ssa.BlockRetJmp:
+ for _, pos := range lv.cache.tailuevar {
+ newliveout.Set(pos)
+ }
+ case ssa.BlockExit:
+ // panic exit - nothing to do
+ default:
+ // A variable is live on output from this block
+ // if it is live on input to some successor.
+ //
+ // out[b] = \bigcup_{s \in succ[b]} in[s]
+ newliveout.Copy(lv.blockEffects(b.Succs[0].Block()).livein)
+ for _, succ := range b.Succs[1:] {
+ newliveout.Or(newliveout, lv.blockEffects(succ.Block()).livein)
+ }
+ }
+
+ if !be.liveout.Eq(newliveout) {
+ change = true
+ be.liveout.Copy(newliveout)
+ }
+
+ // A variable is live on input to this block
+ // if it is used by this block, or live on output from this block and
+ // not set by the code in this block.
+ //
+ // in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+ newlivein.AndNot(be.liveout, be.varkill)
+ be.livein.Or(newlivein, be.uevar)
+ }
+ }
+}
+
+// Visits all instructions in a basic block and computes a bit vector of live
+// variables at each safe point locations.
+func (lv *liveness) epilogue() {
+ nvars := int32(len(lv.vars))
+ liveout := bitvec.New(nvars)
+ livedefer := bitvec.New(nvars) // always-live variables
+
+ // If there is a defer (that could recover), then all output
+ // parameters are live all the time. In addition, any locals
+ // that are pointers to heap-allocated output parameters are
+ // also always live (post-deferreturn code needs these
+ // pointers to copy values back to the stack).
+ // TODO: if the output parameter is heap-allocated, then we
+ // don't need to keep the stack copy live?
+ if lv.fn.HasDefer() {
+ for i, n := range lv.vars {
+ if n.Class == ir.PPARAMOUT {
+ if n.IsOutputParamHeapAddr() {
+ // Just to be paranoid. Heap addresses are PAUTOs.
+ base.Fatalf("variable %v both output param and heap output param", n)
+ }
+ if n.Heapaddr != nil {
+ // If this variable moved to the heap, then
+ // its stack copy is not live.
+ continue
+ }
+ // Note: zeroing is handled by zeroResults in walk.go.
+ livedefer.Set(int32(i))
+ }
+ if n.IsOutputParamHeapAddr() {
+ // This variable will be overwritten early in the function
+ // prologue (from the result of a mallocgc) but we need to
+ // zero it in case that malloc causes a stack scan.
+ n.SetNeedzero(true)
+ livedefer.Set(int32(i))
+ }
+ if n.OpenDeferSlot() {
+ // Open-coded defer args slots must be live
+ // everywhere in a function, since a panic can
+ // occur (almost) anywhere. Because it is live
+ // everywhere, it must be zeroed on entry.
+ livedefer.Set(int32(i))
+ // It was already marked as Needzero when created.
+ if !n.Needzero() {
+ base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ }
+ }
+ }
+ }
+
+ // We must analyze the entry block first. The runtime assumes
+ // the function entry map is index 0. Conveniently, layout
+ // already ensured that the entry block is first.
+ if lv.f.Entry != lv.f.Blocks[0] {
+ lv.f.Fatalf("entry block must be first")
+ }
+
+ {
+ // Reserve an entry for function entry.
+ live := bitvec.New(nvars)
+ lv.livevars = append(lv.livevars, live)
+ }
+
+ for _, b := range lv.f.Blocks {
+ be := lv.blockEffects(b)
+
+ // Walk forward through the basic block instructions and
+ // allocate liveness maps for those instructions that need them.
+ for _, v := range b.Values {
+ if !lv.hasStackMap(v) {
+ continue
+ }
+
+ live := bitvec.New(nvars)
+ lv.livevars = append(lv.livevars, live)
+ }
+
+ // walk backward, construct maps at each safe point
+ index := int32(len(lv.livevars) - 1)
+
+ liveout.Copy(be.liveout)
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+
+ if lv.hasStackMap(v) {
+ // Found an interesting instruction, record the
+ // corresponding liveness information.
+
+ live := &lv.livevars[index]
+ live.Or(*live, liveout)
+ live.Or(*live, livedefer) // only for non-entry safe points
+ index--
+ }
+
+ // Update liveness information.
+ pos, e := lv.valueEffects(v)
+ if e&varkill != 0 {
+ liveout.Unset(pos)
+ }
+ if e&uevar != 0 {
+ liveout.Set(pos)
+ }
+ }
+
+ if b == lv.f.Entry {
+ if index != 0 {
+ base.Fatalf("bad index for entry point: %v", index)
+ }
+
+ // Check to make sure only input variables are live.
+ for i, n := range lv.vars {
+ if !liveout.Get(int32(i)) {
+ continue
+ }
+ if n.Class == ir.PPARAM {
+ continue // ok
+ }
+ base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n)
+ }
+
+ // Record live variables.
+ live := &lv.livevars[index]
+ live.Or(*live, liveout)
+ }
+
+ if lv.doClobber {
+ lv.clobber(b)
+ }
+
+ // The liveness maps for this block are now complete. Compact them.
+ lv.compact(b)
+ }
+
+ // If we have an open-coded deferreturn call, make a liveness map for it.
+ if lv.fn.OpenCodedDeferDisallowed() {
+ lv.livenessMap.DeferReturn = objw.StackMapDontCare
+ } else {
+ idx, _ := lv.stackMapSet.add(livedefer)
+ lv.livenessMap.DeferReturn = objw.StackMapIndex(idx)
+ }
+
+ // Done compacting. Throw out the stack map set.
+ lv.stackMaps = lv.stackMapSet.extractUnique()
+ lv.stackMapSet = bvecSet{}
+
+ // Useful sanity check: on entry to the function,
+ // the only things that can possibly be live are the
+ // input parameters.
+ for j, n := range lv.vars {
+ if n.Class != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
+ }
+ }
+}
+
+// Compact coalesces identical bitmaps from lv.livevars into the sets
+// lv.stackMapSet.
+//
+// Compact clears lv.livevars.
+//
+// There are actually two lists of bitmaps, one list for the local variables and one
+// list for the function arguments. Both lists are indexed by the same PCDATA
+// index, so the corresponding pairs must be considered together when
+// merging duplicates. The argument bitmaps change much less often during
+// function execution than the local variable bitmaps, so it is possible that
+// we could introduce a separate PCDATA index for arguments vs locals and
+// then compact the set of argument bitmaps separately from the set of
+// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
+// is actually a net loss: we save about 50k of argument bitmaps but the new
+// PCDATA tables cost about 100k. So for now we keep using a single index for
+// both bitmap lists.
+func (lv *liveness) compact(b *ssa.Block) {
+ pos := 0
+ if b == lv.f.Entry {
+ // Handle entry stack map.
+ lv.stackMapSet.add(lv.livevars[0])
+ pos++
+ }
+ for _, v := range b.Values {
+ if lv.hasStackMap(v) {
+ idx, _ := lv.stackMapSet.add(lv.livevars[pos])
+ pos++
+ lv.livenessMap.set(v, objw.StackMapIndex(idx))
+ }
+ if lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID)) {
+ lv.livenessMap.setUnsafeVal(v)
+ }
+ }
+ if lv.allUnsafe || lv.unsafeBlocks.Get(int32(b.ID)) {
+ lv.livenessMap.setUnsafeBlock(b)
+ }
+
+ // Reset livevars.
+ lv.livevars = lv.livevars[:0]
+}
+
+func (lv *liveness) enableClobber() {
+ // The clobberdead experiment inserts code to clobber pointer slots in all
+ // the dead variables (locals and args) at every synchronous safepoint.
+ if !base.Flag.ClobberDead {
+ return
+ }
+ if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ // C or assembly code uses the exact frame layout. Don't clobber.
+ return
+ }
+ if len(lv.vars) > 10000 || len(lv.f.Blocks) > 10000 {
+ // Be careful to avoid doing too much work.
+ // Bail if >10000 variables or >10000 blocks.
+ // Otherwise, giant functions make this experiment generate too much code.
+ return
+ }
+ if lv.f.Name == "forkAndExecInChild" {
+ // forkAndExecInChild calls vfork on some platforms.
+ // The code we add here clobbers parts of the stack in the child.
+ // When the parent resumes, it is using the same stack frame. But the
+ // child has clobbered stack variables that the parent needs. Boom!
+ // In particular, the sys argument gets clobbered.
+ return
+ }
+ if lv.f.Name == "wbBufFlush" ||
+ ((lv.f.Name == "callReflect" || lv.f.Name == "callMethod") && lv.fn.ABIWrapper()) {
+ // runtime.wbBufFlush must not modify its arguments. See the comments
+ // in runtime/mwbbuf.go:wbBufFlush.
+ //
+ // reflect.callReflect and reflect.callMethod are called from special
+ // functions makeFuncStub and methodValueCall. The runtime expects
+ // that it can find the first argument (ctxt) at 0(SP) in makeFuncStub
+ // and methodValueCall's frame (see runtime/traceback.go:getArgInfo).
+ // Normally callReflect and callMethod already do not modify the
+ // argument, and keep it alive. But the compiler-generated ABI wrappers
+ // don't do that. Special case the wrappers to not clobber its arguments.
+ lv.noClobberArgs = true
+ }
+ if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" {
+ // Clobber only functions where the hash of the function name matches a pattern.
+ // Useful for binary searching for a miscompiled function.
+ hstr := ""
+ for _, b := range notsha256.Sum256([]byte(lv.f.Name)) {
+ hstr += fmt.Sprintf("%08b", b)
+ }
+ if !strings.HasSuffix(hstr, h) {
+ return
+ }
+ fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.f.Name)
+ }
+ lv.doClobber = true
+}
+
+// Inserts code to clobber pointer slots in all the dead variables (locals and args)
+// at every synchronous safepoint in b.
+func (lv *liveness) clobber(b *ssa.Block) {
+ // Copy block's values to a temporary.
+ oldSched := append([]*ssa.Value{}, b.Values...)
+ b.Values = b.Values[:0]
+ idx := 0
+
+ // Clobber pointer slots in all dead variables at entry.
+ if b == lv.f.Entry {
+ for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
+ // Skip argless ops. We need to skip at least
+ // the lowered ClosurePtr op, because it
+ // really wants to be first. This will also
+ // skip ops like InitMem and SP, which are ok.
+ b.Values = append(b.Values, oldSched[0])
+ oldSched = oldSched[1:]
+ }
+ clobber(lv, b, lv.livevars[0])
+ idx++
+ }
+
+ // Copy values into schedule, adding clobbering around safepoints.
+ for _, v := range oldSched {
+ if !lv.hasStackMap(v) {
+ b.Values = append(b.Values, v)
+ continue
+ }
+ clobber(lv, b, lv.livevars[idx])
+ b.Values = append(b.Values, v)
+ idx++
+ }
+}
+
+// clobber generates code to clobber pointer slots in all dead variables
+// (those not marked in live). Clobbering instructions are added to the end
+// of b.Values.
+func clobber(lv *liveness, b *ssa.Block, live bitvec.BitVec) {
+ for i, n := range lv.vars {
+ if !live.Get(int32(i)) && !n.Addrtaken() && !n.OpenDeferSlot() && !n.IsOutputParamHeapAddr() {
+ // Don't clobber stack objects (address-taken). They are
+ // tracked dynamically.
+ // Also don't clobber slots that are live for defers (see
+ // the code setting livedefer in epilogue).
+ if lv.noClobberArgs && n.Class == ir.PPARAM {
+ continue
+ }
+ clobberVar(b, n)
+ }
+ }
+}
+
+// clobberVar generates code to trash the pointers in v.
+// Clobbering instructions are added to the end of b.Values.
+func clobberVar(b *ssa.Block, v *ir.Name) {
+ clobberWalk(b, v, 0, v.Type())
+}
+
+// b = block to which we append instructions
+// v = variable
+// offset = offset of (sub-portion of) variable to clobber (in bytes)
+// t = type of sub-portion of v.
+func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) {
+ if !t.HasPointers() {
+ return
+ }
+ switch t.Kind() {
+ case types.TPTR,
+ types.TUNSAFEPTR,
+ types.TFUNC,
+ types.TCHAN,
+ types.TMAP:
+ clobberPtr(b, v, offset)
+
+ case types.TSTRING:
+ // struct { byte *str; int len; }
+ clobberPtr(b, v, offset)
+
+ case types.TINTER:
+ // struct { Itab *tab; void *data; }
+ // or, when isnilinter(t)==true:
+ // struct { Type *type; void *data; }
+ clobberPtr(b, v, offset)
+ clobberPtr(b, v, offset+int64(types.PtrSize))
+
+ case types.TSLICE:
+ // struct { byte *array; int len; int cap; }
+ clobberPtr(b, v, offset)
+
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ {
+ clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
+ }
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields() {
+ clobberWalk(b, v, offset+t1.Offset, t1.Type)
+ }
+
+ default:
+ base.Fatalf("clobberWalk: unexpected type, %v", t)
+ }
+}
+
+// clobberPtr generates a clobber of the pointer at offset offset in v.
+// The clobber instruction is added at the end of b.
+func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) {
+ b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
+}
+
+func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
+ if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
+ return
+ }
+ if lv.fn.Wrapper() || lv.fn.Dupok() {
+ // Skip reporting liveness information for compiler-generated wrappers.
+ return
+ }
+ if !(v == nil || v.Op.IsCall()) {
+ // Historically we only printed this information at
+ // calls. Keep doing so.
+ return
+ }
+ if live.IsEmpty() {
+ return
+ }
+
+ pos := lv.fn.Nname.Pos()
+ if v != nil {
+ pos = v.Pos
+ }
+
+ s := "live at "
+ if v == nil {
+ s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn))
+ } else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+ fn := sym.Fn.Name
+ if pos := strings.Index(fn, "."); pos >= 0 {
+ fn = fn[pos+1:]
+ }
+ s += fmt.Sprintf("call to %s:", fn)
+ } else {
+ s += "indirect call:"
+ }
+
+ // Sort variable names for display. Variables aren't in any particular order, and
+ // the order can change by architecture, particularly with differences in regabi.
+ var names []string
+ for j, n := range lv.vars {
+ if live.Get(int32(j)) {
+ names = append(names, n.Sym().Name)
+ }
+ }
+ sort.Strings(names)
+ for _, v := range names {
+ s += " " + v
+ }
+
+ base.WarnfAt(pos, s)
+}
+
+func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
+ if live.IsEmpty() {
+ return printed
+ }
+
+ if !printed {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("%s=", name)
+
+ comma := ""
+ for i, n := range lv.vars {
+ if !live.Get(int32(i)) {
+ continue
+ }
+ fmt.Printf("%s%s", comma, n.Sym().Name)
+ comma = ","
+ }
+ return true
+}
+
+// printeffect is like printbvec, but for valueEffects.
+func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
+ if !x {
+ return printed
+ }
+ if !printed {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("%s=", name)
+ if x {
+ fmt.Printf("%s", lv.vars[pos].Sym().Name)
+ }
+
+ return true
+}
+
+// Prints the computed liveness information and inputs, for debugging.
+// This format synthesizes the information used during the multiple passes
+// into a single presentation.
+func (lv *liveness) printDebug() {
+ fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
+
+ for i, b := range lv.f.Blocks {
+ if i > 0 {
+ fmt.Printf("\n")
+ }
+
+ // bb#0 pred=1,2 succ=3,4
+ fmt.Printf("bb#%d pred=", b.ID)
+ for j, pred := range b.Preds {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", pred.Block().ID)
+ }
+ fmt.Printf(" succ=")
+ for j, succ := range b.Succs {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", succ.Block().ID)
+ }
+ fmt.Printf("\n")
+
+ be := lv.blockEffects(b)
+
+ // initial settings
+ printed := false
+ printed = lv.printbvec(printed, "uevar", be.uevar)
+ printed = lv.printbvec(printed, "livein", be.livein)
+ if printed {
+ fmt.Printf("\n")
+ }
+
+ // program listing, with individual effects listed
+
+ if b == lv.f.Entry {
+ live := lv.stackMaps[0]
+ fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos()))
+ fmt.Printf("\tlive=")
+ printed = false
+ for j, n := range lv.vars {
+ if !live.Get(int32(j)) {
+ continue
+ }
+ if printed {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", n)
+ printed = true
+ }
+ fmt.Printf("\n")
+ }
+
+ for _, v := range b.Values {
+ fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString())
+
+ pcdata := lv.livenessMap.Get(v)
+
+ pos, effect := lv.valueEffects(v)
+ printed = false
+ printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
+ printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
+ if printed {
+ fmt.Printf("\n")
+ }
+
+ if pcdata.StackMapValid() {
+ fmt.Printf("\tlive=")
+ printed = false
+ if pcdata.StackMapValid() {
+ live := lv.stackMaps[pcdata]
+ for j, n := range lv.vars {
+ if !live.Get(int32(j)) {
+ continue
+ }
+ if printed {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", n)
+ printed = true
+ }
+ }
+ fmt.Printf("\n")
+ }
+
+ if lv.livenessMap.GetUnsafe(v) {
+ fmt.Printf("\tunsafe-point\n")
+ }
+ }
+ if lv.livenessMap.GetUnsafeBlock(b) {
+ fmt.Printf("\tunsafe-block\n")
+ }
+
+ // bb bitsets
+ fmt.Printf("end\n")
+ printed = false
+ printed = lv.printbvec(printed, "varkill", be.varkill)
+ printed = lv.printbvec(printed, "liveout", be.liveout)
+ if printed {
+ fmt.Printf("\n")
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
+// first word dumped is the total number of bitmaps. The second word is the
+// length of the bitmaps. All bitmaps are assumed to be of equal length. The
+// remaining bytes are the raw bitmaps.
+func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
+ // Size args bitmaps to be just large enough to hold the largest pointer.
+ // First, find the largest Xoffset node we care about.
+ // (Nodes without pointers aren't in lv.vars; see ShouldTrack.)
+ var maxArgNode *ir.Name
+ for _, n := range lv.vars {
+ switch n.Class {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if !n.IsOutputParamInRegisters() {
+ if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
+ maxArgNode = n
+ }
+ }
+ }
+ }
+ // Next, find the offset of the largest pointer in the largest node.
+ var maxArgs int64
+ if maxArgNode != nil {
+ maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type())
+ }
+
+ // Size locals bitmaps to be stkptrsize sized.
+ // We cannot shrink them to only hold the largest pointer,
+ // because their size is used to calculate the beginning
+ // of the local variables frame.
+ // Further discussion in https://golang.org/cl/104175.
+ // TODO: consider trimming leading zeros.
+ // This would require shifting all bitmaps.
+ maxLocals := lv.stkptrsize
+
+ // Temporary symbols for encoding bitmaps.
+ var argsSymTmp, liveSymTmp obj.LSym
+
+ args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
+ aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
+
+ locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
+ loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
+
+ for _, live := range lv.stackMaps {
+ args.Clear()
+ locals.Clear()
+
+ lv.pointerMap(live, lv.vars, args, locals)
+
+ aoff = objw.BitVec(&argsSymTmp, aoff, args)
+ loff = objw.BitVec(&liveSymTmp, loff, locals)
+ }
+
+ // These symbols will be added to Ctxt.Data by addGCLocals
+ // after parallel compilation is done.
+ return base.Ctxt.GCLocalsSym(argsSymTmp.P), base.Ctxt.GCLocalsSym(liveSymTmp.P)
+}
+
+// Entry pointer for Compute analysis. Solves for the Compute of
+// pointer variables in the function and emits a runtime data
+// structure read by the garbage collector.
+// Returns a map from GC safe points to their corresponding stack map index,
+// and a map that contains all input parameters that may be partially live.
+func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) (Map, map[*ir.Name]bool) {
+ // Construct the global liveness state.
+ vars, idx := getvariables(curfn)
+ lv := newliveness(curfn, f, vars, idx, stkptrsize)
+
+ // Run the dataflow framework.
+ lv.prologue()
+ lv.solve()
+ lv.epilogue()
+ if base.Flag.Live > 0 {
+ lv.showlive(nil, lv.stackMaps[0])
+ for _, b := range f.Blocks {
+ for _, val := range b.Values {
+ if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
+ lv.showlive(val, lv.stackMaps[idx])
+ }
+ }
+ }
+ }
+ if base.Flag.Live >= 2 {
+ lv.printDebug()
+ }
+
+ // Update the function cache.
+ {
+ cache := f.Cache.Liveness.(*livenessFuncCache)
+ if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices.
+ for i := range lv.be {
+ lv.be[i] = blockEffects{}
+ }
+ cache.be = lv.be
+ }
+ if len(lv.livenessMap.Vals) < 2000 {
+ cache.livenessMap = lv.livenessMap
+ }
+ }
+
+ // Emit the live pointer map data structures
+ ls := curfn.LSym
+ fninfo := ls.Func()
+ fninfo.GCArgs, fninfo.GCLocals = lv.emit()
+
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(rtabi.FUNCDATA_ArgsPointerMaps)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = fninfo.GCArgs
+
+ p = pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(rtabi.FUNCDATA_LocalsPointerMaps)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = fninfo.GCLocals
+
+ if x := lv.emitStackObjects(); x != nil {
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(rtabi.FUNCDATA_StackObjects)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+ }
+
+ return lv.livenessMap, lv.partLiveArgs
+}
+
+func (lv *liveness) emitStackObjects() *obj.LSym {
+ var vars []*ir.Name
+ for _, n := range lv.fn.Dcl {
+ if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap {
+ vars = append(vars, n)
+ }
+ }
+ if len(vars) == 0 {
+ return nil
+ }
+
+ // Sort variables from lowest to highest address.
+ sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() })
+
+ // Populate the stack object data.
+ // Format must match runtime/stack.go:stackObjectRecord.
+ x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj")
+ x.Set(obj.AttrContentAddressable, true)
+ lv.fn.LSym.Func().StackObjects = x
+ off := 0
+ off = objw.Uintptr(x, off, uint64(len(vars)))
+ for _, v := range vars {
+ // Note: arguments and return values have non-negative Xoffset,
+ // in which case the offset is relative to argp.
+ // Locals have a negative Xoffset, in which case the offset is relative to varp.
+ // We already limit the frame size, so the offset and the object size
+ // should not be too big.
+ frameOffset := v.FrameOffset()
+ if frameOffset != int64(int32(frameOffset)) {
+ base.Fatalf("frame offset too big: %v %d", v, frameOffset)
+ }
+ off = objw.Uint32(x, off, uint32(frameOffset))
+
+ t := v.Type()
+ sz := t.Size()
+ if sz != int64(int32(sz)) {
+ base.Fatalf("stack object too big: %v of type %v, size %d", v, t, sz)
+ }
+ lsym, useGCProg, ptrdata := reflectdata.GCSym(t)
+ if useGCProg {
+ ptrdata = -ptrdata
+ }
+ off = objw.Uint32(x, off, uint32(sz))
+ off = objw.Uint32(x, off, uint32(ptrdata))
+ off = objw.SymPtrOff(x, off, lsym)
+ }
+
+ if base.Flag.Live != 0 {
+ for _, v := range vars {
+ base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type())
+ }
+ }
+
+ return x
+}
+
+// isfat reports whether a variable of type t needs multiple assignments to initialize.
+// For example:
+//
+// type T struct { x, y int }
+// x := T{x: 0, y: 1}
+//
+// Then we need:
+//
+// var t T
+// t.x = 0
+// t.y = 1
+//
+// to fully initialize t.
+func isfat(t *types.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case types.TSLICE, types.TSTRING,
+ types.TINTER: // maybe remove later
+ return true
+ case types.TARRAY:
+ // Array of 1 element, check if element is fat
+ if t.NumElem() == 1 {
+ return isfat(t.Elem())
+ }
+ return true
+ case types.TSTRUCT:
+ // Struct with 1 field, check if field is fat
+ if t.NumFields() == 1 {
+ return isfat(t.Field(0).Type)
+ }
+ return true
+ }
+ }
+
+ return false
+}
+
+// WriteFuncMap writes the pointer bitmaps for bodyless function fn's
+// inputs and outputs as the value of symbol <fn>.args_stackmap.
+// If fn has outputs, two bitmaps are written, otherwise just one.
+func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) {
+ if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
+ return
+ }
+ nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize))
+ bv := bitvec.New(int32(nptr))
+
+ for _, p := range abiInfo.InParams() {
+ typebits.SetNoCheck(p.Type, p.FrameOffset(abiInfo), bv)
+ }
+
+ nbitmap := 1
+ if fn.Type().NumResults() > 0 {
+ nbitmap = 2
+ }
+ lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
+ off := objw.Uint32(lsym, 0, uint32(nbitmap))
+ off = objw.Uint32(lsym, off, uint32(bv.N))
+ off = objw.BitVec(lsym, off, bv)
+
+ if fn.Type().NumResults() > 0 {
+ for _, p := range abiInfo.OutParams() {
+ if len(p.Registers) == 0 {
+ typebits.SetNoCheck(p.Type, p.FrameOffset(abiInfo), bv)
+ }
+ }
+ off = objw.BitVec(lsym, off, bv)
+ }
+
+ objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
+}
diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go
new file mode 100644
index 0000000..b731e55
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/log_opts.go
@@ -0,0 +1,540 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logopt
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "encoding/json"
+ "fmt"
+ "internal/buildcfg"
+ "io"
+ "log"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// This implements (non)optimization logging for -json option to the Go compiler
+// The option is -json 0,<destination>.
+//
+// 0 is the version number; to avoid the need for synchronized updates, if
+// new versions of the logging appear, the compiler will support both, for a while,
+// and clients will specify what they need.
+//
+// <destination> is a directory.
+// Directories are specified with a leading / or os.PathSeparator,
+// or more explicitly with file://directory. The second form is intended to
+// deal with corner cases on Windows, and to allow specification of a relative
+// directory path (which is normally a bad idea, because the local directory
+// varies a lot in a build, especially with modules and/or vendoring, and may
+// not be writeable).
+//
+// For each package pkg compiled, a url.PathEscape(pkg)-named subdirectory
+// is created. For each source file.go in that package that generates
+// diagnostics (no diagnostics means no file),
+// a url.PathEscape(file)+".json"-named file is created and contains the
+// logged diagnostics.
+//
+// For example, "cmd%2Finternal%2Fdwarf/%3Cautogenerated%3E.json"
+// for "cmd/internal/dwarf" and <autogenerated> (which is not really a file, but the compiler sees it)
+//
+// If the package string is empty, it is replaced internally with string(0) which encodes to %00.
+//
+// Each log file begins with a JSON record identifying version,
+// platform, and other context, followed by optimization-relevant
+// LSP Diagnostic records, one per line (LSP version 3.15, no difference from 3.14 on the subset used here
+// see https://microsoft.github.io/language-server-protocol/specifications/specification-3-15/ )
+//
+// The fields of a Diagnostic are used in the following way:
+// Range: the outermost source position, for now begin and end are equal.
+// Severity: (always) SeverityInformation (3)
+// Source: (always) "go compiler"
+// Code: a string describing the missed optimization, e.g., "nilcheck", "cannotInline", "isInBounds", "escape"
+// Message: depending on code, additional information, e.g., the reason a function cannot be inlined.
+// RelatedInformation: if the missed optimization actually occurred at a function inlined at Range,
+// then the sequence of inlined locations appears here, from (second) outermost to innermost,
+// each with message="inlineLoc".
+//
+// In the case of escape analysis explanations, after any outer inlining locations,
+// the lines of the explanation appear, each potentially followed with its own inlining
+// location if the escape flow occurred within an inlined function.
+//
+// For example <destination>/cmd%2Fcompile%2Finternal%2Fssa/prove.json
+// might begin with the following line (wrapped for legibility):
+//
+// {"version":0,"package":"cmd/compile/internal/ssa","goos":"darwin","goarch":"amd64",
+// "gc_version":"devel +e1b9a57852 Fri Nov 1 15:07:00 2019 -0400",
+// "file":"/Users/drchase/work/go/src/cmd/compile/internal/ssa/prove.go"}
+//
+// and later contain (also wrapped for legibility):
+//
+// {"range":{"start":{"line":191,"character":24},"end":{"line":191,"character":24}},
+// "severity":3,"code":"nilcheck","source":"go compiler","message":"",
+// "relatedInformation":[
+// {"location":{"uri":"file:///Users/drchase/work/go/src/cmd/compile/internal/ssa/func.go",
+// "range":{"start":{"line":153,"character":16},"end":{"line":153,"character":16}}},
+// "message":"inlineLoc"}]}
+//
+// That is, at prove.go (implicit from context, provided in both filename and header line),
+// line 191, column 24, a nilcheck occurred in the generated code.
+// The relatedInformation indicates that this code actually came from
+// an inlined call to func.go, line 153, character 16.
+//
+// prove.go:191:
+// ft.orderS = f.newPoset()
+// func.go:152 and 153:
+// func (f *Func) newPoset() *poset {
+// if len(f.Cache.scrPoset) > 0 {
+//
+// In the case that the package is empty, the string(0) package name is also used in the header record, for example
+//
+// go tool compile -json=0,file://logopt x.go # no -p option to set the package
+// head -1 logopt/%00/x.json
+// {"version":0,"package":"\u0000","goos":"darwin","goarch":"amd64","gc_version":"devel +86487adf6a Thu Nov 7 19:34:56 2019 -0500","file":"x.go"}
+
+type VersionHeader struct {
+ Version int `json:"version"`
+ Package string `json:"package"`
+ Goos string `json:"goos"`
+ Goarch string `json:"goarch"`
+ GcVersion string `json:"gc_version"`
+ File string `json:"file,omitempty"` // LSP requires an enclosing resource, i.e., a file
+}
+
+// DocumentURI, Position, Range, Location, Diagnostic, DiagnosticRelatedInformation all reuse json definitions from gopls.
+// See https://github.com/golang/tools/blob/22afafe3322a860fcd3d88448768f9db36f8bc5f/internal/lsp/protocol/tsprotocol.go
+
+type DocumentURI string
+
+type Position struct {
+ Line uint `json:"line"` // gopls uses float64, but json output is the same for integers
+ Character uint `json:"character"` // gopls uses float64, but json output is the same for integers
+}
+
+// A Range in a text document expressed as (zero-based) start and end positions.
+// A range is comparable to a selection in an editor. Therefore the end position is exclusive.
+// If you want to specify a range that contains a line including the line ending character(s)
+// then use an end position denoting the start of the next line.
+type Range struct {
+ /*Start defined:
+ * The range's start position
+ */
+ Start Position `json:"start"`
+
+ /*End defined:
+ * The range's end position
+ */
+ End Position `json:"end"` // exclusive
+}
+
+// A Location represents a location inside a resource, such as a line inside a text file.
+type Location struct {
+ // URI is
+ URI DocumentURI `json:"uri"`
+
+ // Range is
+ Range Range `json:"range"`
+}
+
+/* DiagnosticRelatedInformation defined:
+ * Represents a related message and source code location for a diagnostic. This should be
+ * used to point to code locations that cause or related to a diagnostics, e.g when duplicating
+ * a symbol in a scope.
+ */
+type DiagnosticRelatedInformation struct {
+
+ /*Location defined:
+ * The location of this related diagnostic information.
+ */
+ Location Location `json:"location"`
+
+ /*Message defined:
+ * The message of this related diagnostic information.
+ */
+ Message string `json:"message"`
+}
+
+// DiagnosticSeverity defines constants
+type DiagnosticSeverity uint
+
+const (
+ /*SeverityInformation defined:
+ * Reports an information.
+ */
+ SeverityInformation DiagnosticSeverity = 3
+)
+
+// DiagnosticTag defines constants
+type DiagnosticTag uint
+
+/*Diagnostic defined:
+ * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects
+ * are only valid in the scope of a resource.
+ */
+type Diagnostic struct {
+
+ /*Range defined:
+ * The range at which the message applies
+ */
+ Range Range `json:"range"`
+
+ /*Severity defined:
+ * The diagnostic's severity. Can be omitted. If omitted it is up to the
+ * client to interpret diagnostics as error, warning, info or hint.
+ */
+ Severity DiagnosticSeverity `json:"severity,omitempty"` // always SeverityInformation for optimizer logging.
+
+ /*Code defined:
+ * The diagnostic's code, which usually appear in the user interface.
+ */
+ Code string `json:"code,omitempty"` // LSP uses 'number | string' = gopls interface{}, but only string here, e.g. "boundsCheck", "nilcheck", etc.
+
+ /*Source defined:
+ * A human-readable string describing the source of this
+ * diagnostic, e.g. 'typescript' or 'super lint'. It usually
+ * appears in the user interface.
+ */
+ Source string `json:"source,omitempty"` // "go compiler"
+
+ /*Message defined:
+ * The diagnostic's message. It usually appears in the user interface
+ */
+ Message string `json:"message"` // sometimes used, provides additional information.
+
+ /*Tags defined:
+ * Additional metadata about the diagnostic.
+ */
+ Tags []DiagnosticTag `json:"tags,omitempty"` // always empty for logging optimizations.
+
+ /*RelatedInformation defined:
+ * An array of related diagnostic information, e.g. when symbol-names within
+ * a scope collide all definitions can be marked via this property.
+ */
+ RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
+}
+
+// A LoggedOpt is what the compiler produces and accumulates,
+// to be converted to JSON for human or IDE consumption.
+type LoggedOpt struct {
+ pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON.
+ lastPos src.XPos // Usually the same as pos; current exception is for reporting entire range of transformed loops
+ compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet)
+ functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet)
+ what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline"
+ target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant.
+}
+
+type logFormat uint8
+
+const (
+ None logFormat = iota
+ Json0 // version 0 for LSP 3.14, 3.15; future versions of LSP may change the format and the compiler may need to support both as clients are updated.
+)
+
+var Format = None
+var dest string
+
+// LogJsonOption parses and validates the version,directory value attached to the -json compiler flag.
+func LogJsonOption(flagValue string) {
+ version, directory := parseLogFlag("json", flagValue)
+ if version != 0 {
+ log.Fatal("-json version must be 0")
+ }
+ dest = checkLogPath(directory)
+ Format = Json0
+}
+
+// parseLogFlag checks the flag passed to -json
+// for version,destination format and returns the two parts.
+func parseLogFlag(flag, value string) (version int, directory string) {
+ if Format != None {
+ log.Fatal("Cannot repeat -json flag")
+ }
+ commaAt := strings.Index(value, ",")
+ if commaAt <= 0 {
+ log.Fatalf("-%s option should be '<version>,<destination>' where <version> is a number", flag)
+ }
+ v, err := strconv.Atoi(value[:commaAt])
+ if err != nil {
+ log.Fatalf("-%s option should be '<version>,<destination>' where <version> is a number: err=%v", flag, err)
+ }
+ version = v
+ directory = value[commaAt+1:]
+ return
+}
+
+// isWindowsDriveURIPath returns true if the file URI is of the format used by
+// Windows URIs. The url.Parse package does not specially handle Windows paths
+// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
+// (copied from tools/internal/span/uri.go)
+// this is less comprehensive that the processing in filepath.IsAbs on Windows.
+func isWindowsDriveURIPath(uri string) bool {
+ if len(uri) < 4 {
+ return false
+ }
+ return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
+}
+
+func parseLogPath(destination string) (string, string) {
+ if filepath.IsAbs(destination) {
+ return filepath.Clean(destination), ""
+ }
+ if strings.HasPrefix(destination, "file://") { // IKWIAD, or Windows C:\foo\bar\baz
+ uri, err := url.Parse(destination)
+ if err != nil {
+ return "", fmt.Sprintf("optimizer logging destination looked like file:// URI but failed to parse: err=%v", err)
+ }
+ destination = uri.Host + uri.Path
+ if isWindowsDriveURIPath(destination) {
+ // strip leading / from /C:
+ // unlike tools/internal/span/uri.go, do not uppercase the drive letter -- let filepath.Clean do what it does.
+ destination = destination[1:]
+ }
+ return filepath.Clean(destination), ""
+ }
+ return "", fmt.Sprintf("optimizer logging destination %s was neither %s-prefixed directory nor file://-prefixed file URI", destination, string(filepath.Separator))
+}
+
+// checkLogPath does superficial early checking of the string specifying
+// the directory to which optimizer logging is directed, and if
+// it passes the test, stores the string in LO_dir.
+func checkLogPath(destination string) string {
+ path, complaint := parseLogPath(destination)
+ if complaint != "" {
+ log.Fatalf(complaint)
+ }
+ err := os.MkdirAll(path, 0755)
+ if err != nil {
+ log.Fatalf("optimizer logging destination '<version>,<directory>' but could not create <directory>: err=%v", err)
+ }
+ return path
+}
+
+var loggedOpts []*LoggedOpt
+var mu = sync.Mutex{} // mu protects loggedOpts.
+
+// NewLoggedOpt allocates a new LoggedOpt, to later be passed to either NewLoggedOpt or LogOpt as "args".
+// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
+// funcName is the name of the function
+// A typical use for this to accumulate an explanation for a missed optimization, for example, why did something escape?
+func NewLoggedOpt(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt {
+ pass = strings.Replace(pass, " ", "_", -1)
+ return &LoggedOpt{pos, lastPos, pass, funcName, what, args}
+}
+
+// LogOpt logs information about a (usually missed) optimization performed by the compiler.
+// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
+// funcName is the name of the function.
+func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) {
+ if Format == None {
+ return
+ }
+ lo := NewLoggedOpt(pos, pos, what, pass, funcName, args...)
+ mu.Lock()
+ defer mu.Unlock()
+ // Because of concurrent calls from back end, no telling what the order will be, but is stable-sorted by outer Pos before use.
+ loggedOpts = append(loggedOpts, lo)
+}
+
+// LogOptRange is the same as LogOpt, but includes the ability to express a range of positions,
+// not just a point.
+func LogOptRange(pos, lastPos src.XPos, what, pass, funcName string, args ...interface{}) {
+ if Format == None {
+ return
+ }
+ lo := NewLoggedOpt(pos, lastPos, what, pass, funcName, args...)
+ mu.Lock()
+ defer mu.Unlock()
+ // Because of concurrent calls from back end, no telling what the order will be, but is stable-sorted by outer Pos before use.
+ loggedOpts = append(loggedOpts, lo)
+}
+
+// Enabled returns whether optimization logging is enabled.
+func Enabled() bool {
+ switch Format {
+ case None:
+ return false
+ case Json0:
+ return true
+ }
+ panic("Unexpected optimizer-logging level")
+}
+
+// byPos sorts diagnostics by source position.
+type byPos struct {
+ ctxt *obj.Link
+ a []*LoggedOpt
+}
+
+func (x byPos) Len() int { return len(x.a) }
+func (x byPos) Less(i, j int) bool {
+ return x.ctxt.OutermostPos(x.a[i].pos).Before(x.ctxt.OutermostPos(x.a[j].pos))
+}
+func (x byPos) Swap(i, j int) { x.a[i], x.a[j] = x.a[j], x.a[i] }
+
+func writerForLSP(subdirpath, file string) io.WriteCloser {
+ basename := file
+ lastslash := strings.LastIndexAny(basename, "\\/")
+ if lastslash != -1 {
+ basename = basename[lastslash+1:]
+ }
+ lastdot := strings.LastIndex(basename, ".go")
+ if lastdot != -1 {
+ basename = basename[:lastdot]
+ }
+ basename = url.PathEscape(basename)
+
+ // Assume a directory, make a file
+ p := filepath.Join(subdirpath, basename+".json")
+ w, err := os.Create(p)
+ if err != nil {
+ log.Fatalf("Could not create file %s for logging optimizer actions, %v", p, err)
+ }
+ return w
+}
+
+func fixSlash(f string) string {
+ if os.PathSeparator == '/' {
+ return f
+ }
+ return strings.Replace(f, string(os.PathSeparator), "/", -1)
+}
+
+func uriIfy(f string) DocumentURI {
+ url := url.URL{
+ Scheme: "file",
+ Path: fixSlash(f),
+ }
+ return DocumentURI(url.String())
+}
+
+// Return filename, replacing a first occurrence of $GOROOT with the
+// actual value of the GOROOT (because LSP does not speak "$GOROOT").
+func uprootedPath(filename string) string {
+ if filename == "" {
+ return "__unnamed__"
+ }
+ if buildcfg.GOROOT == "" || !strings.HasPrefix(filename, "$GOROOT/") {
+ return filename
+ }
+ return buildcfg.GOROOT + filename[len("$GOROOT"):]
+}
+
+// FlushLoggedOpts flushes all the accumulated optimization log entries.
+func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) {
+ if Format == None {
+ return
+ }
+
+ sort.Stable(byPos{ctxt, loggedOpts}) // Stable is necessary to preserve the per-function order, which is repeatable.
+ switch Format {
+
+ case Json0: // LSP 3.15
+ var posTmp, lastTmp []src.Pos
+ var encoder *json.Encoder
+ var w io.WriteCloser
+
+ if slashPkgPath == "" {
+ slashPkgPath = "\000"
+ }
+ subdirpath := filepath.Join(dest, url.PathEscape(slashPkgPath))
+ err := os.MkdirAll(subdirpath, 0755)
+ if err != nil {
+ log.Fatalf("Could not create directory %s for logging optimizer actions, %v", subdirpath, err)
+ }
+ diagnostic := Diagnostic{Source: "go compiler", Severity: SeverityInformation}
+
+ // For LSP, make a subdirectory for the package, and for each file foo.go, create foo.json in that subdirectory.
+ currentFile := ""
+ for _, x := range loggedOpts {
+ posTmp, p0 := parsePos(ctxt, x.pos, posTmp)
+ lastTmp, l0 := parsePos(ctxt, x.lastPos, lastTmp) // These match posTmp/p0 except for most-inline, and that often also matches.
+ p0f := uprootedPath(p0.Filename())
+
+ if currentFile != p0f {
+ if w != nil {
+ w.Close()
+ }
+ currentFile = p0f
+ w = writerForLSP(subdirpath, currentFile)
+ encoder = json.NewEncoder(w)
+ encoder.Encode(VersionHeader{Version: 0, Package: slashPkgPath, Goos: buildcfg.GOOS, Goarch: buildcfg.GOARCH, GcVersion: buildcfg.Version, File: currentFile})
+ }
+
+ // The first "target" is the most important one.
+ var target string
+ if len(x.target) > 0 {
+ target = fmt.Sprint(x.target[0])
+ }
+
+ diagnostic.Code = x.what
+ diagnostic.Message = target
+ diagnostic.Range = newRange(p0, l0)
+ diagnostic.RelatedInformation = diagnostic.RelatedInformation[:0]
+
+ appendInlinedPos(posTmp, lastTmp, &diagnostic)
+
+ // Diagnostic explanation is stored in RelatedInformation after inlining info
+ if len(x.target) > 1 {
+ switch y := x.target[1].(type) {
+ case []*LoggedOpt:
+ for _, z := range y {
+ posTmp, p0 := parsePos(ctxt, z.pos, posTmp)
+ lastTmp, l0 := parsePos(ctxt, z.lastPos, lastTmp)
+ loc := newLocation(p0, l0)
+ msg := z.what
+ if len(z.target) > 0 {
+ msg = msg + ": " + fmt.Sprint(z.target[0])
+ }
+
+ diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: msg})
+ appendInlinedPos(posTmp, lastTmp, &diagnostic)
+ }
+ }
+ }
+
+ encoder.Encode(diagnostic)
+ }
+ if w != nil {
+ w.Close()
+ }
+ }
+}
+
+// newRange returns a single-position Range for the compiler source location p.
+func newRange(p, last src.Pos) Range {
+ return Range{Start: Position{p.Line(), p.Col()},
+ End: Position{last.Line(), last.Col()}}
+}
+
+// newLocation returns the Location for the compiler source location p.
+func newLocation(p, last src.Pos) Location {
+ loc := Location{URI: uriIfy(uprootedPath(p.Filename())), Range: newRange(p, last)}
+ return loc
+}
+
+// appendInlinedPos extracts inlining information from posTmp and append it to diagnostic.
+func appendInlinedPos(posTmp, lastTmp []src.Pos, diagnostic *Diagnostic) {
+ for i := 1; i < len(posTmp); i++ {
+ loc := newLocation(posTmp[i], lastTmp[i])
+ diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: "inlineLoc"})
+ }
+}
+
+// parsePos expands a src.XPos into a slice of src.Pos, with the outermost first.
+// It returns the slice, and the outermost.
+func parsePos(ctxt *obj.Link, pos src.XPos, posTmp []src.Pos) ([]src.Pos, src.Pos) {
+ posTmp = posTmp[:0]
+ ctxt.AllPos(pos, func(p src.Pos) {
+ posTmp = append(posTmp, p)
+ })
+ return posTmp, posTmp[0]
+}
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
new file mode 100644
index 0000000..c7debd9
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -0,0 +1,250 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logopt
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+const srcCode = `package x
+type pair struct {a,b int}
+func bar(y *pair) *int {
+ return &y.b
+}
+var a []int
+func foo(w, z *pair) *int {
+ if *bar(w) > 0 {
+ return bar(z)
+ }
+ if a[1] > 0 {
+ a = a[:2]
+ }
+ return &a[0]
+}
+
+// address taking prevents closure inlining
+func n() int {
+ foo := func() int { return 1 }
+ bar := &foo
+ x := (*bar)() + foo()
+ return x
+}
+`
+
+func want(t *testing.T, out string, desired string) {
+ // On Windows, Unicode escapes in the JSON output end up "normalized" elsewhere to /u....,
+ // so "normalize" what we're looking for to match that.
+ s := strings.ReplaceAll(desired, string(os.PathSeparator), "/")
+ if !strings.Contains(out, s) {
+ t.Errorf("did not see phrase %s in \n%s", s, out)
+ }
+}
+
+func wantN(t *testing.T, out string, desired string, n int) {
+ if strings.Count(out, desired) != n {
+ t.Errorf("expected exactly %d occurrences of %s in \n%s", n, desired, out)
+ }
+}
+
+func TestPathStuff(t *testing.T) {
+ sep := string(filepath.Separator)
+ if path, whine := parseLogPath("file:///c:foo"); path != "c:foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("file:///foo"); path != sep+"foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if sep == "\\" { // On WINDOWS ONLY
+ if path, whine := parseLogPath("C:/foo"); path != "C:\\foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("c:foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("/foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ } else { // ON UNIX ONLY
+ if path, whine := parseLogPath("/foo"); path != sep+"foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ }
+}
+
+func TestLogOpt(t *testing.T) {
+ t.Parallel()
+
+ testenv.MustHaveGoBuild(t)
+
+ dir := fixSlash(t.TempDir()) // Normalize the directory name as much as possible, for Windows testing
+ src := filepath.Join(dir, "file.go")
+ if err := os.WriteFile(src, []byte(srcCode), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ outfile := filepath.Join(dir, "file.o")
+
+ t.Run("JSON_fails", func(t *testing.T) {
+ // Test malformed flag
+ out, err := testLogOpt(t, "-json=foo", src, outfile)
+ if err == nil {
+ t.Error("-json=foo succeeded unexpectedly")
+ }
+ want(t, out, "option should be")
+ want(t, out, "number")
+
+ // Test a version number that is currently unsupported (and should remain unsupported for a while)
+ out, err = testLogOpt(t, "-json=9,foo", src, outfile)
+ if err == nil {
+ t.Error("-json=0,foo succeeded unexpectedly")
+ }
+ want(t, out, "version must be")
+
+ })
+
+ // replace d (dir) with t ("tmpdir") and convert path separators to '/'
+ normalize := func(out []byte, d, t string) string {
+ s := string(out)
+ s = strings.ReplaceAll(s, d, t)
+ s = strings.ReplaceAll(s, string(os.PathSeparator), "/")
+ return s
+ }
+
+ // Ensure that <128 byte copies are not reported and that 128-byte copies are.
+ // Check at both 1 and 8-byte alignments.
+ t.Run("Copy", func(t *testing.T) {
+ const copyCode = `package x
+func s128a1(x *[128]int8) [128]int8 {
+ return *x
+}
+func s127a1(x *[127]int8) [127]int8 {
+ return *x
+}
+func s16a8(x *[16]int64) [16]int64 {
+ return *x
+}
+func s15a8(x *[15]int64) [15]int64 {
+ return *x
+}
+`
+ copy := filepath.Join(dir, "copy.go")
+ if err := os.WriteFile(copy, []byte(copyCode), 0644); err != nil {
+ t.Fatal(err)
+ }
+ outcopy := filepath.Join(dir, "copy.o")
+
+ // On not-amd64, test the host architecture and os
+ arches := []string{runtime.GOARCH}
+ goos0 := runtime.GOOS
+ if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js")
+ arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "loong64", "ppc64le", "riscv64", "s390x", "wasm"}
+ goos0 = "linux"
+ }
+
+ for _, arch := range arches {
+ t.Run(arch, func(t *testing.T) {
+ goos := goos0
+ if arch == "wasm" {
+ goos = "js"
+ }
+ _, err := testCopy(t, dir, arch, goos, copy, outcopy)
+ if err != nil {
+ t.Error("-json=0,file://log/opt should have succeeded")
+ }
+ logged, err := os.ReadFile(filepath.Join(dir, "log", "opt", "x", "copy.json"))
+ if err != nil {
+ t.Error("-json=0,file://log/opt missing expected log file")
+ }
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir")))
+ t.Logf("%s", slogged)
+ want(t, slogged, `{"range":{"start":{"line":3,"character":2},"end":{"line":3,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`)
+ want(t, slogged, `{"range":{"start":{"line":9,"character":2},"end":{"line":9,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`)
+ wantN(t, slogged, `"code":"copy"`, 2)
+ })
+ }
+ })
+
+ // Some architectures don't fault on nil dereference, so nilchecks are eliminated differently.
+ // The N-way copy test also doesn't need to run N-ways N times.
+ if runtime.GOARCH != "amd64" {
+ return
+ }
+
+ t.Run("Success", func(t *testing.T) {
+ // This test is supposed to succeed
+
+ // Note 'file://' is the I-Know-What-I-Am-Doing way of specifying a file, also to deal with corner cases for Windows.
+ _, err := testLogOptDir(t, dir, "-json=0,file://log/opt", src, outfile)
+ if err != nil {
+ t.Error("-json=0,file://log/opt should have succeeded")
+ }
+ logged, err := os.ReadFile(filepath.Join(dir, "log", "opt", "x", "file.json"))
+ if err != nil {
+ t.Error("-json=0,file://log/opt missing expected log file")
+ }
+ // All this delicacy with uriIfy and filepath.Join is to get this test to work right on Windows.
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir")))
+ t.Logf("%s", slogged)
+ // below shows proper nilcheck
+ want(t, slogged, `{"range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}},"severity":3,"code":"nilcheck","source":"go compiler","message":"",`+
+ `"relatedInformation":[{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"}]}`)
+ want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
+ want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
+ // escape analysis explanation
+ want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
+ `"relatedInformation":[`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~r0 = y:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~r0 = \u0026y.b (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~r0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~r0 (return)"}]}`)
+ })
+}
+
+func testLogOpt(t *testing.T, flag, src, outfile string) (string, error) {
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=p", flag, "-o", outfile, src}
+ t.Log(run)
+ cmd := testenv.Command(t, run[0], run[1:]...)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
+
+func testLogOptDir(t *testing.T, dir, flag, src, outfile string) (string, error) {
+ // Notice the specified import path "x"
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=x", flag, "-o", outfile, src}
+ t.Log(run)
+ cmd := testenv.Command(t, run[0], run[1:]...)
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
+
+func testCopy(t *testing.T, dir, goarch, goos, src, outfile string) (string, error) {
+ // Notice the specified import path "x"
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=x", "-json=0,file://log/opt", "-o", outfile, src}
+ t.Log(run)
+ cmd := testenv.Command(t, run[0], run[1:]...)
+ cmd.Dir = dir
+ cmd.Env = append(os.Environ(), "GOARCH="+goarch, "GOOS="+goos)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
diff --git a/src/cmd/compile/internal/loong64/galign.go b/src/cmd/compile/internal/loong64/galign.go
new file mode 100644
index 0000000..a613165
--- /dev/null
+++ b/src/cmd/compile/internal/loong64/galign.go
@@ -0,0 +1,25 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loong64
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/loong64"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &loong64.Linkloong64
+ arch.REGSP = loong64.REGSP
+ arch.MAXWIDTH = 1 << 50
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/loong64/ggen.go b/src/cmd/compile/internal/loong64/ggen.go
new file mode 100644
index 0000000..27d318a
--- /dev/null
+++ b/src/cmd/compile/internal/loong64/ggen.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loong64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/loong64"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += base.Ctxt.Arch.FixedFrameSize
+
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGSP, off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0)
+ p.Reg = loong64.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
+ } else {
+ // ADDV $(off), SP, r1
+ // ADDV $cnt, r1, r2
+ // loop:
+ // MOVV R0, (r1)
+ // ADDV $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, off, obj.TYPE_REG, loong64.REGRT1, 0)
+ p.Reg = loong64.REGSP
+ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, loong64.REGRT2, 0)
+ p.Reg = loong64.REGRT1
+ p = pp.Append(p, loong64.AMOVV, obj.TYPE_REG, loong64.REGZERO, 0, obj.TYPE_MEM, loong64.REGRT1, 0)
+ loop := p
+ p = pp.Append(p, loong64.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, loong64.REGRT1, 0)
+ p = pp.Append(p, loong64.ABNE, obj.TYPE_REG, loong64.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = loong64.REGRT2
+ p.To.SetTarget(loop)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(loong64.ANOOP)
+ return p
+}
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
new file mode 100644
index 0000000..e7298bd
--- /dev/null
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -0,0 +1,830 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loong64
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/loong64"
+)
+
+// isFPreg reports whether r is an FP register.
+func isFPreg(r int16) bool {
+ return loong64.REG_F0 <= r && r <= loong64.REG_F31
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 {
+ return loong64.AMOVF
+ } else {
+ return loong64.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return loong64.AMOVB
+ } else {
+ return loong64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return loong64.AMOVH
+ } else {
+ return loong64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return loong64.AMOVW
+ } else {
+ return loong64.AMOVWU
+ }
+ case 8:
+ return loong64.AMOVV
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 {
+ return loong64.AMOVF
+ } else {
+ return loong64.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return loong64.AMOVB
+ case 2:
+ return loong64.AMOVH
+ case 4:
+ return loong64.AMOVW
+ case 8:
+ return loong64.AMOVV
+ }
+ }
+ panic("bad store type")
+}
+
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOVV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+ switch {
+ case alignment%8 == 0:
+ return loong64.AMOVV, 8
+ case alignment%4 == 0:
+ return loong64.AMOVW, 4
+ case alignment%2 == 0:
+ return loong64.AMOVH, 2
+ default:
+ return loong64.AMOVB, 1
+ }
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpLOONG64MOVVreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := loong64.AMOVV
+ if isFPreg(x) && isFPreg(y) {
+ as = loong64.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpLOONG64MOVVnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
+ case ssa.OpLOONG64ADDV,
+ ssa.OpLOONG64SUBV,
+ ssa.OpLOONG64AND,
+ ssa.OpLOONG64OR,
+ ssa.OpLOONG64XOR,
+ ssa.OpLOONG64NOR,
+ ssa.OpLOONG64SLLV,
+ ssa.OpLOONG64SRLV,
+ ssa.OpLOONG64SRAV,
+ ssa.OpLOONG64ROTR,
+ ssa.OpLOONG64ROTRV,
+ ssa.OpLOONG64ADDF,
+ ssa.OpLOONG64ADDD,
+ ssa.OpLOONG64SUBF,
+ ssa.OpLOONG64SUBD,
+ ssa.OpLOONG64MULF,
+ ssa.OpLOONG64MULD,
+ ssa.OpLOONG64DIVF,
+ ssa.OpLOONG64DIVD,
+ ssa.OpLOONG64MULV, ssa.OpLOONG64MULHV, ssa.OpLOONG64MULHVU,
+ ssa.OpLOONG64DIVV, ssa.OpLOONG64REMV, ssa.OpLOONG64DIVVU, ssa.OpLOONG64REMVU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64SGT,
+ ssa.OpLOONG64SGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64ADDVconst,
+ ssa.OpLOONG64SUBVconst,
+ ssa.OpLOONG64ANDconst,
+ ssa.OpLOONG64ORconst,
+ ssa.OpLOONG64XORconst,
+ ssa.OpLOONG64NORconst,
+ ssa.OpLOONG64SLLVconst,
+ ssa.OpLOONG64SRLVconst,
+ ssa.OpLOONG64SRAVconst,
+ ssa.OpLOONG64ROTRconst,
+ ssa.OpLOONG64ROTRVconst,
+ ssa.OpLOONG64SGTconst,
+ ssa.OpLOONG64SGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64MOVVconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = loong64.REGTMP
+ p = s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpLOONG64MOVFconst,
+ ssa.OpLOONG64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64CMPEQF,
+ ssa.OpLOONG64CMPEQD,
+ ssa.OpLOONG64CMPGEF,
+ ssa.OpLOONG64CMPGED,
+ ssa.OpLOONG64CMPGTF,
+ ssa.OpLOONG64CMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpLOONG64MOVVaddr:
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVV $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R3)
+ // when constant is large, tmp register (R30) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVV $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64MOVBload,
+ ssa.OpLOONG64MOVBUload,
+ ssa.OpLOONG64MOVHload,
+ ssa.OpLOONG64MOVHUload,
+ ssa.OpLOONG64MOVWload,
+ ssa.OpLOONG64MOVWUload,
+ ssa.OpLOONG64MOVVload,
+ ssa.OpLOONG64MOVFload,
+ ssa.OpLOONG64MOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64MOVBstore,
+ ssa.OpLOONG64MOVHstore,
+ ssa.OpLOONG64MOVWstore,
+ ssa.OpLOONG64MOVVstore,
+ ssa.OpLOONG64MOVFstore,
+ ssa.OpLOONG64MOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpLOONG64MOVBstorezero,
+ ssa.OpLOONG64MOVHstorezero,
+ ssa.OpLOONG64MOVWstorezero,
+ ssa.OpLOONG64MOVVstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpLOONG64MOVBreg,
+ ssa.OpLOONG64MOVBUreg,
+ ssa.OpLOONG64MOVHreg,
+ ssa.OpLOONG64MOVHUreg,
+ ssa.OpLOONG64MOVWreg,
+ ssa.OpLOONG64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpLOONG64MOVVreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg && loong64.REG_R0 <= a.Reg() && a.Reg() <= loong64.REG_R31 {
+ // LoadReg from a narrower type does an extension, except loading
+ // to a floating point register. So only eliminate the extension
+ // if it is loaded to an integer register.
+
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpLOONG64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpLOONG64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpLOONG64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpLOONG64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpLOONG64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpLOONG64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpLOONG64MOVWF,
+ ssa.OpLOONG64MOVWD,
+ ssa.OpLOONG64TRUNCFW,
+ ssa.OpLOONG64TRUNCDW,
+ ssa.OpLOONG64MOVVF,
+ ssa.OpLOONG64MOVVD,
+ ssa.OpLOONG64TRUNCFV,
+ ssa.OpLOONG64TRUNCDV,
+ ssa.OpLOONG64MOVFD,
+ ssa.OpLOONG64MOVDF,
+ ssa.OpLOONG64NEGF,
+ ssa.OpLOONG64NEGD,
+ ssa.OpLOONG64SQRTD,
+ ssa.OpLOONG64SQRTF:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64NEGV:
+ // SUB from REGZERO
+ p := s.Prog(loong64.ASUBVU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64DUFFZERO:
+ // runtime.duffzero expects start address in R20
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpLOONG64LoweredZero:
+ // MOVx R0, (Rarg0)
+ // ADDV $sz, Rarg0
+ // BGEU Rarg1, Rarg0, -2(PC)
+ mov, sz := largestMove(v.AuxInt)
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ p2 := s.Prog(loong64.AADDVU)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(loong64.ABGEU)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[1].Reg()
+ p3.Reg = v.Args[0].Reg()
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ case ssa.OpLOONG64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpLOONG64LoweredMove:
+ // MOVx (Rarg1), Rtmp
+ // MOVx Rtmp, (Rarg0)
+ // ADDV $sz, Rarg1
+ // ADDV $sz, Rarg0
+ // BGEU Rarg2, Rarg0, -4(PC)
+ mov, sz := largestMove(v.AuxInt)
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REGTMP
+
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = loong64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(loong64.AADDVU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Args[1].Reg()
+
+ p4 := s.Prog(loong64.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Args[0].Reg()
+
+ p5 := s.Prog(loong64.ABGEU)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Args[2].Reg()
+ p5.Reg = v.Args[1].Reg()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p)
+
+ case ssa.OpLOONG64CALLstatic, ssa.OpLOONG64CALLclosure, ssa.OpLOONG64CALLinter:
+ s.Call(v)
+ case ssa.OpLOONG64CALLtail:
+ s.TailCall(v)
+ case ssa.OpLOONG64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+ case ssa.OpLOONG64LoweredPanicBoundsA, ssa.OpLOONG64LoweredPanicBoundsB, ssa.OpLOONG64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpLOONG64LoweredAtomicLoad8, ssa.OpLOONG64LoweredAtomicLoad32, ssa.OpLOONG64LoweredAtomicLoad64:
+ as := loong64.AMOVV
+ switch v.Op {
+ case ssa.OpLOONG64LoweredAtomicLoad8:
+ as = loong64.AMOVB
+ case ssa.OpLOONG64LoweredAtomicLoad32:
+ as = loong64.AMOVW
+ }
+ s.Prog(loong64.ADBAR)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(loong64.ADBAR)
+ case ssa.OpLOONG64LoweredAtomicStore8, ssa.OpLOONG64LoweredAtomicStore32, ssa.OpLOONG64LoweredAtomicStore64:
+ as := loong64.AMOVV
+ switch v.Op {
+ case ssa.OpLOONG64LoweredAtomicStore8:
+ as = loong64.AMOVB
+ case ssa.OpLOONG64LoweredAtomicStore32:
+ as = loong64.AMOVW
+ }
+ s.Prog(loong64.ADBAR)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(loong64.ADBAR)
+ case ssa.OpLOONG64LoweredAtomicStorezero32, ssa.OpLOONG64LoweredAtomicStorezero64:
+ as := loong64.AMOVV
+ if v.Op == ssa.OpLOONG64LoweredAtomicStorezero32 {
+ as = loong64.AMOVW
+ }
+ s.Prog(loong64.ADBAR)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(loong64.ADBAR)
+ case ssa.OpLOONG64LoweredAtomicExchange32, ssa.OpLOONG64LoweredAtomicExchange64:
+ // DBAR
+ // MOVV Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // DBAR
+ ll := loong64.ALLV
+ sc := loong64.ASCV
+ if v.Op == ssa.OpLOONG64LoweredAtomicExchange32 {
+ ll = loong64.ALL
+ sc = loong64.ASC
+ }
+ s.Prog(loong64.ADBAR)
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REGTMP
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = loong64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(loong64.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = loong64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(loong64.ADBAR)
+ case ssa.OpLOONG64LoweredAtomicAdd32, ssa.OpLOONG64LoweredAtomicAdd64:
+ // DBAR
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // DBAR
+ // ADDV Rarg1, Rout
+ ll := loong64.ALLV
+ sc := loong64.ASCV
+ if v.Op == ssa.OpLOONG64LoweredAtomicAdd32 {
+ ll = loong64.ALL
+ sc = loong64.ASC
+ }
+ s.Prog(loong64.ADBAR)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(loong64.AADDVU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = loong64.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = loong64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(loong64.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = loong64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(loong64.ADBAR)
+ p4 := s.Prog(loong64.AADDVU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpLOONG64LoweredAtomicAddconst32, ssa.OpLOONG64LoweredAtomicAddconst64:
+ // DBAR
+ // LL (Rarg0), Rout
+ // ADDV $auxint, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // DBAR
+ // ADDV $auxint, Rout
+ ll := loong64.ALLV
+ sc := loong64.ASCV
+ if v.Op == ssa.OpLOONG64LoweredAtomicAddconst32 {
+ ll = loong64.ALL
+ sc = loong64.ASC
+ }
+ s.Prog(loong64.ADBAR)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(loong64.AADDVU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = loong64.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = loong64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(loong64.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = loong64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(loong64.ADBAR)
+ p4 := s.Prog(loong64.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpLOONG64LoweredAtomicCas32, ssa.OpLOONG64LoweredAtomicCas64:
+ // MOVV $0, Rout
+ // DBAR
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // DBAR
+ ll := loong64.ALLV
+ sc := loong64.ASCV
+ if v.Op == ssa.OpLOONG64LoweredAtomicCas32 {
+ ll = loong64.ALL
+ sc = loong64.ASC
+ }
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(loong64.ADBAR)
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = loong64.REGTMP
+ p2 := s.Prog(loong64.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = loong64.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(loong64.AMOVV)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+ p4 := s.Prog(sc)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+ p5 := s.Prog(loong64.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p1)
+ p6 := s.Prog(loong64.ADBAR)
+ p2.To.SetTarget(p6)
+ case ssa.OpLOONG64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(loong64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = loong64.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpLOONG64FPFlagTrue,
+ ssa.OpLOONG64FPFlagFalse:
+ // MOVV $0, r
+ // BFPF 2(PC)
+ // MOVV $1, r
+ branch := loong64.ABFPF
+ if v.Op == ssa.OpLOONG64FPFlagFalse {
+ branch = loong64.ABFPT
+ }
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p2 := s.Prog(branch)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(loong64.AMOVV)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = 1
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg()
+ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
+ p2.To.SetTarget(p4)
+ case ssa.OpLOONG64LoweredGetClosurePtr:
+ // Closure pointer is R22 (loong64.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpLOONG64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(loong64.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpLOONG64MASKEQZ, ssa.OpLOONG64MASKNEZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockLOONG64EQ: {loong64.ABEQ, loong64.ABNE},
+ ssa.BlockLOONG64NE: {loong64.ABNE, loong64.ABEQ},
+ ssa.BlockLOONG64LTZ: {loong64.ABLTZ, loong64.ABGEZ},
+ ssa.BlockLOONG64GEZ: {loong64.ABGEZ, loong64.ABLTZ},
+ ssa.BlockLOONG64LEZ: {loong64.ABLEZ, loong64.ABGTZ},
+ ssa.BlockLOONG64GTZ: {loong64.ABGTZ, loong64.ABLEZ},
+ ssa.BlockLOONG64FPT: {loong64.ABFPT, loong64.ABFPF},
+ ssa.BlockLOONG64FPF: {loong64.ABFPF, loong64.ABFPT},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R19:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(loong64.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = loong64.REGZERO
+ p.Reg = loong64.REG_R19
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockLOONG64EQ, ssa.BlockLOONG64NE,
+ ssa.BlockLOONG64LTZ, ssa.BlockLOONG64GEZ,
+ ssa.BlockLOONG64LEZ, ssa.BlockLOONG64GTZ,
+ ssa.BlockLOONG64FPT, ssa.BlockLOONG64FPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t, reg))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/loopvar/loopvar.go b/src/cmd/compile/internal/loopvar/loopvar.go
new file mode 100644
index 0000000..030fc04
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/loopvar.go
@@ -0,0 +1,612 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package loopvar applies the proper variable capture, according
+// to experiment, flags, language version, etc.
+package loopvar
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+type VarAndLoop struct {
+ Name *ir.Name
+ Loop ir.Node // the *ir.RangeStmt or *ir.ForStmt. Used for identity and position
+ LastPos src.XPos // the last position observed within Loop
+}
+
+// ForCapture transforms for and range loops that declare variables that might be
+// captured by a closure or escaped to the heap, using a syntactic check that
+// conservatively overestimates the loops where capture occurs, but still avoids
+// transforming the (large) majority of loops. It returns the list of names
+// subject to this change, that may (once transformed) be heap allocated in the
+// process. (This allows checking after escape analysis to call out any such
+// variables, in case it causes allocation/performance problems).
+//
+// The decision to transform loops is normally encoded in the For/Range loop node
+// field DistinctVars but is also dependent on base.LoopVarHash, and some values
+// of base.Debug.LoopVar (which is set per-package). Decisions encoded in DistinctVars
+// are preserved across inlining, so if package a calls b.F and loops in b.F are
+// transformed, then they are always transformed, whether b.F is inlined or not.
+//
+// Per-package, the debug flag settings that affect this transformer:
+//
+// base.LoopVarHash != nil => use hash setting to govern transformation.
+// note that LoopVarHash != nil sets base.Debug.LoopVar to 1 (unless it is >= 11, for testing/debugging).
+//
+// base.Debug.LoopVar == 11 => transform ALL loops ignoring syntactic/potential escape. Do not log, can be in addition to GOEXPERIMENT.
+//
+// The effect of GOEXPERIMENT=loopvar is to change the default value (0) of base.Debug.LoopVar to 1 for all packages.
+func ForCapture(fn *ir.Func) []VarAndLoop {
+ // if a loop variable is transformed it is appended to this slice for later logging
+ var transformed []VarAndLoop
+
+ describe := func(n *ir.Name) string {
+ pos := n.Pos()
+ inner := base.Ctxt.InnermostPos(pos)
+ outer := base.Ctxt.OutermostPos(pos)
+ if inner == outer {
+ return fmt.Sprintf("loop variable %v now per-iteration", n)
+ }
+ return fmt.Sprintf("loop variable %v now per-iteration (loop inlined into %s:%d)", n, outer.Filename(), outer.Line())
+ }
+
+ forCapture := func() {
+ seq := 1
+
+ dclFixups := make(map[*ir.Name]ir.Stmt)
+
+ // possibly leaked includes names of declared loop variables that may be leaked;
+ // the mapped value is true if the name is *syntactically* leaked, and those loops
+ // will be transformed.
+ possiblyLeaked := make(map[*ir.Name]bool)
+
+ // these enable an optimization of "escape" under return statements
+ loopDepth := 0
+ returnInLoopDepth := 0
+
+ // noteMayLeak is called for candidate variables in for range/3-clause, and
+ // adds them (mapped to false) to possiblyLeaked.
+ noteMayLeak := func(x ir.Node) {
+ if n, ok := x.(*ir.Name); ok {
+ if n.Type().Kind() == types.TBLANK {
+ return
+ }
+ // default is false (leak candidate, not yet known to leak), but flag can make all variables "leak"
+ possiblyLeaked[n] = base.Debug.LoopVar >= 11
+ }
+ }
+
+ // For reporting, keep track of the last position within any loop.
+ // Loops nest, also need to be sensitive to inlining.
+ var lastPos src.XPos
+
+ updateLastPos := func(p src.XPos) {
+ pl, ll := p.Line(), lastPos.Line()
+ if p.SameFile(lastPos) &&
+ (pl > ll || pl == ll && p.Col() > lastPos.Col()) {
+ lastPos = p
+ }
+ }
+
+ // maybeReplaceVar unshares an iteration variable for a range loop,
+ // if that variable was actually (syntactically) leaked,
+ // subject to hash-variable debugging.
+ maybeReplaceVar := func(k ir.Node, x *ir.RangeStmt) ir.Node {
+ if n, ok := k.(*ir.Name); ok && possiblyLeaked[n] {
+ desc := func() string {
+ return describe(n)
+ }
+ if base.LoopVarHash.MatchPos(n.Pos(), desc) {
+ // Rename the loop key, prefix body with assignment from loop key
+ transformed = append(transformed, VarAndLoop{n, x, lastPos})
+ tk := typecheck.TempAt(base.Pos, fn, n.Type())
+ tk.SetTypecheck(1)
+ as := ir.NewAssignStmt(x.Pos(), n, tk)
+ as.Def = true
+ as.SetTypecheck(1)
+ x.Body.Prepend(as)
+ dclFixups[n] = as
+ return tk
+ }
+ }
+ return k
+ }
+
+ // scanChildrenThenTransform processes node x to:
+ // 1. if x is a for/range w/ DistinctVars, note declared iteration variables possiblyLeaked (PL)
+ // 2. search all of x's children for syntactically escaping references to v in PL,
+ // meaning either address-of-v or v-captured-by-a-closure
+ // 3. for all v in PL that had a syntactically escaping reference, transform the declaration
+ // and (in case of 3-clause loop) the loop to the unshared loop semantics.
+ // This is all much simpler for range loops; 3-clause loops can have an arbitrary number
+ // of iteration variables and the transformation is more involved, range loops have at most 2.
+ var scanChildrenThenTransform func(x ir.Node) bool
+ scanChildrenThenTransform = func(n ir.Node) bool {
+
+ if loopDepth > 0 {
+ updateLastPos(n.Pos())
+ }
+
+ switch x := n.(type) {
+ case *ir.ClosureExpr:
+ if returnInLoopDepth >= loopDepth {
+ // This expression is a child of a return, which escapes all loops above
+ // the return, but not those between this expression and the return.
+ break
+ }
+ for _, cv := range x.Func.ClosureVars {
+ v := cv.Canonical()
+ if _, ok := possiblyLeaked[v]; ok {
+ possiblyLeaked[v] = true
+ }
+ }
+
+ case *ir.AddrExpr:
+ if returnInLoopDepth >= loopDepth {
+ // This expression is a child of a return, which escapes all loops above
+ // the return, but not those between this expression and the return.
+ break
+ }
+ // Explicitly note address-taken so that return-statements can be excluded
+ y := ir.OuterValue(x.X)
+ if y.Op() != ir.ONAME {
+ break
+ }
+ z, ok := y.(*ir.Name)
+ if !ok {
+ break
+ }
+ switch z.Class {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT, ir.PAUTOHEAP:
+ if _, ok := possiblyLeaked[z]; ok {
+ possiblyLeaked[z] = true
+ }
+ }
+
+ case *ir.ReturnStmt:
+ savedRILD := returnInLoopDepth
+ returnInLoopDepth = loopDepth
+ defer func() { returnInLoopDepth = savedRILD }()
+
+ case *ir.RangeStmt:
+ if !(x.Def && x.DistinctVars) {
+ // range loop must define its iteration variables AND have distinctVars.
+ x.DistinctVars = false
+ break
+ }
+ noteMayLeak(x.Key)
+ noteMayLeak(x.Value)
+ loopDepth++
+ savedLastPos := lastPos
+ lastPos = x.Pos() // this sets the file.
+ ir.DoChildren(n, scanChildrenThenTransform)
+ loopDepth--
+ x.Key = maybeReplaceVar(x.Key, x)
+ x.Value = maybeReplaceVar(x.Value, x)
+ thisLastPos := lastPos
+ lastPos = savedLastPos
+ updateLastPos(thisLastPos) // this will propagate lastPos if in the same file.
+ x.DistinctVars = false
+ return false
+
+ case *ir.ForStmt:
+ if !x.DistinctVars {
+ break
+ }
+ forAllDefInInit(x, noteMayLeak)
+ loopDepth++
+ savedLastPos := lastPos
+ lastPos = x.Pos() // this sets the file.
+ ir.DoChildren(n, scanChildrenThenTransform)
+ loopDepth--
+ var leaked []*ir.Name
+ // Collect the leaking variables for the much-more-complex transformation.
+ forAllDefInInit(x, func(z ir.Node) {
+ if n, ok := z.(*ir.Name); ok && possiblyLeaked[n] {
+ desc := func() string {
+ return describe(n)
+ }
+ // Hash on n.Pos() for most precise failure location.
+ if base.LoopVarHash.MatchPos(n.Pos(), desc) {
+ leaked = append(leaked, n)
+ }
+ }
+ })
+
+ if len(leaked) > 0 {
+ // need to transform the for loop just so.
+
+ /* Contrived example, w/ numbered comments from the transformation:
+ BEFORE:
+ var escape []*int
+ for z := 0; z < n; z++ {
+ if reason() {
+ escape = append(escape, &z)
+ continue
+ }
+ z = z + z
+ stuff
+ }
+ AFTER:
+ for z', tmp_first := 0, true; ; { // (4)
+ // (5) body' follows:
+ z := z' // (1)
+ if tmp_first {tmp_first = false} else {z++} // (6)
+ if ! (z < n) { break } // (7)
+ // (3, 8) body_continue
+ if reason() {
+ escape = append(escape, &z)
+ goto next // rewritten continue
+ }
+ z = z + z
+ stuff
+ next: // (9)
+ z' = z // (2)
+ }
+
+ In the case that the loop contains no increment (z++),
+ there is no need for step 6,
+ and thus no need to test, update, or declare tmp_first (part of step 4).
+ Similarly if the loop contains no exit test (z < n),
+ then there is no need for step 7.
+ */
+
+ // Expressed in terms of the input ForStmt
+ //
+ // type ForStmt struct {
+ // init Nodes
+ // Label *types.Sym
+ // Cond Node // empty if OFORUNTIL
+ // Post Node
+ // Body Nodes
+ // HasBreak bool
+ // }
+
+ // OFOR: init; loop: if !Cond {break}; Body; Post; goto loop
+
+ // (1) prebody = {z := z' for z in leaked}
+ // (2) postbody = {z' = z for z in leaked}
+ // (3) body_continue = {body : s/continue/goto next}
+ // (4) init' = (init : s/z/z' for z in leaked) + tmp_first := true
+ // (5) body' = prebody + // appears out of order below
+ // (6) if tmp_first {tmp_first = false} else {Post} +
+ // (7) if !cond {break} +
+ // (8) body_continue (3) +
+ // (9) next: postbody (2)
+ // (10) cond' = {}
+ // (11) post' = {}
+
+ // minor optimizations:
+ // if Post is empty, tmp_first and step 6 can be skipped.
+ // if Cond is empty, that code can also be skipped.
+
+ var preBody, postBody ir.Nodes
+
+ // Given original iteration variable z, what is the corresponding z'
+ // that carries the value from iteration to iteration?
+ zPrimeForZ := make(map[*ir.Name]*ir.Name)
+
+ // (1,2) initialize preBody and postBody
+ for _, z := range leaked {
+ transformed = append(transformed, VarAndLoop{z, x, lastPos})
+
+ tz := typecheck.TempAt(base.Pos, fn, z.Type())
+ tz.SetTypecheck(1)
+ zPrimeForZ[z] = tz
+
+ as := ir.NewAssignStmt(x.Pos(), z, tz)
+ as.Def = true
+ as.SetTypecheck(1)
+ preBody.Append(as)
+ dclFixups[z] = as
+
+ as = ir.NewAssignStmt(x.Pos(), tz, z)
+ as.SetTypecheck(1)
+ postBody.Append(as)
+
+ }
+
+ // (3) rewrite continues in body -- rewrite is inplace, so works for top level visit, too.
+ label := typecheck.Lookup(fmt.Sprintf(".3clNext_%d", seq))
+ seq++
+ labelStmt := ir.NewLabelStmt(x.Pos(), label)
+ labelStmt.SetTypecheck(1)
+
+ loopLabel := x.Label
+ loopDepth := 0
+ var editContinues func(x ir.Node) bool
+ editContinues = func(x ir.Node) bool {
+
+ switch c := x.(type) {
+ case *ir.BranchStmt:
+ // If this is a continue targeting the loop currently being rewritten, transform it to an appropriate GOTO
+ if c.Op() == ir.OCONTINUE && (loopDepth == 0 && c.Label == nil || loopLabel != nil && c.Label == loopLabel) {
+ c.Label = label
+ c.SetOp(ir.OGOTO)
+ }
+ case *ir.RangeStmt, *ir.ForStmt:
+ loopDepth++
+ ir.DoChildren(x, editContinues)
+ loopDepth--
+ return false
+ }
+ ir.DoChildren(x, editContinues)
+ return false
+ }
+ for _, y := range x.Body {
+ editContinues(y)
+ }
+ bodyContinue := x.Body
+
+ // (4) rewrite init
+ forAllDefInInitUpdate(x, func(z ir.Node, pz *ir.Node) {
+ // note tempFor[n] can be nil if hash searching.
+ if n, ok := z.(*ir.Name); ok && possiblyLeaked[n] && zPrimeForZ[n] != nil {
+ *pz = zPrimeForZ[n]
+ }
+ })
+
+ postNotNil := x.Post != nil
+ var tmpFirstDcl ir.Node
+ if postNotNil {
+ // body' = prebody +
+ // (6) if tmp_first {tmp_first = false} else {Post} +
+ // if !cond {break} + ...
+ tmpFirst := typecheck.TempAt(base.Pos, fn, types.Types[types.TBOOL])
+ tmpFirstDcl = typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, true)))
+ tmpFirstSetFalse := typecheck.Stmt(ir.NewAssignStmt(x.Pos(), tmpFirst, ir.NewBool(base.Pos, false)))
+ ifTmpFirst := ir.NewIfStmt(x.Pos(), tmpFirst, ir.Nodes{tmpFirstSetFalse}, ir.Nodes{x.Post})
+ ifTmpFirst.PtrInit().Append(typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, tmpFirst))) // declares tmpFirst
+ preBody.Append(typecheck.Stmt(ifTmpFirst))
+ }
+
+ // body' = prebody +
+ // if tmp_first {tmp_first = false} else {Post} +
+ // (7) if !cond {break} + ...
+ if x.Cond != nil {
+ notCond := ir.NewUnaryExpr(x.Cond.Pos(), ir.ONOT, x.Cond)
+ notCond.SetType(x.Cond.Type())
+ notCond.SetTypecheck(1)
+ newBreak := ir.NewBranchStmt(x.Pos(), ir.OBREAK, nil)
+ newBreak.SetTypecheck(1)
+ ifNotCond := ir.NewIfStmt(x.Pos(), notCond, ir.Nodes{newBreak}, nil)
+ ifNotCond.SetTypecheck(1)
+ preBody.Append(ifNotCond)
+ }
+
+ if postNotNil {
+ x.PtrInit().Append(tmpFirstDcl)
+ }
+
+ // (8)
+ preBody.Append(bodyContinue...)
+ // (9)
+ preBody.Append(labelStmt)
+ preBody.Append(postBody...)
+
+ // (5) body' = prebody + ...
+ x.Body = preBody
+
+ // (10) cond' = {}
+ x.Cond = nil
+
+ // (11) post' = {}
+ x.Post = nil
+ }
+ thisLastPos := lastPos
+ lastPos = savedLastPos
+ updateLastPos(thisLastPos) // this will propagate lastPos if in the same file.
+ x.DistinctVars = false
+
+ return false
+ }
+
+ ir.DoChildren(n, scanChildrenThenTransform)
+
+ return false
+ }
+ scanChildrenThenTransform(fn)
+ if len(transformed) > 0 {
+ // editNodes scans a slice C of ir.Node, looking for declarations that
+ // appear in dclFixups. Any declaration D whose "fixup" is an assignmnt
+ // statement A is removed from the C and relocated to the Init
+ // of A. editNodes returns the modified slice of ir.Node.
+ editNodes := func(c ir.Nodes) ir.Nodes {
+ j := 0
+ for _, n := range c {
+ if d, ok := n.(*ir.Decl); ok {
+ if s := dclFixups[d.X]; s != nil {
+ switch a := s.(type) {
+ case *ir.AssignStmt:
+ a.PtrInit().Prepend(d)
+ delete(dclFixups, d.X) // can't be sure of visit order, wouldn't want to visit twice.
+ default:
+ base.Fatalf("not implemented yet for node type %v", s.Op())
+ }
+ continue // do not copy this node, and do not increment j
+ }
+ }
+ c[j] = n
+ j++
+ }
+ for k := j; k < len(c); k++ {
+ c[k] = nil
+ }
+ return c[:j]
+ }
+ // fixup all tagged declarations in all the statements lists in fn.
+ rewriteNodes(fn, editNodes)
+ }
+ }
+ ir.WithFunc(fn, forCapture)
+ return transformed
+}
+
+// forAllDefInInitUpdate applies "do" to all the defining assignments in the Init clause of a ForStmt.
+// This abstracts away some of the boilerplate from the already complex and verbose for-3-clause case.
+func forAllDefInInitUpdate(x *ir.ForStmt, do func(z ir.Node, update *ir.Node)) {
+ for _, s := range x.Init() {
+ switch y := s.(type) {
+ case *ir.AssignListStmt:
+ if !y.Def {
+ continue
+ }
+ for i, z := range y.Lhs {
+ do(z, &y.Lhs[i])
+ }
+ case *ir.AssignStmt:
+ if !y.Def {
+ continue
+ }
+ do(y.X, &y.X)
+ }
+ }
+}
+
+// forAllDefInInit is forAllDefInInitUpdate without the update option.
+func forAllDefInInit(x *ir.ForStmt, do func(z ir.Node)) {
+ forAllDefInInitUpdate(x, func(z ir.Node, _ *ir.Node) { do(z) })
+}
+
+// rewriteNodes applies editNodes to all statement lists in fn.
+func rewriteNodes(fn *ir.Func, editNodes func(c ir.Nodes) ir.Nodes) {
+ var forNodes func(x ir.Node) bool
+ forNodes = func(n ir.Node) bool {
+ if stmt, ok := n.(ir.InitNode); ok {
+ // process init list
+ stmt.SetInit(editNodes(stmt.Init()))
+ }
+ switch x := n.(type) {
+ case *ir.Func:
+ x.Body = editNodes(x.Body)
+ case *ir.InlinedCallExpr:
+ x.Body = editNodes(x.Body)
+
+ case *ir.CaseClause:
+ x.Body = editNodes(x.Body)
+ case *ir.CommClause:
+ x.Body = editNodes(x.Body)
+
+ case *ir.BlockStmt:
+ x.List = editNodes(x.List)
+
+ case *ir.ForStmt:
+ x.Body = editNodes(x.Body)
+ case *ir.RangeStmt:
+ x.Body = editNodes(x.Body)
+ case *ir.IfStmt:
+ x.Body = editNodes(x.Body)
+ x.Else = editNodes(x.Else)
+ case *ir.SelectStmt:
+ x.Compiled = editNodes(x.Compiled)
+ case *ir.SwitchStmt:
+ x.Compiled = editNodes(x.Compiled)
+ }
+ ir.DoChildren(n, forNodes)
+ return false
+ }
+ forNodes(fn)
+}
+
+func LogTransformations(transformed []VarAndLoop) {
+ print := 2 <= base.Debug.LoopVar && base.Debug.LoopVar != 11
+
+ if print || logopt.Enabled() { // 11 is do them all, quietly, 12 includes debugging.
+ fileToPosBase := make(map[string]*src.PosBase) // used to remove inline context for innermost reporting.
+
+ // trueInlinedPos rebases inner w/o inline context so that it prints correctly in WarnfAt; otherwise it prints as outer.
+ trueInlinedPos := func(inner src.Pos) src.XPos {
+ afn := inner.AbsFilename()
+ pb, ok := fileToPosBase[afn]
+ if !ok {
+ pb = src.NewFileBase(inner.Filename(), afn)
+ fileToPosBase[afn] = pb
+ }
+ inner.SetBase(pb)
+ return base.Ctxt.PosTable.XPos(inner)
+ }
+
+ type unit struct{}
+ loopsSeen := make(map[ir.Node]unit)
+ type loopPos struct {
+ loop ir.Node
+ last src.XPos
+ curfn *ir.Func
+ }
+ var loops []loopPos
+ for _, lv := range transformed {
+ n := lv.Name
+ if _, ok := loopsSeen[lv.Loop]; !ok {
+ l := lv.Loop
+ loopsSeen[l] = unit{}
+ loops = append(loops, loopPos{l, lv.LastPos, n.Curfn})
+ }
+ pos := n.Pos()
+
+ inner := base.Ctxt.InnermostPos(pos)
+ outer := base.Ctxt.OutermostPos(pos)
+
+ if logopt.Enabled() {
+ // For automated checking of coverage of this transformation, include this in the JSON information.
+ var nString interface{} = n
+ if inner != outer {
+ nString = fmt.Sprintf("%v (from inline)", n)
+ }
+ if n.Esc() == ir.EscHeap {
+ logopt.LogOpt(pos, "iteration-variable-to-heap", "loopvar", ir.FuncName(n.Curfn), nString)
+ } else {
+ logopt.LogOpt(pos, "iteration-variable-to-stack", "loopvar", ir.FuncName(n.Curfn), nString)
+ }
+ }
+ if print {
+ if inner == outer {
+ if n.Esc() == ir.EscHeap {
+ base.WarnfAt(pos, "loop variable %v now per-iteration, heap-allocated", n)
+ } else {
+ base.WarnfAt(pos, "loop variable %v now per-iteration, stack-allocated", n)
+ }
+ } else {
+ innerXPos := trueInlinedPos(inner)
+ if n.Esc() == ir.EscHeap {
+ base.WarnfAt(innerXPos, "loop variable %v now per-iteration, heap-allocated (loop inlined into %s:%d)", n, outer.Filename(), outer.Line())
+ } else {
+ base.WarnfAt(innerXPos, "loop variable %v now per-iteration, stack-allocated (loop inlined into %s:%d)", n, outer.Filename(), outer.Line())
+ }
+ }
+ }
+ }
+ for _, l := range loops {
+ pos := l.loop.Pos()
+ last := l.last
+ loopKind := "range"
+ if _, ok := l.loop.(*ir.ForStmt); ok {
+ loopKind = "for"
+ }
+ if logopt.Enabled() {
+ // Intended to help with performance debugging, we record whole loop ranges
+ logopt.LogOptRange(pos, last, "loop-modified-"+loopKind, "loopvar", ir.FuncName(l.curfn))
+ }
+ if print && 4 <= base.Debug.LoopVar {
+ // TODO decide if we want to keep this, or not. It was helpful for validating logopt, otherwise, eh.
+ inner := base.Ctxt.InnermostPos(pos)
+ outer := base.Ctxt.OutermostPos(pos)
+
+ if inner == outer {
+ base.WarnfAt(pos, "%s loop ending at %d:%d was modified", loopKind, last.Line(), last.Col())
+ } else {
+ pos = trueInlinedPos(inner)
+ last = trueInlinedPos(base.Ctxt.InnermostPos(last))
+ base.WarnfAt(pos, "%s loop ending at %d:%d was modified (loop inlined into %s:%d)", loopKind, last.Line(), last.Col(), outer.Filename(), outer.Line())
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/loopvar_test.go b/src/cmd/compile/internal/loopvar/loopvar_test.go
new file mode 100644
index 0000000..64cfdb7
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/loopvar_test.go
@@ -0,0 +1,383 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package loopvar_test
+
+import (
+ "internal/testenv"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type testcase struct {
+ lvFlag string // ==-2, -1, 0, 1, 2
+ buildExpect string // message, if any
+ expectRC int
+ files []string
+}
+
+var for_files = []string{
+ "for_esc_address.go", // address of variable
+ "for_esc_closure.go", // closure of variable
+ "for_esc_minimal_closure.go", // simple closure of variable
+ "for_esc_method.go", // method value of variable
+ "for_complicated_esc_address.go", // modifies loop index in body
+}
+
+var range_files = []string{
+ "range_esc_address.go", // address of variable
+ "range_esc_closure.go", // closure of variable
+ "range_esc_minimal_closure.go", // simple closure of variable
+ "range_esc_method.go", // method value of variable
+}
+
+var cases = []testcase{
+ {"-1", "", 11, for_files[:1]},
+ {"0", "", 0, for_files[:1]},
+ {"1", "", 0, for_files[:1]},
+ {"2", "loop variable i now per-iteration,", 0, for_files},
+
+ {"-1", "", 11, range_files[:1]},
+ {"0", "", 0, range_files[:1]},
+ {"1", "", 0, range_files[:1]},
+ {"2", "loop variable i now per-iteration,", 0, range_files},
+
+ {"1", "", 0, []string{"for_nested.go"}},
+}
+
+// TestLoopVar checks that the GOEXPERIMENT and debug flags behave as expected.
+func TestLoopVarGo1_21(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+ tmpdir := t.TempDir()
+ output := filepath.Join(tmpdir, "foo.exe")
+
+ for i, tc := range cases {
+ for _, f := range tc.files {
+ source := f
+ cmd := testenv.Command(t, gocmd, "build", "-o", output, "-gcflags=-lang=go1.21 -d=loopvar="+tc.lvFlag, source)
+ cmd.Env = append(cmd.Env, "GOEXPERIMENT=loopvar", "HOME="+tmpdir)
+ cmd.Dir = "testdata"
+ t.Logf("File %s loopvar=%s expect '%s' exit code %d", f, tc.lvFlag, tc.buildExpect, tc.expectRC)
+ b, e := cmd.CombinedOutput()
+ if e != nil {
+ t.Error(e)
+ }
+ if tc.buildExpect != "" {
+ s := string(b)
+ if !strings.Contains(s, tc.buildExpect) {
+ t.Errorf("File %s test %d expected to match '%s' with \n-----\n%s\n-----", f, i, tc.buildExpect, s)
+ }
+ }
+ // run what we just built.
+ cmd = testenv.Command(t, output)
+ b, e = cmd.CombinedOutput()
+ if tc.expectRC != 0 {
+ if e == nil {
+ t.Errorf("Missing expected error, file %s, case %d", f, i)
+ } else if ee, ok := (e).(*exec.ExitError); !ok || ee.ExitCode() != tc.expectRC {
+ t.Error(e)
+ } else {
+ // okay
+ }
+ } else if e != nil {
+ t.Error(e)
+ }
+ }
+ }
+}
+
+func TestLoopVarInlinesGo1_21(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+ tmpdir := t.TempDir()
+
+ root := "cmd/compile/internal/loopvar/testdata/inlines"
+
+ f := func(pkg string) string {
+ // This disables the loopvar change, except for the specified package.
+ // The effect should follow the package, even though everything (except "c")
+ // is inlined.
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags="+root+"/...=-lang=go1.21", "-gcflags="+pkg+"=-d=loopvar=1", root)
+ cmd.Env = append(cmd.Env, "GOEXPERIMENT=noloopvar", "HOME="+tmpdir)
+ cmd.Dir = filepath.Join("testdata", "inlines")
+
+ b, e := cmd.CombinedOutput()
+ if e != nil {
+ t.Error(e)
+ }
+ return string(b)
+ }
+
+ a := f(root + "/a")
+ b := f(root + "/b")
+ c := f(root + "/c")
+ m := f(root)
+
+ t.Logf(a)
+ t.Logf(b)
+ t.Logf(c)
+ t.Logf(m)
+
+ if !strings.Contains(a, "f, af, bf, abf, cf sums = 100, 45, 100, 100, 100") {
+ t.Errorf("Did not see expected value of a")
+ }
+ if !strings.Contains(b, "f, af, bf, abf, cf sums = 100, 100, 45, 45, 100") {
+ t.Errorf("Did not see expected value of b")
+ }
+ if !strings.Contains(c, "f, af, bf, abf, cf sums = 100, 100, 100, 100, 45") {
+ t.Errorf("Did not see expected value of c")
+ }
+ if !strings.Contains(m, "f, af, bf, abf, cf sums = 45, 100, 100, 100, 100") {
+ t.Errorf("Did not see expected value of m")
+ }
+}
+
+func countMatches(s, re string) int {
+ slice := regexp.MustCompile(re).FindAllString(s, -1)
+ return len(slice)
+}
+
+func TestLoopVarHashes(t *testing.T) {
+ // This behavior does not depend on Go version (1.21 or greater)
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+ tmpdir := t.TempDir()
+
+ root := "cmd/compile/internal/loopvar/testdata/inlines"
+
+ f := func(hash string) string {
+ // This disables the loopvar change, except for the specified hash pattern.
+ // -trimpath is necessary so we get the same answer no matter where the
+ // Go repository is checked out. This is not normally a concern since people
+ // do not normally rely on the meaning of specific hashes.
+ cmd := testenv.Command(t, gocmd, "run", "-trimpath", root)
+ cmd.Env = append(cmd.Env, "GOCOMPILEDEBUG=loopvarhash="+hash, "HOME="+tmpdir)
+ cmd.Dir = filepath.Join("testdata", "inlines")
+
+ b, _ := cmd.CombinedOutput()
+ // Ignore the error, sometimes it's supposed to fail, the output test will catch it.
+ return string(b)
+ }
+
+ for _, arg := range []string{"v001100110110110010100100", "vx336ca4"} {
+ m := f(arg)
+ t.Logf(m)
+
+ mCount := countMatches(m, "loopvarhash triggered cmd/compile/internal/loopvar/testdata/inlines/main.go:27:6: .* 001100110110110010100100")
+ otherCount := strings.Count(m, "loopvarhash")
+ if mCount < 1 {
+ t.Errorf("%s: did not see triggered main.go:27:6", arg)
+ }
+ if mCount != otherCount {
+ t.Errorf("%s: too many matches", arg)
+ }
+ mCount = countMatches(m, "cmd/compile/internal/loopvar/testdata/inlines/main.go:27:6: .* \\[bisect-match 0x7802e115b9336ca4\\]")
+ otherCount = strings.Count(m, "[bisect-match ")
+ if mCount < 1 {
+ t.Errorf("%s: did not see bisect-match for main.go:27:6", arg)
+ }
+ if mCount != otherCount {
+ t.Errorf("%s: too many matches", arg)
+ }
+
+ // This next test carefully dodges a bug-to-be-fixed with inlined locations for ir.Names.
+ if !strings.Contains(m, ", 100, 100, 100, 100") {
+ t.Errorf("%s: did not see expected value of m run", arg)
+ }
+ }
+}
+
+// TestLoopVarVersionEnableFlag checks for loopvar transformation enabled by command line flag (1.22).
+func TestLoopVarVersionEnableFlag(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m)
+
+ yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 1 {
+ t.Errorf("yCount=%d != 1", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err != nil {
+ t.Errorf("err=%v != nil", err)
+ }
+}
+
+// TestLoopVarVersionEnableGoBuild checks for loopvar transformation enabled by go:build version (1.22).
+func TestLoopVarVersionEnableGoBuild(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt-122.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m)
+
+ yCount := strings.Count(m, "opt-122.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-122.go:31)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 1 {
+ t.Errorf("yCount=%d != 1", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err != nil {
+ t.Errorf("err=%v != nil", err)
+ }
+}
+
+// TestLoopVarVersionDisableFlag checks for loopvar transformation DISABLED by command line version (1.21).
+func TestLoopVarVersionDisableFlag(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.21 -d=loopvar=3", "opt.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m) // expect error
+
+ yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 0 {
+ t.Errorf("yCount=%d != 0", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err == nil { // expect error
+ t.Errorf("err=%v == nil", err)
+ }
+}
+
+// TestLoopVarVersionDisableGoBuild checks for loopvar transformation DISABLED by go:build version (1.21).
+func TestLoopVarVersionDisableGoBuild(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+
+ // loopvar=3 logs info but does not change loopvarness
+ cmd := testenv.Command(t, gocmd, "run", "-gcflags=-lang=go1.22 -d=loopvar=3", "opt-121.go")
+ cmd.Dir = filepath.Join("testdata")
+
+ b, err := cmd.CombinedOutput()
+ m := string(b)
+
+ t.Logf(m) // expect error
+
+ yCount := strings.Count(m, "opt-121.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-121.go:31)")
+ nCount := strings.Count(m, "shared")
+
+ if yCount != 0 {
+ t.Errorf("yCount=%d != 0", yCount)
+ }
+ if nCount > 0 {
+ t.Errorf("nCount=%d > 0", nCount)
+ }
+ if err == nil { // expect error
+ t.Errorf("err=%v == nil", err)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go b/src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go
new file mode 100644
index 0000000..c658340
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go
@@ -0,0 +1,115 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func main() {
+ ss, sa := shared(23)
+ ps, pa := private(23)
+ es, ea := experiment(23)
+
+ fmt.Printf("shared s, a; private, s, a; experiment s, a = %d, %d; %d, %d; %d, %d\n", ss, sa, ps, pa, es, ea)
+
+ if ss != ps || ss != es || ea != pa || sa == pa {
+ os.Exit(11)
+ } else {
+ fmt.Println("PASS")
+ }
+}
+
+func experiment(x int) (int, int) {
+ sum := 0
+ var is []*int
+ for i := x; i != 1; i = i / 2 {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ i = i*3 + 1
+ if i&1 == 0 {
+ is = append(is, &i)
+ for i&2 == 0 {
+ i = i >> 1
+ }
+ } else {
+ i = i + i
+ }
+ }
+
+ asum := 0
+ for _, pi := range is {
+ asum += *pi
+ }
+
+ return sum, asum
+}
+
+func private(x int) (int, int) {
+ sum := 0
+ var is []*int
+ I := x
+ for ; I != 1; I = I / 2 {
+ i := I
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ I = i
+ continue
+ }
+ sum++
+ }
+ i = i*3 + 1
+ if i&1 == 0 {
+ is = append(is, &i)
+ for i&2 == 0 {
+ i = i >> 1
+ }
+ } else {
+ i = i + i
+ }
+ I = i
+ }
+
+ asum := 0
+ for _, pi := range is {
+ asum += *pi
+ }
+
+ return sum, asum
+}
+
+func shared(x int) (int, int) {
+ sum := 0
+ var is []*int
+ i := x
+ for ; i != 1; i = i / 2 {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ i = i*3 + 1
+ if i&1 == 0 {
+ is = append(is, &i)
+ for i&2 == 0 {
+ i = i >> 1
+ }
+ } else {
+ i = i + i
+ }
+ }
+
+ asum := 0
+ for _, pi := range is {
+ asum += *pi
+ }
+ return sum, asum
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/for_esc_address.go b/src/cmd/compile/internal/loopvar/testdata/for_esc_address.go
new file mode 100644
index 0000000..beaefb1
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/for_esc_address.go
@@ -0,0 +1,45 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func main() {
+ sum := 0
+ var is []*int
+ for i := 0; i < 10; i++ {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, &i)
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, pi := range is {
+ sum += *pi
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go b/src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go
new file mode 100644
index 0000000..b60d000
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go
@@ -0,0 +1,51 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func main() {
+ sum := 0
+ for i := 0; i < 10; i++ {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, func() int {
+ if i%17 == 15 {
+ i++
+ }
+ return i
+ })
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, f := range is {
+ sum += f()
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/for_esc_method.go b/src/cmd/compile/internal/loopvar/testdata/for_esc_method.go
new file mode 100644
index 0000000..0e2f801
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/for_esc_method.go
@@ -0,0 +1,51 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type I int
+
+func (x *I) method() int {
+ return int(*x)
+}
+
+func main() {
+ sum := 0
+ var is []func() int
+ for i := I(0); int(i) < 10; i++ {
+ for j := 0; j < 10; j++ {
+ if int(i) == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, i.method)
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, m := range is {
+ sum += m()
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go b/src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go
new file mode 100644
index 0000000..971c91d
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go
@@ -0,0 +1,48 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func main() {
+ sum := 0
+ for i := 0; i < 10; i++ {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, func() int {
+ return i
+ })
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, f := range is {
+ sum += f()
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/for_nested.go b/src/cmd/compile/internal/loopvar/testdata/for_nested.go
new file mode 100644
index 0000000..4888fab
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/for_nested.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func main() {
+ x := f(60)
+ fmt.Println(x)
+ if x != 54 {
+ os.Exit(11)
+ }
+}
+
+var escape *int
+
+func f(i int) int {
+ a := 0
+outer:
+ for {
+ switch {
+ case i > 55:
+ i--
+ continue
+ case i == 55:
+ for j := i; j != 1; j = j / 2 {
+ a++
+ if j == 4 {
+ escape = &j
+ i--
+ continue outer
+ }
+ if j&1 == 1 {
+ j = 2 * (3*j + 1)
+ }
+ }
+ return a
+ case i < 55:
+ return i
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go b/src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go
new file mode 100644
index 0000000..0bae36d
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go
@@ -0,0 +1,20 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import "cmd/compile/internal/loopvar/testdata/inlines/b"
+
+func F() []*int {
+ var s []*int
+ for i := 0; i < 10; i++ {
+ s = append(s, &i)
+ }
+ return s
+}
+
+func Fb() []*int {
+ bf, _ := b.F()
+ return bf
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go b/src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go
new file mode 100644
index 0000000..7b1d8ce
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package b
+
+var slice = []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024}
+
+func F() ([]*int, []*int) {
+ return g()
+}
+
+func g() ([]*int, []*int) {
+ var s []*int
+ var t []*int
+ for i, j := range slice {
+ s = append(s, &i)
+ t = append(t, &j)
+ }
+ return s[:len(s)-1], t
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go b/src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go
new file mode 100644
index 0000000..0405ace
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go
@@ -0,0 +1,14 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package c
+
+//go:noinline
+func F() []*int {
+ var s []*int
+ for i := 0; i < 10; i++ {
+ s = append(s, &i)
+ }
+ return s
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/inlines/main.go b/src/cmd/compile/internal/loopvar/testdata/inlines/main.go
new file mode 100644
index 0000000..46fcee1
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/inlines/main.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/compile/internal/loopvar/testdata/inlines/a"
+ "cmd/compile/internal/loopvar/testdata/inlines/b"
+ "cmd/compile/internal/loopvar/testdata/inlines/c"
+ "fmt"
+ "os"
+)
+
+func sum(s []*int) int {
+ sum := 0
+ for _, pi := range s {
+ sum += *pi
+ }
+ return sum
+}
+
+var t []*int
+
+func F() []*int {
+ var s []*int
+ for i, j := 0, 0; j < 10; i, j = i+1, j+1 {
+ s = append(s, &i)
+ t = append(s, &j)
+ }
+ return s
+}
+
+func main() {
+ f := F()
+ af := a.F()
+ bf, _ := b.F()
+ abf := a.Fb()
+ cf := c.F()
+
+ sf, saf, sbf, sabf, scf := sum(f), sum(af), sum(bf), sum(abf), sum(cf)
+
+ fmt.Printf("f, af, bf, abf, cf sums = %d, %d, %d, %d, %d\n", sf, saf, sbf, sabf, scf)
+
+ // Special failure just for use with hash searching, to prove it fires exactly once.
+ // To test: `gossahash -e loopvarhash go run .` in this directory.
+ // This is designed to fail in two different ways, because gossahash searches randomly
+ // it will find both failures over time.
+ if os.Getenv("GOCOMPILEDEBUG") != "" && (sabf == 45 || sf == 45) {
+ os.Exit(11)
+ }
+ os.Exit(0)
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-121.go b/src/cmd/compile/internal/loopvar/testdata/opt-121.go
new file mode 100644
index 0000000..4afb658
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt-121.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+ var a []*int
+ for private := j; private < k; private++ {
+ a = append(a, &private)
+ }
+ return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+ for shared := j; shared < k; shared++ {
+ if shared == k/2 {
+ // want the call inlined, want "private" in that inline to be transformed,
+ // (believe it ends up on init node of the return).
+ // but do not want "shared" transformed,
+ return inline(j, k), &shared
+ }
+ }
+ return nil, &j
+}
+
+func main() {
+ a, p := notinline(2, 9)
+ fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+ if *a[0] != 2 {
+ os.Exit(1)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-122.go b/src/cmd/compile/internal/loopvar/testdata/opt-122.go
new file mode 100644
index 0000000..9dceab9
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt-122.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+ var a []*int
+ for private := j; private < k; private++ {
+ a = append(a, &private)
+ }
+ return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+ for shared := j; shared < k; shared++ {
+ if shared == k/2 {
+ // want the call inlined, want "private" in that inline to be transformed,
+ // (believe it ends up on init node of the return).
+ // but do not want "shared" transformed,
+ return inline(j, k), &shared
+ }
+ }
+ return nil, &j
+}
+
+func main() {
+ a, p := notinline(2, 9)
+ fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+ if *a[0] != 2 {
+ os.Exit(1)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/opt.go b/src/cmd/compile/internal/loopvar/testdata/opt.go
new file mode 100644
index 0000000..82c8616
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/opt.go
@@ -0,0 +1,41 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+func inline(j, k int) []*int {
+ var a []*int
+ for private := j; private < k; private++ {
+ a = append(a, &private)
+ }
+ return a
+}
+
+//go:noinline
+func notinline(j, k int) ([]*int, *int) {
+ for shared := j; shared < k; shared++ {
+ if shared == k/2 {
+ // want the call inlined, want "private" in that inline to be transformed,
+ // (believe it ends up on init node of the return).
+ // but do not want "shared" transformed,
+ return inline(j, k), &shared
+ }
+ }
+ return nil, &j
+}
+
+func main() {
+ a, p := notinline(2, 9)
+ fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p)
+ if *a[0] != 2 {
+ os.Exit(1)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/range_esc_address.go b/src/cmd/compile/internal/loopvar/testdata/range_esc_address.go
new file mode 100644
index 0000000..79d7f04
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/range_esc_address.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var ints = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+
+func main() {
+ sum := 0
+ var is []*int
+ for _, i := range ints {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, &i)
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, pi := range is {
+ sum += *pi
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go b/src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go
new file mode 100644
index 0000000..9bcb5ef
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+var ints = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+
+func main() {
+ sum := 0
+ for _, i := range ints {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, func() int {
+ if i%17 == 15 {
+ i++
+ }
+ return i
+ })
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, f := range is {
+ sum += f()
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/range_esc_method.go b/src/cmd/compile/internal/loopvar/testdata/range_esc_method.go
new file mode 100644
index 0000000..9a85ab0
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/range_esc_method.go
@@ -0,0 +1,53 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type I int
+
+func (x *I) method() int {
+ return int(*x)
+}
+
+var ints = []I{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+
+func main() {
+ sum := 0
+ var is []func() int
+ for _, i := range ints {
+ for j := 0; j < 10; j++ {
+ if int(i) == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, i.method)
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, m := range is {
+ sum += m()
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go b/src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go
new file mode 100644
index 0000000..8804d8b
--- /dev/null
+++ b/src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go
@@ -0,0 +1,50 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+var is []func() int
+
+var ints = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+
+func main() {
+ sum := 0
+ for _, i := range ints {
+ for j := 0; j < 10; j++ {
+ if i == j { // 10 skips
+ continue
+ }
+ sum++
+ }
+ if i&1 == 0 {
+ is = append(is, func() int {
+ return i
+ })
+ }
+ }
+
+ bug := false
+ if sum != 100-10 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum)
+ bug = true
+ }
+ sum = 0
+ for _, f := range is {
+ sum += f()
+ }
+ if sum != 2+4+6+8 {
+ fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum)
+ bug = true
+ }
+ if !bug {
+ fmt.Printf("PASS\n")
+ } else {
+ os.Exit(11)
+ }
+}
diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go
new file mode 100644
index 0000000..4e68970
--- /dev/null
+++ b/src/cmd/compile/internal/mips/galign.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/mips"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &mips.Linkmips
+ if buildcfg.GOARCH == "mipsle" {
+ arch.LinkArch = &mips.Linkmipsle
+ }
+ arch.REGSP = mips.REGSP
+ arch.MAXWIDTH = (1 << 31) - 1
+ arch.SoftFloat = (buildcfg.GOMIPS == "softfloat")
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
new file mode 100644
index 0000000..e235ef9
--- /dev/null
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// TODO(mips): implement DUFFZERO
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i)
+ }
+ } else {
+ //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
+ // ADD $(FIXED_FRAME+frame+lo-4), SP, r1
+ // ADD $cnt, r1, r2
+ // loop:
+ // MOVW R0, (Widthptr)r1
+ // ADD $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
+ p1 := p
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(mips.ANOOP)
+ return p
+}
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
new file mode 100644
index 0000000..bfccafd
--- /dev/null
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -0,0 +1,880 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// isFPreg reports whether r is an FP register.
+func isFPreg(r int16) bool {
+ return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO reports whether r is HI or LO register.
+func isHILO(r int16) bool {
+ return r == mips.REG_HI || r == mips.REG_LO
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return mips.AMOVB
+ } else {
+ return mips.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return mips.AMOVH
+ } else {
+ return mips.AMOVHU
+ }
+ case 4:
+ return mips.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return mips.AMOVB
+ case 2:
+ return mips.AMOVH
+ case 4:
+ return mips.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpMIPSMOVWreg:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := mips.AMOVW
+ if isFPreg(x) && isFPreg(y) {
+ as = mips.AMOVF
+ if t.Size() == 8 {
+ as = mips.AMOVD
+ }
+ }
+
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+ // cannot move between special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpMIPSMOVWnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isHILO(r) {
+ // cannot directly load, load to TMP and move
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ if isHILO(r) {
+ // cannot directly store, move to TMP and store
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ r = mips.REGTMP
+ }
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpMIPSADD,
+ ssa.OpMIPSSUB,
+ ssa.OpMIPSAND,
+ ssa.OpMIPSOR,
+ ssa.OpMIPSXOR,
+ ssa.OpMIPSNOR,
+ ssa.OpMIPSSLL,
+ ssa.OpMIPSSRL,
+ ssa.OpMIPSSRA,
+ ssa.OpMIPSADDF,
+ ssa.OpMIPSADDD,
+ ssa.OpMIPSSUBF,
+ ssa.OpMIPSSUBD,
+ ssa.OpMIPSMULF,
+ ssa.OpMIPSMULD,
+ ssa.OpMIPSDIVF,
+ ssa.OpMIPSDIVD,
+ ssa.OpMIPSMUL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSSGT,
+ ssa.OpMIPSSGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSSGTzero,
+ ssa.OpMIPSSGTUzero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSADDconst,
+ ssa.OpMIPSSUBconst,
+ ssa.OpMIPSANDconst,
+ ssa.OpMIPSORconst,
+ ssa.OpMIPSXORconst,
+ ssa.OpMIPSNORconst,
+ ssa.OpMIPSSLLconst,
+ ssa.OpMIPSSRLconst,
+ ssa.OpMIPSSRAconst,
+ ssa.OpMIPSSGTconst,
+ ssa.OpMIPSSGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMULT,
+ ssa.OpMIPSMULTU,
+ ssa.OpMIPSDIV,
+ ssa.OpMIPSDIVU:
+ // result in hi,lo
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpMIPSMOVWconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) || isHILO(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpMIPSMOVFconst,
+ ssa.OpMIPSMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMOVZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMOVZzero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMPEQF,
+ ssa.OpMIPSCMPEQD,
+ ssa.OpMIPSCMPGEF,
+ ssa.OpMIPSCMPGED,
+ ssa.OpMIPSCMPGTF,
+ ssa.OpMIPSCMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpMIPSMOVWaddr:
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVW $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R29)
+ // when constant is large, tmp register (R23) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMOVBload,
+ ssa.OpMIPSMOVBUload,
+ ssa.OpMIPSMOVHload,
+ ssa.OpMIPSMOVHUload,
+ ssa.OpMIPSMOVWload,
+ ssa.OpMIPSMOVFload,
+ ssa.OpMIPSMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMOVBstore,
+ ssa.OpMIPSMOVHstore,
+ ssa.OpMIPSMOVWstore,
+ ssa.OpMIPSMOVFstore,
+ ssa.OpMIPSMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPSMOVBstorezero,
+ ssa.OpMIPSMOVHstorezero,
+ ssa.OpMIPSMOVWstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPSMOVBreg,
+ ssa.OpMIPSMOVBUreg,
+ ssa.OpMIPSMOVHreg,
+ ssa.OpMIPSMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPSMOVWreg || a.Op == ssa.OpMIPSMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpMIPSMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpMIPSMOVWF,
+ ssa.OpMIPSMOVWD,
+ ssa.OpMIPSTRUNCFW,
+ ssa.OpMIPSTRUNCDW,
+ ssa.OpMIPSMOVFD,
+ ssa.OpMIPSMOVDF,
+ ssa.OpMIPSMOVWfpgp,
+ ssa.OpMIPSMOVWgpfp,
+ ssa.OpMIPSNEGF,
+ ssa.OpMIPSNEGD,
+ ssa.OpMIPSABSD,
+ ssa.OpMIPSSQRTF,
+ ssa.OpMIPSSQRTD,
+ ssa.OpMIPSCLZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSNEG:
+ // SUB from REGZERO
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSLoweredZero:
+ // SUBU $4, R1
+ // MOVW R0, 4(R1)
+ // ADDU $4, R1
+ // BNE Rarg1, R1, -2(PC)
+ // arg1 is the address of the last element to zero
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = mips.REG_R1
+ p2.To.Offset = sz
+ p3 := s.Prog(mips.AADDU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = mips.REG_R1
+ p4 := s.Prog(mips.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = mips.REG_R1
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p2)
+ case ssa.OpMIPSLoweredMove:
+ // SUBU $4, R1
+ // MOVW 4(R1), Rtmp
+ // MOVW Rtmp, (R2)
+ // ADDU $4, R1
+ // ADDU $4, R2
+ // BNE Rarg2, R1, -4(PC)
+ // arg2 is the address of the last element of src
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_MEM
+ p2.From.Reg = mips.REG_R1
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = mips.REGTMP
+ p3 := s.Prog(mov)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = mips.REG_R2
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = mips.REG_R1
+ p5 := s.Prog(mips.AADDU)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = sz
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = mips.REG_R2
+ p6 := s.Prog(mips.ABNE)
+ p6.From.Type = obj.TYPE_REG
+ p6.From.Reg = v.Args[2].Reg()
+ p6.Reg = mips.REG_R1
+ p6.To.Type = obj.TYPE_BRANCH
+ p6.To.SetTarget(p2)
+ case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
+ s.Call(v)
+ case ssa.OpMIPSCALLtail:
+ s.TailCall(v)
+ case ssa.OpMIPSLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+ case ssa.OpMIPSLoweredPanicBoundsA, ssa.OpMIPSLoweredPanicBoundsB, ssa.OpMIPSLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+ case ssa.OpMIPSLoweredAtomicLoad8,
+ ssa.OpMIPSLoweredAtomicLoad32:
+ s.Prog(mips.ASYNC)
+
+ var op obj.As
+ switch v.Op {
+ case ssa.OpMIPSLoweredAtomicLoad8:
+ op = mips.AMOVB
+ case ssa.OpMIPSLoweredAtomicLoad32:
+ op = mips.AMOVW
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicStore8,
+ ssa.OpMIPSLoweredAtomicStore32:
+ s.Prog(mips.ASYNC)
+
+ var op obj.As
+ switch v.Op {
+ case ssa.OpMIPSLoweredAtomicStore8:
+ op = mips.AMOVB
+ case ssa.OpMIPSLoweredAtomicStore32:
+ op = mips.AMOVW
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicStorezero:
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicExchange:
+ // SYNC
+ // MOVW Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(mips.ALL)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicAdd:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU Rarg1, Rout
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ p1 := s.Prog(mips.AADDU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+
+ case ssa.OpMIPSLoweredAtomicAddconst:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU $auxInt, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU $auxInt, Rout
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ p1 := s.Prog(mips.AADDU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+
+ case ssa.OpMIPSLoweredAtomicAnd,
+ ssa.OpMIPSLoweredAtomicOr:
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = mips.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ case ssa.OpMIPSLoweredAtomicCas:
+ // MOVW $0, Rout
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVW Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ s.Prog(mips.ASYNC)
+
+ p1 := s.Prog(mips.ALL)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+
+ p3 := s.Prog(mips.AMOVW)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+
+ p4 := s.Prog(mips.ASC)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+
+ p5 := s.Prog(mips.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p1)
+
+ s.Prog(mips.ASYNC)
+
+ p6 := s.Prog(obj.ANOP)
+ p2.To.SetTarget(p6)
+
+ case ssa.OpMIPSLoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(mips.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpMIPSFPFlagTrue,
+ ssa.OpMIPSFPFlagFalse:
+ // MOVW $1, r
+ // CMOVF R0, r
+
+ cmov := mips.ACMOVF
+ if v.Op == ssa.OpMIPSFPFlagFalse {
+ cmov = mips.ACMOVT
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p1 := s.Prog(cmov)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = mips.REGZERO
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg()
+
+ case ssa.OpMIPSLoweredGetClosurePtr:
+ // Closure pointer is R22 (mips.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPSLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockMIPSEQ: {mips.ABEQ, mips.ABNE},
+ ssa.BlockMIPSNE: {mips.ABNE, mips.ABEQ},
+ ssa.BlockMIPSLTZ: {mips.ABLTZ, mips.ABGEZ},
+ ssa.BlockMIPSGEZ: {mips.ABGEZ, mips.ABLTZ},
+ ssa.BlockMIPSLEZ: {mips.ABLEZ, mips.ABGTZ},
+ ssa.BlockMIPSGTZ: {mips.ABGTZ, mips.ABLEZ},
+ ssa.BlockMIPSFPT: {mips.ABFPT, mips.ABFPF},
+ ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R1:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(mips.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.Reg = mips.REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
+ ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
+ ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
+ ssa.BlockMIPSFPT, ssa.BlockMIPSFPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
new file mode 100644
index 0000000..412bc71
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -0,0 +1,28 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/mips"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &mips.Linkmips64
+ if buildcfg.GOARCH == "mips64le" {
+ arch.LinkArch = &mips.Linkmips64le
+ }
+ arch.REGSP = mips.REGSP
+ arch.MAXWIDTH = 1 << 50
+ arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat"
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
new file mode 100644
index 0000000..5f3f3e6
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -0,0 +1,55 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
+ } else {
+ // ADDV $(8+frame+lo-8), SP, r1
+ // ADDV $cnt, r1, r2
+ // loop:
+ // MOVV R0, (Widthptr)r1
+ // ADDV $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
+ p1 := p
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(mips.ANOOP)
+ return p
+}
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
new file mode 100644
index 0000000..0c0dc6e
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -0,0 +1,889 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// isFPreg reports whether r is an FP register.
+func isFPreg(r int16) bool {
+ return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO reports whether r is HI or LO register.
+func isHILO(r int16) bool {
+ return r == mips.REG_HI || r == mips.REG_LO
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return mips.AMOVB
+ } else {
+ return mips.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return mips.AMOVH
+ } else {
+ return mips.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return mips.AMOVW
+ } else {
+ return mips.AMOVWU
+ }
+ case 8:
+ return mips.AMOVV
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return mips.AMOVB
+ case 2:
+ return mips.AMOVH
+ case 4:
+ return mips.AMOVW
+ case 8:
+ return mips.AMOVV
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := mips.AMOVV
+ if isFPreg(x) && isFPreg(y) {
+ as = mips.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+ // cannot move between special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpMIPS64MOVVnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isHILO(r) {
+ // cannot directly load, load to TMP and move
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ if isHILO(r) {
+ // cannot directly store, move to TMP and store
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ r = mips.REGTMP
+ }
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpMIPS64ADDV,
+ ssa.OpMIPS64SUBV,
+ ssa.OpMIPS64AND,
+ ssa.OpMIPS64OR,
+ ssa.OpMIPS64XOR,
+ ssa.OpMIPS64NOR,
+ ssa.OpMIPS64SLLV,
+ ssa.OpMIPS64SRLV,
+ ssa.OpMIPS64SRAV,
+ ssa.OpMIPS64ADDF,
+ ssa.OpMIPS64ADDD,
+ ssa.OpMIPS64SUBF,
+ ssa.OpMIPS64SUBD,
+ ssa.OpMIPS64MULF,
+ ssa.OpMIPS64MULD,
+ ssa.OpMIPS64DIVF,
+ ssa.OpMIPS64DIVD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64SGT,
+ ssa.OpMIPS64SGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64ADDVconst,
+ ssa.OpMIPS64SUBVconst,
+ ssa.OpMIPS64ANDconst,
+ ssa.OpMIPS64ORconst,
+ ssa.OpMIPS64XORconst,
+ ssa.OpMIPS64NORconst,
+ ssa.OpMIPS64SLLVconst,
+ ssa.OpMIPS64SRLVconst,
+ ssa.OpMIPS64SRAVconst,
+ ssa.OpMIPS64SGTconst,
+ ssa.OpMIPS64SGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MULV,
+ ssa.OpMIPS64MULVU,
+ ssa.OpMIPS64DIVV,
+ ssa.OpMIPS64DIVVU:
+ // result in hi,lo
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpMIPS64MOVVconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) || isHILO(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpMIPS64MOVFconst,
+ ssa.OpMIPS64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64CMPEQF,
+ ssa.OpMIPS64CMPEQD,
+ ssa.OpMIPS64CMPGEF,
+ ssa.OpMIPS64CMPGED,
+ ssa.OpMIPS64CMPGTF,
+ ssa.OpMIPS64CMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpMIPS64MOVVaddr:
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVV $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R29)
+ // when constant is large, tmp register (R23) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVV $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MOVBload,
+ ssa.OpMIPS64MOVBUload,
+ ssa.OpMIPS64MOVHload,
+ ssa.OpMIPS64MOVHUload,
+ ssa.OpMIPS64MOVWload,
+ ssa.OpMIPS64MOVWUload,
+ ssa.OpMIPS64MOVVload,
+ ssa.OpMIPS64MOVFload,
+ ssa.OpMIPS64MOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MOVBstore,
+ ssa.OpMIPS64MOVHstore,
+ ssa.OpMIPS64MOVWstore,
+ ssa.OpMIPS64MOVVstore,
+ ssa.OpMIPS64MOVFstore,
+ ssa.OpMIPS64MOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPS64MOVBstorezero,
+ ssa.OpMIPS64MOVHstorezero,
+ ssa.OpMIPS64MOVWstorezero,
+ ssa.OpMIPS64MOVVstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPS64MOVBreg,
+ ssa.OpMIPS64MOVBUreg,
+ ssa.OpMIPS64MOVHreg,
+ ssa.OpMIPS64MOVHUreg,
+ ssa.OpMIPS64MOVWreg,
+ ssa.OpMIPS64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg && mips.REG_R0 <= a.Reg() && a.Reg() <= mips.REG_R31 {
+ // LoadReg from a narrower type does an extension, except loading
+ // to a floating point register. So only eliminate the extension
+ // if it is loaded to an integer register.
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpMIPS64MOVWF,
+ ssa.OpMIPS64MOVWD,
+ ssa.OpMIPS64TRUNCFW,
+ ssa.OpMIPS64TRUNCDW,
+ ssa.OpMIPS64MOVVF,
+ ssa.OpMIPS64MOVVD,
+ ssa.OpMIPS64TRUNCFV,
+ ssa.OpMIPS64TRUNCDV,
+ ssa.OpMIPS64MOVFD,
+ ssa.OpMIPS64MOVDF,
+ ssa.OpMIPS64MOVWfpgp,
+ ssa.OpMIPS64MOVWgpfp,
+ ssa.OpMIPS64MOVVfpgp,
+ ssa.OpMIPS64MOVVgpfp,
+ ssa.OpMIPS64NEGF,
+ ssa.OpMIPS64NEGD,
+ ssa.OpMIPS64ABSD,
+ ssa.OpMIPS64SQRTF,
+ ssa.OpMIPS64SQRTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64NEGV:
+ // SUB from REGZERO
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64DUFFZERO:
+ // runtime.duffzero expects start address - 8 in R1
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p = s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpMIPS64LoweredZero:
+ // SUBV $8, R1
+ // MOVV R0, 8(R1)
+ // ADDV $8, R1
+ // BNE Rarg1, R1, -2(PC)
+ // arg1 is the address of the last element to zero
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = mips.AMOVV
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = mips.REG_R1
+ p2.To.Offset = sz
+ p3 := s.Prog(mips.AADDVU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = mips.REG_R1
+ p4 := s.Prog(mips.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = mips.REG_R1
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p2)
+ case ssa.OpMIPS64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpMIPS64LoweredMove:
+ // SUBV $8, R1
+ // MOVV 8(R1), Rtmp
+ // MOVV Rtmp, (R2)
+ // ADDV $8, R1
+ // ADDV $8, R2
+ // BNE Rarg2, R1, -4(PC)
+ // arg2 is the address of the last element of src
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = mips.AMOVV
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_MEM
+ p2.From.Reg = mips.REG_R1
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = mips.REGTMP
+ p3 := s.Prog(mov)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = mips.REG_R2
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = mips.REG_R1
+ p5 := s.Prog(mips.AADDVU)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = sz
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = mips.REG_R2
+ p6 := s.Prog(mips.ABNE)
+ p6.From.Type = obj.TYPE_REG
+ p6.From.Reg = v.Args[2].Reg()
+ p6.Reg = mips.REG_R1
+ p6.To.Type = obj.TYPE_BRANCH
+ p6.To.SetTarget(p2)
+ case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
+ s.Call(v)
+ case ssa.OpMIPS64CALLtail:
+ s.TailCall(v)
+ case ssa.OpMIPS64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+ case ssa.OpMIPS64LoweredPanicBoundsA, ssa.OpMIPS64LoweredPanicBoundsB, ssa.OpMIPS64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
+ as := mips.AMOVV
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicLoad8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicLoad32:
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicStore8, ssa.OpMIPS64LoweredAtomicStore32, ssa.OpMIPS64LoweredAtomicStore64:
+ as := mips.AMOVV
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicStore8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicStore32:
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicStorezero32, ssa.OpMIPS64LoweredAtomicStorezero64:
+ as := mips.AMOVV
+ if v.Op == ssa.OpMIPS64LoweredAtomicStorezero32 {
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicExchange32, ssa.OpMIPS64LoweredAtomicExchange64:
+ // SYNC
+ // MOVV Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicExchange32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV Rarg1, Rout
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicAdd32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(mips.AADDVU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(mips.ASYNC)
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpMIPS64LoweredAtomicAddconst32, ssa.OpMIPS64LoweredAtomicAddconst64:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV $auxint, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV $auxint, Rout
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicAddconst32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(mips.AADDVU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(mips.ASYNC)
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpMIPS64LoweredAtomicAnd32,
+ ssa.OpMIPS64LoweredAtomicOr32:
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = mips.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ case ssa.OpMIPS64LoweredAtomicCas32, ssa.OpMIPS64LoweredAtomicCas64:
+ // MOVV $0, Rout
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicCas32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(mips.ASYNC)
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(mips.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(mips.AMOVV)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+ p4 := s.Prog(sc)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+ p5 := s.Prog(mips.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p1)
+ p6 := s.Prog(mips.ASYNC)
+ p2.To.SetTarget(p6)
+ case ssa.OpMIPS64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(mips.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpMIPS64FPFlagTrue,
+ ssa.OpMIPS64FPFlagFalse:
+ // MOVV $0, r
+ // BFPF 2(PC)
+ // MOVV $1, r
+ branch := mips.ABFPF
+ if v.Op == ssa.OpMIPS64FPFlagFalse {
+ branch = mips.ABFPT
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p2 := s.Prog(branch)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(mips.AMOVV)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = 1
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg()
+ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
+ p2.To.SetTarget(p4)
+ case ssa.OpMIPS64LoweredGetClosurePtr:
+ // Closure pointer is R22 (mips.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPS64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockMIPS64EQ: {mips.ABEQ, mips.ABNE},
+ ssa.BlockMIPS64NE: {mips.ABNE, mips.ABEQ},
+ ssa.BlockMIPS64LTZ: {mips.ABLTZ, mips.ABGEZ},
+ ssa.BlockMIPS64GEZ: {mips.ABGEZ, mips.ABLTZ},
+ ssa.BlockMIPS64LEZ: {mips.ABLEZ, mips.ABGTZ},
+ ssa.BlockMIPS64GTZ: {mips.ABGTZ, mips.ABLEZ},
+ ssa.BlockMIPS64FPT: {mips.ABFPT, mips.ABFPF},
+ ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R1:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(mips.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.Reg = mips.REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE,
+ ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ,
+ ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ,
+ ssa.BlockMIPS64FPT, ssa.BlockMIPS64FPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go
new file mode 100644
index 0000000..8bdbfc9
--- /dev/null
+++ b/src/cmd/compile/internal/noder/codes.go
@@ -0,0 +1,91 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import "internal/pkgbits"
+
+// A codeStmt distinguishes among statement encodings.
+type codeStmt int
+
+func (c codeStmt) Marker() pkgbits.SyncMarker { return pkgbits.SyncStmt1 }
+func (c codeStmt) Value() int { return int(c) }
+
+const (
+ stmtEnd codeStmt = iota
+ stmtLabel
+ stmtBlock
+ stmtExpr
+ stmtSend
+ stmtAssign
+ stmtAssignOp
+ stmtIncDec
+ stmtBranch
+ stmtCall
+ stmtReturn
+ stmtIf
+ stmtFor
+ stmtSwitch
+ stmtSelect
+)
+
+// A codeExpr distinguishes among expression encodings.
+type codeExpr int
+
+func (c codeExpr) Marker() pkgbits.SyncMarker { return pkgbits.SyncExpr }
+func (c codeExpr) Value() int { return int(c) }
+
+// TODO(mdempsky): Split expr into addr, for lvalues.
+const (
+ exprConst codeExpr = iota
+ exprLocal // local variable
+ exprGlobal // global variable or function
+ exprCompLit
+ exprFuncLit
+ exprFieldVal
+ exprMethodVal
+ exprMethodExpr
+ exprIndex
+ exprSlice
+ exprAssert
+ exprUnaryOp
+ exprBinaryOp
+ exprCall
+ exprConvert
+ exprNew
+ exprMake
+ exprSizeof
+ exprAlignof
+ exprOffsetof
+ exprZero
+ exprFuncInst
+ exprRecv
+ exprReshape
+ exprRuntimeBuiltin // a reference to a runtime function from transformed syntax. Followed by string name, e.g., "panicrangeexit"
+)
+
+type codeAssign int
+
+func (c codeAssign) Marker() pkgbits.SyncMarker { return pkgbits.SyncAssign }
+func (c codeAssign) Value() int { return int(c) }
+
+const (
+ assignBlank codeAssign = iota
+ assignDef
+ assignExpr
+)
+
+// A codeDecl distinguishes among declaration encodings.
+type codeDecl int
+
+func (c codeDecl) Marker() pkgbits.SyncMarker { return pkgbits.SyncDecl }
+func (c codeDecl) Value() int { return int(c) }
+
+const (
+ declEnd codeDecl = iota
+ declFunc
+ declMethod
+ declVar
+ declOther
+)
diff --git a/src/cmd/compile/internal/noder/export.go b/src/cmd/compile/internal/noder/export.go
new file mode 100644
index 0000000..e1f289b
--- /dev/null
+++ b/src/cmd/compile/internal/noder/export.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/internal/bio"
+)
+
+func WriteExports(out *bio.Writer) {
+ var data bytes.Buffer
+
+ data.WriteByte('u')
+ writeUnifiedExport(&data)
+
+ // The linker also looks for the $$ marker - use char after $$ to distinguish format.
+ out.WriteString("\n$$B\n") // indicate binary export format
+ io.Copy(out, &data)
+ out.WriteString("\n$$\n")
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, data.Len())
+ }
+}
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
new file mode 100644
index 0000000..0bff71e
--- /dev/null
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -0,0 +1,140 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// Helpers for constructing typed IR nodes.
+//
+// TODO(mdempsky): Move into their own package so they can be easily
+// reused by iimport and frontend optimizations.
+
+type ImplicitNode interface {
+ ir.Node
+ SetImplicit(x bool)
+}
+
+// Implicit returns n after marking it as Implicit.
+func Implicit(n ImplicitNode) ImplicitNode {
+ n.SetImplicit(true)
+ return n
+}
+
+// typed returns n after setting its type to typ.
+func typed(typ *types.Type, n ir.Node) ir.Node {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ return n
+}
+
+// Values
+
+// FixValue returns val after converting and truncating it as
+// appropriate for typ.
+func FixValue(typ *types.Type, val constant.Value) constant.Value {
+ assert(typ.Kind() != types.TFORW)
+ switch {
+ case typ.IsInteger():
+ val = constant.ToInt(val)
+ case typ.IsFloat():
+ val = constant.ToFloat(val)
+ case typ.IsComplex():
+ val = constant.ToComplex(val)
+ }
+ if !typ.IsUntyped() {
+ val = typecheck.ConvertVal(val, typ, false)
+ }
+ ir.AssertValidTypeForConst(typ, val)
+ return val
+}
+
+// Expressions
+
+func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr {
+ n := typecheck.NodAddrAt(pos, x)
+ typed(types.NewPtr(x.Type()), n)
+ return n
+}
+
+func Deref(pos src.XPos, typ *types.Type, x ir.Node) *ir.StarExpr {
+ n := ir.NewStarExpr(pos, x)
+ typed(typ, n)
+ return n
+}
+
+// Statements
+
+func idealType(tv syntax.TypeAndValue) types2.Type {
+ // The gc backend expects all expressions to have a concrete type, and
+ // types2 mostly satisfies this expectation already. But there are a few
+ // cases where the Go spec doesn't require converting to concrete type,
+ // and so types2 leaves them untyped. So we need to fix those up here.
+ typ := types2.Unalias(tv.Type)
+ if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 {
+ switch basic.Kind() {
+ case types2.UntypedNil:
+ // ok; can appear in type switch case clauses
+ // TODO(mdempsky): Handle as part of type switches instead?
+ case types2.UntypedInt, types2.UntypedFloat, types2.UntypedComplex:
+ typ = types2.Typ[types2.Uint]
+ if tv.Value != nil {
+ s := constant.ToInt(tv.Value)
+ assert(s.Kind() == constant.Int)
+ if constant.Sign(s) < 0 {
+ typ = types2.Typ[types2.Int]
+ }
+ }
+ case types2.UntypedBool:
+ typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
+ case types2.UntypedString:
+ typ = types2.Typ[types2.String] // argument to "append" or "copy" calls
+ case types2.UntypedRune:
+ typ = types2.Typ[types2.Int32] // range over rune
+ default:
+ return nil
+ }
+ }
+ return typ
+}
+
+func isTypeParam(t types2.Type) bool {
+ _, ok := types2.Unalias(t).(*types2.TypeParam)
+ return ok
+}
+
+// isNotInHeap reports whether typ is or contains an element of type
+// runtime/internal/sys.NotInHeap.
+func isNotInHeap(typ types2.Type) bool {
+ typ = types2.Unalias(typ)
+ if named, ok := typ.(*types2.Named); ok {
+ if obj := named.Obj(); obj.Name() == "nih" && obj.Pkg().Path() == "runtime/internal/sys" {
+ return true
+ }
+ typ = named.Underlying()
+ }
+
+ switch typ := typ.(type) {
+ case *types2.Array:
+ return isNotInHeap(typ.Elem())
+ case *types2.Struct:
+ for i := 0; i < typ.NumFields(); i++ {
+ if isNotInHeap(typ.Field(i).Type()) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+}
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
new file mode 100644
index 0000000..e9bb1e3
--- /dev/null
+++ b/src/cmd/compile/internal/noder/import.go
@@ -0,0 +1,374 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "errors"
+ "fmt"
+ "internal/buildcfg"
+ "internal/pkgbits"
+ "os"
+ pathpkg "path"
+ "runtime"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/importer"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/archive"
+ "cmd/internal/bio"
+ "cmd/internal/goobj"
+ "cmd/internal/objabi"
+)
+
+type gcimports struct {
+ ctxt *types2.Context
+ packages map[string]*types2.Package
+}
+
+func (m *gcimports) Import(path string) (*types2.Package, error) {
+ return m.ImportFrom(path, "" /* no vendoring */, 0)
+}
+
+func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*types2.Package, error) {
+ if mode != 0 {
+ panic("mode must be 0")
+ }
+
+ _, pkg, err := readImportFile(path, typecheck.Target, m.ctxt, m.packages)
+ return pkg, err
+}
+
+func isDriveLetter(b byte) bool {
+ return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
+}
+
+// is this path a local name? begins with ./ or ../ or /
+func islocalname(name string) bool {
+ return strings.HasPrefix(name, "/") ||
+ runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
+ strings.HasPrefix(name, "./") || name == "." ||
+ strings.HasPrefix(name, "../") || name == ".."
+}
+
+func openPackage(path string) (*os.File, error) {
+ if islocalname(path) {
+ if base.Flag.NoLocalImports {
+ return nil, errors.New("local imports disallowed")
+ }
+
+ if base.Flag.Cfg.PackageFile != nil {
+ return os.Open(base.Flag.Cfg.PackageFile[path])
+ }
+
+ // try .a before .o. important for building libraries:
+ // if there is an array.o in the array.a library,
+ // want to find all of array.a, not just array.o.
+ if file, err := os.Open(fmt.Sprintf("%s.a", path)); err == nil {
+ return file, nil
+ }
+ if file, err := os.Open(fmt.Sprintf("%s.o", path)); err == nil {
+ return file, nil
+ }
+ return nil, errors.New("file not found")
+ }
+
+ // local imports should be canonicalized already.
+ // don't want to see "encoding/../encoding/base64"
+ // as different from "encoding/base64".
+ if q := pathpkg.Clean(path); q != path {
+ return nil, fmt.Errorf("non-canonical import path %q (should be %q)", path, q)
+ }
+
+ if base.Flag.Cfg.PackageFile != nil {
+ return os.Open(base.Flag.Cfg.PackageFile[path])
+ }
+
+ for _, dir := range base.Flag.Cfg.ImportDirs {
+ if file, err := os.Open(fmt.Sprintf("%s/%s.a", dir, path)); err == nil {
+ return file, nil
+ }
+ if file, err := os.Open(fmt.Sprintf("%s/%s.o", dir, path)); err == nil {
+ return file, nil
+ }
+ }
+
+ if buildcfg.GOROOT != "" {
+ suffix := ""
+ if base.Flag.InstallSuffix != "" {
+ suffix = "_" + base.Flag.InstallSuffix
+ } else if base.Flag.Race {
+ suffix = "_race"
+ } else if base.Flag.MSan {
+ suffix = "_msan"
+ } else if base.Flag.ASan {
+ suffix = "_asan"
+ }
+
+ if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil {
+ return file, nil
+ }
+ if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil {
+ return file, nil
+ }
+ }
+ return nil, errors.New("file not found")
+}
+
+// resolveImportPath resolves an import path as it appears in a Go
+// source file to the package's full path.
+func resolveImportPath(path string) (string, error) {
+ // The package name main is no longer reserved,
+ // but we reserve the import path "main" to identify
+ // the main package, just as we reserve the import
+ // path "math" to identify the standard math package.
+ if path == "main" {
+ return "", errors.New("cannot import \"main\"")
+ }
+
+ if base.Ctxt.Pkgpath == "" {
+ panic("missing pkgpath")
+ }
+ if path == base.Ctxt.Pkgpath {
+ return "", fmt.Errorf("import %q while compiling that package (import cycle)", path)
+ }
+
+ if mapped, ok := base.Flag.Cfg.ImportMap[path]; ok {
+ path = mapped
+ }
+
+ if islocalname(path) {
+ if path[0] == '/' {
+ return "", errors.New("import path cannot be absolute path")
+ }
+
+ prefix := base.Flag.D
+ if prefix == "" {
+ // Questionable, but when -D isn't specified, historically we
+ // resolve local import paths relative to the directory the
+ // compiler's current directory, not the respective source
+ // file's directory.
+ prefix = base.Ctxt.Pathname
+ }
+ path = pathpkg.Join(prefix, path)
+
+ if err := checkImportPath(path, true); err != nil {
+ return "", err
+ }
+ }
+
+ return path, nil
+}
+
+// readImportFile reads the import file for the given package path and
+// returns its types.Pkg representation. If packages is non-nil, the
+// types2.Package representation is also returned.
+func readImportFile(path string, target *ir.Package, env *types2.Context, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
+ path, err = resolveImportPath(path)
+ if err != nil {
+ return
+ }
+
+ if path == "unsafe" {
+ pkg1, pkg2 = types.UnsafePkg, types2.Unsafe
+
+ // TODO(mdempsky): Investigate if this actually matters. Why would
+ // the linker or runtime care whether a package imported unsafe?
+ if !pkg1.Direct {
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+ }
+
+ return
+ }
+
+ pkg1 = types.NewPkg(path, "")
+ if packages != nil {
+ pkg2 = packages[path]
+ assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
+ }
+
+ if pkg1.Direct {
+ return
+ }
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+
+ f, err := openPackage(path)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+
+ r, end, err := findExportData(f)
+ if err != nil {
+ return
+ }
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path, f.Name())
+ }
+
+ c, err := r.ReadByte()
+ if err != nil {
+ return
+ }
+
+ pos := r.Offset()
+
+ // Map export data section into memory as a single large
+ // string. This reduces heap fragmentation and allows returning
+ // individual substrings very efficiently.
+ var data string
+ data, err = base.MapFile(r.File(), pos, end-pos)
+ if err != nil {
+ return
+ }
+
+ switch c {
+ case 'u':
+ // TODO(mdempsky): This seems a bit clunky.
+ data = strings.TrimSuffix(data, "\n$$\n")
+
+ pr := pkgbits.NewPkgDecoder(pkg1.Path, data)
+
+ // Read package descriptors for both types2 and compiler backend.
+ readPackage(newPkgReader(pr), pkg1, false)
+ pkg2 = importer.ReadPackage(env, packages, pr)
+
+ default:
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ err = fmt.Errorf("unexpected package format byte: %v", c)
+ return
+ }
+
+ err = addFingerprint(path, f, end)
+ return
+}
+
+// findExportData returns a *bio.Reader positioned at the start of the
+// binary export data section, and a file offset for where to stop
+// reading.
+func findExportData(f *os.File) (r *bio.Reader, end int64, err error) {
+ r = bio.NewReader(f)
+
+ // check object header
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
+
+ if line == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
+ if sz <= 0 {
+ err = errors.New("not a package file")
+ return
+ }
+ end = r.Offset() + sz
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return
+ }
+ } else {
+ // Not an archive; provide end of file instead.
+ // TODO(mdempsky): I don't think this happens anymore.
+ var fi os.FileInfo
+ fi, err = f.Stat()
+ if err != nil {
+ return
+ }
+ end = fi.Size()
+ }
+
+ if !strings.HasPrefix(line, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", line)
+ return
+ }
+ if expect := objabi.HeaderString(); line != expect {
+ err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
+ return
+ }
+
+ // process header lines
+ for !strings.HasPrefix(line, "$$") {
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return
+ }
+ }
+
+ // Expect $$B\n to signal binary import format.
+ if line != "$$B\n" {
+ err = errors.New("old export format no longer supported (recompile library)")
+ return
+ }
+
+ return
+}
+
+// addFingerprint reads the linker fingerprint included at the end of
+// the exportdata.
+func addFingerprint(path string, f *os.File, end int64) error {
+ const eom = "\n$$\n"
+ var fingerprint goobj.FingerprintType
+
+ var buf [len(fingerprint) + len(eom)]byte
+ if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
+ return err
+ }
+
+ // Caller should have given us the end position of the export data,
+ // which should end with the "\n$$\n" marker. As a consistency check
+ // to make sure we're reading at the right offset, make sure we
+ // found the marker.
+ if s := string(buf[len(fingerprint):]); s != eom {
+ return fmt.Errorf("expected $$ marker, but found %q", s)
+ }
+
+ copy(fingerprint[:], buf[:])
+ base.Ctxt.AddImport(path, fingerprint)
+
+ return nil
+}
+
+func checkImportPath(path string, allowSpace bool) error {
+ if path == "" {
+ return errors.New("import path is empty")
+ }
+
+ if strings.Contains(path, "\x00") {
+ return errors.New("import path contains NUL")
+ }
+
+ for ri := range base.ReservedImports {
+ if path == ri {
+ return fmt.Errorf("import path %q is reserved and cannot be used", path)
+ }
+ }
+
+ for _, r := range path {
+ switch {
+ case r == utf8.RuneError:
+ return fmt.Errorf("import path contains invalid UTF-8 sequence: %q", path)
+ case r < 0x20 || r == 0x7f:
+ return fmt.Errorf("import path contains control character: %q", path)
+ case r == '\\':
+ return fmt.Errorf("import path contains backslash; use slash: %q", path)
+ case !allowSpace && unicode.IsSpace(r):
+ return fmt.Errorf("import path contains space character: %q", path)
+ case strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r):
+ return fmt.Errorf("import path contains invalid character '%c': %q", r, path)
+ }
+ }
+
+ return nil
+}
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
new file mode 100644
index 0000000..e0b7bb9
--- /dev/null
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -0,0 +1,238 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "internal/types/errors"
+ "regexp"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/rangefunc"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+var versionErrorRx = regexp.MustCompile(`requires go[0-9]+\.[0-9]+ or later`)
+
+// checkFiles configures and runs the types2 checker on the given
+// parsed source files and then returns the result.
+func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) {
+ if base.SyntaxErrors() != 0 {
+ base.ErrorExit()
+ }
+
+ // setup and syntax error reporting
+ files := make([]*syntax.File, len(noders))
+ // posBaseMap maps all file pos bases back to *syntax.File
+ // for checking Go version mismatched.
+ posBaseMap := make(map[*syntax.PosBase]*syntax.File)
+ for i, p := range noders {
+ files[i] = p.file
+ posBaseMap[p.file.Pos().Base()] = p.file
+ }
+
+ // typechecking
+ ctxt := types2.NewContext()
+ importer := gcimports{
+ ctxt: ctxt,
+ packages: make(map[string]*types2.Package),
+ }
+ conf := types2.Config{
+ Context: ctxt,
+ GoVersion: base.Flag.Lang,
+ IgnoreBranchErrors: true, // parser already checked via syntax.CheckBranches mode
+ Importer: &importer,
+ Sizes: types2.SizesFor("gc", buildcfg.GOARCH),
+ }
+ if base.Flag.ErrorURL {
+ conf.ErrorURL = " [go.dev/e/%s]"
+ }
+ info := &types2.Info{
+ StoreTypesInSyntax: true,
+ Defs: make(map[*syntax.Name]types2.Object),
+ Uses: make(map[*syntax.Name]types2.Object),
+ Selections: make(map[*syntax.SelectorExpr]*types2.Selection),
+ Implicits: make(map[syntax.Node]types2.Object),
+ Scopes: make(map[syntax.Node]*types2.Scope),
+ Instances: make(map[*syntax.Name]types2.Instance),
+ FileVersions: make(map[*syntax.PosBase]string),
+ // expand as needed
+ }
+ conf.Error = func(err error) {
+ terr := err.(types2.Error)
+ msg := terr.Msg
+ if versionErrorRx.MatchString(msg) {
+ posBase := terr.Pos.Base()
+ for !posBase.IsFileBase() { // line directive base
+ posBase = posBase.Pos().Base()
+ }
+ fileVersion := info.FileVersions[posBase]
+ file := posBaseMap[posBase]
+ if file.GoVersion == fileVersion {
+ // If we have a version error caused by //go:build, report it.
+ msg = fmt.Sprintf("%s (file declares //go:build %s)", msg, fileVersion)
+ } else {
+ // Otherwise, hint at the -lang setting.
+ msg = fmt.Sprintf("%s (-lang was set to %s; check go.mod)", msg, base.Flag.Lang)
+ }
+ }
+ base.ErrorfAt(m.makeXPos(terr.Pos), terr.Code, "%s", msg)
+ }
+
+ pkg, err := conf.Check(base.Ctxt.Pkgpath, files, info)
+ base.ExitIfErrors()
+ if err != nil {
+ base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
+ }
+
+ // Check for anonymous interface cycles (#56103).
+ // TODO(gri) move this code into the type checkers (types2 and go/types)
+ var f cycleFinder
+ for _, file := range files {
+ syntax.Inspect(file, func(n syntax.Node) bool {
+ if n, ok := n.(*syntax.InterfaceType); ok {
+ if f.hasCycle(types2.Unalias(n.GetTypeInfo().Type).(*types2.Interface)) {
+ base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)")
+
+ for typ := range f.cyclic {
+ f.cyclic[typ] = false // suppress duplicate errors
+ }
+ }
+ return false
+ }
+ return true
+ })
+ }
+ base.ExitIfErrors()
+
+ // Implementation restriction: we don't allow not-in-heap types to
+ // be used as type arguments (#54765).
+ {
+ type nihTarg struct {
+ pos src.XPos
+ typ types2.Type
+ }
+ var nihTargs []nihTarg
+
+ for name, inst := range info.Instances {
+ for i := 0; i < inst.TypeArgs.Len(); i++ {
+ if targ := inst.TypeArgs.At(i); isNotInHeap(targ) {
+ nihTargs = append(nihTargs, nihTarg{m.makeXPos(name.Pos()), targ})
+ }
+ }
+ }
+ sort.Slice(nihTargs, func(i, j int) bool {
+ ti, tj := nihTargs[i], nihTargs[j]
+ return ti.pos.Before(tj.pos)
+ })
+ for _, targ := range nihTargs {
+ base.ErrorfAt(targ.pos, 0, "cannot use incomplete (or unallocatable) type as a type argument: %v", targ.typ)
+ }
+ }
+ base.ExitIfErrors()
+
+ // Rewrite range over function to explicit function calls
+ // with the loop bodies converted into new implicit closures.
+ // We do this now, before serialization to unified IR, so that if the
+ // implicit closures are inlined, we will have the unified IR form.
+ // If we do the rewrite in the back end, like between typecheck and walk,
+ // then the new implicit closure will not have a unified IR inline body,
+ // and bodyReaderFor will fail.
+ rangefunc.Rewrite(pkg, info, files)
+
+ return pkg, info
+}
+
+// A cycleFinder detects anonymous interface cycles (go.dev/issue/56103).
+type cycleFinder struct {
+ cyclic map[*types2.Interface]bool
+}
+
+// hasCycle reports whether typ is part of an anonymous interface cycle.
+func (f *cycleFinder) hasCycle(typ *types2.Interface) bool {
+ // We use Method instead of ExplicitMethod to implicitly expand any
+ // embedded interfaces. Then we just need to walk any anonymous
+ // types, keeping track of *types2.Interface types we visit along
+ // the way.
+ for i := 0; i < typ.NumMethods(); i++ {
+ if f.visit(typ.Method(i).Type()) {
+ return true
+ }
+ }
+ return false
+}
+
+// visit recursively walks typ0 to check any referenced interface types.
+func (f *cycleFinder) visit(typ0 types2.Type) bool {
+ for { // loop for tail recursion
+ switch typ := types2.Unalias(typ0).(type) {
+ default:
+ base.Fatalf("unexpected type: %T", typ)
+
+ case *types2.Basic, *types2.Named, *types2.TypeParam:
+ return false // named types cannot be part of an anonymous cycle
+ case *types2.Pointer:
+ typ0 = typ.Elem()
+ case *types2.Array:
+ typ0 = typ.Elem()
+ case *types2.Chan:
+ typ0 = typ.Elem()
+ case *types2.Map:
+ if f.visit(typ.Key()) {
+ return true
+ }
+ typ0 = typ.Elem()
+ case *types2.Slice:
+ typ0 = typ.Elem()
+
+ case *types2.Struct:
+ for i := 0; i < typ.NumFields(); i++ {
+ if f.visit(typ.Field(i).Type()) {
+ return true
+ }
+ }
+ return false
+
+ case *types2.Interface:
+ // The empty interface (e.g., "any") cannot be part of a cycle.
+ if typ.NumExplicitMethods() == 0 && typ.NumEmbeddeds() == 0 {
+ return false
+ }
+
+ // As an optimization, we wait to allocate cyclic here, after
+ // we've found at least one other (non-empty) anonymous
+ // interface. This means when a cycle is present, we need to
+ // make an extra recursive call to actually detect it. But for
+ // most packages, it allows skipping the map allocation
+ // entirely.
+ if x, ok := f.cyclic[typ]; ok {
+ return x
+ }
+ if f.cyclic == nil {
+ f.cyclic = make(map[*types2.Interface]bool)
+ }
+ f.cyclic[typ] = true
+ if f.hasCycle(typ) {
+ return true
+ }
+ f.cyclic[typ] = false
+ return false
+
+ case *types2.Signature:
+ return f.visit(typ.Params()) || f.visit(typ.Results())
+ case *types2.Tuple:
+ for i := 0; i < typ.Len(); i++ {
+ if f.visit(typ.At(i).Type()) {
+ return true
+ }
+ }
+ return false
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/noder/lex.go b/src/cmd/compile/internal/noder/lex.go
new file mode 100644
index 0000000..c964eca
--- /dev/null
+++ b/src/cmd/compile/internal/noder/lex.go
@@ -0,0 +1,184 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "strings"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+)
+
+func isSpace(c rune) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func isQuoted(s string) bool {
+ return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
+}
+
+const (
+ funcPragmas = ir.Nointerface |
+ ir.Noescape |
+ ir.Norace |
+ ir.Nosplit |
+ ir.Noinline |
+ ir.NoCheckPtr |
+ ir.RegisterParams | // TODO(register args) remove after register abi is working
+ ir.CgoUnsafeArgs |
+ ir.UintptrKeepAlive |
+ ir.UintptrEscapes |
+ ir.Systemstack |
+ ir.Nowritebarrier |
+ ir.Nowritebarrierrec |
+ ir.Yeswritebarrierrec
+)
+
+func pragmaFlag(verb string) ir.PragmaFlag {
+ switch verb {
+ case "go:build":
+ return ir.GoBuildPragma
+ case "go:nointerface":
+ if buildcfg.Experiment.FieldTrack {
+ return ir.Nointerface
+ }
+ case "go:noescape":
+ return ir.Noescape
+ case "go:norace":
+ return ir.Norace
+ case "go:nosplit":
+ return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
+ case "go:noinline":
+ return ir.Noinline
+ case "go:nocheckptr":
+ return ir.NoCheckPtr
+ case "go:systemstack":
+ return ir.Systemstack
+ case "go:nowritebarrier":
+ return ir.Nowritebarrier
+ case "go:nowritebarrierrec":
+ return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
+ case "go:yeswritebarrierrec":
+ return ir.Yeswritebarrierrec
+ case "go:cgo_unsafe_args":
+ return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
+ case "go:uintptrkeepalive":
+ return ir.UintptrKeepAlive
+ case "go:uintptrescapes":
+ // This directive extends //go:uintptrkeepalive by forcing
+ // uintptr arguments to escape to the heap, which makes stack
+ // growth safe.
+ return ir.UintptrEscapes | ir.UintptrKeepAlive // implies UintptrKeepAlive
+ case "go:registerparams": // TODO(register args) remove after register abi is working
+ return ir.RegisterParams
+ }
+ return 0
+}
+
+// pragcgo is called concurrently if files are parsed concurrently.
+func (p *noder) pragcgo(pos syntax.Pos, text string) {
+ f := pragmaFields(text)
+
+ verb := strings.TrimPrefix(f[0], "go:")
+ f[0] = verb
+
+ switch verb {
+ case "cgo_export_static", "cgo_export_dynamic":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf(`usage: //go:%s local [remote]`, verb)})
+ return
+ }
+ case "cgo_import_dynamic":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
+ case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]):
+ f[3] = strings.Trim(f[3], `"`)
+ if buildcfg.GOOS == "aix" && f[3] != "" {
+ // On Aix, library pattern must be "lib.a/object.o"
+ // or "lib.a/libname.so.X"
+ n := strings.Split(f[3], "/")
+ if len(n) != 2 || !strings.HasSuffix(n[0], ".a") || (!strings.HasSuffix(n[1], ".o") && !strings.Contains(n[1], ".so.")) {
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`})
+ return
+ }
+ }
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["library"]]`})
+ return
+ }
+ case "cgo_import_static":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_static local`})
+ return
+ }
+ case "cgo_dynamic_linker":
+ switch {
+ case len(f) == 2 && isQuoted(f[1]):
+ f[1] = strings.Trim(f[1], `"`)
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_dynamic_linker "path"`})
+ return
+ }
+ case "cgo_ldflag":
+ switch {
+ case len(f) == 2 && isQuoted(f[1]):
+ f[1] = strings.Trim(f[1], `"`)
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_ldflag "arg"`})
+ return
+ }
+ default:
+ return
+ }
+ p.pragcgobuf = append(p.pragcgobuf, f)
+}
+
+// pragmaFields is similar to strings.FieldsFunc(s, isSpace)
+// but does not split when inside double quoted regions and always
+// splits before the start and after the end of a double quoted region.
+// pragmaFields does not recognize escaped quotes. If a quote in s is not
+// closed the part after the opening quote will not be returned as a field.
+func pragmaFields(s string) []string {
+ var a []string
+ inQuote := false
+ fieldStart := -1 // Set to -1 when looking for start of field.
+ for i, c := range s {
+ switch {
+ case c == '"':
+ if inQuote {
+ inQuote = false
+ a = append(a, s[fieldStart:i+1])
+ fieldStart = -1
+ } else {
+ inQuote = true
+ if fieldStart >= 0 {
+ a = append(a, s[fieldStart:i])
+ }
+ fieldStart = i
+ }
+ case !inQuote && isSpace(c):
+ if fieldStart >= 0 {
+ a = append(a, s[fieldStart:i])
+ fieldStart = -1
+ }
+ default:
+ if fieldStart == -1 {
+ fieldStart = i
+ }
+ }
+ }
+ if !inQuote && fieldStart >= 0 { // Last field might end at the end of the string.
+ a = append(a, s[fieldStart:])
+ }
+ return a
+}
diff --git a/src/cmd/compile/internal/noder/lex_test.go b/src/cmd/compile/internal/noder/lex_test.go
new file mode 100644
index 0000000..85a3f06
--- /dev/null
+++ b/src/cmd/compile/internal/noder/lex_test.go
@@ -0,0 +1,122 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+
+ "cmd/compile/internal/syntax"
+)
+
+func eq(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestPragmaFields(t *testing.T) {
+ var tests = []struct {
+ in string
+ want []string
+ }{
+ {"", []string{}},
+ {" \t ", []string{}},
+ {`""""`, []string{`""`, `""`}},
+ {" a'b'c ", []string{"a'b'c"}},
+ {"1 2 3 4", []string{"1", "2", "3", "4"}},
+ {"\n☺\t☹\n", []string{"☺", "☹"}},
+ {`"1 2 " 3 " 4 5"`, []string{`"1 2 "`, `3`, `" 4 5"`}},
+ {`"1""2 3""4"`, []string{`"1"`, `"2 3"`, `"4"`}},
+ {`12"34"`, []string{`12`, `"34"`}},
+ {`12"34 `, []string{`12`}},
+ }
+
+ for _, tt := range tests {
+ got := pragmaFields(tt.in)
+ if !eq(got, tt.want) {
+ t.Errorf("pragmaFields(%q) = %v; want %v", tt.in, got, tt.want)
+ continue
+ }
+ }
+}
+
+func TestPragcgo(t *testing.T) {
+ type testStruct struct {
+ in string
+ want []string
+ }
+
+ var tests = []testStruct{
+ {`go:cgo_export_dynamic local`, []string{`cgo_export_dynamic`, `local`}},
+ {`go:cgo_export_dynamic local remote`, []string{`cgo_export_dynamic`, `local`, `remote`}},
+ {`go:cgo_export_dynamic local' remote'`, []string{`cgo_export_dynamic`, `local'`, `remote'`}},
+ {`go:cgo_export_static local`, []string{`cgo_export_static`, `local`}},
+ {`go:cgo_export_static local remote`, []string{`cgo_export_static`, `local`, `remote`}},
+ {`go:cgo_export_static local' remote'`, []string{`cgo_export_static`, `local'`, `remote'`}},
+ {`go:cgo_import_dynamic local`, []string{`cgo_import_dynamic`, `local`}},
+ {`go:cgo_import_dynamic local remote`, []string{`cgo_import_dynamic`, `local`, `remote`}},
+ {`go:cgo_import_static local`, []string{`cgo_import_static`, `local`}},
+ {`go:cgo_import_static local'`, []string{`cgo_import_static`, `local'`}},
+ {`go:cgo_dynamic_linker "/path/"`, []string{`cgo_dynamic_linker`, `/path/`}},
+ {`go:cgo_dynamic_linker "/p ath/"`, []string{`cgo_dynamic_linker`, `/p ath/`}},
+ {`go:cgo_ldflag "arg"`, []string{`cgo_ldflag`, `arg`}},
+ {`go:cgo_ldflag "a rg"`, []string{`cgo_ldflag`, `a rg`}},
+ }
+
+ if runtime.GOOS != "aix" {
+ tests = append(tests, []testStruct{
+ {`go:cgo_import_dynamic local remote "library"`, []string{`cgo_import_dynamic`, `local`, `remote`, `library`}},
+ {`go:cgo_import_dynamic local' remote' "lib rary"`, []string{`cgo_import_dynamic`, `local'`, `remote'`, `lib rary`}},
+ }...)
+ } else {
+ // cgo_import_dynamic with a library is slightly different on AIX
+ // as the library field must follow the pattern [libc.a/object.o].
+ tests = append(tests, []testStruct{
+ {`go:cgo_import_dynamic local remote "lib.a/obj.o"`, []string{`cgo_import_dynamic`, `local`, `remote`, `lib.a/obj.o`}},
+ // This test must fail.
+ {`go:cgo_import_dynamic local' remote' "library"`, []string{`<unknown position>: usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`}},
+ }...)
+
+ }
+
+ var p noder
+ var nopos syntax.Pos
+ for _, tt := range tests {
+
+ p.err = make(chan syntax.Error)
+ gotch := make(chan [][]string, 1)
+ go func() {
+ p.pragcgobuf = nil
+ p.pragcgo(nopos, tt.in)
+ if p.pragcgobuf != nil {
+ gotch <- p.pragcgobuf
+ }
+ }()
+
+ select {
+ case e := <-p.err:
+ want := tt.want[0]
+ if e.Error() != want {
+ t.Errorf("pragcgo(%q) = %q; want %q", tt.in, e, want)
+ continue
+ }
+ case got := <-gotch:
+ want := [][]string{tt.want}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("pragcgo(%q) = %q; want %q", tt.in, got, want)
+ continue
+ }
+ }
+
+ }
+}
diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go
new file mode 100644
index 0000000..f5667f5
--- /dev/null
+++ b/src/cmd/compile/internal/noder/linker.go
@@ -0,0 +1,349 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "internal/buildcfg"
+ "internal/pkgbits"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+)
+
+// This file implements the unified IR linker, which combines the
+// local package's stub data with imported package data to produce a
+// complete export data file. It also rewrites the compiler's
+// extension data sections based on the results of compilation (e.g.,
+// the function inlining cost and linker symbol index assignments).
+//
+// TODO(mdempsky): Using the name "linker" here is confusing, because
+// readers are likely to mistake references to it for cmd/link. But
+// there's a shortage of good names for "something that combines
+// multiple parts into a cohesive whole"... e.g., "assembler" and
+// "compiler" are also already taken.
+
+// TODO(mdempsky): Should linker go into pkgbits? Probably the
+// low-level linking details can be moved there, but the logic for
+// handling extension data needs to stay in the compiler.
+
+// A linker combines a package's stub export data with any referenced
+// elements from imported packages into a single, self-contained
+// export data file.
+type linker struct {
+ pw pkgbits.PkgEncoder
+
+ pkgs map[string]pkgbits.Index
+ decls map[*types.Sym]pkgbits.Index
+ bodies map[*types.Sym]pkgbits.Index
+}
+
+// relocAll ensures that all elements specified by pr and relocs are
+// copied into the output export data file, and returns the
+// corresponding indices in the output.
+func (l *linker) relocAll(pr *pkgReader, relocs []pkgbits.RelocEnt) []pkgbits.RelocEnt {
+ res := make([]pkgbits.RelocEnt, len(relocs))
+ for i, rent := range relocs {
+ rent.Idx = l.relocIdx(pr, rent.Kind, rent.Idx)
+ res[i] = rent
+ }
+ return res
+}
+
+// relocIdx ensures a single element is copied into the output export
+// data file, and returns the corresponding index in the output.
+func (l *linker) relocIdx(pr *pkgReader, k pkgbits.RelocKind, idx pkgbits.Index) pkgbits.Index {
+ assert(pr != nil)
+
+ absIdx := pr.AbsIdx(k, idx)
+
+ if newidx := pr.newindex[absIdx]; newidx != 0 {
+ return ^newidx
+ }
+
+ var newidx pkgbits.Index
+ switch k {
+ case pkgbits.RelocString:
+ newidx = l.relocString(pr, idx)
+ case pkgbits.RelocPkg:
+ newidx = l.relocPkg(pr, idx)
+ case pkgbits.RelocObj:
+ newidx = l.relocObj(pr, idx)
+
+ default:
+ // Generic relocations.
+ //
+ // TODO(mdempsky): Deduplicate more sections? In fact, I think
+ // every section could be deduplicated. This would also be easier
+ // if we do external relocations.
+
+ w := l.pw.NewEncoderRaw(k)
+ l.relocCommon(pr, &w, k, idx)
+ newidx = w.Idx
+ }
+
+ pr.newindex[absIdx] = ^newidx
+
+ return newidx
+}
+
+// relocString copies the specified string from pr into the output
+// export data file, deduplicating it against other strings.
+func (l *linker) relocString(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
+ return l.pw.StringIdx(pr.StringIdx(idx))
+}
+
+// relocPkg copies the specified package from pr into the output
+// export data file, rewriting its import path to match how it was
+// imported.
+//
+// TODO(mdempsky): Since CL 391014, we already have the compilation
+// unit's import path, so there should be no need to rewrite packages
+// anymore.
+func (l *linker) relocPkg(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
+ path := pr.PeekPkgPath(idx)
+
+ if newidx, ok := l.pkgs[path]; ok {
+ return newidx
+ }
+
+ r := pr.NewDecoder(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef)
+ w := l.pw.NewEncoder(pkgbits.RelocPkg, pkgbits.SyncPkgDef)
+ l.pkgs[path] = w.Idx
+
+ // TODO(mdempsky): We end up leaving an empty string reference here
+ // from when the package was originally written as "". Probably not
+ // a big deal, but a little annoying. Maybe relocating
+ // cross-references in place is the way to go after all.
+ w.Relocs = l.relocAll(pr, r.Relocs)
+
+ _ = r.String() // original path
+ w.String(path)
+
+ io.Copy(&w.Data, &r.Data)
+
+ return w.Flush()
+}
+
+// relocObj copies the specified object from pr into the output export
+// data file, rewriting its compiler-private extension data (e.g.,
+// adding inlining cost and escape analysis results for functions).
+func (l *linker) relocObj(pr *pkgReader, idx pkgbits.Index) pkgbits.Index {
+ path, name, tag := pr.PeekObj(idx)
+ sym := types.NewPkg(path, "").Lookup(name)
+
+ if newidx, ok := l.decls[sym]; ok {
+ return newidx
+ }
+
+ if tag == pkgbits.ObjStub && path != "builtin" && path != "unsafe" {
+ pri, ok := objReader[sym]
+ if !ok {
+ base.Fatalf("missing reader for %q.%v", path, name)
+ }
+ assert(ok)
+
+ pr = pri.pr
+ idx = pri.idx
+
+ path2, name2, tag2 := pr.PeekObj(idx)
+ sym2 := types.NewPkg(path2, "").Lookup(name2)
+ assert(sym == sym2)
+ assert(tag2 != pkgbits.ObjStub)
+ }
+
+ w := l.pw.NewEncoderRaw(pkgbits.RelocObj)
+ wext := l.pw.NewEncoderRaw(pkgbits.RelocObjExt)
+ wname := l.pw.NewEncoderRaw(pkgbits.RelocName)
+ wdict := l.pw.NewEncoderRaw(pkgbits.RelocObjDict)
+
+ l.decls[sym] = w.Idx
+ assert(wext.Idx == w.Idx)
+ assert(wname.Idx == w.Idx)
+ assert(wdict.Idx == w.Idx)
+
+ l.relocCommon(pr, &w, pkgbits.RelocObj, idx)
+ l.relocCommon(pr, &wname, pkgbits.RelocName, idx)
+ l.relocCommon(pr, &wdict, pkgbits.RelocObjDict, idx)
+
+ // Generic types and functions won't have definitions, and imported
+ // objects may not either.
+ obj, _ := sym.Def.(*ir.Name)
+ local := sym.Pkg == types.LocalPkg
+
+ if local && obj != nil {
+ wext.Sync(pkgbits.SyncObject1)
+ switch tag {
+ case pkgbits.ObjFunc:
+ l.relocFuncExt(&wext, obj)
+ case pkgbits.ObjType:
+ l.relocTypeExt(&wext, obj)
+ case pkgbits.ObjVar:
+ l.relocVarExt(&wext, obj)
+ }
+ wext.Flush()
+ } else {
+ l.relocCommon(pr, &wext, pkgbits.RelocObjExt, idx)
+ }
+
+ // Check if we need to export the inline bodies for functions and
+ // methods.
+ if obj != nil {
+ if obj.Op() == ir.ONAME && obj.Class == ir.PFUNC {
+ l.exportBody(obj, local)
+ }
+
+ if obj.Op() == ir.OTYPE && !obj.Alias() {
+ if typ := obj.Type(); !typ.IsInterface() {
+ for _, method := range typ.Methods() {
+ l.exportBody(method.Nname.(*ir.Name), local)
+ }
+ }
+ }
+ }
+
+ return w.Idx
+}
+
+// exportBody exports the given function or method's body, if
+// appropriate. local indicates whether it's a local function or
+// method available on a locally declared type. (Due to cross-package
+// type aliases, a method may be imported, but still available on a
+// locally declared type.)
+func (l *linker) exportBody(obj *ir.Name, local bool) {
+ assert(obj.Op() == ir.ONAME && obj.Class == ir.PFUNC)
+
+ fn := obj.Func
+ if fn.Inl == nil {
+ return // not inlinable anyway
+ }
+
+ // As a simple heuristic, if the function was declared in this
+ // package or we inlined it somewhere in this package, then we'll
+ // (re)export the function body. This isn't perfect, but seems
+ // reasonable in practice. In particular, it has the nice property
+ // that in the worst case, adding a blank import ensures the
+ // function body is available for inlining.
+ //
+ // TODO(mdempsky): Reimplement the reachable method crawling logic
+ // from typecheck/crawler.go.
+ exportBody := local || fn.Inl.HaveDcl
+ if !exportBody {
+ return
+ }
+
+ sym := obj.Sym()
+ if _, ok := l.bodies[sym]; ok {
+ // Due to type aliases, we might visit methods multiple times.
+ base.AssertfAt(obj.Type().Recv() != nil, obj.Pos(), "expected method: %v", obj)
+ return
+ }
+
+ pri, ok := bodyReaderFor(fn)
+ assert(ok)
+ l.bodies[sym] = l.relocIdx(pri.pr, pkgbits.RelocBody, pri.idx)
+}
+
+// relocCommon copies the specified element from pr into w,
+// recursively relocating any referenced elements as well.
+func (l *linker) relocCommon(pr *pkgReader, w *pkgbits.Encoder, k pkgbits.RelocKind, idx pkgbits.Index) {
+ r := pr.NewDecoderRaw(k, idx)
+ w.Relocs = l.relocAll(pr, r.Relocs)
+ io.Copy(&w.Data, &r.Data)
+ w.Flush()
+}
+
+func (l *linker) pragmaFlag(w *pkgbits.Encoder, pragma ir.PragmaFlag) {
+ w.Sync(pkgbits.SyncPragma)
+ w.Int(int(pragma))
+}
+
+func (l *linker) relocFuncExt(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncFuncExt)
+
+ l.pragmaFlag(w, name.Func.Pragma)
+ l.linkname(w, name)
+
+ if buildcfg.GOARCH == "wasm" {
+ if name.Func.WasmImport != nil {
+ w.String(name.Func.WasmImport.Module)
+ w.String(name.Func.WasmImport.Name)
+ } else {
+ w.String("")
+ w.String("")
+ }
+ }
+
+ // Relocated extension data.
+ w.Bool(true)
+
+ // Record definition ABI so cross-ABI calls can be direct.
+ // This is important for the performance of calling some
+ // common functions implemented in assembly (e.g., bytealg).
+ w.Uint64(uint64(name.Func.ABI))
+
+ // Escape analysis.
+ for _, f := range name.Type().RecvParams() {
+ w.String(f.Note)
+ }
+
+ if inl := name.Func.Inl; w.Bool(inl != nil) {
+ w.Len(int(inl.Cost))
+ w.Bool(inl.CanDelayResults)
+ if buildcfg.Experiment.NewInliner {
+ w.String(inl.Properties)
+ }
+ }
+
+ w.Sync(pkgbits.SyncEOF)
+}
+
+func (l *linker) relocTypeExt(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncTypeExt)
+
+ typ := name.Type()
+
+ l.pragmaFlag(w, name.Pragma())
+
+ // For type T, export the index of type descriptor symbols of T and *T.
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ))
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
+
+ if typ.Kind() != types.TINTER {
+ for _, method := range typ.Methods() {
+ l.relocFuncExt(w, method.Nname.(*ir.Name))
+ }
+ }
+}
+
+func (l *linker) relocVarExt(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncVarExt)
+ l.linkname(w, name)
+}
+
+func (l *linker) linkname(w *pkgbits.Encoder, name *ir.Name) {
+ w.Sync(pkgbits.SyncLinkname)
+
+ linkname := name.Sym().Linkname
+ if !l.lsymIdx(w, linkname, name.Linksym()) {
+ w.String(linkname)
+ }
+}
+
+func (l *linker) lsymIdx(w *pkgbits.Encoder, linkname string, lsym *obj.LSym) bool {
+ if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" {
+ w.Int64(-1)
+ return false
+ }
+
+ // For a defined symbol, export its index.
+ // For re-exporting an imported symbol, pass its index through.
+ w.Int64(int64(lsym.SymIdx))
+ return true
+}
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
new file mode 100644
index 0000000..1652dc6
--- /dev/null
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -0,0 +1,449 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "errors"
+ "fmt"
+ "internal/buildcfg"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+)
+
+func LoadPackage(filenames []string) {
+ base.Timer.Start("fe", "parse")
+
+ // Limit the number of simultaneously open files.
+ sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
+
+ noders := make([]*noder, len(filenames))
+ for i := range noders {
+ p := noder{
+ err: make(chan syntax.Error),
+ }
+ noders[i] = &p
+ }
+
+ // Move the entire syntax processing logic into a separate goroutine to avoid blocking on the "sem".
+ go func() {
+ for i, filename := range filenames {
+ filename := filename
+ p := noders[i]
+ sem <- struct{}{}
+ go func() {
+ defer func() { <-sem }()
+ defer close(p.err)
+ fbase := syntax.NewFileBase(filename)
+
+ f, err := os.Open(filename)
+ if err != nil {
+ p.error(syntax.Error{Msg: err.Error()})
+ return
+ }
+ defer f.Close()
+
+ p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error
+ }()
+ }
+ }()
+
+ var lines uint
+ var m posMap
+ for _, p := range noders {
+ for e := range p.err {
+ base.ErrorfAt(m.makeXPos(e.Pos), 0, "%s", e.Msg)
+ }
+ if p.file == nil {
+ base.ErrorExit()
+ }
+ lines += p.file.EOF.Line()
+ }
+ base.Timer.AddEvent(int64(lines), "lines")
+
+ unified(m, noders)
+}
+
+// trimFilename returns the "trimmed" filename of b, which is the
+// absolute filename after applying -trimpath processing. This
+// filename form is suitable for use in object files and export data.
+//
+// If b's filename has already been trimmed (i.e., because it was read
+// in from an imported package's export data), then the filename is
+// returned unchanged.
+func trimFilename(b *syntax.PosBase) string {
+ filename := b.Filename()
+ if !b.Trimmed() {
+ dir := ""
+ if b.IsFileBase() {
+ dir = base.Ctxt.Pathname
+ }
+ filename = objabi.AbsFile(dir, filename, base.Flag.TrimPath)
+ }
+ return filename
+}
+
+// noder transforms package syntax's AST into a Node tree.
+type noder struct {
+ file *syntax.File
+ linknames []linkname
+ pragcgobuf [][]string
+ err chan syntax.Error
+}
+
+// linkname records a //go:linkname directive.
+type linkname struct {
+ pos syntax.Pos
+ local string
+ remote string
+}
+
+var unOps = [...]ir.Op{
+ syntax.Recv: ir.ORECV,
+ syntax.Mul: ir.ODEREF,
+ syntax.And: ir.OADDR,
+
+ syntax.Not: ir.ONOT,
+ syntax.Xor: ir.OBITNOT,
+ syntax.Add: ir.OPLUS,
+ syntax.Sub: ir.ONEG,
+}
+
+var binOps = [...]ir.Op{
+ syntax.OrOr: ir.OOROR,
+ syntax.AndAnd: ir.OANDAND,
+
+ syntax.Eql: ir.OEQ,
+ syntax.Neq: ir.ONE,
+ syntax.Lss: ir.OLT,
+ syntax.Leq: ir.OLE,
+ syntax.Gtr: ir.OGT,
+ syntax.Geq: ir.OGE,
+
+ syntax.Add: ir.OADD,
+ syntax.Sub: ir.OSUB,
+ syntax.Or: ir.OOR,
+ syntax.Xor: ir.OXOR,
+
+ syntax.Mul: ir.OMUL,
+ syntax.Div: ir.ODIV,
+ syntax.Rem: ir.OMOD,
+ syntax.And: ir.OAND,
+ syntax.AndNot: ir.OANDNOT,
+ syntax.Shl: ir.OLSH,
+ syntax.Shr: ir.ORSH,
+}
+
+// error is called concurrently if files are parsed concurrently.
+func (p *noder) error(err error) {
+ p.err <- err.(syntax.Error)
+}
+
+// pragmas that are allowed in the std lib, but don't have
+// a syntax.Pragma value (see lex.go) associated with them.
+var allowedStdPragmas = map[string]bool{
+ "go:cgo_export_static": true,
+ "go:cgo_export_dynamic": true,
+ "go:cgo_import_static": true,
+ "go:cgo_import_dynamic": true,
+ "go:cgo_ldflag": true,
+ "go:cgo_dynamic_linker": true,
+ "go:embed": true,
+ "go:generate": true,
+}
+
+// *pragmas is the value stored in a syntax.pragmas during parsing.
+type pragmas struct {
+ Flag ir.PragmaFlag // collected bits
+ Pos []pragmaPos // position of each individual flag
+ Embeds []pragmaEmbed
+ WasmImport *WasmImport
+}
+
+// WasmImport stores metadata associated with the //go:wasmimport pragma
+type WasmImport struct {
+ Pos syntax.Pos
+ Module string
+ Name string
+}
+
+type pragmaPos struct {
+ Flag ir.PragmaFlag
+ Pos syntax.Pos
+}
+
+type pragmaEmbed struct {
+ Pos syntax.Pos
+ Patterns []string
+}
+
+func (p *noder) checkUnusedDuringParse(pragma *pragmas) {
+ for _, pos := range pragma.Pos {
+ if pos.Flag&pragma.Flag != 0 {
+ p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
+ }
+ }
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
+ }
+ }
+ if pragma.WasmImport != nil {
+ p.error(syntax.Error{Pos: pragma.WasmImport.Pos, Msg: "misplaced go:wasmimport directive"})
+ }
+}
+
+// pragma is called concurrently if files are parsed concurrently.
+func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
+ pragma, _ := old.(*pragmas)
+ if pragma == nil {
+ pragma = new(pragmas)
+ }
+
+ if text == "" {
+ // unused pragma; only called with old != nil.
+ p.checkUnusedDuringParse(pragma)
+ return nil
+ }
+
+ if strings.HasPrefix(text, "line ") {
+ // line directives are handled by syntax package
+ panic("unreachable")
+ }
+
+ if !blankLine {
+ // directive must be on line by itself
+ p.error(syntax.Error{Pos: pos, Msg: "misplaced compiler directive"})
+ return pragma
+ }
+
+ switch {
+ case strings.HasPrefix(text, "go:wasmimport "):
+ f := strings.Fields(text)
+ if len(f) != 3 {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:wasmimport importmodule importname"})
+ break
+ }
+
+ if buildcfg.GOARCH == "wasm" {
+ // Only actually use them if we're compiling to WASM though.
+ pragma.WasmImport = &WasmImport{
+ Pos: pos,
+ Module: f[1],
+ Name: f[2],
+ }
+ }
+ case strings.HasPrefix(text, "go:linkname "):
+ f := strings.Fields(text)
+ if !(2 <= len(f) && len(f) <= 3) {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:linkname localname [linkname]"})
+ break
+ }
+ // The second argument is optional. If omitted, we use
+ // the default object symbol name for this and
+ // linkname only serves to mark this symbol as
+ // something that may be referenced via the object
+ // symbol name from another package.
+ var target string
+ if len(f) == 3 {
+ target = f[2]
+ } else if base.Ctxt.Pkgpath != "" {
+ // Use the default object symbol name if the
+ // user didn't provide one.
+ target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + f[1]
+ } else {
+ panic("missing pkgpath")
+ }
+ p.linknames = append(p.linknames, linkname{pos, f[1], target})
+
+ case text == "go:embed", strings.HasPrefix(text, "go:embed "):
+ args, err := parseGoEmbed(text[len("go:embed"):])
+ if err != nil {
+ p.error(syntax.Error{Pos: pos, Msg: err.Error()})
+ }
+ if len(args) == 0 {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
+ break
+ }
+ pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args})
+
+ case strings.HasPrefix(text, "go:cgo_import_dynamic "):
+ // This is permitted for general use because Solaris
+ // code relies on it in golang.org/x/sys/unix and others.
+ fields := pragmaFields(text)
+ if len(fields) >= 4 {
+ lib := strings.Trim(fields[3], `"`)
+ if lib != "" && !safeArg(lib) && !isCgoGeneratedFile(pos) {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("invalid library name %q in cgo_import_dynamic directive", lib)})
+ }
+ p.pragcgo(pos, text)
+ pragma.Flag |= pragmaFlag("go:cgo_import_dynamic")
+ break
+ }
+ fallthrough
+ case strings.HasPrefix(text, "go:cgo_"):
+ // For security, we disallow //go:cgo_* directives other
+ // than cgo_import_dynamic outside cgo-generated files.
+ // Exception: they are allowed in the standard library, for runtime and syscall.
+ if !isCgoGeneratedFile(pos) && !base.Flag.Std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
+ }
+ p.pragcgo(pos, text)
+ fallthrough // because of //go:cgo_unsafe_args
+ default:
+ verb := text
+ if i := strings.Index(text, " "); i >= 0 {
+ verb = verb[:i]
+ }
+ flag := pragmaFlag(verb)
+ const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec
+ if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
+ }
+ if flag == ir.UintptrKeepAlive && !base.Flag.Std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is only allowed in the standard library", verb)})
+ }
+ if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
+ }
+ pragma.Flag |= flag
+ pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos})
+ }
+
+ return pragma
+}
+
+// isCgoGeneratedFile reports whether pos is in a file
+// generated by cgo, which is to say a file with name
+// beginning with "_cgo_". Such files are allowed to
+// contain cgo directives, and for security reasons
+// (primarily misuse of linker flags), other files are not.
+// See golang.org/issue/23672.
+// Note that cmd/go ignores files whose names start with underscore,
+// so the only _cgo_ files we will see from cmd/go are generated by cgo.
+// It's easy to bypass this check by calling the compiler directly;
+// we only protect against uses by cmd/go.
+func isCgoGeneratedFile(pos syntax.Pos) bool {
+ // We need the absolute file, independent of //line directives,
+ // so we call pos.Base().Pos().
+ return strings.HasPrefix(filepath.Base(trimFilename(pos.Base().Pos().Base())), "_cgo_")
+}
+
+// safeArg reports whether arg is a "safe" command-line argument,
+// meaning that when it appears in a command-line, it probably
+// doesn't have some special meaning other than its own name.
+// This is copied from SafeArg in cmd/go/internal/load/pkg.go.
+func safeArg(name string) bool {
+ if name == "" {
+ return false
+ }
+ c := name[0]
+ return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// go/build/read.go also processes these strings and contains similar logic.
+func parseGoEmbed(args string) ([]string, error) {
+ var list []string
+ for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
+ var path string
+ Switch:
+ switch args[0] {
+ default:
+ i := len(args)
+ for j, c := range args {
+ if unicode.IsSpace(c) {
+ i = j
+ break
+ }
+ }
+ path = args[:i]
+ args = args[i:]
+
+ case '`':
+ i := strings.Index(args[1:], "`")
+ if i < 0 {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ path = args[1 : 1+i]
+ args = args[1+i+1:]
+
+ case '"':
+ i := 1
+ for ; i < len(args); i++ {
+ if args[i] == '\\' {
+ i++
+ continue
+ }
+ if args[i] == '"' {
+ q, err := strconv.Unquote(args[:i+1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+ }
+ path = q
+ args = args[i+1:]
+ break Switch
+ }
+ }
+ if i >= len(args) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+
+ if args != "" {
+ r, _ := utf8.DecodeRuneInString(args)
+ if !unicode.IsSpace(r) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+ list = append(list, path)
+ }
+ return list, nil
+}
+
+// A function named init is a special case.
+// It is called by the initialization before main is run.
+// To make it unique within a package and also uncallable,
+// the name, normally "pkg.init", is altered to "pkg.init.0".
+var renameinitgen int
+
+func Renameinit() *types.Sym {
+ s := typecheck.LookupNum("init.", renameinitgen)
+ renameinitgen++
+ return s
+}
+
+func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
+ switch {
+ case !haveEmbed:
+ return errors.New("go:embed only allowed in Go files that import \"embed\"")
+ case len(decl.NameList) > 1:
+ return errors.New("go:embed cannot apply to multiple vars")
+ case decl.Values != nil:
+ return errors.New("go:embed cannot apply to var with initializer")
+ case decl.Type == nil:
+ // Should not happen, since Values == nil now.
+ return errors.New("go:embed cannot apply to var without type")
+ case withinFunc:
+ return errors.New("go:embed cannot apply to var inside func")
+ case !types.AllowsGoVersion(1, 16):
+ return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang)
+
+ default:
+ return nil
+ }
+}
diff --git a/src/cmd/compile/internal/noder/posmap.go b/src/cmd/compile/internal/noder/posmap.go
new file mode 100644
index 0000000..439daf4
--- /dev/null
+++ b/src/cmd/compile/internal/noder/posmap.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/internal/src"
+)
+
+// A posMap handles mapping from syntax.Pos to src.XPos.
+type posMap struct {
+ bases map[*syntax.PosBase]*src.PosBase
+ cache struct {
+ last *syntax.PosBase
+ base *src.PosBase
+ }
+}
+
+type poser interface{ Pos() syntax.Pos }
+type ender interface{ End() syntax.Pos }
+
+func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) }
+func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) }
+
+func (m *posMap) makeXPos(pos syntax.Pos) src.XPos {
+ // Predeclared objects (e.g., the result parameter for error.Error)
+ // do not have a position.
+ if !pos.IsKnown() {
+ return src.NoXPos
+ }
+
+ posBase := m.makeSrcPosBase(pos.Base())
+ return base.Ctxt.PosTable.XPos(src.MakePos(posBase, pos.Line(), pos.Col()))
+}
+
+// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase.
+func (m *posMap) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
+ // fast path: most likely PosBase hasn't changed
+ if m.cache.last == b0 {
+ return m.cache.base
+ }
+
+ b1, ok := m.bases[b0]
+ if !ok {
+ fn := b0.Filename()
+ absfn := trimFilename(b0)
+
+ if b0.IsFileBase() {
+ b1 = src.NewFileBase(fn, absfn)
+ } else {
+ // line directive base
+ p0 := b0.Pos()
+ p0b := p0.Base()
+ if p0b == b0 {
+ panic("infinite recursion in makeSrcPosBase")
+ }
+ p1 := src.MakePos(m.makeSrcPosBase(p0b), p0.Line(), p0.Col())
+ b1 = src.NewLinePragmaBase(p1, fn, absfn, b0.Line(), b0.Col())
+ }
+ if m.bases == nil {
+ m.bases = make(map[*syntax.PosBase]*src.PosBase)
+ }
+ m.bases[b0] = b1
+ }
+
+ // update cache
+ m.cache.last = b0
+ m.cache.base = b1
+
+ return b1
+}
diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go
new file mode 100644
index 0000000..dd9cec9
--- /dev/null
+++ b/src/cmd/compile/internal/noder/quirks.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/syntax"
+)
+
+// typeExprEndPos returns the position that noder would leave base.Pos
+// after parsing the given type expression.
+//
+// Deprecated: This function exists to emulate position semantics from
+// Go 1.17, necessary for compatibility with the backend DWARF
+// generation logic that assigns variables to their appropriate scope.
+func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
+ for {
+ switch expr := expr0.(type) {
+ case *syntax.Name:
+ return expr.Pos()
+ case *syntax.SelectorExpr:
+ return expr.X.Pos()
+
+ case *syntax.ParenExpr:
+ expr0 = expr.X
+
+ case *syntax.Operation:
+ assert(expr.Op == syntax.Mul)
+ assert(expr.Y == nil)
+ expr0 = expr.X
+
+ case *syntax.ArrayType:
+ expr0 = expr.Elem
+ case *syntax.ChanType:
+ expr0 = expr.Elem
+ case *syntax.DotsType:
+ expr0 = expr.Elem
+ case *syntax.MapType:
+ expr0 = expr.Value
+ case *syntax.SliceType:
+ expr0 = expr.Elem
+
+ case *syntax.StructType:
+ return expr.Pos()
+
+ case *syntax.InterfaceType:
+ expr0 = lastFieldType(expr.MethodList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+
+ case *syntax.FuncType:
+ expr0 = lastFieldType(expr.ResultList)
+ if expr0 == nil {
+ expr0 = lastFieldType(expr.ParamList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+ }
+
+ case *syntax.IndexExpr: // explicit type instantiation
+ targs := syntax.UnpackListExpr(expr.Index)
+ expr0 = targs[len(targs)-1]
+
+ default:
+ panic(fmt.Sprintf("%s: unexpected type expression %v", expr.Pos(), syntax.String(expr)))
+ }
+ }
+}
+
+func lastFieldType(fields []*syntax.Field) syntax.Expr {
+ if len(fields) == 0 {
+ return nil
+ }
+ return fields[len(fields)-1].Type
+}
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
new file mode 100644
index 0000000..2dddd20
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -0,0 +1,3941 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "encoding/hex"
+ "fmt"
+ "go/constant"
+ "internal/buildcfg"
+ "internal/pkgbits"
+ "path/filepath"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/inline/interleaved"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/notsha256"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// This file implements cmd/compile backend's reader for the Unified
+// IR export data.
+
+// A pkgReader reads Unified IR export data.
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ // Indices for encoded things; lazily populated as needed.
+ //
+ // Note: Objects (i.e., ir.Names) are lazily instantiated by
+ // populating their types.Sym.Def; see objReader below.
+
+ posBases []*src.PosBase
+ pkgs []*types.Pkg
+ typs []*types.Type
+
+ // offset for rewriting the given (absolute!) index into the output,
+ // but bitwise inverted so we can detect if we're missing the entry
+ // or not.
+ newindex []pkgbits.Index
+}
+
+func newPkgReader(pr pkgbits.PkgDecoder) *pkgReader {
+ return &pkgReader{
+ PkgDecoder: pr,
+
+ posBases: make([]*src.PosBase, pr.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Pkg, pr.NumElems(pkgbits.RelocPkg)),
+ typs: make([]*types.Type, pr.NumElems(pkgbits.RelocType)),
+
+ newindex: make([]pkgbits.Index, pr.TotalElems()),
+ }
+}
+
+// A pkgReaderIndex compactly identifies an index (and its
+// corresponding dictionary) within a package's export data.
+type pkgReaderIndex struct {
+ pr *pkgReader
+ idx pkgbits.Index
+ dict *readerDict
+ methodSym *types.Sym
+
+ synthetic func(pos src.XPos, r *reader)
+}
+
+func (pri pkgReaderIndex) asReader(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *reader {
+ if pri.synthetic != nil {
+ return &reader{synthetic: pri.synthetic}
+ }
+
+ r := pri.pr.newReader(k, pri.idx, marker)
+ r.dict = pri.dict
+ r.methodSym = pri.methodSym
+ return r
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+// A reader provides APIs for reading an individual element.
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+
+ // TODO(mdempsky): The state below is all specific to reading
+ // function bodies. It probably makes sense to split it out
+ // separately so that it doesn't take up space in every reader
+ // instance.
+
+ curfn *ir.Func
+ locals []*ir.Name
+ closureVars []*ir.Name
+
+ // funarghack is used during inlining to suppress setting
+ // Field.Nname to the inlined copies of the parameters. This is
+ // necessary because we reuse the same types.Type as the original
+ // function, and most of the compiler still relies on field.Nname to
+ // find parameters/results.
+ funarghack bool
+
+ // methodSym is the name of method's name, if reading a method.
+ // It's nil if reading a normal function or closure body.
+ methodSym *types.Sym
+
+ // dictParam is the .dict param, if any.
+ dictParam *ir.Name
+
+ // synthetic is a callback function to construct a synthetic
+ // function body. It's used for creating the bodies of function
+ // literals used to curry arguments to shaped functions.
+ synthetic func(pos src.XPos, r *reader)
+
+ // scopeVars is a stack tracking the number of variables declared in
+ // the current function at the moment each open scope was opened.
+ scopeVars []int
+ marker dwarfgen.ScopeMarker
+ lastCloseScopePos src.XPos
+
+ // === details for handling inline body expansion ===
+
+ // If we're reading in a function body because of inlining, this is
+ // the call that we're inlining for.
+ inlCaller *ir.Func
+ inlCall *ir.CallExpr
+ inlFunc *ir.Func
+ inlTreeIndex int
+ inlPosBases map[*src.PosBase]*src.PosBase
+
+ // suppressInlPos tracks whether position base rewriting for
+ // inlining should be suppressed. See funcLit.
+ suppressInlPos int
+
+ delayResults bool
+
+ // Label to return to.
+ retlabel *types.Sym
+}
+
+// A readerDict represents an instantiated "compile-time dictionary,"
+// used for resolving any derived types needed for instantiating a
+// generic object.
+//
+// A compile-time dictionary can either be "shaped" or "non-shaped."
+// Shaped compile-time dictionaries are only used for instantiating
+// shaped type definitions and function bodies, while non-shaped
+// compile-time dictionaries are used for instantiating runtime
+// dictionaries.
+type readerDict struct {
+ shaped bool // whether this is a shaped dictionary
+
+ // baseSym is the symbol for the object this dictionary belongs to.
+ // If the object is an instantiated function or defined type, then
+ // baseSym is the mangled symbol, including any type arguments.
+ baseSym *types.Sym
+
+ // For non-shaped dictionaries, shapedObj is a reference to the
+ // corresponding shaped object (always a function or defined type).
+ shapedObj *ir.Name
+
+ // targs holds the implicit and explicit type arguments in use for
+ // reading the current object. For example:
+ //
+ // func F[T any]() {
+ // type X[U any] struct { t T; u U }
+ // var _ X[string]
+ // }
+ //
+ // var _ = F[int]
+ //
+ // While instantiating F[int], we need to in turn instantiate
+ // X[string]. [int] and [string] are explicit type arguments for F
+ // and X, respectively; but [int] is also the implicit type
+ // arguments for X.
+ //
+ // (As an analogy to function literals, explicits are the function
+ // literal's formal parameters, while implicits are variables
+ // captured by the function literal.)
+ targs []*types.Type
+
+ // implicits counts how many of types within targs are implicit type
+ // arguments; the rest are explicit.
+ implicits int
+
+ derived []derivedInfo // reloc index of the derived type's descriptor
+ derivedTypes []*types.Type // slice of previously computed derived types
+
+ // These slices correspond to entries in the runtime dictionary.
+ typeParamMethodExprs []readerMethodExprInfo
+ subdicts []objInfo
+ rtypes []typeInfo
+ itabs []itabInfo
+}
+
+type readerMethodExprInfo struct {
+ typeParamIdx int
+ method *types.Sym
+}
+
+func setType(n ir.Node, typ *types.Type) {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+}
+
+func setValue(name *ir.Name, val constant.Value) {
+ name.SetVal(val)
+ name.Defn = nil
+}
+
+// @@@ Positions
+
+// pos reads a position from the bitstream.
+func (r *reader) pos() src.XPos {
+ return base.Ctxt.PosTable.XPos(r.pos0())
+}
+
+// origPos reads a position from the bitstream, and returns both the
+// original raw position and an inlining-adjusted position.
+func (r *reader) origPos() (origPos, inlPos src.XPos) {
+ r.suppressInlPos++
+ origPos = r.pos()
+ r.suppressInlPos--
+ inlPos = r.inlPos(origPos)
+ return
+}
+
+func (r *reader) pos0() src.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return src.NoPos
+ }
+
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return src.MakePos(posBase, line, col)
+}
+
+// posBase reads a position base from the bitstream.
+func (r *reader) posBase() *src.PosBase {
+ return r.inlPosBase(r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)))
+}
+
+// posBaseIdx returns the specified position base, reading it first if
+// needed.
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) *src.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+ var b *src.PosBase
+
+ absFilename := r.String()
+ filename := absFilename
+
+ // For build artifact stability, the export data format only
+ // contains the "absolute" filename as returned by objabi.AbsFile.
+ // However, some tests (e.g., test/run.go's asmcheck tests) expect
+ // to see the full, original filename printed out. Re-expanding
+ // "$GOROOT" to buildcfg.GOROOT is a close-enough approximation to
+ // satisfy this.
+ //
+ // The export data format only ever uses slash paths
+ // (for cross-operating-system reproducible builds),
+ // but error messages need to use native paths (backslash on Windows)
+ // as if they had been specified on the command line.
+ // (The go command always passes native paths to the compiler.)
+ const dollarGOROOT = "$GOROOT"
+ if buildcfg.GOROOT != "" && strings.HasPrefix(filename, dollarGOROOT) {
+ filename = filepath.FromSlash(buildcfg.GOROOT + filename[len(dollarGOROOT):])
+ }
+
+ if r.Bool() {
+ b = src.NewFileBase(filename, absFilename)
+ } else {
+ pos := r.pos0()
+ line := r.Uint()
+ col := r.Uint()
+ b = src.NewLinePragmaBase(pos, filename, absFilename, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+// inlPosBase returns the inlining-adjusted src.PosBase corresponding
+// to oldBase, which must be a non-inlined position. When not
+// inlining, this is just oldBase.
+func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase {
+ if index := oldBase.InliningIndex(); index >= 0 {
+ base.Fatalf("oldBase %v already has inlining index %v", oldBase, index)
+ }
+
+ if r.inlCall == nil || r.suppressInlPos != 0 {
+ return oldBase
+ }
+
+ if newBase, ok := r.inlPosBases[oldBase]; ok {
+ return newBase
+ }
+
+ newBase := src.NewInliningBase(oldBase, r.inlTreeIndex)
+ r.inlPosBases[oldBase] = newBase
+ return newBase
+}
+
+// inlPos returns the inlining-adjusted src.XPos corresponding to
+// xpos, which must be a non-inlined position. When not inlining, this
+// is just xpos.
+func (r *reader) inlPos(xpos src.XPos) src.XPos {
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ pos.SetBase(r.inlPosBase(pos.Base()))
+ return base.Ctxt.PosTable.XPos(pos)
+}
+
+// @@@ Packages
+
+// pkg reads a package reference from the bitstream.
+func (r *reader) pkg() *types.Pkg {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+// pkgIdx returns the specified package from the export data, reading
+// it first if needed.
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Pkg {
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+// doPkg reads a package definition from the bitstream.
+func (r *reader) doPkg() *types.Pkg {
+ path := r.String()
+ switch path {
+ case "":
+ path = r.p.PkgPath()
+ case "builtin":
+ return types.BuiltinPkg
+ case "unsafe":
+ return types.UnsafePkg
+ }
+
+ name := r.String()
+
+ pkg := types.NewPkg(path, "")
+
+ if pkg.Name == "" {
+ pkg.Name = name
+ } else {
+ base.Assertf(pkg.Name == name, "package %q has name %q, but want %q", pkg.Path, pkg.Name, name)
+ }
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() *types.Type {
+ return r.typWrapped(true)
+}
+
+// typWrapped is like typ, but allows suppressing generation of
+// unnecessary wrappers as a compile-time optimization.
+func (r *reader) typWrapped(wrapped bool) *types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict, wrapped)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+// typListIdx returns a list of the specified types, resolving derived
+// types within the given dictionary.
+func (pr *pkgReader) typListIdx(infos []typeInfo, dict *readerDict) []*types.Type {
+ typs := make([]*types.Type, len(infos))
+ for i, info := range infos {
+ typs[i] = pr.typIdx(info, dict, true)
+ }
+ return typs
+}
+
+// typIdx returns the specified type. If info specifies a derived
+// type, it's resolved within the given dictionary. If wrapped is
+// true, then method wrappers will be generated, if appropriate.
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *types.Type {
+ idx := info.idx
+ var where **types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // For recursive type declarations involving interfaces and aliases,
+ // above r.doTyp() call may have already set pr.typs[idx], so just
+ // double check and return the type.
+ //
+ // Example:
+ //
+ // type F = func(I)
+ //
+ // type I interface {
+ // m(F)
+ // }
+ //
+ // The writer writes data types in following index order:
+ //
+ // 0: func(I)
+ // 1: I
+ // 2: interface{m(func(I))}
+ //
+ // The reader resolves it in following index order:
+ //
+ // 0 -> 1 -> 2 -> 0 -> 1
+ //
+ // and can divide in logically 2 steps:
+ //
+ // - 0 -> 1 : first time the reader reach type I,
+ // it creates new named type with symbol I.
+ //
+ // - 2 -> 0 -> 1: the reader ends up reaching symbol I again,
+ // now the symbol I was setup in above step, so
+ // the reader just return the named type.
+ //
+ // Now, the functions called return, the pr.typs looks like below:
+ //
+ // - 0 -> 1 -> 2 -> 0 : [<T> I <T>]
+ // - 0 -> 1 -> 2 : [func(I) I <T>]
+ // - 0 -> 1 : [func(I) I interface { "".m(func("".I)) }]
+ //
+ // The idx 1, corresponding with type I was resolved successfully
+ // after r.doTyp() call.
+
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ if wrapped {
+ // Only cache if we're adding wrappers, so that other callers that
+ // find a cached type know it was wrapped.
+ *where = typ
+
+ r.needWrapper(typ)
+ }
+
+ if !typ.IsUntyped() {
+ types.CheckSize(typ)
+ }
+
+ return typ
+}
+
+func (r *reader) doTyp() *types.Type {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected type: %v", tag))
+
+ case pkgbits.TypeBasic:
+ return *basics[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj := r.obj()
+ assert(obj.Op() == ir.OTYPE)
+ return obj.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.targs[r.Len()]
+
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
+ return types.NewArray(r.typ(), len)
+ case pkgbits.TypeChan:
+ dir := dirs[r.Len()]
+ return types.NewChan(r.typ(), dir)
+ case pkgbits.TypeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types.NewPtr(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil)
+ case pkgbits.TypeSlice:
+ return types.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) unionType() *types.Type {
+ // In the types1 universe, we only need to handle value types.
+ // Impure interfaces (i.e., interfaces with non-trivial type sets
+ // like "int | string") can only appear as type parameter bounds,
+ // and this is enforced by the types2 type checker.
+ //
+ // However, type unions can still appear in pure interfaces if the
+ // type union is equivalent to "any". E.g., typeparam/issue52124.go
+ // declares variables with the type "interface { any | int }".
+ //
+ // To avoid needing to represent type unions in types1 (since we
+ // don't have any uses for that today anyway), we simply fold them
+ // to "any".
+
+ // TODO(mdempsky): Restore consistency check to make sure folding to
+ // "any" is safe. This is unfortunately tricky, because a pure
+ // interface can reference impure interfaces too, including
+ // cyclically (#60117).
+ if false {
+ pure := false
+ for i, n := 0, r.Len(); i < n; i++ {
+ _ = r.Bool() // tilde
+ term := r.typ()
+ if term.IsEmptyInterface() {
+ pure = true
+ }
+ }
+ if !pure {
+ base.Fatalf("impure type set used in value type")
+ }
+ }
+
+ return types.Types[types.TINTER]
+}
+
+func (r *reader) interfaceType() *types.Type {
+ nmethods, nembeddeds := r.Len(), r.Len()
+ implicit := nmethods == 0 && nembeddeds == 1 && r.Bool()
+ assert(!implicit) // implicit interfaces only appear in constraints
+
+ fields := make([]*types.Field, nmethods+nembeddeds)
+ methods, embeddeds := fields[:nmethods], fields[nmethods:]
+
+ for i := range methods {
+ methods[i] = types.NewField(r.pos(), r.selector(), r.signature(types.FakeRecv()))
+ }
+ for i := range embeddeds {
+ embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ())
+ }
+
+ if len(fields) == 0 {
+ return types.Types[types.TINTER] // empty interface
+ }
+ return types.NewInterface(fields)
+}
+
+func (r *reader) structType() *types.Type {
+ fields := make([]*types.Field, r.Len())
+ for i := range fields {
+ field := types.NewField(r.pos(), r.selector(), r.typ())
+ field.Note = r.String()
+ if r.Bool() {
+ field.Embedded = 1
+ }
+ fields[i] = field
+ }
+ return types.NewStruct(fields)
+}
+
+func (r *reader) signature(recv *types.Field) *types.Type {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ if r.Bool() { // variadic
+ params[len(params)-1].SetIsDDD(true)
+ }
+
+ return types.NewSignature(recv, params, results)
+}
+
+func (r *reader) params() []*types.Field {
+ r.Sync(pkgbits.SyncParams)
+ params := make([]*types.Field, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+ return params
+}
+
+func (r *reader) param() *types.Field {
+ r.Sync(pkgbits.SyncParam)
+ return types.NewField(r.pos(), r.localIdent(), r.typ())
+}
+
+// @@@ Objects
+
+// objReader maps qualified identifiers (represented as *types.Sym) to
+// a pkgReader and corresponding index that can be used for reading
+// that object's definition.
+var objReader = map[*types.Sym]pkgReaderIndex{}
+
+// obj reads an instantiated object reference from the bitstream.
+func (r *reader) obj() ir.Node {
+ return r.p.objInstIdx(r.objInfo(), r.dict, false)
+}
+
+// objInfo reads an instantiated object reference from the bitstream
+// and returns the encoded reference to it, without instantiating it.
+func (r *reader) objInfo() objInfo {
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool()) // TODO(mdempsky): Remove; was derived func inst.
+ idx := r.Reloc(pkgbits.RelocObj)
+
+ explicits := make([]typeInfo, r.Len())
+ for i := range explicits {
+ explicits[i] = r.typInfo()
+ }
+
+ return objInfo{idx, explicits}
+}
+
+// objInstIdx returns the encoded, instantiated object. If shaped is
+// true, then the shaped variant of the object is returned instead.
+func (pr *pkgReader) objInstIdx(info objInfo, dict *readerDict, shaped bool) ir.Node {
+ explicits := pr.typListIdx(info.explicits, dict)
+
+ var implicits []*types.Type
+ if dict != nil {
+ implicits = dict.targs
+ }
+
+ return pr.objIdx(info.idx, implicits, explicits, shaped)
+}
+
+// objIdx returns the specified object, instantiated with the given
+// type arguments, if any.
+// If shaped is true, then the shaped variant of the object is returned
+// instead.
+func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) ir.Node {
+ n, err := pr.objIdxMayFail(idx, implicits, explicits, shaped)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ return n
+}
+
+// objIdxMayFail is equivalent to objIdx, but returns an error rather than
+// failing the build if this object requires type arguments and the incorrect
+// number of type arguments were passed.
+//
+// Other sources of internal failure (such as duplicate definitions) still fail
+// the build.
+func (pr *pkgReader) objIdxMayFail(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (ir.Node, error) {
+ rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+ _, sym := rname.qualifiedIdent()
+ tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+
+ if tag == pkgbits.ObjStub {
+ assert(!sym.IsBlank())
+ switch sym.Pkg {
+ case types.BuiltinPkg, types.UnsafePkg:
+ return sym.Def.(ir.Node), nil
+ }
+ if pri, ok := objReader[sym]; ok {
+ return pri.pr.objIdxMayFail(pri.idx, nil, explicits, shaped)
+ }
+ if sym.Pkg.Path == "runtime" {
+ return typecheck.LookupRuntime(sym.Name), nil
+ }
+ base.Fatalf("unresolved stub: %v", sym)
+ }
+
+ dict, err := pr.objDictIdx(sym, idx, implicits, explicits, shaped)
+ if err != nil {
+ return nil, err
+ }
+
+ sym = dict.baseSym
+ if !sym.IsBlank() && sym.Def != nil {
+ return sym.Def.(*ir.Name), nil
+ }
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ rext := pr.newReader(pkgbits.RelocObjExt, idx, pkgbits.SyncObject1)
+
+ r.dict = dict
+ rext.dict = dict
+
+ do := func(op ir.Op, hasTParams bool) *ir.Name {
+ pos := r.pos()
+ setBasePos(pos)
+ if hasTParams {
+ r.typeParamNames()
+ }
+
+ name := ir.NewDeclNameAt(pos, op, sym)
+ name.Class = ir.PEXTERN // may be overridden later
+ if !sym.IsBlank() {
+ if sym.Def != nil {
+ base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+ }
+ assert(sym.Def == nil)
+ sym.Def = name
+ }
+ return name
+ }
+
+ switch tag {
+ default:
+ panic("unexpected object")
+
+ case pkgbits.ObjAlias:
+ name := do(ir.OTYPE, false)
+ setType(name, r.typ())
+ name.SetAlias(true)
+ return name, nil
+
+ case pkgbits.ObjConst:
+ name := do(ir.OLITERAL, false)
+ typ := r.typ()
+ val := FixValue(typ, r.Value())
+ setType(name, typ)
+ setValue(name, val)
+ return name, nil
+
+ case pkgbits.ObjFunc:
+ if sym.Name == "init" {
+ sym = Renameinit()
+ }
+
+ npos := r.pos()
+ setBasePos(npos)
+ r.typeParamNames()
+ typ := r.signature(nil)
+ fpos := r.pos()
+
+ fn := ir.NewFunc(fpos, npos, sym, typ)
+ name := fn.Nname
+ if !sym.IsBlank() {
+ if sym.Def != nil {
+ base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+ }
+ assert(sym.Def == nil)
+ sym.Def = name
+ }
+
+ if r.hasTypeParams() {
+ name.Func.SetDupok(true)
+ if r.dict.shaped {
+ setType(name, shapeSig(name.Func, r.dict))
+ } else {
+ todoDicts = append(todoDicts, func() {
+ r.dict.shapedObj = pr.objIdx(idx, implicits, explicits, true).(*ir.Name)
+ })
+ }
+ }
+
+ rext.funcExt(name, nil)
+ return name, nil
+
+ case pkgbits.ObjType:
+ name := do(ir.OTYPE, true)
+ typ := types.NewNamed(name)
+ setType(name, typ)
+ if r.hasTypeParams() && r.dict.shaped {
+ typ.SetHasShape(true)
+ }
+
+ // Important: We need to do this before SetUnderlying.
+ rext.typeExt(name)
+
+ // We need to defer CheckSize until we've called SetUnderlying to
+ // handle recursive types.
+ types.DeferCheckSize()
+ typ.SetUnderlying(r.typWrapped(false))
+ types.ResumeCheckSize()
+
+ if r.hasTypeParams() && !r.dict.shaped {
+ todoDicts = append(todoDicts, func() {
+ r.dict.shapedObj = pr.objIdx(idx, implicits, explicits, true).(*ir.Name)
+ })
+ }
+
+ methods := make([]*types.Field, r.Len())
+ for i := range methods {
+ methods[i] = r.method(rext)
+ }
+ if len(methods) != 0 {
+ typ.SetMethods(methods)
+ }
+
+ if !r.dict.shaped {
+ r.needWrapper(typ)
+ }
+
+ return name, nil
+
+ case pkgbits.ObjVar:
+ name := do(ir.ONAME, false)
+ setType(name, r.typ())
+ rext.varExt(name)
+ return name, nil
+ }
+}
+
+func (dict *readerDict) mangle(sym *types.Sym) *types.Sym {
+ if !dict.hasTypeParams() {
+ return sym
+ }
+
+ // If sym is a locally defined generic type, we need the suffix to
+ // stay at the end after mangling so that types/fmt.go can strip it
+ // out again when writing the type's runtime descriptor (#54456).
+ base, suffix := types.SplitVargenSuffix(sym.Name)
+
+ var buf strings.Builder
+ buf.WriteString(base)
+ buf.WriteByte('[')
+ for i, targ := range dict.targs {
+ if i > 0 {
+ if i == dict.implicits {
+ buf.WriteByte(';')
+ } else {
+ buf.WriteByte(',')
+ }
+ }
+ buf.WriteString(targ.LinkString())
+ }
+ buf.WriteByte(']')
+ buf.WriteString(suffix)
+ return sym.Pkg.Lookup(buf.String())
+}
+
+// shapify returns the shape type for targ.
+//
+// If basic is true, then the type argument is used to instantiate a
+// type parameter whose constraint is a basic interface.
+func shapify(targ *types.Type, basic bool) *types.Type {
+ if targ.Kind() == types.TFORW {
+ if targ.IsFullyInstantiated() {
+ // For recursive instantiated type argument, it may still be a TFORW
+ // when shapifying happens. If we don't have targ's underlying type,
+ // shapify won't work. The worst case is we end up not reusing code
+ // optimally in some tricky cases.
+ if base.Debug.Shapify != 0 {
+ base.Warn("skipping shaping of recursive type %v", targ)
+ }
+ if targ.HasShape() {
+ return targ
+ }
+ } else {
+ base.Fatalf("%v is missing its underlying type", targ)
+ }
+ }
+
+ // When a pointer type is used to instantiate a type parameter
+ // constrained by a basic interface, we know the pointer's element
+ // type can't matter to the generated code. In this case, we can use
+ // an arbitrary pointer type as the shape type. (To match the
+ // non-unified frontend, we use `*byte`.)
+ //
+ // Otherwise, we simply use the type's underlying type as its shape.
+ //
+ // TODO(mdempsky): It should be possible to do much more aggressive
+ // shaping still; e.g., collapsing all pointer-shaped types into a
+ // common type, collapsing scalars of the same size/alignment into a
+ // common type, recursively shaping the element types of composite
+ // types, and discarding struct field names and tags. However, we'll
+ // need to start tracking how type parameters are actually used to
+ // implement some of these optimizations.
+ under := targ.Underlying()
+ if basic && targ.IsPtr() && !targ.Elem().NotInHeap() {
+ under = types.NewPtr(types.Types[types.TUINT8])
+ }
+
+ // Hash long type names to bound symbol name length seen by users,
+ // particularly for large protobuf structs (#65030).
+ uls := under.LinkString()
+ if base.Debug.MaxShapeLen != 0 &&
+ len(uls) > base.Debug.MaxShapeLen {
+ h := notsha256.Sum256([]byte(uls))
+ uls = hex.EncodeToString(h[:])
+ }
+
+ sym := types.ShapePkg.Lookup(uls)
+ if sym.Def == nil {
+ name := ir.NewDeclNameAt(under.Pos(), ir.OTYPE, sym)
+ typ := types.NewNamed(name)
+ typ.SetUnderlying(under)
+ sym.Def = typed(typ, name)
+ }
+ res := sym.Def.Type()
+ assert(res.IsShape())
+ assert(res.HasShape())
+ return res
+}
+
+// objDictIdx reads and returns the specified object dictionary.
+func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (*readerDict, error) {
+ r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+
+ dict := readerDict{
+ shaped: shaped,
+ }
+
+ nimplicits := r.Len()
+ nexplicits := r.Len()
+
+ if nimplicits > len(implicits) || nexplicits != len(explicits) {
+ return nil, fmt.Errorf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
+ }
+
+ dict.targs = append(implicits[:nimplicits:nimplicits], explicits...)
+ dict.implicits = nimplicits
+
+ // Within the compiler, we can just skip over the type parameters.
+ for range dict.targs[dict.implicits:] {
+ // Skip past bounds without actually evaluating them.
+ r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]*types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ }
+
+ // Runtime dictionary information; private to the compiler.
+
+ // If any type argument is already shaped, then we're constructing a
+ // shaped object, even if not explicitly requested (i.e., calling
+ // objIdx with shaped==true). This can happen with instantiating
+ // types that are referenced within a function body.
+ for _, targ := range dict.targs {
+ if targ.HasShape() {
+ dict.shaped = true
+ break
+ }
+ }
+
+ // And if we're constructing a shaped object, then shapify all type
+ // arguments.
+ for i, targ := range dict.targs {
+ basic := r.Bool()
+ if dict.shaped {
+ dict.targs[i] = shapify(targ, basic)
+ }
+ }
+
+ dict.baseSym = dict.mangle(sym)
+
+ dict.typeParamMethodExprs = make([]readerMethodExprInfo, r.Len())
+ for i := range dict.typeParamMethodExprs {
+ typeParamIdx := r.Len()
+ method := r.selector()
+
+ dict.typeParamMethodExprs[i] = readerMethodExprInfo{typeParamIdx, method}
+ }
+
+ dict.subdicts = make([]objInfo, r.Len())
+ for i := range dict.subdicts {
+ dict.subdicts[i] = r.objInfo()
+ }
+
+ dict.rtypes = make([]typeInfo, r.Len())
+ for i := range dict.rtypes {
+ dict.rtypes[i] = r.typInfo()
+ }
+
+ dict.itabs = make([]itabInfo, r.Len())
+ for i := range dict.itabs {
+ dict.itabs[i] = itabInfo{typ: r.typInfo(), iface: r.typInfo()}
+ }
+
+ return &dict, nil
+}
+
+func (r *reader) typeParamNames() {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ for range r.dict.targs[r.dict.implicits:] {
+ r.pos()
+ r.localIdent()
+ }
+}
+
+func (r *reader) method(rext *reader) *types.Field {
+ r.Sync(pkgbits.SyncMethod)
+ npos := r.pos()
+ sym := r.selector()
+ r.typeParamNames()
+ recv := r.param()
+ typ := r.signature(recv)
+
+ fpos := r.pos()
+ fn := ir.NewFunc(fpos, npos, ir.MethodSym(recv.Type, sym), typ)
+ name := fn.Nname
+
+ if r.hasTypeParams() {
+ name.Func.SetDupok(true)
+ if r.dict.shaped {
+ typ = shapeSig(name.Func, r.dict)
+ setType(name, typ)
+ }
+ }
+
+ rext.funcExt(name, sym)
+
+ meth := types.NewField(name.Func.Pos(), sym, typ)
+ meth.Nname = name
+ meth.SetNointerface(name.Func.Pragma&ir.Nointerface != 0)
+
+ return meth
+}
+
+func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.Sync(pkgbits.SyncSym)
+ pkg = r.pkg()
+ if name := r.String(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) localIdent() *types.Sym {
+ r.Sync(pkgbits.SyncLocalIdent)
+ pkg := r.pkg()
+ if name := r.String(); name != "" {
+ return pkg.Lookup(name)
+ }
+ return nil
+}
+
+func (r *reader) selector() *types.Sym {
+ r.Sync(pkgbits.SyncSelector)
+ pkg := r.pkg()
+ name := r.String()
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ return pkg.Lookup(name)
+}
+
+func (r *reader) hasTypeParams() bool {
+ return r.dict.hasTypeParams()
+}
+
+func (dict *readerDict) hasTypeParams() bool {
+ return dict != nil && len(dict.targs) != 0
+}
+
+// @@@ Compiler extensions
+
+func (r *reader) funcExt(name *ir.Name, method *types.Sym) {
+ r.Sync(pkgbits.SyncFuncExt)
+
+ fn := name.Func
+
+ // XXX: Workaround because linker doesn't know how to copy Pos.
+ if !fn.Pos().IsKnown() {
+ fn.SetPos(name.Pos())
+ }
+
+ // Normally, we only compile local functions, which saves redundant compilation work.
+ // n.Defn is not nil for local functions, and is nil for imported function. But for
+ // generic functions, we might have an instantiation that no other package has seen before.
+ // So we need to be conservative and compile it again.
+ //
+ // That's why name.Defn is set here, so ir.VisitFuncsBottomUp can analyze function.
+ // TODO(mdempsky,cuonglm): find a cleaner way to handle this.
+ if name.Sym().Pkg == types.LocalPkg || r.hasTypeParams() {
+ name.Defn = fn
+ }
+
+ fn.Pragma = r.pragmaFlag()
+ r.linkname(name)
+
+ if buildcfg.GOARCH == "wasm" {
+ xmod := r.String()
+ xname := r.String()
+
+ if xmod != "" && xname != "" {
+ fn.WasmImport = &ir.WasmImport{
+ Module: xmod,
+ Name: xname,
+ }
+ }
+ }
+
+ if r.Bool() {
+ assert(name.Defn == nil)
+
+ fn.ABI = obj.ABI(r.Uint64())
+
+ // Escape analysis.
+ for _, f := range name.Type().RecvParams() {
+ f.Note = r.String()
+ }
+
+ if r.Bool() {
+ fn.Inl = &ir.Inline{
+ Cost: int32(r.Len()),
+ CanDelayResults: r.Bool(),
+ }
+ if buildcfg.Experiment.NewInliner {
+ fn.Inl.Properties = r.String()
+ }
+ }
+ } else {
+ r.addBody(name.Func, method)
+ }
+ r.Sync(pkgbits.SyncEOF)
+}
+
+func (r *reader) typeExt(name *ir.Name) {
+ r.Sync(pkgbits.SyncTypeExt)
+
+ typ := name.Type()
+
+ if r.hasTypeParams() {
+ // Set "RParams" (really type arguments here, not parameters) so
+ // this type is treated as "fully instantiated". This ensures the
+ // type descriptor is written out as DUPOK and method wrappers are
+ // generated even for imported types.
+ var targs []*types.Type
+ targs = append(targs, r.dict.targs...)
+ typ.SetRParams(targs)
+ }
+
+ name.SetPragma(r.pragmaFlag())
+
+ typecheck.SetBaseTypeIndex(typ, r.Int64(), r.Int64())
+}
+
+func (r *reader) varExt(name *ir.Name) {
+ r.Sync(pkgbits.SyncVarExt)
+ r.linkname(name)
+}
+
+func (r *reader) linkname(name *ir.Name) {
+ assert(name.Op() == ir.ONAME)
+ r.Sync(pkgbits.SyncLinkname)
+
+ if idx := r.Int64(); idx >= 0 {
+ lsym := name.Linksym()
+ lsym.SymIdx = int32(idx)
+ lsym.Set(obj.AttrIndexed, true)
+ } else {
+ name.Sym().Linkname = r.String()
+ }
+}
+
+func (r *reader) pragmaFlag() ir.PragmaFlag {
+ r.Sync(pkgbits.SyncPragma)
+ return ir.PragmaFlag(r.Int())
+}
+
+// @@@ Function bodies
+
+// bodyReader tracks where the serialized IR for a local or imported,
+// generic function's body can be found.
+var bodyReader = map[*ir.Func]pkgReaderIndex{}
+
+// importBodyReader tracks where the serialized IR for an imported,
+// static (i.e., non-generic) function body can be read.
+var importBodyReader = map[*types.Sym]pkgReaderIndex{}
+
+// bodyReaderFor returns the pkgReaderIndex for reading fn's
+// serialized IR, and whether one was found.
+func bodyReaderFor(fn *ir.Func) (pri pkgReaderIndex, ok bool) {
+ if fn.Nname.Defn != nil {
+ pri, ok = bodyReader[fn]
+ base.AssertfAt(ok, base.Pos, "must have bodyReader for %v", fn) // must always be available
+ } else {
+ pri, ok = importBodyReader[fn.Sym()]
+ }
+ return
+}
+
+// todoDicts holds the list of dictionaries that still need their
+// runtime dictionary objects constructed.
+var todoDicts []func()
+
+// todoBodies holds the list of function bodies that still need to be
+// constructed.
+var todoBodies []*ir.Func
+
+// addBody reads a function body reference from the element bitstream,
+// and associates it with fn.
+func (r *reader) addBody(fn *ir.Func, method *types.Sym) {
+ // addBody should only be called for local functions or imported
+ // generic functions; see comment in funcExt.
+ assert(fn.Nname.Defn != nil)
+
+ idx := r.Reloc(pkgbits.RelocBody)
+
+ pri := pkgReaderIndex{r.p, idx, r.dict, method, nil}
+ bodyReader[fn] = pri
+
+ if r.curfn == nil {
+ todoBodies = append(todoBodies, fn)
+ return
+ }
+
+ pri.funcBody(fn)
+}
+
+func (pri pkgReaderIndex) funcBody(fn *ir.Func) {
+ r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
+ r.funcBody(fn)
+}
+
+// funcBody reads a function body definition from the element
+// bitstream, and populates fn with it.
+func (r *reader) funcBody(fn *ir.Func) {
+ r.curfn = fn
+ r.closureVars = fn.ClosureVars
+ if len(r.closureVars) != 0 && r.hasTypeParams() {
+ r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit
+ }
+
+ ir.WithFunc(fn, func() {
+ r.declareParams()
+
+ if r.syntheticBody(fn.Pos()) {
+ return
+ }
+
+ if !r.Bool() {
+ return
+ }
+
+ body := r.stmts()
+ if body == nil {
+ body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(src.NoXPos, nil))}
+ }
+ fn.Body = body
+ fn.Endlineno = r.pos()
+ })
+
+ r.marker.WriteTo(fn)
+}
+
+// syntheticBody adds a synthetic body to r.curfn if appropriate, and
+// reports whether it did.
+func (r *reader) syntheticBody(pos src.XPos) bool {
+ if r.synthetic != nil {
+ r.synthetic(pos, r)
+ return true
+ }
+
+ // If this function has type parameters and isn't shaped, then we
+ // just tail call its corresponding shaped variant.
+ if r.hasTypeParams() && !r.dict.shaped {
+ r.callShaped(pos)
+ return true
+ }
+
+ return false
+}
+
+// callShaped emits a tail call to r.shapedFn, passing along the
+// arguments to the current function.
+func (r *reader) callShaped(pos src.XPos) {
+ shapedObj := r.dict.shapedObj
+ assert(shapedObj != nil)
+
+ var shapedFn ir.Node
+ if r.methodSym == nil {
+ // Instantiating a generic function; shapedObj is the shaped
+ // function itself.
+ assert(shapedObj.Op() == ir.ONAME && shapedObj.Class == ir.PFUNC)
+ shapedFn = shapedObj
+ } else {
+ // Instantiating a generic type's method; shapedObj is the shaped
+ // type, so we need to select it's corresponding method.
+ shapedFn = shapedMethodExpr(pos, shapedObj, r.methodSym)
+ }
+
+ params := r.syntheticArgs()
+
+ // Construct the arguments list: receiver (if any), then runtime
+ // dictionary, and finally normal parameters.
+ //
+ // Note: For simplicity, shaped methods are added as normal methods
+ // on their shaped types. So existing code (e.g., packages ir and
+ // typecheck) expects the shaped type to appear as the receiver
+ // parameter (or first parameter, as a method expression). Hence
+ // putting the dictionary parameter after that is the least invasive
+ // solution at the moment.
+ var args ir.Nodes
+ if r.methodSym != nil {
+ args.Append(params[0])
+ params = params[1:]
+ }
+ args.Append(typecheck.Expr(ir.NewAddrExpr(pos, r.p.dictNameOf(r.dict))))
+ args.Append(params...)
+
+ r.syntheticTailCall(pos, shapedFn, args)
+}
+
+// syntheticArgs returns the recvs and params arguments passed to the
+// current function.
+func (r *reader) syntheticArgs() ir.Nodes {
+ sig := r.curfn.Nname.Type()
+ return ir.ToNodes(r.curfn.Dcl[:sig.NumRecvs()+sig.NumParams()])
+}
+
+// syntheticTailCall emits a tail call to fn, passing the given
+// arguments list.
+func (r *reader) syntheticTailCall(pos src.XPos, fn ir.Node, args ir.Nodes) {
+ // Mark the function as a wrapper so it doesn't show up in stack
+ // traces.
+ r.curfn.SetWrapper(true)
+
+ call := typecheck.Call(pos, fn, args, fn.Type().IsVariadic()).(*ir.CallExpr)
+
+ var stmt ir.Node
+ if fn.Type().NumResults() != 0 {
+ stmt = typecheck.Stmt(ir.NewReturnStmt(pos, []ir.Node{call}))
+ } else {
+ stmt = call
+ }
+ r.curfn.Body.Append(stmt)
+}
+
+// dictNameOf returns the runtime dictionary corresponding to dict.
+func (pr *pkgReader) dictNameOf(dict *readerDict) *ir.Name {
+ pos := base.AutogeneratedPos
+
+ // Check that we only instantiate runtime dictionaries with real types.
+ base.AssertfAt(!dict.shaped, pos, "runtime dictionary of shaped object %v", dict.baseSym)
+
+ sym := dict.baseSym.Pkg.Lookup(objabi.GlobalDictPrefix + "." + dict.baseSym.Name)
+ if sym.Def != nil {
+ return sym.Def.(*ir.Name)
+ }
+
+ name := ir.NewNameAt(pos, sym, dict.varType())
+ name.Class = ir.PEXTERN
+ sym.Def = name // break cycles with mutual subdictionaries
+
+ lsym := name.Linksym()
+ ot := 0
+
+ assertOffset := func(section string, offset int) {
+ base.AssertfAt(ot == offset*types.PtrSize, pos, "writing section %v at offset %v, but it should be at %v*%v", section, ot, offset, types.PtrSize)
+ }
+
+ assertOffset("type param method exprs", dict.typeParamMethodExprsOffset())
+ for _, info := range dict.typeParamMethodExprs {
+ typeParam := dict.targs[info.typeParamIdx]
+ method := typecheck.NewMethodExpr(pos, typeParam, info.method)
+
+ rsym := method.FuncName().Linksym()
+ assert(rsym.ABI() == obj.ABIInternal) // must be ABIInternal; see ir.OCFUNC in ssagen/ssa.go
+
+ ot = objw.SymPtr(lsym, ot, rsym, 0)
+ }
+
+ assertOffset("subdictionaries", dict.subdictsOffset())
+ for _, info := range dict.subdicts {
+ explicits := pr.typListIdx(info.explicits, dict)
+
+ // Careful: Due to subdictionary cycles, name may not be fully
+ // initialized yet.
+ name := pr.objDictName(info.idx, dict.targs, explicits)
+
+ ot = objw.SymPtr(lsym, ot, name.Linksym(), 0)
+ }
+
+ assertOffset("rtypes", dict.rtypesOffset())
+ for _, info := range dict.rtypes {
+ typ := pr.typIdx(info, dict, true)
+ ot = objw.SymPtr(lsym, ot, reflectdata.TypeLinksym(typ), 0)
+
+ // TODO(mdempsky): Double check this.
+ reflectdata.MarkTypeUsedInInterface(typ, lsym)
+ }
+
+ // For each (typ, iface) pair, we write the *runtime.itab pointer
+ // for the pair. For pairs that don't actually require an itab
+ // (i.e., typ is an interface, or iface is an empty interface), we
+ // write a nil pointer instead. This is wasteful, but rare in
+ // practice (e.g., instantiating a type parameter with an interface
+ // type).
+ assertOffset("itabs", dict.itabsOffset())
+ for _, info := range dict.itabs {
+ typ := pr.typIdx(info.typ, dict, true)
+ iface := pr.typIdx(info.iface, dict, true)
+
+ if !typ.IsInterface() && iface.IsInterface() && !iface.IsEmptyInterface() {
+ ot = objw.SymPtr(lsym, ot, reflectdata.ITabLsym(typ, iface), 0)
+ } else {
+ ot += types.PtrSize
+ }
+
+ // TODO(mdempsky): Double check this.
+ reflectdata.MarkTypeUsedInInterface(typ, lsym)
+ reflectdata.MarkTypeUsedInInterface(iface, lsym)
+ }
+
+ objw.Global(lsym, int32(ot), obj.DUPOK|obj.RODATA)
+
+ return name
+}
+
+// typeParamMethodExprsOffset returns the offset of the runtime
+// dictionary's type parameter method expressions section, in words.
+func (dict *readerDict) typeParamMethodExprsOffset() int {
+ return 0
+}
+
+// subdictsOffset returns the offset of the runtime dictionary's
+// subdictionary section, in words.
+func (dict *readerDict) subdictsOffset() int {
+ return dict.typeParamMethodExprsOffset() + len(dict.typeParamMethodExprs)
+}
+
+// rtypesOffset returns the offset of the runtime dictionary's rtypes
+// section, in words.
+func (dict *readerDict) rtypesOffset() int {
+ return dict.subdictsOffset() + len(dict.subdicts)
+}
+
+// itabsOffset returns the offset of the runtime dictionary's itabs
+// section, in words.
+func (dict *readerDict) itabsOffset() int {
+ return dict.rtypesOffset() + len(dict.rtypes)
+}
+
+// numWords returns the total number of words that comprise dict's
+// runtime dictionary variable.
+func (dict *readerDict) numWords() int64 {
+ return int64(dict.itabsOffset() + len(dict.itabs))
+}
+
+// varType returns the type of dict's runtime dictionary variable.
+func (dict *readerDict) varType() *types.Type {
+ return types.NewArray(types.Types[types.TUINTPTR], dict.numWords())
+}
+
+func (r *reader) declareParams() {
+ r.curfn.DeclareParams(!r.funarghack)
+
+ for _, name := range r.curfn.Dcl {
+ if name.Sym().Name == dictParamName {
+ r.dictParam = name
+ continue
+ }
+
+ r.addLocal(name)
+ }
+}
+
+func (r *reader) addLocal(name *ir.Name) {
+ if r.synthetic == nil {
+ r.Sync(pkgbits.SyncAddLocal)
+ if r.p.SyncMarkers() {
+ want := r.Int()
+ if have := len(r.locals); have != want {
+ base.FatalfAt(name.Pos(), "locals table has desynced")
+ }
+ }
+ r.varDictIndex(name)
+ }
+
+ r.locals = append(r.locals, name)
+}
+
+func (r *reader) useLocal() *ir.Name {
+ r.Sync(pkgbits.SyncUseObjLocal)
+ if r.Bool() {
+ return r.locals[r.Len()]
+ }
+ return r.closureVars[r.Len()]
+}
+
+func (r *reader) openScope() {
+ r.Sync(pkgbits.SyncOpenScope)
+ pos := r.pos()
+
+ if base.Flag.Dwarf {
+ r.scopeVars = append(r.scopeVars, len(r.curfn.Dcl))
+ r.marker.Push(pos)
+ }
+}
+
+func (r *reader) closeScope() {
+ r.Sync(pkgbits.SyncCloseScope)
+ r.lastCloseScopePos = r.pos()
+
+ r.closeAnotherScope()
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
+func (r *reader) closeAnotherScope() {
+ r.Sync(pkgbits.SyncCloseAnotherScope)
+
+ if base.Flag.Dwarf {
+ scopeVars := r.scopeVars[len(r.scopeVars)-1]
+ r.scopeVars = r.scopeVars[:len(r.scopeVars)-1]
+
+ // Quirkish: noder decides which scopes to keep before
+ // typechecking, whereas incremental typechecking during IR
+ // construction can result in new autotemps being allocated. To
+ // produce identical output, we ignore autotemps here for the
+ // purpose of deciding whether to retract the scope.
+ //
+ // This is important for net/http/fcgi, because it contains:
+ //
+ // var body io.ReadCloser
+ // if len(content) > 0 {
+ // body, req.pw = io.Pipe()
+ // } else { … }
+ //
+ // Notably, io.Pipe is inlinable, and inlining it introduces a ~R0
+ // variable at the call site.
+ //
+ // Noder does not preserve the scope where the io.Pipe() call
+ // resides, because it doesn't contain any declared variables in
+ // source. So the ~R0 variable ends up being assigned to the
+ // enclosing scope instead.
+ //
+ // However, typechecking this assignment also introduces
+ // autotemps, because io.Pipe's results need conversion before
+ // they can be assigned to their respective destination variables.
+ //
+ // TODO(mdempsky): We should probably just keep all scopes, and
+ // let dwarfgen take care of pruning them instead.
+ retract := true
+ for _, n := range r.curfn.Dcl[scopeVars:] {
+ if !n.AutoTemp() {
+ retract = false
+ break
+ }
+ }
+
+ if retract {
+ // no variables were declared in this scope, so we can retract it.
+ r.marker.Unpush()
+ } else {
+ r.marker.Pop(r.lastCloseScopePos)
+ }
+ }
+}
+
+// @@@ Statements
+
+func (r *reader) stmt() ir.Node {
+ return block(r.stmts())
+}
+
+func block(stmts []ir.Node) ir.Node {
+ switch len(stmts) {
+ case 0:
+ return nil
+ case 1:
+ return stmts[0]
+ default:
+ return ir.NewBlockStmt(stmts[0].Pos(), stmts)
+ }
+}
+
+func (r *reader) stmts() ir.Nodes {
+ assert(ir.CurFunc == r.curfn)
+ var res ir.Nodes
+
+ r.Sync(pkgbits.SyncStmts)
+ for {
+ tag := codeStmt(r.Code(pkgbits.SyncStmt1))
+ if tag == stmtEnd {
+ r.Sync(pkgbits.SyncStmtsEnd)
+ return res
+ }
+
+ if n := r.stmt1(tag, &res); n != nil {
+ res.Append(typecheck.Stmt(n))
+ }
+ }
+}
+
+func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
+ var label *types.Sym
+ if n := len(*out); n > 0 {
+ if ls, ok := (*out)[n-1].(*ir.LabelStmt); ok {
+ label = ls.Label
+ }
+ }
+
+ switch tag {
+ default:
+ panic("unexpected statement")
+
+ case stmtAssign:
+ pos := r.pos()
+ names, lhs := r.assignList()
+ rhs := r.multiExpr()
+
+ if len(rhs) == 0 {
+ for _, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, name))
+ out.Append(typecheck.Stmt(as))
+ }
+ return nil
+ }
+
+ if len(lhs) == 1 && len(rhs) == 1 {
+ n := ir.NewAssignStmt(pos, lhs[0], rhs[0])
+ n.Def = r.initDefn(n, names)
+ return n
+ }
+
+ n := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+ n.Def = r.initDefn(n, names)
+ return n
+
+ case stmtAssignOp:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ rhs := r.expr()
+ return ir.NewAssignOpStmt(pos, op, lhs, rhs)
+
+ case stmtIncDec:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewOne(pos, lhs.Type()))
+ n.IncDec = true
+ return n
+
+ case stmtBlock:
+ out.Append(r.blockStmt()...)
+ return nil
+
+ case stmtBranch:
+ pos := r.pos()
+ op := r.op()
+ sym := r.optLabel()
+ return ir.NewBranchStmt(pos, op, sym)
+
+ case stmtCall:
+ pos := r.pos()
+ op := r.op()
+ call := r.expr()
+ stmt := ir.NewGoDeferStmt(pos, op, call)
+ if op == ir.ODEFER {
+ x := r.optExpr()
+ if x != nil {
+ stmt.DeferAt = x.(ir.Expr)
+ }
+ }
+ return stmt
+
+ case stmtExpr:
+ return r.expr()
+
+ case stmtFor:
+ return r.forStmt(label)
+
+ case stmtIf:
+ return r.ifStmt()
+
+ case stmtLabel:
+ pos := r.pos()
+ sym := r.label()
+ return ir.NewLabelStmt(pos, sym)
+
+ case stmtReturn:
+ pos := r.pos()
+ results := r.multiExpr()
+ return ir.NewReturnStmt(pos, results)
+
+ case stmtSelect:
+ return r.selectStmt(label)
+
+ case stmtSend:
+ pos := r.pos()
+ ch := r.expr()
+ value := r.expr()
+ return ir.NewSendStmt(pos, ch, value)
+
+ case stmtSwitch:
+ return r.switchStmt(label)
+ }
+}
+
+func (r *reader) assignList() ([]*ir.Name, []ir.Node) {
+ lhs := make([]ir.Node, r.Len())
+ var names []*ir.Name
+
+ for i := range lhs {
+ expr, def := r.assign()
+ lhs[i] = expr
+ if def {
+ names = append(names, expr.(*ir.Name))
+ }
+ }
+
+ return names, lhs
+}
+
+// assign returns an assignee expression. It also reports whether the
+// returned expression is a newly declared variable.
+func (r *reader) assign() (ir.Node, bool) {
+ switch tag := codeAssign(r.Code(pkgbits.SyncAssign)); tag {
+ default:
+ panic("unhandled assignee expression")
+
+ case assignBlank:
+ return typecheck.AssignExpr(ir.BlankNode), false
+
+ case assignDef:
+ pos := r.pos()
+ setBasePos(pos) // test/fixedbugs/issue49767.go depends on base.Pos being set for the r.typ() call here, ugh
+ name := r.curfn.NewLocal(pos, r.localIdent(), r.typ())
+ r.addLocal(name)
+ return name, true
+
+ case assignExpr:
+ return r.expr(), false
+ }
+}
+
+func (r *reader) blockStmt() []ir.Node {
+ r.Sync(pkgbits.SyncBlockStmt)
+ r.openScope()
+ stmts := r.stmts()
+ r.closeScope()
+ return stmts
+}
+
+func (r *reader) forStmt(label *types.Sym) ir.Node {
+ r.Sync(pkgbits.SyncForStmt)
+
+ r.openScope()
+
+ if r.Bool() {
+ pos := r.pos()
+ rang := ir.NewRangeStmt(pos, nil, nil, nil, nil, false)
+ rang.Label = label
+
+ names, lhs := r.assignList()
+ if len(lhs) >= 1 {
+ rang.Key = lhs[0]
+ if len(lhs) >= 2 {
+ rang.Value = lhs[1]
+ }
+ }
+ rang.Def = r.initDefn(rang, names)
+
+ rang.X = r.expr()
+ if rang.X.Type().IsMap() {
+ rang.RType = r.rtype(pos)
+ }
+ if rang.Key != nil && !ir.IsBlank(rang.Key) {
+ rang.KeyTypeWord, rang.KeySrcRType = r.convRTTI(pos)
+ }
+ if rang.Value != nil && !ir.IsBlank(rang.Value) {
+ rang.ValueTypeWord, rang.ValueSrcRType = r.convRTTI(pos)
+ }
+
+ rang.Body = r.blockStmt()
+ rang.DistinctVars = r.Bool()
+ r.closeAnotherScope()
+
+ return rang
+ }
+
+ pos := r.pos()
+ init := r.stmt()
+ cond := r.optExpr()
+ post := r.stmt()
+ body := r.blockStmt()
+ perLoopVars := r.Bool()
+ r.closeAnotherScope()
+
+ if ir.IsConst(cond, constant.Bool) && !ir.BoolVal(cond) {
+ return init // simplify "for init; false; post { ... }" into "init"
+ }
+
+ stmt := ir.NewForStmt(pos, init, cond, post, body, perLoopVars)
+ stmt.Label = label
+ return stmt
+}
+
+func (r *reader) ifStmt() ir.Node {
+ r.Sync(pkgbits.SyncIfStmt)
+ r.openScope()
+ pos := r.pos()
+ init := r.stmts()
+ cond := r.expr()
+ staticCond := r.Int()
+ var then, els []ir.Node
+ if staticCond >= 0 {
+ then = r.blockStmt()
+ } else {
+ r.lastCloseScopePos = r.pos()
+ }
+ if staticCond <= 0 {
+ els = r.stmts()
+ }
+ r.closeAnotherScope()
+
+ if staticCond != 0 {
+ // We may have removed a dead return statement, which can trip up
+ // later passes (#62211). To avoid confusion, we instead flatten
+ // the if statement into a block.
+
+ if cond.Op() != ir.OLITERAL {
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, ir.BlankNode, cond))) // for side effects
+ }
+ init.Append(then...)
+ init.Append(els...)
+ return block(init)
+ }
+
+ n := ir.NewIfStmt(pos, cond, then, els)
+ n.SetInit(init)
+ return n
+}
+
+func (r *reader) selectStmt(label *types.Sym) ir.Node {
+ r.Sync(pkgbits.SyncSelectStmt)
+
+ pos := r.pos()
+ clauses := make([]*ir.CommClause, r.Len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ comm := r.stmt()
+ body := r.stmts()
+
+ // "case i = <-c: ..." may require an implicit conversion (e.g.,
+ // see fixedbugs/bug312.go). Currently, typecheck throws away the
+ // implicit conversion and relies on it being reinserted later,
+ // but that would lose any explicit RTTI operands too. To preserve
+ // RTTI, we rewrite this as "case tmp := <-c: i = tmp; ...".
+ if as, ok := comm.(*ir.AssignStmt); ok && as.Op() == ir.OAS && !as.Def {
+ if conv, ok := as.Y.(*ir.ConvExpr); ok && conv.Op() == ir.OCONVIFACE {
+ base.AssertfAt(conv.Implicit(), conv.Pos(), "expected implicit conversion: %v", conv)
+
+ recv := conv.X
+ base.AssertfAt(recv.Op() == ir.ORECV, recv.Pos(), "expected receive expression: %v", recv)
+
+ tmp := r.temp(pos, recv.Type())
+
+ // Replace comm with `tmp := <-c`.
+ tmpAs := ir.NewAssignStmt(pos, tmp, recv)
+ tmpAs.Def = true
+ tmpAs.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp))
+ comm = tmpAs
+
+ // Change original assignment to `i = tmp`, and prepend to body.
+ conv.X = tmp
+ body = append([]ir.Node{as}, body...)
+ }
+ }
+
+ // multiExpr will have desugared a comma-ok receive expression
+ // into a separate statement. However, the rest of the compiler
+ // expects comm to be the OAS2RECV statement itself, so we need to
+ // shuffle things around to fit that pattern.
+ if as2, ok := comm.(*ir.AssignListStmt); ok && as2.Op() == ir.OAS2 {
+ init := ir.TakeInit(as2.Rhs[0])
+ base.AssertfAt(len(init) == 1 && init[0].Op() == ir.OAS2RECV, as2.Pos(), "unexpected assignment: %+v", as2)
+
+ comm = init[0]
+ body = append([]ir.Node{as2}, body...)
+ }
+
+ clauses[i] = ir.NewCommStmt(pos, comm, body)
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ n := ir.NewSelectStmt(pos, clauses)
+ n.Label = label
+ return n
+}
+
+func (r *reader) switchStmt(label *types.Sym) ir.Node {
+ r.Sync(pkgbits.SyncSwitchStmt)
+
+ r.openScope()
+ pos := r.pos()
+ init := r.stmt()
+
+ var tag ir.Node
+ var ident *ir.Ident
+ var iface *types.Type
+ if r.Bool() {
+ pos := r.pos()
+ if r.Bool() {
+ ident = ir.NewIdent(r.pos(), r.localIdent())
+ }
+ x := r.expr()
+ iface = x.Type()
+ tag = ir.NewTypeSwitchGuard(pos, ident, x)
+ } else {
+ tag = r.optExpr()
+ }
+
+ clauses := make([]*ir.CaseClause, r.Len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ var cases, rtypes []ir.Node
+ if iface != nil {
+ cases = make([]ir.Node, r.Len())
+ if len(cases) == 0 {
+ cases = nil // TODO(mdempsky): Unclear if this matters.
+ }
+ for i := range cases {
+ if r.Bool() { // case nil
+ cases[i] = typecheck.Expr(types.BuiltinPkg.Lookup("nil").Def.(*ir.NilExpr))
+ } else {
+ cases[i] = r.exprType()
+ }
+ }
+ } else {
+ cases = r.exprList()
+
+ // For `switch { case any(true): }` (e.g., issue 3980 in
+ // test/switch.go), the backend still creates a mixed bool/any
+ // comparison, and we need to explicitly supply the RTTI for the
+ // comparison.
+ //
+ // TODO(mdempsky): Change writer.go to desugar "switch {" into
+ // "switch true {", which we already handle correctly.
+ if tag == nil {
+ for i, cas := range cases {
+ if cas.Type().IsEmptyInterface() {
+ for len(rtypes) < i {
+ rtypes = append(rtypes, nil)
+ }
+ rtypes = append(rtypes, reflectdata.TypePtrAt(cas.Pos(), types.Types[types.TBOOL]))
+ }
+ }
+ }
+ }
+
+ clause := ir.NewCaseStmt(pos, cases, nil)
+ clause.RTypes = rtypes
+
+ if ident != nil {
+ name := r.curfn.NewLocal(r.pos(), ident.Sym(), r.typ())
+ r.addLocal(name)
+ clause.Var = name
+ name.Defn = tag
+ }
+
+ clause.Body = r.stmts()
+ clauses[i] = clause
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ r.closeScope()
+
+ n := ir.NewSwitchStmt(pos, tag, clauses)
+ n.Label = label
+ if init != nil {
+ n.SetInit([]ir.Node{init})
+ }
+ return n
+}
+
+func (r *reader) label() *types.Sym {
+ r.Sync(pkgbits.SyncLabel)
+ name := r.String()
+ if r.inlCall != nil {
+ name = fmt.Sprintf("~%s·%d", name, inlgen)
+ }
+ return typecheck.Lookup(name)
+}
+
+func (r *reader) optLabel() *types.Sym {
+ r.Sync(pkgbits.SyncOptLabel)
+ if r.Bool() {
+ return r.label()
+ }
+ return nil
+}
+
+// initDefn marks the given names as declared by defn and populates
+// its Init field with ODCL nodes. It then reports whether any names
+// were so declared, which can be used to initialize defn.Def.
+func (r *reader) initDefn(defn ir.InitNode, names []*ir.Name) bool {
+ if len(names) == 0 {
+ return false
+ }
+
+ init := make([]ir.Node, len(names))
+ for i, name := range names {
+ name.Defn = defn
+ init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
+ }
+ defn.SetInit(init)
+ return true
+}
+
+// @@@ Expressions
+
+// expr reads and returns a typechecked expression.
+func (r *reader) expr() (res ir.Node) {
+ defer func() {
+ if res != nil && res.Typecheck() == 0 {
+ base.FatalfAt(res.Pos(), "%v missed typecheck", res)
+ }
+ }()
+
+ switch tag := codeExpr(r.Code(pkgbits.SyncExpr)); tag {
+ default:
+ panic("unhandled expression")
+
+ case exprLocal:
+ return typecheck.Expr(r.useLocal())
+
+ case exprGlobal:
+ // Callee instead of Expr allows builtins
+ // TODO(mdempsky): Handle builtins directly in exprCall, like method calls?
+ return typecheck.Callee(r.obj())
+
+ case exprFuncInst:
+ origPos, pos := r.origPos()
+ wrapperFn, baseFn, dictPtr := r.funcInst(pos)
+ if wrapperFn != nil {
+ return wrapperFn
+ }
+ return r.curry(origPos, false, baseFn, dictPtr, nil)
+
+ case exprConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := FixValue(typ, r.Value())
+ return ir.NewBasicLit(pos, typ, val)
+
+ case exprZero:
+ pos := r.pos()
+ typ := r.typ()
+ return ir.NewZero(pos, typ)
+
+ case exprCompLit:
+ return r.compLit()
+
+ case exprFuncLit:
+ return r.funcLit()
+
+ case exprFieldVal:
+ x := r.expr()
+ pos := r.pos()
+ sym := r.selector()
+
+ return typecheck.XDotField(pos, x, sym)
+
+ case exprMethodVal:
+ recv := r.expr()
+ origPos, pos := r.origPos()
+ wrapperFn, baseFn, dictPtr := r.methodExpr()
+
+ // For simple wrapperFn values, the existing machinery for creating
+ // and deduplicating wrapperFn value wrappers still works fine.
+ if wrapperFn, ok := wrapperFn.(*ir.SelectorExpr); ok && wrapperFn.Op() == ir.OMETHEXPR {
+ // The receiver expression we constructed may have a shape type.
+ // For example, in fixedbugs/issue54343.go, `New[int]()` is
+ // constructed as `New[go.shape.int](&.dict.New[int])`, which
+ // has type `*T[go.shape.int]`, not `*T[int]`.
+ //
+ // However, the method we want to select here is `(*T[int]).M`,
+ // not `(*T[go.shape.int]).M`, so we need to manually convert
+ // the type back so that the OXDOT resolves correctly.
+ //
+ // TODO(mdempsky): Logically it might make more sense for
+ // exprCall to take responsibility for setting a non-shaped
+ // result type, but this is the only place where we care
+ // currently. And only because existing ir.OMETHVALUE backend
+ // code relies on n.X.Type() instead of n.Selection.Recv().Type
+ // (because the latter is types.FakeRecvType() in the case of
+ // interface method values).
+ //
+ if recv.Type().HasShape() {
+ typ := wrapperFn.Type().Param(0).Type
+ if !types.Identical(typ, recv.Type()) {
+ base.FatalfAt(wrapperFn.Pos(), "receiver %L does not match %L", recv, wrapperFn)
+ }
+ recv = typecheck.Expr(ir.NewConvExpr(recv.Pos(), ir.OCONVNOP, typ, recv))
+ }
+
+ n := typecheck.XDotMethod(pos, recv, wrapperFn.Sel, false)
+
+ // As a consistency check here, we make sure "n" selected the
+ // same method (represented by a types.Field) that wrapperFn
+ // selected. However, for anonymous receiver types, there can be
+ // multiple such types.Field instances (#58563). So we may need
+ // to fallback to making sure Sym and Type (including the
+ // receiver parameter's type) match.
+ if n.Selection != wrapperFn.Selection {
+ assert(n.Selection.Sym == wrapperFn.Selection.Sym)
+ assert(types.Identical(n.Selection.Type, wrapperFn.Selection.Type))
+ assert(types.Identical(n.Selection.Type.Recv().Type, wrapperFn.Selection.Type.Recv().Type))
+ }
+
+ wrapper := methodValueWrapper{
+ rcvr: n.X.Type(),
+ method: n.Selection,
+ }
+
+ if r.importedDef() {
+ haveMethodValueWrappers = append(haveMethodValueWrappers, wrapper)
+ } else {
+ needMethodValueWrappers = append(needMethodValueWrappers, wrapper)
+ }
+ return n
+ }
+
+ // For more complicated method expressions, we construct a
+ // function literal wrapper.
+ return r.curry(origPos, true, baseFn, recv, dictPtr)
+
+ case exprMethodExpr:
+ recv := r.typ()
+
+ implicits := make([]int, r.Len())
+ for i := range implicits {
+ implicits[i] = r.Len()
+ }
+ var deref, addr bool
+ if r.Bool() {
+ deref = true
+ } else if r.Bool() {
+ addr = true
+ }
+
+ origPos, pos := r.origPos()
+ wrapperFn, baseFn, dictPtr := r.methodExpr()
+
+ // If we already have a wrapper and don't need to do anything with
+ // it, we can just return the wrapper directly.
+ //
+ // N.B., we use implicits/deref/addr here as the source of truth
+ // rather than types.Identical, because the latter can be confused
+ // by tricky promoted methods (e.g., typeparam/mdempsky/21.go).
+ if wrapperFn != nil && len(implicits) == 0 && !deref && !addr {
+ if !types.Identical(recv, wrapperFn.Type().Param(0).Type) {
+ base.FatalfAt(pos, "want receiver type %v, but have method %L", recv, wrapperFn)
+ }
+ return wrapperFn
+ }
+
+ // Otherwise, if the wrapper function is a static method
+ // expression (OMETHEXPR) and the receiver type is unshaped, then
+ // we can rely on a statically generated wrapper being available.
+ if method, ok := wrapperFn.(*ir.SelectorExpr); ok && method.Op() == ir.OMETHEXPR && !recv.HasShape() {
+ return typecheck.NewMethodExpr(pos, recv, method.Sel)
+ }
+
+ return r.methodExprWrap(origPos, recv, implicits, deref, addr, baseFn, dictPtr)
+
+ case exprIndex:
+ x := r.expr()
+ pos := r.pos()
+ index := r.expr()
+ n := typecheck.Expr(ir.NewIndexExpr(pos, x, index))
+ switch n.Op() {
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ n.RType = r.rtype(pos)
+ }
+ return n
+
+ case exprSlice:
+ x := r.expr()
+ pos := r.pos()
+ var index [3]ir.Node
+ for i := range index {
+ index[i] = r.optExpr()
+ }
+ op := ir.OSLICE
+ if index[2] != nil {
+ op = ir.OSLICE3
+ }
+ return typecheck.Expr(ir.NewSliceExpr(pos, op, x, index[0], index[1], index[2]))
+
+ case exprAssert:
+ x := r.expr()
+ pos := r.pos()
+ typ := r.exprType()
+ srcRType := r.rtype(pos)
+
+ // TODO(mdempsky): Always emit ODYNAMICDOTTYPE for uniformity?
+ if typ, ok := typ.(*ir.DynamicType); ok && typ.Op() == ir.ODYNAMICTYPE {
+ assert := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, x, typ.RType)
+ assert.SrcRType = srcRType
+ assert.ITab = typ.ITab
+ return typed(typ.Type(), assert)
+ }
+ return typecheck.Expr(ir.NewTypeAssertExpr(pos, x, typ.Type()))
+
+ case exprUnaryOp:
+ op := r.op()
+ pos := r.pos()
+ x := r.expr()
+
+ switch op {
+ case ir.OADDR:
+ return typecheck.Expr(typecheck.NodAddrAt(pos, x))
+ case ir.ODEREF:
+ return typecheck.Expr(ir.NewStarExpr(pos, x))
+ }
+ return typecheck.Expr(ir.NewUnaryExpr(pos, op, x))
+
+ case exprBinaryOp:
+ op := r.op()
+ x := r.expr()
+ pos := r.pos()
+ y := r.expr()
+
+ switch op {
+ case ir.OANDAND, ir.OOROR:
+ return typecheck.Expr(ir.NewLogicalExpr(pos, op, x, y))
+ case ir.OLSH, ir.ORSH:
+ // Untyped rhs of non-constant shift, e.g. x << 1.0.
+ // If we have a constant value, it must be an int >= 0.
+ if ir.IsConstNode(y) {
+ val := constant.ToInt(y.Val())
+ assert(val.Kind() == constant.Int && constant.Sign(val) >= 0)
+ }
+ }
+ return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y))
+
+ case exprRecv:
+ x := r.expr()
+ pos := r.pos()
+ for i, n := 0, r.Len(); i < n; i++ {
+ x = Implicit(typecheck.DotField(pos, x, r.Len()))
+ }
+ if r.Bool() { // needs deref
+ x = Implicit(Deref(pos, x.Type().Elem(), x))
+ } else if r.Bool() { // needs addr
+ x = Implicit(Addr(pos, x))
+ }
+ return x
+
+ case exprCall:
+ var fun ir.Node
+ var args ir.Nodes
+ if r.Bool() { // method call
+ recv := r.expr()
+ _, method, dictPtr := r.methodExpr()
+
+ if recv.Type().IsInterface() && method.Op() == ir.OMETHEXPR {
+ method := method.(*ir.SelectorExpr)
+
+ // The compiler backend (e.g., devirtualization) handle
+ // OCALLINTER/ODOTINTER better than OCALLFUNC/OMETHEXPR for
+ // interface calls, so we prefer to continue constructing
+ // calls that way where possible.
+ //
+ // There are also corner cases where semantically it's perhaps
+ // significant; e.g., fixedbugs/issue15975.go, #38634, #52025.
+
+ fun = typecheck.XDotMethod(method.Pos(), recv, method.Sel, true)
+ } else {
+ if recv.Type().IsInterface() {
+ // N.B., this happens currently for typeparam/issue51521.go
+ // and typeparam/typeswitch3.go.
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(method.Pos(), "imprecise interface call")
+ }
+ }
+
+ fun = method
+ args.Append(recv)
+ }
+ if dictPtr != nil {
+ args.Append(dictPtr)
+ }
+ } else if r.Bool() { // call to instanced function
+ pos := r.pos()
+ _, shapedFn, dictPtr := r.funcInst(pos)
+ fun = shapedFn
+ args.Append(dictPtr)
+ } else {
+ fun = r.expr()
+ }
+ pos := r.pos()
+ args.Append(r.multiExpr()...)
+ dots := r.Bool()
+ n := typecheck.Call(pos, fun, args, dots)
+ switch n.Op() {
+ case ir.OAPPEND:
+ n := n.(*ir.CallExpr)
+ n.RType = r.rtype(pos)
+ // For append(a, b...), we don't need the implicit conversion. The typechecker already
+ // ensured that a and b are both slices with the same base type, or []byte and string.
+ if n.IsDDD {
+ if conv, ok := n.Args[1].(*ir.ConvExpr); ok && conv.Op() == ir.OCONVNOP && conv.Implicit() {
+ n.Args[1] = conv.X
+ }
+ }
+ case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
+ n.RType = r.rtype(pos)
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ n.RType = r.rtype(pos)
+ case ir.OUNSAFESLICE:
+ n := n.(*ir.BinaryExpr)
+ n.RType = r.rtype(pos)
+ }
+ return n
+
+ case exprMake:
+ pos := r.pos()
+ typ := r.exprType()
+ extra := r.exprs()
+ n := typecheck.Expr(ir.NewCallExpr(pos, ir.OMAKE, nil, append([]ir.Node{typ}, extra...))).(*ir.MakeExpr)
+ n.RType = r.rtype(pos)
+ return n
+
+ case exprNew:
+ pos := r.pos()
+ typ := r.exprType()
+ return typecheck.Expr(ir.NewUnaryExpr(pos, ir.ONEW, typ))
+
+ case exprSizeof:
+ return ir.NewUintptr(r.pos(), r.typ().Size())
+
+ case exprAlignof:
+ return ir.NewUintptr(r.pos(), r.typ().Alignment())
+
+ case exprOffsetof:
+ pos := r.pos()
+ typ := r.typ()
+ types.CalcSize(typ)
+
+ var offset int64
+ for i := r.Len(); i >= 0; i-- {
+ field := typ.Field(r.Len())
+ offset += field.Offset
+ typ = field.Type
+ }
+
+ return ir.NewUintptr(pos, offset)
+
+ case exprReshape:
+ typ := r.typ()
+ x := r.expr()
+
+ if types.IdenticalStrict(x.Type(), typ) {
+ return x
+ }
+
+ // Comparison expressions are constructed as "untyped bool" still.
+ //
+ // TODO(mdempsky): It should be safe to reshape them here too, but
+ // maybe it's better to construct them with the proper type
+ // instead.
+ if x.Type() == types.UntypedBool && typ.IsBoolean() {
+ return x
+ }
+
+ base.AssertfAt(x.Type().HasShape() || typ.HasShape(), x.Pos(), "%L and %v are not shape types", x, typ)
+ base.AssertfAt(types.Identical(x.Type(), typ), x.Pos(), "%L is not shape-identical to %v", x, typ)
+
+ // We use ir.HasUniquePos here as a check that x only appears once
+ // in the AST, so it's okay for us to call SetType without
+ // breaking any other uses of it.
+ //
+ // Notably, any ONAMEs should already have the exactly right shape
+ // type and been caught by types.IdenticalStrict above.
+ base.AssertfAt(ir.HasUniquePos(x), x.Pos(), "cannot call SetType(%v) on %L", typ, x)
+
+ if base.Debug.Reshape != 0 {
+ base.WarnfAt(x.Pos(), "reshaping %L to %v", x, typ)
+ }
+
+ x.SetType(typ)
+ return x
+
+ case exprConvert:
+ implicit := r.Bool()
+ typ := r.typ()
+ pos := r.pos()
+ typeWord, srcRType := r.convRTTI(pos)
+ dstTypeParam := r.Bool()
+ identical := r.Bool()
+ x := r.expr()
+
+ // TODO(mdempsky): Stop constructing expressions of untyped type.
+ x = typecheck.DefaultLit(x, typ)
+
+ ce := ir.NewConvExpr(pos, ir.OCONV, typ, x)
+ ce.TypeWord, ce.SrcRType = typeWord, srcRType
+ if implicit {
+ ce.SetImplicit(true)
+ }
+ n := typecheck.Expr(ce)
+
+ // Conversions between non-identical, non-empty interfaces always
+ // requires a runtime call, even if they have identical underlying
+ // interfaces. This is because we create separate itab instances
+ // for each unique interface type, not merely each unique
+ // interface shape.
+ //
+ // However, due to shape types, typecheck.Expr might mistakenly
+ // think a conversion between two non-empty interfaces are
+ // identical and set ir.OCONVNOP, instead of ir.OCONVIFACE. To
+ // ensure we update the itab field appropriately, we force it to
+ // ir.OCONVIFACE instead when shape types are involved.
+ //
+ // TODO(mdempsky): Are there other places we might get this wrong?
+ // Should this be moved down into typecheck.{Assign,Convert}op?
+ // This would be a non-issue if itabs were unique for each
+ // *underlying* interface type instead.
+ if !identical {
+ if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OCONVNOP && n.Type().IsInterface() && !n.Type().IsEmptyInterface() && (n.Type().HasShape() || n.X.Type().HasShape()) {
+ n.SetOp(ir.OCONVIFACE)
+ }
+ }
+
+ // spec: "If the type is a type parameter, the constant is converted
+ // into a non-constant value of the type parameter."
+ if dstTypeParam && ir.IsConstNode(n) {
+ // Wrap in an OCONVNOP node to ensure result is non-constant.
+ n = Implicit(ir.NewConvExpr(pos, ir.OCONVNOP, n.Type(), n))
+ n.SetTypecheck(1)
+ }
+ return n
+
+ case exprRuntimeBuiltin:
+ builtin := typecheck.LookupRuntime(r.String())
+ return builtin
+ }
+}
+
+// funcInst reads an instantiated function reference, and returns
+// three (possibly nil) expressions related to it:
+//
+// baseFn is always non-nil: it's either a function of the appropriate
+// type already, or it has an extra dictionary parameter as the first
+// parameter.
+//
+// If dictPtr is non-nil, then it's a dictionary argument that must be
+// passed as the first argument to baseFn.
+//
+// If wrapperFn is non-nil, then it's either the same as baseFn (if
+// dictPtr is nil), or it's semantically equivalent to currying baseFn
+// to pass dictPtr. (wrapperFn is nil when dictPtr is an expression
+// that needs to be computed dynamically.)
+//
+// For callers that are creating a call to the returned function, it's
+// best to emit a call to baseFn, and include dictPtr in the arguments
+// list as appropriate.
+//
+// For callers that want to return the function without invoking it,
+// they may return wrapperFn if it's non-nil; but otherwise, they need
+// to create their own wrapper.
+func (r *reader) funcInst(pos src.XPos) (wrapperFn, baseFn, dictPtr ir.Node) {
+ // Like in methodExpr, I'm pretty sure this isn't needed.
+ var implicits []*types.Type
+ if r.dict != nil {
+ implicits = r.dict.targs
+ }
+
+ if r.Bool() { // dynamic subdictionary
+ idx := r.Len()
+ info := r.dict.subdicts[idx]
+ explicits := r.p.typListIdx(info.explicits, r.dict)
+
+ baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name)
+
+ // TODO(mdempsky): Is there a more robust way to get the
+ // dictionary pointer type here?
+ dictPtrType := baseFn.Type().Param(0).Type
+ dictPtr = typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx)))
+
+ return
+ }
+
+ info := r.objInfo()
+ explicits := r.p.typListIdx(info.explicits, r.dict)
+
+ wrapperFn = r.p.objIdx(info.idx, implicits, explicits, false).(*ir.Name)
+ baseFn = r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name)
+
+ dictName := r.p.objDictName(info.idx, implicits, explicits)
+ dictPtr = typecheck.Expr(ir.NewAddrExpr(pos, dictName))
+
+ return
+}
+
+func (pr *pkgReader) objDictName(idx pkgbits.Index, implicits, explicits []*types.Type) *ir.Name {
+ rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+ _, sym := rname.qualifiedIdent()
+ tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+
+ if tag == pkgbits.ObjStub {
+ assert(!sym.IsBlank())
+ if pri, ok := objReader[sym]; ok {
+ return pri.pr.objDictName(pri.idx, nil, explicits)
+ }
+ base.Fatalf("unresolved stub: %v", sym)
+ }
+
+ dict, err := pr.objDictIdx(sym, idx, implicits, explicits, false)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+
+ return pr.dictNameOf(dict)
+}
+
+// curry returns a function literal that calls fun with arg0 and
+// (optionally) arg1, accepting additional arguments to the function
+// literal as necessary to satisfy fun's signature.
+//
+// If nilCheck is true and arg0 is an interface value, then it's
+// checked to be non-nil as an initial step at the point of evaluating
+// the function literal itself.
+func (r *reader) curry(origPos src.XPos, ifaceHack bool, fun ir.Node, arg0, arg1 ir.Node) ir.Node {
+ var captured ir.Nodes
+ captured.Append(fun, arg0)
+ if arg1 != nil {
+ captured.Append(arg1)
+ }
+
+ params, results := syntheticSig(fun.Type())
+ params = params[len(captured)-1:] // skip curried parameters
+ typ := types.NewSignature(nil, params, results)
+
+ addBody := func(pos src.XPos, r *reader, captured []ir.Node) {
+ fun := captured[0]
+
+ var args ir.Nodes
+ args.Append(captured[1:]...)
+ args.Append(r.syntheticArgs()...)
+
+ r.syntheticTailCall(pos, fun, args)
+ }
+
+ return r.syntheticClosure(origPos, typ, ifaceHack, captured, addBody)
+}
+
+// methodExprWrap returns a function literal that changes method's
+// first parameter's type to recv, and uses implicits/deref/addr to
+// select the appropriate receiver parameter to pass to method.
+func (r *reader) methodExprWrap(origPos src.XPos, recv *types.Type, implicits []int, deref, addr bool, method, dictPtr ir.Node) ir.Node {
+ var captured ir.Nodes
+ captured.Append(method)
+
+ params, results := syntheticSig(method.Type())
+
+ // Change first parameter to recv.
+ params[0].Type = recv
+
+ // If we have a dictionary pointer argument to pass, then omit the
+ // underlying method expression's dictionary parameter from the
+ // returned signature too.
+ if dictPtr != nil {
+ captured.Append(dictPtr)
+ params = append(params[:1], params[2:]...)
+ }
+
+ typ := types.NewSignature(nil, params, results)
+
+ addBody := func(pos src.XPos, r *reader, captured []ir.Node) {
+ fn := captured[0]
+ args := r.syntheticArgs()
+
+ // Rewrite first argument based on implicits/deref/addr.
+ {
+ arg := args[0]
+ for _, ix := range implicits {
+ arg = Implicit(typecheck.DotField(pos, arg, ix))
+ }
+ if deref {
+ arg = Implicit(Deref(pos, arg.Type().Elem(), arg))
+ } else if addr {
+ arg = Implicit(Addr(pos, arg))
+ }
+ args[0] = arg
+ }
+
+ // Insert dictionary argument, if provided.
+ if dictPtr != nil {
+ newArgs := make([]ir.Node, len(args)+1)
+ newArgs[0] = args[0]
+ newArgs[1] = captured[1]
+ copy(newArgs[2:], args[1:])
+ args = newArgs
+ }
+
+ r.syntheticTailCall(pos, fn, args)
+ }
+
+ return r.syntheticClosure(origPos, typ, false, captured, addBody)
+}
+
+// syntheticClosure constructs a synthetic function literal for
+// currying dictionary arguments. origPos is the position used for the
+// closure, which must be a non-inlined position. typ is the function
+// literal's signature type.
+//
+// captures is a list of expressions that need to be evaluated at the
+// point of function literal evaluation and captured by the function
+// literal. If ifaceHack is true and captures[1] is an interface type,
+// it's checked to be non-nil after evaluation.
+//
+// addBody is a callback function to populate the function body. The
+// list of captured values passed back has the captured variables for
+// use within the function literal, corresponding to the expressions
+// in captures.
+func (r *reader) syntheticClosure(origPos src.XPos, typ *types.Type, ifaceHack bool, captures ir.Nodes, addBody func(pos src.XPos, r *reader, captured []ir.Node)) ir.Node {
+ // isSafe reports whether n is an expression that we can safely
+ // defer to evaluating inside the closure instead, to avoid storing
+ // them into the closure.
+ //
+ // In practice this is always (and only) the wrappee function.
+ isSafe := func(n ir.Node) bool {
+ if n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PFUNC {
+ return true
+ }
+ if n.Op() == ir.OMETHEXPR {
+ return true
+ }
+
+ return false
+ }
+
+ fn := r.inlClosureFunc(origPos, typ)
+ fn.SetWrapper(true)
+
+ clo := fn.OClosure
+ inlPos := clo.Pos()
+
+ var init ir.Nodes
+ for i, n := range captures {
+ if isSafe(n) {
+ continue // skip capture; can reference directly
+ }
+
+ tmp := r.tempCopy(inlPos, n, &init)
+ ir.NewClosureVar(origPos, fn, tmp)
+
+ // We need to nil check interface receivers at the point of method
+ // value evaluation, ugh.
+ if ifaceHack && i == 1 && n.Type().IsInterface() {
+ check := ir.NewUnaryExpr(inlPos, ir.OCHECKNIL, ir.NewUnaryExpr(inlPos, ir.OITAB, tmp))
+ init.Append(typecheck.Stmt(check))
+ }
+ }
+
+ pri := pkgReaderIndex{synthetic: func(pos src.XPos, r *reader) {
+ captured := make([]ir.Node, len(captures))
+ next := 0
+ for i, n := range captures {
+ if isSafe(n) {
+ captured[i] = n
+ } else {
+ captured[i] = r.closureVars[next]
+ next++
+ }
+ }
+ assert(next == len(r.closureVars))
+
+ addBody(origPos, r, captured)
+ }}
+ bodyReader[fn] = pri
+ pri.funcBody(fn)
+
+ return ir.InitExpr(init, clo)
+}
+
+// syntheticSig duplicates and returns the params and results lists
+// for sig, but renaming anonymous parameters so they can be assigned
+// ir.Names.
+func syntheticSig(sig *types.Type) (params, results []*types.Field) {
+ clone := func(params []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(params))
+ for i, param := range params {
+ // TODO(mdempsky): It would be nice to preserve the original
+ // parameter positions here instead, but at least
+ // typecheck.NewMethodType replaces them with base.Pos, making
+ // them useless. Worse, the positions copied from base.Pos may
+ // have inlining contexts, which we definitely don't want here
+ // (e.g., #54625).
+ res[i] = types.NewField(base.AutogeneratedPos, param.Sym, param.Type)
+ res[i].SetIsDDD(param.IsDDD())
+ }
+ return res
+ }
+
+ return clone(sig.Params()), clone(sig.Results())
+}
+
+func (r *reader) optExpr() ir.Node {
+ if r.Bool() {
+ return r.expr()
+ }
+ return nil
+}
+
+// methodExpr reads a method expression reference, and returns three
+// (possibly nil) expressions related to it:
+//
+// baseFn is always non-nil: it's either a function of the appropriate
+// type already, or it has an extra dictionary parameter as the second
+// parameter (i.e., immediately after the promoted receiver
+// parameter).
+//
+// If dictPtr is non-nil, then it's a dictionary argument that must be
+// passed as the second argument to baseFn.
+//
+// If wrapperFn is non-nil, then it's either the same as baseFn (if
+// dictPtr is nil), or it's semantically equivalent to currying baseFn
+// to pass dictPtr. (wrapperFn is nil when dictPtr is an expression
+// that needs to be computed dynamically.)
+//
+// For callers that are creating a call to the returned method, it's
+// best to emit a call to baseFn, and include dictPtr in the arguments
+// list as appropriate.
+//
+// For callers that want to return a method expression without
+// invoking it, they may return wrapperFn if it's non-nil; but
+// otherwise, they need to create their own wrapper.
+func (r *reader) methodExpr() (wrapperFn, baseFn, dictPtr ir.Node) {
+ recv := r.typ()
+ sig0 := r.typ()
+ pos := r.pos()
+ sym := r.selector()
+
+ // Signature type to return (i.e., recv prepended to the method's
+ // normal parameters list).
+ sig := typecheck.NewMethodType(sig0, recv)
+
+ if r.Bool() { // type parameter method expression
+ idx := r.Len()
+ word := r.dictWord(pos, r.dict.typeParamMethodExprsOffset()+idx)
+
+ // TODO(mdempsky): If the type parameter was instantiated with an
+ // interface type (i.e., embed.IsInterface()), then we could
+ // return the OMETHEXPR instead and save an indirection.
+
+ // We wrote the method expression's entry point PC into the
+ // dictionary, but for Go `func` values we need to return a
+ // closure (i.e., pointer to a structure with the PC as the first
+ // field). Because method expressions don't have any closure
+ // variables, we pun the dictionary entry as the closure struct.
+ fn := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, sig, ir.NewAddrExpr(pos, word)))
+ return fn, fn, nil
+ }
+
+ // TODO(mdempsky): I'm pretty sure this isn't needed: implicits is
+ // only relevant to locally defined types, but they can't have
+ // (non-promoted) methods.
+ var implicits []*types.Type
+ if r.dict != nil {
+ implicits = r.dict.targs
+ }
+
+ if r.Bool() { // dynamic subdictionary
+ idx := r.Len()
+ info := r.dict.subdicts[idx]
+ explicits := r.p.typListIdx(info.explicits, r.dict)
+
+ shapedObj := r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name)
+ shapedFn := shapedMethodExpr(pos, shapedObj, sym)
+
+ // TODO(mdempsky): Is there a more robust way to get the
+ // dictionary pointer type here?
+ dictPtrType := shapedFn.Type().Param(1).Type
+ dictPtr := typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, dictPtrType, r.dictWord(pos, r.dict.subdictsOffset()+idx)))
+
+ return nil, shapedFn, dictPtr
+ }
+
+ if r.Bool() { // static dictionary
+ info := r.objInfo()
+ explicits := r.p.typListIdx(info.explicits, r.dict)
+
+ shapedObj := r.p.objIdx(info.idx, implicits, explicits, true).(*ir.Name)
+ shapedFn := shapedMethodExpr(pos, shapedObj, sym)
+
+ dict := r.p.objDictName(info.idx, implicits, explicits)
+ dictPtr := typecheck.Expr(ir.NewAddrExpr(pos, dict))
+
+ // Check that dictPtr matches shapedFn's dictionary parameter.
+ if !types.Identical(dictPtr.Type(), shapedFn.Type().Param(1).Type) {
+ base.FatalfAt(pos, "dict %L, but shaped method %L", dict, shapedFn)
+ }
+
+ // For statically known instantiations, we can take advantage of
+ // the stenciled wrapper.
+ base.AssertfAt(!recv.HasShape(), pos, "shaped receiver %v", recv)
+ wrapperFn := typecheck.NewMethodExpr(pos, recv, sym)
+ base.AssertfAt(types.Identical(sig, wrapperFn.Type()), pos, "wrapper %L does not have type %v", wrapperFn, sig)
+
+ return wrapperFn, shapedFn, dictPtr
+ }
+
+ // Simple method expression; no dictionary needed.
+ base.AssertfAt(!recv.HasShape() || recv.IsInterface(), pos, "shaped receiver %v", recv)
+ fn := typecheck.NewMethodExpr(pos, recv, sym)
+ return fn, fn, nil
+}
+
+// shapedMethodExpr returns the specified method on the given shaped
+// type.
+func shapedMethodExpr(pos src.XPos, obj *ir.Name, sym *types.Sym) *ir.SelectorExpr {
+ assert(obj.Op() == ir.OTYPE)
+
+ typ := obj.Type()
+ assert(typ.HasShape())
+
+ method := func() *types.Field {
+ for _, method := range typ.Methods() {
+ if method.Sym == sym {
+ return method
+ }
+ }
+
+ base.FatalfAt(pos, "failed to find method %v in shaped type %v", sym, typ)
+ panic("unreachable")
+ }()
+
+ // Construct an OMETHEXPR node.
+ recv := method.Type.Recv().Type
+ return typecheck.NewMethodExpr(pos, recv, sym)
+}
+
+func (r *reader) multiExpr() []ir.Node {
+ r.Sync(pkgbits.SyncMultiExpr)
+
+ if r.Bool() { // N:1
+ pos := r.pos()
+ expr := r.expr()
+
+ results := make([]ir.Node, r.Len())
+ as := ir.NewAssignListStmt(pos, ir.OAS2, nil, []ir.Node{expr})
+ as.Def = true
+ for i := range results {
+ tmp := r.temp(pos, r.typ())
+ as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, tmp))
+ as.Lhs.Append(tmp)
+
+ res := ir.Node(tmp)
+ if r.Bool() {
+ n := ir.NewConvExpr(pos, ir.OCONV, r.typ(), res)
+ n.TypeWord, n.SrcRType = r.convRTTI(pos)
+ n.SetImplicit(true)
+ res = typecheck.Expr(n)
+ }
+ results[i] = res
+ }
+
+ // TODO(mdempsky): Could use ir.InlinedCallExpr instead?
+ results[0] = ir.InitExpr([]ir.Node{typecheck.Stmt(as)}, results[0])
+ return results
+ }
+
+ // N:N
+ exprs := make([]ir.Node, r.Len())
+ if len(exprs) == 0 {
+ return nil
+ }
+ for i := range exprs {
+ exprs[i] = r.expr()
+ }
+ return exprs
+}
+
+// temp returns a new autotemp of the specified type.
+func (r *reader) temp(pos src.XPos, typ *types.Type) *ir.Name {
+ return typecheck.TempAt(pos, r.curfn, typ)
+}
+
+// tempCopy declares and returns a new autotemp initialized to the
+// value of expr.
+func (r *reader) tempCopy(pos src.XPos, expr ir.Node, init *ir.Nodes) *ir.Name {
+ tmp := r.temp(pos, expr.Type())
+
+ init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
+
+ assign := ir.NewAssignStmt(pos, tmp, expr)
+ assign.Def = true
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, tmp, expr)))
+
+ tmp.Defn = assign
+
+ return tmp
+}
+
+func (r *reader) compLit() ir.Node {
+ r.Sync(pkgbits.SyncCompLit)
+ pos := r.pos()
+ typ0 := r.typ()
+
+ typ := typ0
+ if typ.IsPtr() {
+ typ = typ.Elem()
+ }
+ if typ.Kind() == types.TFORW {
+ base.FatalfAt(pos, "unresolved composite literal type: %v", typ)
+ }
+ var rtype ir.Node
+ if typ.IsMap() {
+ rtype = r.rtype(pos)
+ }
+ isStruct := typ.Kind() == types.TSTRUCT
+
+ elems := make([]ir.Node, r.Len())
+ for i := range elems {
+ elemp := &elems[i]
+
+ if isStruct {
+ sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.Len()), nil)
+ *elemp, elemp = sk, &sk.Value
+ } else if r.Bool() {
+ kv := ir.NewKeyExpr(r.pos(), r.expr(), nil)
+ *elemp, elemp = kv, &kv.Value
+ }
+
+ *elemp = wrapName(r.pos(), r.expr())
+ }
+
+ lit := typecheck.Expr(ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, elems))
+ if rtype != nil {
+ lit := lit.(*ir.CompLitExpr)
+ lit.RType = rtype
+ }
+ if typ0.IsPtr() {
+ lit = typecheck.Expr(typecheck.NodAddrAt(pos, lit))
+ lit.SetType(typ0)
+ }
+ return lit
+}
+
+func wrapName(pos src.XPos, x ir.Node) ir.Node {
+ // These nodes do not carry line numbers.
+ // Introduce a wrapper node to give them the correct line.
+ switch x.Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
+ break
+ }
+ fallthrough
+ case ir.ONAME, ir.ONONAME, ir.ONIL:
+ p := ir.NewParenExpr(pos, x)
+ p.SetImplicit(true)
+ return p
+ }
+ return x
+}
+
+func (r *reader) funcLit() ir.Node {
+ r.Sync(pkgbits.SyncFuncLit)
+
+ // The underlying function declaration (including its parameters'
+ // positions, if any) need to remain the original, uninlined
+ // positions. This is because we track inlining-context on nodes so
+ // we can synthesize the extra implied stack frames dynamically when
+ // generating tracebacks, whereas those stack frames don't make
+ // sense *within* the function literal. (Any necessary inlining
+ // adjustments will have been applied to the call expression
+ // instead.)
+ //
+ // This is subtle, and getting it wrong leads to cycles in the
+ // inlining tree, which lead to infinite loops during stack
+ // unwinding (#46234, #54625).
+ //
+ // Note that we *do* want the inline-adjusted position for the
+ // OCLOSURE node, because that position represents where any heap
+ // allocation of the closure is credited (#49171).
+ r.suppressInlPos++
+ origPos := r.pos()
+ sig := r.signature(nil)
+ r.suppressInlPos--
+
+ fn := r.inlClosureFunc(origPos, sig)
+
+ fn.ClosureVars = make([]*ir.Name, 0, r.Len())
+ for len(fn.ClosureVars) < cap(fn.ClosureVars) {
+ // TODO(mdempsky): I think these should be original positions too
+ // (i.e., not inline-adjusted).
+ ir.NewClosureVar(r.pos(), fn, r.useLocal())
+ }
+ if param := r.dictParam; param != nil {
+ // If we have a dictionary parameter, capture it too. For
+ // simplicity, we capture it last and unconditionally.
+ ir.NewClosureVar(param.Pos(), fn, param)
+ }
+
+ r.addBody(fn, nil)
+
+ // un-hide closures belong to init function.
+ if (r.curfn.IsPackageInit() || strings.HasPrefix(r.curfn.Sym().Name, "init.")) && ir.IsTrivialClosure(fn.OClosure) {
+ fn.SetIsHiddenClosure(false)
+ }
+
+ return fn.OClosure
+}
+
+// inlClosureFunc constructs a new closure function, but correctly
+// handles inlining.
+func (r *reader) inlClosureFunc(origPos src.XPos, sig *types.Type) *ir.Func {
+ curfn := r.inlCaller
+ if curfn == nil {
+ curfn = r.curfn
+ }
+
+ // TODO(mdempsky): Remove hard-coding of typecheck.Target.
+ return ir.NewClosureFunc(origPos, r.inlPos(origPos), ir.OCLOSURE, sig, curfn, typecheck.Target)
+}
+
+func (r *reader) exprList() []ir.Node {
+ r.Sync(pkgbits.SyncExprList)
+ return r.exprs()
+}
+
+func (r *reader) exprs() []ir.Node {
+ r.Sync(pkgbits.SyncExprs)
+ nodes := make([]ir.Node, r.Len())
+ if len(nodes) == 0 {
+ return nil // TODO(mdempsky): Unclear if this matters.
+ }
+ for i := range nodes {
+ nodes[i] = r.expr()
+ }
+ return nodes
+}
+
+// dictWord returns an expression to return the specified
+// uintptr-typed word from the dictionary parameter.
+func (r *reader) dictWord(pos src.XPos, idx int) ir.Node {
+ base.AssertfAt(r.dictParam != nil, pos, "expected dictParam in %v", r.curfn)
+ return typecheck.Expr(ir.NewIndexExpr(pos, r.dictParam, ir.NewInt(pos, int64(idx))))
+}
+
+// rttiWord is like dictWord, but converts it to *byte (the type used
+// internally to represent *runtime._type and *runtime.itab).
+func (r *reader) rttiWord(pos src.XPos, idx int) ir.Node {
+ return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TUINT8]), r.dictWord(pos, idx)))
+}
+
+// rtype reads a type reference from the element bitstream, and
+// returns an expression of type *runtime._type representing that
+// type.
+func (r *reader) rtype(pos src.XPos) ir.Node {
+ _, rtype := r.rtype0(pos)
+ return rtype
+}
+
+func (r *reader) rtype0(pos src.XPos) (typ *types.Type, rtype ir.Node) {
+ r.Sync(pkgbits.SyncRType)
+ if r.Bool() { // derived type
+ idx := r.Len()
+ info := r.dict.rtypes[idx]
+ typ = r.p.typIdx(info, r.dict, true)
+ rtype = r.rttiWord(pos, r.dict.rtypesOffset()+idx)
+ return
+ }
+
+ typ = r.typ()
+ rtype = reflectdata.TypePtrAt(pos, typ)
+ return
+}
+
+// varDictIndex populates name.DictIndex if name is a derived type.
+func (r *reader) varDictIndex(name *ir.Name) {
+ if r.Bool() {
+ idx := 1 + r.dict.rtypesOffset() + r.Len()
+ if int(uint16(idx)) != idx {
+ base.FatalfAt(name.Pos(), "DictIndex overflow for %v: %v", name, idx)
+ }
+ name.DictIndex = uint16(idx)
+ }
+}
+
+// itab returns a (typ, iface) pair of types.
+//
+// typRType and ifaceRType are expressions that evaluate to the
+// *runtime._type for typ and iface, respectively.
+//
+// If typ is a concrete type and iface is a non-empty interface type,
+// then itab is an expression that evaluates to the *runtime.itab for
+// the pair. Otherwise, itab is nil.
+func (r *reader) itab(pos src.XPos) (typ *types.Type, typRType ir.Node, iface *types.Type, ifaceRType ir.Node, itab ir.Node) {
+ typ, typRType = r.rtype0(pos)
+ iface, ifaceRType = r.rtype0(pos)
+
+ idx := -1
+ if r.Bool() {
+ idx = r.Len()
+ }
+
+ if !typ.IsInterface() && iface.IsInterface() && !iface.IsEmptyInterface() {
+ if idx >= 0 {
+ itab = r.rttiWord(pos, r.dict.itabsOffset()+idx)
+ } else {
+ base.AssertfAt(!typ.HasShape(), pos, "%v is a shape type", typ)
+ base.AssertfAt(!iface.HasShape(), pos, "%v is a shape type", iface)
+
+ lsym := reflectdata.ITabLsym(typ, iface)
+ itab = typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8])
+ }
+ }
+
+ return
+}
+
+// convRTTI returns expressions appropriate for populating an
+// ir.ConvExpr's TypeWord and SrcRType fields, respectively.
+func (r *reader) convRTTI(pos src.XPos) (typeWord, srcRType ir.Node) {
+ r.Sync(pkgbits.SyncConvRTTI)
+ src, srcRType0, dst, dstRType, itab := r.itab(pos)
+ if !dst.IsInterface() {
+ return
+ }
+
+ // See reflectdata.ConvIfaceTypeWord.
+ switch {
+ case dst.IsEmptyInterface():
+ if !src.IsInterface() {
+ typeWord = srcRType0 // direct eface construction
+ }
+ case !src.IsInterface():
+ typeWord = itab // direct iface construction
+ default:
+ typeWord = dstRType // convI2I
+ }
+
+ // See reflectdata.ConvIfaceSrcRType.
+ if !src.IsInterface() {
+ srcRType = srcRType0
+ }
+
+ return
+}
+
+func (r *reader) exprType() ir.Node {
+ r.Sync(pkgbits.SyncExprType)
+ pos := r.pos()
+
+ var typ *types.Type
+ var rtype, itab ir.Node
+
+ if r.Bool() {
+ typ, rtype, _, _, itab = r.itab(pos)
+ if !typ.IsInterface() {
+ rtype = nil // TODO(mdempsky): Leave set?
+ }
+ } else {
+ typ, rtype = r.rtype0(pos)
+
+ if !r.Bool() { // not derived
+ return ir.TypeNode(typ)
+ }
+ }
+
+ dt := ir.NewDynamicType(pos, rtype)
+ dt.ITab = itab
+ return typed(typ, dt)
+}
+
+func (r *reader) op() ir.Op {
+ r.Sync(pkgbits.SyncOp)
+ return ir.Op(r.Len())
+}
+
+// @@@ Package initialization
+
+func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) {
+ cgoPragmas := make([][]string, r.Len())
+ for i := range cgoPragmas {
+ cgoPragmas[i] = r.Strings()
+ }
+ target.CgoPragmas = cgoPragmas
+
+ r.pkgInitOrder(target)
+
+ r.pkgDecls(target)
+
+ r.Sync(pkgbits.SyncEOF)
+}
+
+// pkgInitOrder creates a synthetic init function to handle any
+// package-scope initialization statements.
+func (r *reader) pkgInitOrder(target *ir.Package) {
+ initOrder := make([]ir.Node, r.Len())
+ if len(initOrder) == 0 {
+ return
+ }
+
+ // Make a function that contains all the initialization statements.
+ pos := base.AutogeneratedPos
+ base.Pos = pos
+
+ fn := ir.NewFunc(pos, pos, typecheck.Lookup("init"), types.NewSignature(nil, nil, nil))
+ fn.SetIsPackageInit(true)
+ fn.SetInlinabilityChecked(true) // suppress useless "can inline" diagnostics
+
+ typecheck.DeclFunc(fn)
+ r.curfn = fn
+
+ for i := range initOrder {
+ lhs := make([]ir.Node, r.Len())
+ for j := range lhs {
+ lhs[j] = r.obj()
+ }
+ rhs := r.expr()
+ pos := lhs[0].Pos()
+
+ var as ir.Node
+ if len(lhs) == 1 {
+ as = typecheck.Stmt(ir.NewAssignStmt(pos, lhs[0], rhs))
+ } else {
+ as = typecheck.Stmt(ir.NewAssignListStmt(pos, ir.OAS2, lhs, []ir.Node{rhs}))
+ }
+
+ for _, v := range lhs {
+ v.(*ir.Name).Defn = as
+ }
+
+ initOrder[i] = as
+ }
+
+ fn.Body = initOrder
+
+ typecheck.FinishFuncBody()
+ r.curfn = nil
+ r.locals = nil
+
+ // Outline (if legal/profitable) global map inits.
+ staticinit.OutlineMapInits(fn)
+
+ target.Inits = append(target.Inits, fn)
+}
+
+func (r *reader) pkgDecls(target *ir.Package) {
+ r.Sync(pkgbits.SyncDecls)
+ for {
+ switch code := codeDecl(r.Code(pkgbits.SyncDecl)); code {
+ default:
+ panic(fmt.Sprintf("unhandled decl: %v", code))
+
+ case declEnd:
+ return
+
+ case declFunc:
+ names := r.pkgObjs(target)
+ assert(len(names) == 1)
+ target.Funcs = append(target.Funcs, names[0].Func)
+
+ case declMethod:
+ typ := r.typ()
+ sym := r.selector()
+
+ method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0)
+ target.Funcs = append(target.Funcs, method.Nname.(*ir.Name).Func)
+
+ case declVar:
+ names := r.pkgObjs(target)
+
+ if n := r.Len(); n > 0 {
+ assert(len(names) == 1)
+ embeds := make([]ir.Embed, n)
+ for i := range embeds {
+ embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.Strings()}
+ }
+ names[0].Embed = &embeds
+ target.Embeds = append(target.Embeds, names[0])
+ }
+
+ case declOther:
+ r.pkgObjs(target)
+ }
+ }
+}
+
+func (r *reader) pkgObjs(target *ir.Package) []*ir.Name {
+ r.Sync(pkgbits.SyncDeclNames)
+ nodes := make([]*ir.Name, r.Len())
+ for i := range nodes {
+ r.Sync(pkgbits.SyncDeclName)
+
+ name := r.obj().(*ir.Name)
+ nodes[i] = name
+
+ sym := name.Sym()
+ if sym.IsBlank() {
+ continue
+ }
+
+ switch name.Class {
+ default:
+ base.FatalfAt(name.Pos(), "unexpected class: %v", name.Class)
+
+ case ir.PEXTERN:
+ target.Externs = append(target.Externs, name)
+
+ case ir.PFUNC:
+ assert(name.Type().Recv() == nil)
+
+ // TODO(mdempsky): Cleaner way to recognize init?
+ if strings.HasPrefix(sym.Name, "init.") {
+ target.Inits = append(target.Inits, name.Func)
+ }
+ }
+
+ if base.Ctxt.Flag_dynlink && types.LocalPkg.Name == "main" && types.IsExported(sym.Name) && name.Op() == ir.ONAME {
+ assert(!sym.OnExportList())
+ target.PluginExports = append(target.PluginExports, name)
+ sym.SetOnExportList(true)
+ }
+
+ if base.Flag.AsmHdr != "" && (name.Op() == ir.OLITERAL || name.Op() == ir.OTYPE) {
+ assert(!sym.Asm())
+ target.AsmHdrDecls = append(target.AsmHdrDecls, name)
+ sym.SetAsm(true)
+ }
+ }
+
+ return nodes
+}
+
+// @@@ Inlining
+
+// unifiedHaveInlineBody reports whether we have the function body for
+// fn, so we can inline it.
+func unifiedHaveInlineBody(fn *ir.Func) bool {
+ if fn.Inl == nil {
+ return false
+ }
+
+ _, ok := bodyReaderFor(fn)
+ return ok
+}
+
+var inlgen = 0
+
+// unifiedInlineCall implements inline.NewInline by re-reading the function
+// body from its Unified IR export data.
+func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ pri, ok := bodyReaderFor(fn)
+ if !ok {
+ base.FatalfAt(call.Pos(), "cannot inline call to %v: missing inline body", fn)
+ }
+
+ if !fn.Inl.HaveDcl {
+ expandInline(fn, pri)
+ }
+
+ r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
+
+ tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), callerfn.Sym(), fn.Type())
+
+ r.curfn = tmpfn
+
+ r.inlCaller = callerfn
+ r.inlCall = call
+ r.inlFunc = fn
+ r.inlTreeIndex = inlIndex
+ r.inlPosBases = make(map[*src.PosBase]*src.PosBase)
+ r.funarghack = true
+
+ r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars))
+ for i, cv := range r.inlFunc.ClosureVars {
+ // TODO(mdempsky): It should be possible to support this case, but
+ // for now we rely on the inliner avoiding it.
+ if cv.Outer.Curfn != callerfn {
+ base.FatalfAt(call.Pos(), "inlining closure call across frames")
+ }
+ r.closureVars[i] = cv.Outer
+ }
+ if len(r.closureVars) != 0 && r.hasTypeParams() {
+ r.dictParam = r.closureVars[len(r.closureVars)-1] // dictParam is last; see reader.funcLit
+ }
+
+ r.declareParams()
+
+ var inlvars, retvars []*ir.Name
+ {
+ sig := r.curfn.Type()
+ endParams := sig.NumRecvs() + sig.NumParams()
+ endResults := endParams + sig.NumResults()
+
+ inlvars = r.curfn.Dcl[:endParams]
+ retvars = r.curfn.Dcl[endParams:endResults]
+ }
+
+ r.delayResults = fn.Inl.CanDelayResults
+
+ r.retlabel = typecheck.AutoLabel(".i")
+ inlgen++
+
+ init := ir.TakeInit(call)
+
+ // For normal function calls, the function callee expression
+ // may contain side effects. Make sure to preserve these,
+ // if necessary (#42703).
+ if call.Op() == ir.OCALLFUNC {
+ inline.CalleeEffects(&init, call.Fun)
+ }
+
+ var args ir.Nodes
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+ args.Append(call.Args...)
+
+ // Create assignment to declare and initialize inlvars.
+ as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, ir.ToNodes(inlvars), args)
+ as2.Def = true
+ var as2init ir.Nodes
+ for _, name := range inlvars {
+ if ir.IsBlank(name) {
+ continue
+ }
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ name.Defn = as2
+ }
+ as2.SetInit(as2init)
+ init.Append(typecheck.Stmt(as2))
+
+ if !r.delayResults {
+ // If not delaying retvars, declare and zero initialize the
+ // result variables now.
+ for _, name := range retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ ras := ir.NewAssignStmt(call.Pos(), name, nil)
+ init.Append(typecheck.Stmt(ras))
+ }
+ }
+
+ // Add an inline mark just before the inlined body.
+ // This mark is inline in the code so that it's a reasonable spot
+ // to put a breakpoint. Not sure if that's really necessary or not
+ // (in which case it could go at the end of the function instead).
+ // Note issue 28603.
+ init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex)))
+
+ ir.WithFunc(r.curfn, func() {
+ if !r.syntheticBody(call.Pos()) {
+ assert(r.Bool()) // have body
+
+ r.curfn.Body = r.stmts()
+ r.curfn.Endlineno = r.pos()
+ }
+
+ // TODO(mdempsky): This shouldn't be necessary. Inlining might
+ // read in new function/method declarations, which could
+ // potentially be recursively inlined themselves; but we shouldn't
+ // need to read in the non-inlined bodies for the declarations
+ // themselves. But currently it's an easy fix to #50552.
+ readBodies(typecheck.Target, true)
+
+ // Replace any "return" statements within the function body.
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ if ret, ok := n.(*ir.ReturnStmt); ok {
+ n = typecheck.Stmt(r.inlReturn(ret, retvars))
+ }
+ ir.EditChildren(n, edit)
+ return n
+ }
+ edit(r.curfn)
+ })
+
+ body := ir.Nodes(r.curfn.Body)
+
+ // Reparent any declarations into the caller function.
+ for _, name := range r.curfn.Dcl {
+ name.Curfn = callerfn
+
+ if name.Class != ir.PAUTO {
+ name.SetPos(r.inlPos(name.Pos()))
+ name.SetInlFormal(true)
+ name.Class = ir.PAUTO
+ } else {
+ name.SetInlLocal(true)
+ }
+ }
+ callerfn.Dcl = append(callerfn.Dcl, r.curfn.Dcl...)
+
+ body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel))
+
+ res := ir.NewInlinedCallExpr(call.Pos(), body, ir.ToNodes(retvars))
+ res.SetInit(init)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+
+ // Inlining shouldn't add any functions to todoBodies.
+ assert(len(todoBodies) == 0)
+
+ return res
+}
+
+// inlReturn returns a statement that can substitute for the given
+// return statement when inlining.
+func (r *reader) inlReturn(ret *ir.ReturnStmt, retvars []*ir.Name) *ir.BlockStmt {
+ pos := r.inlCall.Pos()
+
+ block := ir.TakeInit(ret)
+
+ if results := ret.Results; len(results) != 0 {
+ assert(len(retvars) == len(results))
+
+ as2 := ir.NewAssignListStmt(pos, ir.OAS2, ir.ToNodes(retvars), ret.Results)
+
+ if r.delayResults {
+ for _, name := range retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ block.Append(ir.NewDecl(pos, ir.ODCL, name))
+ name.Defn = as2
+ }
+ }
+
+ block.Append(as2)
+ }
+
+ block.Append(ir.NewBranchStmt(pos, ir.OGOTO, r.retlabel))
+ return ir.NewBlockStmt(pos, block)
+}
+
+// expandInline reads in an extra copy of IR to populate
+// fn.Inl.Dcl.
+func expandInline(fn *ir.Func, pri pkgReaderIndex) {
+ // TODO(mdempsky): Remove this function. It's currently needed by
+ // dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to
+ // create abstract function DIEs. But we should be able to provide it
+ // with the same information some other way.
+
+ fndcls := len(fn.Dcl)
+ topdcls := len(typecheck.Target.Funcs)
+
+ tmpfn := ir.NewFunc(fn.Pos(), fn.Nname.Pos(), fn.Sym(), fn.Type())
+ tmpfn.ClosureVars = fn.ClosureVars
+
+ {
+ r := pri.asReader(pkgbits.RelocBody, pkgbits.SyncFuncBody)
+
+ // Don't change parameter's Sym/Nname fields.
+ r.funarghack = true
+
+ r.funcBody(tmpfn)
+ }
+
+ // Move tmpfn's params to fn.Inl.Dcl, and reparent under fn.
+ for _, name := range tmpfn.Dcl {
+ name.Curfn = fn
+ }
+ fn.Inl.Dcl = tmpfn.Dcl
+ fn.Inl.HaveDcl = true
+
+ // Double check that we didn't change fn.Dcl by accident.
+ assert(fndcls == len(fn.Dcl))
+
+ // typecheck.Stmts may have added function literals to
+ // typecheck.Target.Decls. Remove them again so we don't risk trying
+ // to compile them multiple times.
+ typecheck.Target.Funcs = typecheck.Target.Funcs[:topdcls]
+}
+
+// usedLocals returns a set of local variables that are used within body.
+func usedLocals(body []ir.Node) ir.NameSet {
+ var used ir.NameSet
+ ir.VisitList(body, func(n ir.Node) {
+ if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO {
+ used.Add(n)
+ }
+ })
+ return used
+}
+
+// @@@ Method wrappers
+
+// needWrapperTypes lists types for which we may need to generate
+// method wrappers.
+var needWrapperTypes []*types.Type
+
+// haveWrapperTypes lists types for which we know we already have
+// method wrappers, because we found the type in an imported package.
+var haveWrapperTypes []*types.Type
+
+// needMethodValueWrappers lists methods for which we may need to
+// generate method value wrappers.
+var needMethodValueWrappers []methodValueWrapper
+
+// haveMethodValueWrappers lists methods for which we know we already
+// have method value wrappers, because we found it in an imported
+// package.
+var haveMethodValueWrappers []methodValueWrapper
+
+type methodValueWrapper struct {
+ rcvr *types.Type
+ method *types.Field
+}
+
+func (r *reader) needWrapper(typ *types.Type) {
+ if typ.IsPtr() {
+ return
+ }
+
+ // If a type was found in an imported package, then we can assume
+ // that package (or one of its transitive dependencies) already
+ // generated method wrappers for it.
+ if r.importedDef() {
+ haveWrapperTypes = append(haveWrapperTypes, typ)
+ } else {
+ needWrapperTypes = append(needWrapperTypes, typ)
+ }
+}
+
+// importedDef reports whether r is reading from an imported and
+// non-generic element.
+//
+// If a type was found in an imported package, then we can assume that
+// package (or one of its transitive dependencies) already generated
+// method wrappers for it.
+//
+// Exception: If we're instantiating an imported generic type or
+// function, we might be instantiating it with type arguments not
+// previously seen before.
+//
+// TODO(mdempsky): Distinguish when a generic function or type was
+// instantiated in an imported package so that we can add types to
+// haveWrapperTypes instead.
+func (r *reader) importedDef() bool {
+ return r.p != localPkgReader && !r.hasTypeParams()
+}
+
+func MakeWrappers(target *ir.Package) {
+ // always generate a wrapper for error.Error (#29304)
+ needWrapperTypes = append(needWrapperTypes, types.ErrorType)
+
+ seen := make(map[string]*types.Type)
+
+ for _, typ := range haveWrapperTypes {
+ wrapType(typ, target, seen, false)
+ }
+ haveWrapperTypes = nil
+
+ for _, typ := range needWrapperTypes {
+ wrapType(typ, target, seen, true)
+ }
+ needWrapperTypes = nil
+
+ for _, wrapper := range haveMethodValueWrappers {
+ wrapMethodValue(wrapper.rcvr, wrapper.method, target, false)
+ }
+ haveMethodValueWrappers = nil
+
+ for _, wrapper := range needMethodValueWrappers {
+ wrapMethodValue(wrapper.rcvr, wrapper.method, target, true)
+ }
+ needMethodValueWrappers = nil
+}
+
+func wrapType(typ *types.Type, target *ir.Package, seen map[string]*types.Type, needed bool) {
+ key := typ.LinkString()
+ if prev := seen[key]; prev != nil {
+ if !types.Identical(typ, prev) {
+ base.Fatalf("collision: types %v and %v have link string %q", typ, prev, key)
+ }
+ return
+ }
+ seen[key] = typ
+
+ if !needed {
+ // Only called to add to 'seen'.
+ return
+ }
+
+ if !typ.IsInterface() {
+ typecheck.CalcMethods(typ)
+ }
+ for _, meth := range typ.AllMethods() {
+ if meth.Sym.IsBlank() || !meth.IsMethod() {
+ base.FatalfAt(meth.Pos, "invalid method: %v", meth)
+ }
+
+ methodWrapper(0, typ, meth, target)
+
+ // For non-interface types, we also want *T wrappers.
+ if !typ.IsInterface() {
+ methodWrapper(1, typ, meth, target)
+
+ // For not-in-heap types, *T is a scalar, not pointer shaped,
+ // so the interface wrappers use **T.
+ if typ.NotInHeap() {
+ methodWrapper(2, typ, meth, target)
+ }
+ }
+ }
+}
+
+func methodWrapper(derefs int, tbase *types.Type, method *types.Field, target *ir.Package) {
+ wrapper := tbase
+ for i := 0; i < derefs; i++ {
+ wrapper = types.NewPtr(wrapper)
+ }
+
+ sym := ir.MethodSym(wrapper, method.Sym)
+ base.Assertf(!sym.Siggen(), "already generated wrapper %v", sym)
+ sym.SetSiggen(true)
+
+ wrappee := method.Type.Recv().Type
+ if types.Identical(wrapper, wrappee) ||
+ !types.IsMethodApplicable(wrapper, method) ||
+ !reflectdata.NeedEmit(tbase) {
+ return
+ }
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := newWrapperFunc(pos, sym, wrapper, method)
+
+ var recv ir.Node = fn.Nname.Type().Recv().Nname.(*ir.Name)
+
+ // For simple *T wrappers around T methods, panicwrap produces a
+ // nicer panic message.
+ if wrapper.IsPtr() && types.Identical(wrapper.Elem(), wrappee) {
+ cond := ir.NewBinaryExpr(pos, ir.OEQ, recv, types.BuiltinPkg.Lookup("nil").Def.(ir.Node))
+ then := []ir.Node{ir.NewCallExpr(pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)}
+ fn.Body.Append(ir.NewIfStmt(pos, cond, then, nil))
+ }
+
+ // typecheck will add one implicit deref, if necessary,
+ // but not-in-heap types require more for their **T wrappers.
+ for i := 1; i < derefs; i++ {
+ recv = Implicit(ir.NewStarExpr(pos, recv))
+ }
+
+ addTailCall(pos, fn, recv, method)
+
+ finishWrapperFunc(fn, target)
+}
+
+func wrapMethodValue(recvType *types.Type, method *types.Field, target *ir.Package, needed bool) {
+ sym := ir.MethodSymSuffix(recvType, method.Sym, "-fm")
+ if sym.Uniq() {
+ return
+ }
+ sym.SetUniq(true)
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := newWrapperFunc(pos, sym, nil, method)
+ sym.Def = fn.Nname
+
+ // Declare and initialize variable holding receiver.
+ recv := ir.NewHiddenParam(pos, fn, typecheck.Lookup(".this"), recvType)
+
+ if !needed {
+ return
+ }
+
+ addTailCall(pos, fn, recv, method)
+
+ finishWrapperFunc(fn, target)
+}
+
+func newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func {
+ sig := newWrapperType(wrapper, method)
+
+ fn := ir.NewFunc(pos, pos, sym, sig)
+ fn.DeclareParams(true)
+ fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
+
+ return fn
+}
+
+func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ // We generate wrappers after the global inlining pass,
+ // so we're responsible for applying inlining ourselves here.
+ // TODO(prattmic): plumb PGO.
+ interleaved.DevirtualizeAndInlineFunc(fn, nil)
+
+ // The body of wrapper function after inlining may reveal new ir.OMETHVALUE node,
+ // we don't know whether wrapper function has been generated for it or not, so
+ // generate one immediately here.
+ //
+ // Further, after CL 492017, function that construct closures is allowed to be inlined,
+ // even though the closure itself can't be inline. So we also need to visit body of any
+ // closure that we see when visiting body of the wrapper function.
+ ir.VisitFuncAndClosures(fn, func(n ir.Node) {
+ if n, ok := n.(*ir.SelectorExpr); ok && n.Op() == ir.OMETHVALUE {
+ wrapMethodValue(n.X.Type(), n.Selection, target, true)
+ }
+ })
+
+ fn.Nname.Defn = fn
+ target.Funcs = append(target.Funcs, fn)
+}
+
+// newWrapperType returns a copy of the given signature type, but with
+// the receiver parameter type substituted with recvType.
+// If recvType is nil, newWrapperType returns a signature
+// without a receiver parameter.
+func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
+ clone := func(params []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(params))
+ for i, param := range params {
+ res[i] = types.NewField(param.Pos, param.Sym, param.Type)
+ res[i].SetIsDDD(param.IsDDD())
+ }
+ return res
+ }
+
+ sig := method.Type
+
+ var recv *types.Field
+ if recvType != nil {
+ recv = types.NewField(sig.Recv().Pos, sig.Recv().Sym, recvType)
+ }
+ params := clone(sig.Params())
+ results := clone(sig.Results())
+
+ return types.NewSignature(recv, params, results)
+}
+
+func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
+ sig := fn.Nname.Type()
+ args := make([]ir.Node, sig.NumParams())
+ for i, param := range sig.Params() {
+ args[i] = param.Nname.(*ir.Name)
+ }
+
+ // TODO(mdempsky): Support creating OTAILCALL, when possible. See reflectdata.methodWrapper.
+ // Not urgent though, because tail calls are currently incompatible with regabi anyway.
+
+ fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls?
+
+ dot := typecheck.XDotMethod(pos, recv, method.Sym, true)
+ call := typecheck.Call(pos, dot, args, method.Type.IsVariadic()).(*ir.CallExpr)
+
+ if method.Type.NumResults() == 0 {
+ fn.Body.Append(call)
+ return
+ }
+
+ ret := ir.NewReturnStmt(pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+}
+
+func setBasePos(pos src.XPos) {
+ // Set the position for any error messages we might print (e.g. too large types).
+ base.Pos = pos
+}
+
+// dictParamName is the name of the synthetic dictionary parameter
+// added to shaped functions.
+//
+// N.B., this variable name is known to Delve:
+// https://github.com/go-delve/delve/blob/cb91509630529e6055be845688fd21eb89ae8714/pkg/proc/eval.go#L28
+const dictParamName = typecheck.LocalDictName
+
+// shapeSig returns a copy of fn's signature, except adding a
+// dictionary parameter and promoting the receiver parameter (if any)
+// to a normal parameter.
+//
+// The parameter types.Fields are all copied too, so their Nname
+// fields can be initialized for use by the shape function.
+func shapeSig(fn *ir.Func, dict *readerDict) *types.Type {
+ sig := fn.Nname.Type()
+ oldRecv := sig.Recv()
+
+ var recv *types.Field
+ if oldRecv != nil {
+ recv = types.NewField(oldRecv.Pos, oldRecv.Sym, oldRecv.Type)
+ }
+
+ params := make([]*types.Field, 1+sig.NumParams())
+ params[0] = types.NewField(fn.Pos(), fn.Sym().Pkg.Lookup(dictParamName), types.NewPtr(dict.varType()))
+ for i, param := range sig.Params() {
+ d := types.NewField(param.Pos, param.Sym, param.Type)
+ d.SetIsDDD(param.IsDDD())
+ params[1+i] = d
+ }
+
+ results := make([]*types.Field, sig.NumResults())
+ for i, result := range sig.Results() {
+ results[i] = types.NewField(result.Pos, result.Sym, result.Type)
+ }
+
+ return types.NewSignature(recv, params, results)
+}
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
new file mode 100644
index 0000000..43a39ab
--- /dev/null
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file will evolve, since we plan to do a mix of stenciling and passing
+// around dictionaries.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+)
+
+func assert(p bool) {
+ base.Assert(p)
+}
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
new file mode 100644
index 0000000..04f92d2
--- /dev/null
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+)
+
+// TODO(mdempsky): Investigate replacing with switch statements or dense arrays.
+
+var branchOps = [...]ir.Op{
+ syntax.Break: ir.OBREAK,
+ syntax.Continue: ir.OCONTINUE,
+ syntax.Fallthrough: ir.OFALL,
+ syntax.Goto: ir.OGOTO,
+}
+
+var callOps = [...]ir.Op{
+ syntax.Defer: ir.ODEFER,
+ syntax.Go: ir.OGO,
+}
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
new file mode 100644
index 0000000..76c6d15
--- /dev/null
+++ b/src/cmd/compile/internal/noder/types.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+)
+
+var basics = [...]**types.Type{
+ types2.Invalid: new(*types.Type),
+ types2.Bool: &types.Types[types.TBOOL],
+ types2.Int: &types.Types[types.TINT],
+ types2.Int8: &types.Types[types.TINT8],
+ types2.Int16: &types.Types[types.TINT16],
+ types2.Int32: &types.Types[types.TINT32],
+ types2.Int64: &types.Types[types.TINT64],
+ types2.Uint: &types.Types[types.TUINT],
+ types2.Uint8: &types.Types[types.TUINT8],
+ types2.Uint16: &types.Types[types.TUINT16],
+ types2.Uint32: &types.Types[types.TUINT32],
+ types2.Uint64: &types.Types[types.TUINT64],
+ types2.Uintptr: &types.Types[types.TUINTPTR],
+ types2.Float32: &types.Types[types.TFLOAT32],
+ types2.Float64: &types.Types[types.TFLOAT64],
+ types2.Complex64: &types.Types[types.TCOMPLEX64],
+ types2.Complex128: &types.Types[types.TCOMPLEX128],
+ types2.String: &types.Types[types.TSTRING],
+ types2.UnsafePointer: &types.Types[types.TUNSAFEPTR],
+ types2.UntypedBool: &types.UntypedBool,
+ types2.UntypedInt: &types.UntypedInt,
+ types2.UntypedRune: &types.UntypedRune,
+ types2.UntypedFloat: &types.UntypedFloat,
+ types2.UntypedComplex: &types.UntypedComplex,
+ types2.UntypedString: &types.UntypedString,
+ types2.UntypedNil: &types.Types[types.TNIL],
+}
+
+var dirs = [...]types.ChanDir{
+ types2.SendRecv: types.Cboth,
+ types2.SendOnly: types.Csend,
+ types2.RecvOnly: types.Crecv,
+}
+
+// deref2 does a single deref of types2 type t, if it is a pointer type.
+func deref2(t types2.Type) types2.Type {
+ if ptr := types2.AsPointer(t); ptr != nil {
+ t = ptr.Elem()
+ }
+ return t
+}
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
new file mode 100644
index 0000000..492b00d
--- /dev/null
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -0,0 +1,535 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "internal/pkgbits"
+ "io"
+ "runtime"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// localPkgReader holds the package reader used for reading the local
+// package. It exists so the unified IR linker can refer back to it
+// later.
+var localPkgReader *pkgReader
+
+// LookupMethodFunc returns the ir.Func for an arbitrary full symbol name if
+// that function exists in the set of available export data.
+//
+// This allows lookup of arbitrary functions and methods that aren't otherwise
+// referenced by the local package and thus haven't been read yet.
+//
+// TODO(prattmic): Does not handle instantiation of generic types. Currently
+// profiles don't contain the original type arguments, so we won't be able to
+// create the runtime dictionaries.
+//
+// TODO(prattmic): Hit rate of this function is usually fairly low, and errors
+// are only used when debug logging is enabled. Consider constructing cheaper
+// errors by default.
+func LookupFunc(fullName string) (*ir.Func, error) {
+ pkgPath, symName, err := ir.ParseLinkFuncName(fullName)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing symbol name %q: %v", fullName, err)
+ }
+
+ pkg, ok := types.PkgMap()[pkgPath]
+ if !ok {
+ return nil, fmt.Errorf("pkg %s doesn't exist in %v", pkgPath, types.PkgMap())
+ }
+
+ // Symbol naming is ambiguous. We can't necessarily distinguish between
+ // a method and a closure. e.g., is foo.Bar.func1 a closure defined in
+ // function Bar, or a method on type Bar? Thus we must simply attempt
+ // to lookup both.
+
+ fn, err := lookupFunction(pkg, symName)
+ if err == nil {
+ return fn, nil
+ }
+
+ fn, mErr := lookupMethod(pkg, symName)
+ if mErr == nil {
+ return fn, nil
+ }
+
+ return nil, fmt.Errorf("%s is not a function (%v) or method (%v)", fullName, err, mErr)
+}
+
+func lookupFunction(pkg *types.Pkg, symName string) (*ir.Func, error) {
+ sym := pkg.Lookup(symName)
+
+ // TODO(prattmic): Enclosed functions (e.g., foo.Bar.func1) are not
+ // present in objReader, only as OCLOSURE nodes in the enclosing
+ // function.
+ pri, ok := objReader[sym]
+ if !ok {
+ return nil, fmt.Errorf("func sym %v missing objReader", sym)
+ }
+
+ node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false)
+ if err != nil {
+ return nil, fmt.Errorf("func sym %v lookup error: %w", sym, err)
+ }
+ name := node.(*ir.Name)
+ if name.Op() != ir.ONAME || name.Class != ir.PFUNC {
+ return nil, fmt.Errorf("func sym %v refers to non-function name: %v", sym, name)
+ }
+ return name.Func, nil
+}
+
+func lookupMethod(pkg *types.Pkg, symName string) (*ir.Func, error) {
+ // N.B. readPackage creates a Sym for every object in the package to
+ // initialize objReader and importBodyReader, even if the object isn't
+ // read.
+ //
+ // However, objReader is only initialized for top-level objects, so we
+ // must first lookup the type and use that to find the method rather
+ // than looking for the method directly.
+ typ, meth, err := ir.LookupMethodSelector(pkg, symName)
+ if err != nil {
+ return nil, fmt.Errorf("error looking up method symbol %q: %v", symName, err)
+ }
+
+ pri, ok := objReader[typ]
+ if !ok {
+ return nil, fmt.Errorf("type sym %v missing objReader", typ)
+ }
+
+ node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false)
+ if err != nil {
+ return nil, fmt.Errorf("func sym %v lookup error: %w", typ, err)
+ }
+ name := node.(*ir.Name)
+ if name.Op() != ir.OTYPE {
+ return nil, fmt.Errorf("type sym %v refers to non-type name: %v", typ, name)
+ }
+ if name.Alias() {
+ return nil, fmt.Errorf("type sym %v refers to alias", typ)
+ }
+
+ for _, m := range name.Type().Methods() {
+ if m.Sym == meth {
+ fn := m.Nname.(*ir.Name).Func
+ return fn, nil
+ }
+ }
+
+ return nil, fmt.Errorf("method %s missing from method set of %v", symName, typ)
+}
+
+// unified constructs the local package's Internal Representation (IR)
+// from its syntax tree (AST).
+//
+// The pipeline contains 2 steps:
+//
+// 1. Generate the export data "stub".
+//
+// 2. Generate the IR from the export data above.
+//
+// The package data "stub" at step (1) contains everything from the local package,
+// but nothing that has been imported. When we're actually writing out export data
+// to the output files (see writeNewExport), we run the "linker", which:
+//
+// - Updates compiler extensions data (e.g. inlining cost, escape analysis results).
+//
+// - Handles re-exporting any transitive dependencies.
+//
+// - Prunes out any unnecessary details (e.g. non-inlineable functions, because any
+// downstream importers only care about inlinable functions).
+//
+// The source files are typechecked twice: once before writing the export data
+// using types2, and again after reading the export data using gc/typecheck.
+// The duplication of work will go away once we only use the types2 type checker,
+// removing the gc/typecheck step. For now, it is kept because:
+//
+// - It reduces the engineering costs in maintaining a fork of typecheck
+// (e.g. no need to backport fixes like CL 327651).
+//
+// - It makes it easier to pass toolstash -cmp.
+//
+// - Historically, we would always re-run the typechecker after importing a package,
+// even though we know the imported data is valid. It's not ideal, but it's
+// not causing any problems either.
+//
+// - gc/typecheck is still in charge of some transformations, such as rewriting
+// multi-valued function calls or transforming ir.OINDEX to ir.OINDEXMAP.
+//
+// Using the syntax tree with types2, which has a complete representation of generics,
+// the unified IR has the full typed AST needed for introspection during step (1).
+// In other words, we have all the necessary information to build the generic IR form
+// (see writer.captureVars for an example).
+func unified(m posMap, noders []*noder) {
+ inline.InlineCall = unifiedInlineCall
+ typecheck.HaveInlineBody = unifiedHaveInlineBody
+ pgo.LookupFunc = LookupFunc
+
+ data := writePkgStub(m, noders)
+
+ target := typecheck.Target
+
+ localPkgReader = newPkgReader(pkgbits.NewPkgDecoder(types.LocalPkg.Path, data))
+ readPackage(localPkgReader, types.LocalPkg, true)
+
+ r := localPkgReader.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
+ r.pkgInit(types.LocalPkg, target)
+
+ readBodies(target, false)
+
+ // Check that nothing snuck past typechecking.
+ for _, fn := range target.Funcs {
+ if fn.Typecheck() == 0 {
+ base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
+ }
+
+ // For functions, check that at least their first statement (if
+ // any) was typechecked too.
+ if len(fn.Body) != 0 {
+ if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
+ base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
+ }
+ }
+ }
+
+ // For functions originally came from package runtime,
+ // mark as norace to prevent instrumenting, see issue #60439.
+ for _, fn := range target.Funcs {
+ if !base.Flag.CompilingRuntime && types.RuntimeSymName(fn.Sym()) != "" {
+ fn.Pragma |= ir.Norace
+ }
+ }
+
+ base.ExitIfErrors() // just in case
+}
+
+// readBodies iteratively expands all pending dictionaries and
+// function bodies.
+//
+// If duringInlining is true, then the inline.InlineDecls is called as
+// necessary on instantiations of imported generic functions, so their
+// inlining costs can be computed.
+func readBodies(target *ir.Package, duringInlining bool) {
+ var inlDecls []*ir.Func
+
+ // Don't use range--bodyIdx can add closures to todoBodies.
+ for {
+ // The order we expand dictionaries and bodies doesn't matter, so
+ // pop from the end to reduce todoBodies reallocations if it grows
+ // further.
+ //
+ // However, we do at least need to flush any pending dictionaries
+ // before reading bodies, because bodies might reference the
+ // dictionaries.
+
+ if len(todoDicts) > 0 {
+ fn := todoDicts[len(todoDicts)-1]
+ todoDicts = todoDicts[:len(todoDicts)-1]
+ fn()
+ continue
+ }
+
+ if len(todoBodies) > 0 {
+ fn := todoBodies[len(todoBodies)-1]
+ todoBodies = todoBodies[:len(todoBodies)-1]
+
+ pri, ok := bodyReader[fn]
+ assert(ok)
+ pri.funcBody(fn)
+
+ // Instantiated generic function: add to Decls for typechecking
+ // and compilation.
+ if fn.OClosure == nil && len(pri.dict.targs) != 0 {
+ // cmd/link does not support a type symbol referencing a method symbol
+ // across DSO boundary, so force re-compiling methods on a generic type
+ // even it was seen from imported package in linkshared mode, see #58966.
+ canSkipNonGenericMethod := !(base.Ctxt.Flag_linkshared && ir.IsMethod(fn))
+ if duringInlining && canSkipNonGenericMethod {
+ inlDecls = append(inlDecls, fn)
+ } else {
+ target.Funcs = append(target.Funcs, fn)
+ }
+ }
+
+ continue
+ }
+
+ break
+ }
+
+ todoDicts = nil
+ todoBodies = nil
+
+ if len(inlDecls) != 0 {
+ // If we instantiated any generic functions during inlining, we need
+ // to call CanInline on them so they'll be transitively inlined
+ // correctly (#56280).
+ //
+ // We know these functions were already compiled in an imported
+ // package though, so we don't need to actually apply InlineCalls or
+ // save the function bodies any further than this.
+ //
+ // We can also lower the -m flag to 0, to suppress duplicate "can
+ // inline" diagnostics reported against the imported package. Again,
+ // we already reported those diagnostics in the original package, so
+ // it's pointless repeating them here.
+
+ oldLowerM := base.Flag.LowerM
+ base.Flag.LowerM = 0
+ inline.CanInlineFuncs(inlDecls, nil)
+ base.Flag.LowerM = oldLowerM
+
+ for _, fn := range inlDecls {
+ fn.Body = nil // free memory
+ }
+ }
+}
+
+// writePkgStub type checks the given parsed source files,
+// writes an export data package stub representing them,
+// and returns the result.
+func writePkgStub(m posMap, noders []*noder) string {
+ pkg, info := checkFiles(m, noders)
+
+ pw := newPkgWriter(m, pkg, info)
+
+ pw.collectDecls(noders)
+
+ publicRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPublic)
+ privateRootWriter := pw.newWriter(pkgbits.RelocMeta, pkgbits.SyncPrivate)
+
+ assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
+ assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx)
+
+ {
+ w := publicRootWriter
+ w.pkg(pkg)
+ w.Bool(false) // TODO(mdempsky): Remove; was "has init"
+
+ scope := pkg.Scope()
+ names := scope.Names()
+ w.Len(len(names))
+ for _, name := range names {
+ w.obj(scope.Lookup(name), nil)
+ }
+
+ w.Sync(pkgbits.SyncEOF)
+ w.Flush()
+ }
+
+ {
+ w := privateRootWriter
+ w.pkgInit(noders)
+ w.Flush()
+ }
+
+ var sb strings.Builder
+ pw.DumpTo(&sb)
+
+ // At this point, we're done with types2. Make sure the package is
+ // garbage collected.
+ freePackage(pkg)
+
+ return sb.String()
+}
+
+// freePackage ensures the given package is garbage collected.
+func freePackage(pkg *types2.Package) {
+ // The GC test below relies on a precise GC that runs finalizers as
+ // soon as objects are unreachable. Our implementation provides
+ // this, but other/older implementations may not (e.g., Go 1.4 does
+ // not because of #22350). To avoid imposing unnecessary
+ // restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test
+ // during bootstrapping.
+ if base.CompilerBootstrap || base.Debug.GCCheck == 0 {
+ *pkg = types2.Package{}
+ return
+ }
+
+ // Set a finalizer on pkg so we can detect if/when it's collected.
+ done := make(chan struct{})
+ runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) })
+
+ // Important: objects involved in cycles are not finalized, so zero
+ // out pkg to break its cycles and allow the finalizer to run.
+ *pkg = types2.Package{}
+
+ // It typically takes just 1 or 2 cycles to release pkg, but it
+ // doesn't hurt to try a few more times.
+ for i := 0; i < 10; i++ {
+ select {
+ case <-done:
+ return
+ default:
+ runtime.GC()
+ }
+ }
+
+ base.Fatalf("package never finalized")
+}
+
+// readPackage reads package export data from pr to populate
+// importpkg.
+//
+// localStub indicates whether pr is reading the stub export data for
+// the local package, as opposed to relocated export data for an
+// import.
+func readPackage(pr *pkgReader, importpkg *types.Pkg, localStub bool) {
+ {
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+
+ pkg := r.pkg()
+ base.Assertf(pkg == importpkg, "have package %q (%p), want package %q (%p)", pkg.Path, pkg, importpkg.Path, importpkg)
+
+ r.Bool() // TODO(mdempsky): Remove; was "has init"
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ idx := r.Reloc(pkgbits.RelocObj)
+ assert(r.Len() == 0)
+
+ path, name, code := r.p.PeekObj(idx)
+ if code != pkgbits.ObjStub {
+ objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil, nil, nil}
+ }
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+ }
+
+ if !localStub {
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PrivateRootIdx, pkgbits.SyncPrivate)
+
+ if r.Bool() {
+ sym := importpkg.Lookup(".inittask")
+ task := ir.NewNameAt(src.NoXPos, sym, nil)
+ task.Class = ir.PEXTERN
+ sym.Def = task
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ path := r.String()
+ name := r.String()
+ idx := r.Reloc(pkgbits.RelocBody)
+
+ sym := types.NewPkg(path, "").Lookup(name)
+ if _, ok := importBodyReader[sym]; !ok {
+ importBodyReader[sym] = pkgReaderIndex{pr, idx, nil, nil, nil}
+ }
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+ }
+}
+
+// writeUnifiedExport writes to `out` the finalized, self-contained
+// Unified IR export data file for the current compilation unit.
+func writeUnifiedExport(out io.Writer) {
+ l := linker{
+ pw: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),
+
+ pkgs: make(map[string]pkgbits.Index),
+ decls: make(map[*types.Sym]pkgbits.Index),
+ bodies: make(map[*types.Sym]pkgbits.Index),
+ }
+
+ publicRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPublic)
+ privateRootWriter := l.pw.NewEncoder(pkgbits.RelocMeta, pkgbits.SyncPrivate)
+ assert(publicRootWriter.Idx == pkgbits.PublicRootIdx)
+ assert(privateRootWriter.Idx == pkgbits.PrivateRootIdx)
+
+ var selfPkgIdx pkgbits.Index
+
+ {
+ pr := localPkgReader
+ r := pr.NewDecoder(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+
+ r.Sync(pkgbits.SyncPkg)
+ selfPkgIdx = l.relocIdx(pr, pkgbits.RelocPkg, r.Reloc(pkgbits.RelocPkg))
+
+ r.Bool() // TODO(mdempsky): Remove; was "has init"
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ r.Sync(pkgbits.SyncObject)
+ assert(!r.Bool())
+ idx := r.Reloc(pkgbits.RelocObj)
+ assert(r.Len() == 0)
+
+ xpath, xname, xtag := pr.PeekObj(idx)
+ assert(xpath == pr.PkgPath())
+ assert(xtag != pkgbits.ObjStub)
+
+ if types.IsExported(xname) {
+ l.relocIdx(pr, pkgbits.RelocObj, idx)
+ }
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+ }
+
+ {
+ var idxs []pkgbits.Index
+ for _, idx := range l.decls {
+ idxs = append(idxs, idx)
+ }
+ sort.Slice(idxs, func(i, j int) bool { return idxs[i] < idxs[j] })
+
+ w := publicRootWriter
+
+ w.Sync(pkgbits.SyncPkg)
+ w.Reloc(pkgbits.RelocPkg, selfPkgIdx)
+ w.Bool(false) // TODO(mdempsky): Remove; was "has init"
+
+ w.Len(len(idxs))
+ for _, idx := range idxs {
+ w.Sync(pkgbits.SyncObject)
+ w.Bool(false)
+ w.Reloc(pkgbits.RelocObj, idx)
+ w.Len(0)
+ }
+
+ w.Sync(pkgbits.SyncEOF)
+ w.Flush()
+ }
+
+ {
+ type symIdx struct {
+ sym *types.Sym
+ idx pkgbits.Index
+ }
+ var bodies []symIdx
+ for sym, idx := range l.bodies {
+ bodies = append(bodies, symIdx{sym, idx})
+ }
+ sort.Slice(bodies, func(i, j int) bool { return bodies[i].idx < bodies[j].idx })
+
+ w := privateRootWriter
+
+ w.Bool(typecheck.Lookup(".inittask").Def != nil)
+
+ w.Len(len(bodies))
+ for _, body := range bodies {
+ w.String(body.sym.Pkg.Path)
+ w.String(body.sym.Name)
+ w.Reloc(pkgbits.RelocBody, body.idx)
+ }
+
+ w.Sync(pkgbits.SyncEOF)
+ w.Flush()
+ }
+
+ base.Ctxt.Fingerprint = l.pw.DumpTo(out)
+}
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
new file mode 100644
index 0000000..e5894c9
--- /dev/null
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -0,0 +1,3003 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/version"
+ "internal/buildcfg"
+ "internal/pkgbits"
+ "os"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+)
+
+// This file implements the Unified IR package writer and defines the
+// Unified IR export data format.
+//
+// Low-level coding details (e.g., byte-encoding of individual
+// primitive values, or handling element bitstreams and
+// cross-references) are handled by internal/pkgbits, so here we only
+// concern ourselves with higher-level worries like mapping Go
+// language constructs into elements.
+
+// There are two central types in the writing process: the "writer"
+// type handles writing out individual elements, while the "pkgWriter"
+// type keeps track of which elements have already been created.
+//
+// For each sort of "thing" (e.g., position, package, object, type)
+// that can be written into the export data, there are generally
+// several methods that work together:
+//
+// - writer.thing handles writing out a *use* of a thing, which often
+// means writing a relocation to that thing's encoded index.
+//
+// - pkgWriter.thingIdx handles reserving an index for a thing, and
+// writing out any elements needed for the thing.
+//
+// - writer.doThing handles writing out the *definition* of a thing,
+// which in general is a mix of low-level coding primitives (e.g.,
+// ints and strings) or uses of other things.
+//
+// A design goal of Unified IR is to have a single, canonical writer
+// implementation, but multiple reader implementations each tailored
+// to their respective needs. For example, within cmd/compile's own
+// backend, inlining is implemented largely by just re-running the
+// function body reading code.
+
+// TODO(mdempsky): Add an importer for Unified IR to the x/tools repo,
+// and better document the file format boundary between public and
+// private data.
+
+// A pkgWriter constructs Unified IR export data from the results of
+// running the types2 type checker on a Go compilation unit.
+type pkgWriter struct {
+ pkgbits.PkgEncoder
+
+ m posMap
+ curpkg *types2.Package
+ info *types2.Info
+
+ // Indices for previously written syntax and types2 things.
+
+ posBasesIdx map[*syntax.PosBase]pkgbits.Index
+ pkgsIdx map[*types2.Package]pkgbits.Index
+ typsIdx map[types2.Type]pkgbits.Index
+ objsIdx map[types2.Object]pkgbits.Index
+
+ // Maps from types2.Objects back to their syntax.Decl.
+
+ funDecls map[*types2.Func]*syntax.FuncDecl
+ typDecls map[*types2.TypeName]typeDeclGen
+
+ // linknames maps package-scope objects to their linker symbol name,
+ // if specified by a //go:linkname directive.
+ linknames map[types2.Object]string
+
+ // cgoPragmas accumulates any //go:cgo_* pragmas that need to be
+ // passed through to cmd/link.
+ cgoPragmas [][]string
+}
+
+// newPkgWriter returns an initialized pkgWriter for the specified
+// package.
+func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter {
+ return &pkgWriter{
+ PkgEncoder: pkgbits.NewPkgEncoder(base.Debug.SyncFrames),
+
+ m: m,
+ curpkg: pkg,
+ info: info,
+
+ pkgsIdx: make(map[*types2.Package]pkgbits.Index),
+ objsIdx: make(map[types2.Object]pkgbits.Index),
+ typsIdx: make(map[types2.Type]pkgbits.Index),
+
+ posBasesIdx: make(map[*syntax.PosBase]pkgbits.Index),
+
+ funDecls: make(map[*types2.Func]*syntax.FuncDecl),
+ typDecls: make(map[*types2.TypeName]typeDeclGen),
+
+ linknames: make(map[types2.Object]string),
+ }
+}
+
+// errorf reports a user error about thing p.
+func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) {
+ base.ErrorfAt(pw.m.pos(p), 0, msg, args...)
+}
+
+// fatalf reports an internal compiler error about thing p.
+func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) {
+ base.FatalfAt(pw.m.pos(p), msg, args...)
+}
+
+// unexpected reports a fatal error about a thing of unexpected
+// dynamic type.
+func (pw *pkgWriter) unexpected(what string, p poser) {
+ pw.fatalf(p, "unexpected %s: %v (%T)", what, p, p)
+}
+
+func (pw *pkgWriter) typeAndValue(x syntax.Expr) syntax.TypeAndValue {
+ tv, ok := pw.maybeTypeAndValue(x)
+ if !ok {
+ pw.fatalf(x, "missing Types entry: %v", syntax.String(x))
+ }
+ return tv
+}
+
+func (pw *pkgWriter) maybeTypeAndValue(x syntax.Expr) (syntax.TypeAndValue, bool) {
+ tv := x.GetTypeInfo()
+
+ // If x is a generic function whose type arguments are inferred
+ // from assignment context, then we need to find its inferred type
+ // in Info.Instances instead.
+ if name, ok := x.(*syntax.Name); ok {
+ if inst, ok := pw.info.Instances[name]; ok {
+ tv.Type = inst.Type
+ }
+ }
+
+ return tv, tv.Type != nil
+}
+
+// typeOf returns the Type of the given value expression.
+func (pw *pkgWriter) typeOf(expr syntax.Expr) types2.Type {
+ tv := pw.typeAndValue(expr)
+ if !tv.IsValue() {
+ pw.fatalf(expr, "expected value: %v", syntax.String(expr))
+ }
+ return tv.Type
+}
+
+// A writer provides APIs for writing out an individual element.
+type writer struct {
+ p *pkgWriter
+
+ pkgbits.Encoder
+
+ // sig holds the signature for the current function body, if any.
+ sig *types2.Signature
+
+ // TODO(mdempsky): We should be able to prune localsIdx whenever a
+ // scope closes, and then maybe we can just use the same map for
+ // storing the TypeParams too (as their TypeName instead).
+
+ // localsIdx tracks any local variables declared within this
+ // function body. It's unused for writing out non-body things.
+ localsIdx map[*types2.Var]int
+
+ // closureVars tracks any free variables that are referenced by this
+ // function body. It's unused for writing out non-body things.
+ closureVars []posVar
+ closureVarsIdx map[*types2.Var]int // index of previously seen free variables
+
+ dict *writerDict
+
+ // derived tracks whether the type being written out references any
+ // type parameters. It's unused for writing non-type things.
+ derived bool
+}
+
+// A writerDict tracks types and objects that are used by a declaration.
+type writerDict struct {
+ implicits []*types2.TypeName
+
+ // derived is a slice of type indices for computing derived types
+ // (i.e., types that depend on the declaration's type parameters).
+ derived []derivedInfo
+
+ // derivedIdx maps a Type to its corresponding index within the
+ // derived slice, if present.
+ derivedIdx map[types2.Type]pkgbits.Index
+
+ // These slices correspond to entries in the runtime dictionary.
+ typeParamMethodExprs []writerMethodExprInfo
+ subdicts []objInfo
+ rtypes []typeInfo
+ itabs []itabInfo
+}
+
+type itabInfo struct {
+ typ typeInfo
+ iface typeInfo
+}
+
+// typeParamIndex returns the index of the given type parameter within
+// the dictionary. This may differ from typ.Index() when there are
+// implicit type parameters due to defined types declared within a
+// generic function or method.
+func (dict *writerDict) typeParamIndex(typ *types2.TypeParam) int {
+ for idx, implicit := range dict.implicits {
+ if types2.Unalias(implicit.Type()).(*types2.TypeParam) == typ {
+ return idx
+ }
+ }
+
+ return len(dict.implicits) + typ.Index()
+}
+
+// A derivedInfo represents a reference to an encoded generic Go type.
+type derivedInfo struct {
+ idx pkgbits.Index
+ needed bool // TODO(mdempsky): Remove.
+}
+
+// A typeInfo represents a reference to an encoded Go type.
+//
+// If derived is true, then the typeInfo represents a generic Go type
+// that contains type parameters. In this case, idx is an index into
+// the readerDict.derived{,Types} arrays.
+//
+// Otherwise, the typeInfo represents a non-generic Go type, and idx
+// is an index into the reader.typs array instead.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
+
+// An objInfo represents a reference to an encoded, instantiated (if
+// applicable) Go object.
+type objInfo struct {
+ idx pkgbits.Index // index for the generic function declaration
+ explicits []typeInfo // info for the type arguments
+}
+
+// A selectorInfo represents a reference to an encoded field or method
+// name (i.e., objects that can only be accessed using selector
+// expressions).
+type selectorInfo struct {
+ pkgIdx pkgbits.Index
+ nameIdx pkgbits.Index
+}
+
+// anyDerived reports whether any of info's explicit type arguments
+// are derived types.
+func (info objInfo) anyDerived() bool {
+ for _, explicit := range info.explicits {
+ if explicit.derived {
+ return true
+ }
+ }
+ return false
+}
+
+// equals reports whether info and other represent the same Go object
+// (i.e., same base object and identical type arguments, if any).
+func (info objInfo) equals(other objInfo) bool {
+ if info.idx != other.idx {
+ return false
+ }
+ assert(len(info.explicits) == len(other.explicits))
+ for i, targ := range info.explicits {
+ if targ != other.explicits[i] {
+ return false
+ }
+ }
+ return true
+}
+
+type writerMethodExprInfo struct {
+ typeParamIdx int
+ methodInfo selectorInfo
+}
+
+// typeParamMethodExprIdx returns the index where the given encoded
+// method expression function pointer appears within this dictionary's
+// type parameters method expressions section, adding it if necessary.
+func (dict *writerDict) typeParamMethodExprIdx(typeParamIdx int, methodInfo selectorInfo) int {
+ newInfo := writerMethodExprInfo{typeParamIdx, methodInfo}
+
+ for idx, oldInfo := range dict.typeParamMethodExprs {
+ if oldInfo == newInfo {
+ return idx
+ }
+ }
+
+ idx := len(dict.typeParamMethodExprs)
+ dict.typeParamMethodExprs = append(dict.typeParamMethodExprs, newInfo)
+ return idx
+}
+
+// subdictIdx returns the index where the given encoded object's
+// runtime dictionary appears within this dictionary's subdictionary
+// section, adding it if necessary.
+func (dict *writerDict) subdictIdx(newInfo objInfo) int {
+ for idx, oldInfo := range dict.subdicts {
+ if oldInfo.equals(newInfo) {
+ return idx
+ }
+ }
+
+ idx := len(dict.subdicts)
+ dict.subdicts = append(dict.subdicts, newInfo)
+ return idx
+}
+
+// rtypeIdx returns the index where the given encoded type's
+// *runtime._type value appears within this dictionary's rtypes
+// section, adding it if necessary.
+func (dict *writerDict) rtypeIdx(newInfo typeInfo) int {
+ for idx, oldInfo := range dict.rtypes {
+ if oldInfo == newInfo {
+ return idx
+ }
+ }
+
+ idx := len(dict.rtypes)
+ dict.rtypes = append(dict.rtypes, newInfo)
+ return idx
+}
+
+// itabIdx returns the index where the given encoded type pair's
+// *runtime.itab value appears within this dictionary's itabs section,
+// adding it if necessary.
+func (dict *writerDict) itabIdx(typInfo, ifaceInfo typeInfo) int {
+ newInfo := itabInfo{typInfo, ifaceInfo}
+
+ for idx, oldInfo := range dict.itabs {
+ if oldInfo == newInfo {
+ return idx
+ }
+ }
+
+ idx := len(dict.itabs)
+ dict.itabs = append(dict.itabs, newInfo)
+ return idx
+}
+
+func (pw *pkgWriter) newWriter(k pkgbits.RelocKind, marker pkgbits.SyncMarker) *writer {
+ return &writer{
+ Encoder: pw.NewEncoder(k, marker),
+ p: pw,
+ }
+}
+
+// @@@ Positions
+
+// pos writes the position of p into the element bitstream.
+func (w *writer) pos(p poser) {
+ w.Sync(pkgbits.SyncPos)
+ pos := p.Pos()
+
+ // TODO(mdempsky): Track down the remaining cases here and fix them.
+ if !w.Bool(pos.IsKnown()) {
+ return
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ w.posBase(pos.Base())
+ w.Uint(pos.Line())
+ w.Uint(pos.Col())
+}
+
+// posBase writes a reference to the given PosBase into the element
+// bitstream.
+func (w *writer) posBase(b *syntax.PosBase) {
+ w.Reloc(pkgbits.RelocPosBase, w.p.posBaseIdx(b))
+}
+
+// posBaseIdx returns the index for the given PosBase.
+func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) pkgbits.Index {
+ if idx, ok := pw.posBasesIdx[b]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(pkgbits.RelocPosBase, pkgbits.SyncPosBase)
+ w.p.posBasesIdx[b] = w.Idx
+
+ w.String(trimFilename(b))
+
+ if !w.Bool(b.IsFileBase()) {
+ w.pos(b)
+ w.Uint(b.Line())
+ w.Uint(b.Col())
+ }
+
+ return w.Flush()
+}
+
+// @@@ Packages
+
+// pkg writes a use of the given Package into the element bitstream.
+func (w *writer) pkg(pkg *types2.Package) {
+ w.pkgRef(w.p.pkgIdx(pkg))
+}
+
+func (w *writer) pkgRef(idx pkgbits.Index) {
+ w.Sync(pkgbits.SyncPkg)
+ w.Reloc(pkgbits.RelocPkg, idx)
+}
+
+// pkgIdx returns the index for the given package, adding it to the
+// package export data if needed.
+func (pw *pkgWriter) pkgIdx(pkg *types2.Package) pkgbits.Index {
+ if idx, ok := pw.pkgsIdx[pkg]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(pkgbits.RelocPkg, pkgbits.SyncPkgDef)
+ pw.pkgsIdx[pkg] = w.Idx
+
+ // The universe and package unsafe need to be handled specially by
+ // importers anyway, so we serialize them using just their package
+ // path. This ensures that readers don't confuse them for
+ // user-defined packages.
+ switch pkg {
+ case nil: // universe
+ w.String("builtin") // same package path used by godoc
+ case types2.Unsafe:
+ w.String("unsafe")
+ default:
+ // TODO(mdempsky): Write out pkg.Path() for curpkg too.
+ var path string
+ if pkg != w.p.curpkg {
+ path = pkg.Path()
+ }
+ base.Assertf(path != "builtin" && path != "unsafe", "unexpected path for user-defined package: %q", path)
+ w.String(path)
+ w.String(pkg.Name())
+
+ w.Len(len(pkg.Imports()))
+ for _, imp := range pkg.Imports() {
+ w.pkg(imp)
+ }
+ }
+
+ return w.Flush()
+}
+
+// @@@ Types
+
+var (
+ anyTypeName = types2.Universe.Lookup("any").(*types2.TypeName)
+ comparableTypeName = types2.Universe.Lookup("comparable").(*types2.TypeName)
+ runeTypeName = types2.Universe.Lookup("rune").(*types2.TypeName)
+)
+
+// typ writes a use of the given type into the bitstream.
+func (w *writer) typ(typ types2.Type) {
+ w.typInfo(w.p.typIdx(typ, w.dict))
+}
+
+// typInfo writes a use of the given type (specified as a typeInfo
+// instead) into the bitstream.
+func (w *writer) typInfo(info typeInfo) {
+ w.Sync(pkgbits.SyncType)
+ if w.Bool(info.derived) {
+ w.Len(int(info.idx))
+ w.derived = true
+ } else {
+ w.Reloc(pkgbits.RelocType, info.idx)
+ }
+}
+
+// typIdx returns the index where the export data description of type
+// can be read back in. If no such index exists yet, it's created.
+//
+// typIdx also reports whether typ is a derived type; that is, whether
+// its identity depends on type parameters.
+func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
+ if idx, ok := pw.typsIdx[typ]; ok {
+ return typeInfo{idx: idx, derived: false}
+ }
+ if dict != nil {
+ if idx, ok := dict.derivedIdx[typ]; ok {
+ return typeInfo{idx: idx, derived: true}
+ }
+ }
+
+ w := pw.newWriter(pkgbits.RelocType, pkgbits.SyncTypeIdx)
+ w.dict = dict
+
+ switch typ := types2.Unalias(typ).(type) {
+ default:
+ base.Fatalf("unexpected type: %v (%T)", typ, typ)
+
+ case *types2.Basic:
+ switch kind := typ.Kind(); {
+ case kind == types2.Invalid:
+ base.Fatalf("unexpected types2.Invalid")
+
+ case types2.Typ[kind] == typ:
+ w.Code(pkgbits.TypeBasic)
+ w.Len(int(kind))
+
+ default:
+ // Handle "byte" and "rune" as references to their TypeNames.
+ obj := types2.Universe.Lookup(typ.Name())
+ assert(obj.Type() == typ)
+
+ w.Code(pkgbits.TypeNamed)
+ w.obj(obj, nil)
+ }
+
+ case *types2.Named:
+ obj, targs := splitNamed(typ)
+
+ // Defined types that are declared within a generic function (and
+ // thus have implicit type parameters) are always derived types.
+ if w.p.hasImplicitTypeParams(obj) {
+ w.derived = true
+ }
+
+ w.Code(pkgbits.TypeNamed)
+ w.obj(obj, targs)
+
+ case *types2.TypeParam:
+ w.derived = true
+ w.Code(pkgbits.TypeTypeParam)
+ w.Len(w.dict.typeParamIndex(typ))
+
+ case *types2.Array:
+ w.Code(pkgbits.TypeArray)
+ w.Uint64(uint64(typ.Len()))
+ w.typ(typ.Elem())
+
+ case *types2.Chan:
+ w.Code(pkgbits.TypeChan)
+ w.Len(int(typ.Dir()))
+ w.typ(typ.Elem())
+
+ case *types2.Map:
+ w.Code(pkgbits.TypeMap)
+ w.typ(typ.Key())
+ w.typ(typ.Elem())
+
+ case *types2.Pointer:
+ w.Code(pkgbits.TypePointer)
+ w.typ(typ.Elem())
+
+ case *types2.Signature:
+ base.Assertf(typ.TypeParams() == nil, "unexpected type params: %v", typ)
+ w.Code(pkgbits.TypeSignature)
+ w.signature(typ)
+
+ case *types2.Slice:
+ w.Code(pkgbits.TypeSlice)
+ w.typ(typ.Elem())
+
+ case *types2.Struct:
+ w.Code(pkgbits.TypeStruct)
+ w.structType(typ)
+
+ case *types2.Interface:
+ // Handle "any" as reference to its TypeName.
+ if typ == anyTypeName.Type() {
+ w.Code(pkgbits.TypeNamed)
+ w.obj(anyTypeName, nil)
+ break
+ }
+
+ w.Code(pkgbits.TypeInterface)
+ w.interfaceType(typ)
+
+ case *types2.Union:
+ w.Code(pkgbits.TypeUnion)
+ w.unionType(typ)
+ }
+
+ if w.derived {
+ idx := pkgbits.Index(len(dict.derived))
+ dict.derived = append(dict.derived, derivedInfo{idx: w.Flush()})
+ dict.derivedIdx[typ] = idx
+ return typeInfo{idx: idx, derived: true}
+ }
+
+ pw.typsIdx[typ] = w.Idx
+ return typeInfo{idx: w.Flush(), derived: false}
+}
+
+func (w *writer) structType(typ *types2.Struct) {
+ w.Len(typ.NumFields())
+ for i := 0; i < typ.NumFields(); i++ {
+ f := typ.Field(i)
+ w.pos(f)
+ w.selector(f)
+ w.typ(f.Type())
+ w.String(typ.Tag(i))
+ w.Bool(f.Embedded())
+ }
+}
+
+func (w *writer) unionType(typ *types2.Union) {
+ w.Len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ t := typ.Term(i)
+ w.Bool(t.Tilde())
+ w.typ(t.Type())
+ }
+}
+
+func (w *writer) interfaceType(typ *types2.Interface) {
+ // If typ has no embedded types but it's not a basic interface, then
+ // the natural description we write out below will fail to
+ // reconstruct it.
+ if typ.NumEmbeddeds() == 0 && !typ.IsMethodSet() {
+ // Currently, this can only happen for the underlying Interface of
+ // "comparable", which is needed to handle type declarations like
+ // "type C comparable".
+ assert(typ == comparableTypeName.Type().(*types2.Named).Underlying())
+
+ // Export as "interface{ comparable }".
+ w.Len(0) // NumExplicitMethods
+ w.Len(1) // NumEmbeddeds
+ w.Bool(false) // IsImplicit
+ w.typ(comparableTypeName.Type()) // EmbeddedType(0)
+ return
+ }
+
+ w.Len(typ.NumExplicitMethods())
+ w.Len(typ.NumEmbeddeds())
+
+ if typ.NumExplicitMethods() == 0 && typ.NumEmbeddeds() == 1 {
+ w.Bool(typ.IsImplicit())
+ } else {
+ // Implicit interfaces always have 0 explicit methods and 1
+ // embedded type, so we skip writing out the implicit flag
+ // otherwise as a space optimization.
+ assert(!typ.IsImplicit())
+ }
+
+ for i := 0; i < typ.NumExplicitMethods(); i++ {
+ m := typ.ExplicitMethod(i)
+ sig := m.Type().(*types2.Signature)
+ assert(sig.TypeParams() == nil)
+
+ w.pos(m)
+ w.selector(m)
+ w.signature(sig)
+ }
+
+ for i := 0; i < typ.NumEmbeddeds(); i++ {
+ w.typ(typ.EmbeddedType(i))
+ }
+}
+
+func (w *writer) signature(sig *types2.Signature) {
+ w.Sync(pkgbits.SyncSignature)
+ w.params(sig.Params())
+ w.params(sig.Results())
+ w.Bool(sig.Variadic())
+}
+
+func (w *writer) params(typ *types2.Tuple) {
+ w.Sync(pkgbits.SyncParams)
+ w.Len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ w.param(typ.At(i))
+ }
+}
+
+func (w *writer) param(param *types2.Var) {
+ w.Sync(pkgbits.SyncParam)
+ w.pos(param)
+ w.localIdent(param)
+ w.typ(param.Type())
+}
+
+// @@@ Objects
+
+// obj writes a use of the given object into the bitstream.
+//
+// If obj is a generic object, then explicits are the explicit type
+// arguments used to instantiate it (i.e., used to substitute the
+// object's own declared type parameters).
+func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) {
+ w.objInfo(w.p.objInstIdx(obj, explicits, w.dict))
+}
+
+// objInfo writes a use of the given encoded object into the
+// bitstream.
+func (w *writer) objInfo(info objInfo) {
+ w.Sync(pkgbits.SyncObject)
+ w.Bool(false) // TODO(mdempsky): Remove; was derived func inst.
+ w.Reloc(pkgbits.RelocObj, info.idx)
+
+ w.Len(len(info.explicits))
+ for _, info := range info.explicits {
+ w.typInfo(info)
+ }
+}
+
+// objInstIdx returns the indices for an object and a corresponding
+// list of type arguments used to instantiate it, adding them to the
+// export data as needed.
+func (pw *pkgWriter) objInstIdx(obj types2.Object, explicits *types2.TypeList, dict *writerDict) objInfo {
+ explicitInfos := make([]typeInfo, explicits.Len())
+ for i := range explicitInfos {
+ explicitInfos[i] = pw.typIdx(explicits.At(i), dict)
+ }
+ return objInfo{idx: pw.objIdx(obj), explicits: explicitInfos}
+}
+
+// objIdx returns the index for the given Object, adding it to the
+// export data as needed.
+func (pw *pkgWriter) objIdx(obj types2.Object) pkgbits.Index {
+ // TODO(mdempsky): Validate that obj is a global object (or a local
+ // defined type, which we hoist to global scope anyway).
+
+ if idx, ok := pw.objsIdx[obj]; ok {
+ return idx
+ }
+
+ dict := &writerDict{
+ derivedIdx: make(map[types2.Type]pkgbits.Index),
+ }
+
+ if isDefinedType(obj) && obj.Pkg() == pw.curpkg {
+ decl, ok := pw.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ dict.implicits = decl.implicits
+ }
+
+ // We encode objects into 4 elements across different sections, all
+ // sharing the same index:
+ //
+ // - RelocName has just the object's qualified name (i.e.,
+ // Object.Pkg and Object.Name) and the CodeObj indicating what
+ // specific type of Object it is (Var, Func, etc).
+ //
+ // - RelocObj has the remaining public details about the object,
+ // relevant to go/types importers.
+ //
+ // - RelocObjExt has additional private details about the object,
+ // which are only relevant to cmd/compile itself. This is
+ // separated from RelocObj so that go/types importers are
+ // unaffected by internal compiler changes.
+ //
+ // - RelocObjDict has public details about the object's type
+ // parameters and derived type's used by the object. This is
+ // separated to facilitate the eventual introduction of
+ // shape-based stenciling.
+ //
+ // TODO(mdempsky): Re-evaluate whether RelocName still makes sense
+ // to keep separate from RelocObj.
+
+ w := pw.newWriter(pkgbits.RelocObj, pkgbits.SyncObject1)
+ wext := pw.newWriter(pkgbits.RelocObjExt, pkgbits.SyncObject1)
+ wname := pw.newWriter(pkgbits.RelocName, pkgbits.SyncObject1)
+ wdict := pw.newWriter(pkgbits.RelocObjDict, pkgbits.SyncObject1)
+
+ pw.objsIdx[obj] = w.Idx // break cycles
+ assert(wext.Idx == w.Idx)
+ assert(wname.Idx == w.Idx)
+ assert(wdict.Idx == w.Idx)
+
+ w.dict = dict
+ wext.dict = dict
+
+ code := w.doObj(wext, obj)
+ w.Flush()
+ wext.Flush()
+
+ wname.qualifiedIdent(obj)
+ wname.Code(code)
+ wname.Flush()
+
+ wdict.objDict(obj, w.dict)
+ wdict.Flush()
+
+ return w.Idx
+}
+
+// doObj writes the RelocObj definition for obj to w, and the
+// RelocObjExt definition to wext.
+func (w *writer) doObj(wext *writer, obj types2.Object) pkgbits.CodeObj {
+ if obj.Pkg() != w.p.curpkg {
+ return pkgbits.ObjStub
+ }
+
+ switch obj := obj.(type) {
+ default:
+ w.p.unexpected("object", obj)
+ panic("unreachable")
+
+ case *types2.Const:
+ w.pos(obj)
+ w.typ(obj.Type())
+ w.Value(obj.Val())
+ return pkgbits.ObjConst
+
+ case *types2.Func:
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+ sig := obj.Type().(*types2.Signature)
+
+ w.pos(obj)
+ w.typeParamNames(sig.TypeParams())
+ w.signature(sig)
+ w.pos(decl)
+ wext.funcExt(obj)
+ return pkgbits.ObjFunc
+
+ case *types2.TypeName:
+ if obj.IsAlias() {
+ w.pos(obj)
+ w.typ(obj.Type())
+ return pkgbits.ObjAlias
+ }
+
+ named := obj.Type().(*types2.Named)
+ assert(named.TypeArgs() == nil)
+
+ w.pos(obj)
+ w.typeParamNames(named.TypeParams())
+ wext.typeExt(obj)
+ w.typ(named.Underlying())
+
+ w.Len(named.NumMethods())
+ for i := 0; i < named.NumMethods(); i++ {
+ w.method(wext, named.Method(i))
+ }
+
+ return pkgbits.ObjType
+
+ case *types2.Var:
+ w.pos(obj)
+ w.typ(obj.Type())
+ wext.varExt(obj)
+ return pkgbits.ObjVar
+ }
+}
+
+// objDict writes the dictionary needed for reading the given object.
+func (w *writer) objDict(obj types2.Object, dict *writerDict) {
+ // TODO(mdempsky): Split objDict into multiple entries? reader.go
+ // doesn't care about the type parameter bounds, and reader2.go
+ // doesn't care about referenced functions.
+
+ w.dict = dict // TODO(mdempsky): This is a bit sketchy.
+
+ w.Len(len(dict.implicits))
+
+ tparams := objTypeParams(obj)
+ ntparams := tparams.Len()
+ w.Len(ntparams)
+ for i := 0; i < ntparams; i++ {
+ w.typ(tparams.At(i).Constraint())
+ }
+
+ nderived := len(dict.derived)
+ w.Len(nderived)
+ for _, typ := range dict.derived {
+ w.Reloc(pkgbits.RelocType, typ.idx)
+ w.Bool(typ.needed)
+ }
+
+ // Write runtime dictionary information.
+ //
+ // N.B., the go/types importer reads up to the section, but doesn't
+ // read any further, so it's safe to change. (See TODO above.)
+
+ // For each type parameter, write out whether the constraint is a
+ // basic interface. This is used to determine how aggressively we
+ // can shape corresponding type arguments.
+ //
+ // This is somewhat redundant with writing out the full type
+ // parameter constraints above, but the compiler currently skips
+ // over those. Also, we don't care about the *declared* constraints,
+ // but how the type parameters are actually *used*. E.g., if a type
+ // parameter is constrained to `int | uint` but then never used in
+ // arithmetic/conversions/etc, we could shape those together.
+ for _, implicit := range dict.implicits {
+ tparam := types2.Unalias(implicit.Type()).(*types2.TypeParam)
+ w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet())
+ }
+ for i := 0; i < ntparams; i++ {
+ tparam := tparams.At(i)
+ w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet())
+ }
+
+ w.Len(len(dict.typeParamMethodExprs))
+ for _, info := range dict.typeParamMethodExprs {
+ w.Len(info.typeParamIdx)
+ w.selectorInfo(info.methodInfo)
+ }
+
+ w.Len(len(dict.subdicts))
+ for _, info := range dict.subdicts {
+ w.objInfo(info)
+ }
+
+ w.Len(len(dict.rtypes))
+ for _, info := range dict.rtypes {
+ w.typInfo(info)
+ }
+
+ w.Len(len(dict.itabs))
+ for _, info := range dict.itabs {
+ w.typInfo(info.typ)
+ w.typInfo(info.iface)
+ }
+
+ assert(len(dict.derived) == nderived)
+}
+
+func (w *writer) typeParamNames(tparams *types2.TypeParamList) {
+ w.Sync(pkgbits.SyncTypeParamNames)
+
+ ntparams := tparams.Len()
+ for i := 0; i < ntparams; i++ {
+ tparam := tparams.At(i).Obj()
+ w.pos(tparam)
+ w.localIdent(tparam)
+ }
+}
+
+func (w *writer) method(wext *writer, meth *types2.Func) {
+ decl, ok := w.p.funDecls[meth]
+ assert(ok)
+ sig := meth.Type().(*types2.Signature)
+
+ w.Sync(pkgbits.SyncMethod)
+ w.pos(meth)
+ w.selector(meth)
+ w.typeParamNames(sig.RecvTypeParams())
+ w.param(sig.Recv())
+ w.signature(sig)
+
+ w.pos(decl) // XXX: Hack to workaround linker limitations.
+ wext.funcExt(meth)
+}
+
+// qualifiedIdent writes out the name of an object declared at package
+// scope. (For now, it's also used to refer to local defined types.)
+func (w *writer) qualifiedIdent(obj types2.Object) {
+ w.Sync(pkgbits.SyncSym)
+
+ name := obj.Name()
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if decl.gen != 0 {
+ // For local defined types, we embed a scope-disambiguation
+ // number directly into their name. types.SplitVargenSuffix then
+ // knows to look for this.
+ //
+ // TODO(mdempsky): Find a better solution; this is terrible.
+ name = fmt.Sprintf("%s·%v", name, decl.gen)
+ }
+ }
+
+ w.pkg(obj.Pkg())
+ w.String(name)
+}
+
+// TODO(mdempsky): We should be able to omit pkg from both localIdent
+// and selector, because they should always be known from context.
+// However, past frustrations with this optimization in iexport make
+// me a little nervous to try it again.
+
+// localIdent writes the name of a locally declared object (i.e.,
+// objects that can only be accessed by non-qualified name, within the
+// context of a particular function).
+func (w *writer) localIdent(obj types2.Object) {
+ assert(!isGlobal(obj))
+ w.Sync(pkgbits.SyncLocalIdent)
+ w.pkg(obj.Pkg())
+ w.String(obj.Name())
+}
+
+// selector writes the name of a field or method (i.e., objects that
+// can only be accessed using selector expressions).
+func (w *writer) selector(obj types2.Object) {
+ w.selectorInfo(w.p.selectorIdx(obj))
+}
+
+func (w *writer) selectorInfo(info selectorInfo) {
+ w.Sync(pkgbits.SyncSelector)
+ w.pkgRef(info.pkgIdx)
+ w.StringRef(info.nameIdx)
+}
+
+func (pw *pkgWriter) selectorIdx(obj types2.Object) selectorInfo {
+ pkgIdx := pw.pkgIdx(obj.Pkg())
+ nameIdx := pw.StringIdx(obj.Name())
+ return selectorInfo{pkgIdx: pkgIdx, nameIdx: nameIdx}
+}
+
+// @@@ Compiler extensions
+
+func (w *writer) funcExt(obj *types2.Func) {
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+
+ // TODO(mdempsky): Extend these pragma validation flags to account
+ // for generics. E.g., linkname probably doesn't make sense at
+ // least.
+
+ pragma := asPragmaFlag(decl.Pragma)
+ if pragma&ir.Systemstack != 0 && pragma&ir.Nosplit != 0 {
+ w.p.errorf(decl, "go:nosplit and go:systemstack cannot be combined")
+ }
+ wi := asWasmImport(decl.Pragma)
+
+ if decl.Body != nil {
+ if pragma&ir.Noescape != 0 {
+ w.p.errorf(decl, "can only use //go:noescape with external func implementations")
+ }
+ if wi != nil {
+ w.p.errorf(decl, "can only use //go:wasmimport with external func implementations")
+ }
+ if (pragma&ir.UintptrKeepAlive != 0 && pragma&ir.UintptrEscapes == 0) && pragma&ir.Nosplit == 0 {
+ // Stack growth can't handle uintptr arguments that may
+ // be pointers (as we don't know which are pointers
+ // when creating the stack map). Thus uintptrkeepalive
+ // functions (and all transitive callees) must be
+ // nosplit.
+ //
+ // N.B. uintptrescapes implies uintptrkeepalive but it
+ // is OK since the arguments must escape to the heap.
+ //
+ // TODO(prattmic): Add recursive nosplit check of callees.
+ // TODO(prattmic): Functions with no body (i.e.,
+ // assembly) must also be nosplit, but we can't check
+ // that here.
+ w.p.errorf(decl, "go:uintptrkeepalive requires go:nosplit")
+ }
+ } else {
+ if base.Flag.Complete || decl.Name.Value == "init" {
+ // Linknamed functions are allowed to have no body. Hopefully
+ // the linkname target has a body. See issue 23311.
+ // Wasmimport functions are also allowed to have no body.
+ if _, ok := w.p.linknames[obj]; !ok && wi == nil {
+ w.p.errorf(decl, "missing function body")
+ }
+ }
+ }
+
+ sig, block := obj.Type().(*types2.Signature), decl.Body
+ body, closureVars := w.p.bodyIdx(sig, block, w.dict)
+ if len(closureVars) > 0 {
+ fmt.Fprintln(os.Stderr, "CLOSURE", closureVars)
+ }
+ assert(len(closureVars) == 0)
+
+ w.Sync(pkgbits.SyncFuncExt)
+ w.pragmaFlag(pragma)
+ w.linkname(obj)
+
+ if buildcfg.GOARCH == "wasm" {
+ if wi != nil {
+ w.String(wi.Module)
+ w.String(wi.Name)
+ } else {
+ w.String("")
+ w.String("")
+ }
+ }
+
+ w.Bool(false) // stub extension
+ w.Reloc(pkgbits.RelocBody, body)
+ w.Sync(pkgbits.SyncEOF)
+}
+
+func (w *writer) typeExt(obj *types2.TypeName) {
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ w.Sync(pkgbits.SyncTypeExt)
+
+ w.pragmaFlag(asPragmaFlag(decl.Pragma))
+
+ // No LSym.SymIdx info yet.
+ w.Int64(-1)
+ w.Int64(-1)
+}
+
+func (w *writer) varExt(obj *types2.Var) {
+ w.Sync(pkgbits.SyncVarExt)
+ w.linkname(obj)
+}
+
+func (w *writer) linkname(obj types2.Object) {
+ w.Sync(pkgbits.SyncLinkname)
+ w.Int64(-1)
+ w.String(w.p.linknames[obj])
+}
+
+func (w *writer) pragmaFlag(p ir.PragmaFlag) {
+ w.Sync(pkgbits.SyncPragma)
+ w.Int(int(p))
+}
+
+// @@@ Function bodies
+
+// bodyIdx returns the index for the given function body (specified by
+// block), adding it to the export data
+func (pw *pkgWriter) bodyIdx(sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx pkgbits.Index, closureVars []posVar) {
+ w := pw.newWriter(pkgbits.RelocBody, pkgbits.SyncFuncBody)
+ w.sig = sig
+ w.dict = dict
+
+ w.declareParams(sig)
+ if w.Bool(block != nil) {
+ w.stmts(block.List)
+ w.pos(block.Rbrace)
+ }
+
+ return w.Flush(), w.closureVars
+}
+
+func (w *writer) declareParams(sig *types2.Signature) {
+ addLocals := func(params *types2.Tuple) {
+ for i := 0; i < params.Len(); i++ {
+ w.addLocal(params.At(i))
+ }
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.addLocal(recv)
+ }
+ addLocals(sig.Params())
+ addLocals(sig.Results())
+}
+
+// addLocal records the declaration of a new local variable.
+func (w *writer) addLocal(obj *types2.Var) {
+ idx := len(w.localsIdx)
+
+ w.Sync(pkgbits.SyncAddLocal)
+ if w.p.SyncMarkers() {
+ w.Int(idx)
+ }
+ w.varDictIndex(obj)
+
+ if w.localsIdx == nil {
+ w.localsIdx = make(map[*types2.Var]int)
+ }
+ w.localsIdx[obj] = idx
+}
+
+// useLocal writes a reference to the given local or free variable
+// into the bitstream.
+func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) {
+ w.Sync(pkgbits.SyncUseObjLocal)
+
+ if idx, ok := w.localsIdx[obj]; w.Bool(ok) {
+ w.Len(idx)
+ return
+ }
+
+ idx, ok := w.closureVarsIdx[obj]
+ if !ok {
+ if w.closureVarsIdx == nil {
+ w.closureVarsIdx = make(map[*types2.Var]int)
+ }
+ idx = len(w.closureVars)
+ w.closureVars = append(w.closureVars, posVar{pos, obj})
+ w.closureVarsIdx[obj] = idx
+ }
+ w.Len(idx)
+}
+
+func (w *writer) openScope(pos syntax.Pos) {
+ w.Sync(pkgbits.SyncOpenScope)
+ w.pos(pos)
+}
+
+func (w *writer) closeScope(pos syntax.Pos) {
+ w.Sync(pkgbits.SyncCloseScope)
+ w.pos(pos)
+ w.closeAnotherScope()
+}
+
+func (w *writer) closeAnotherScope() {
+ w.Sync(pkgbits.SyncCloseAnotherScope)
+}
+
+// @@@ Statements
+
+// stmt writes the given statement into the function body bitstream.
+func (w *writer) stmt(stmt syntax.Stmt) {
+ var stmts []syntax.Stmt
+ if stmt != nil {
+ stmts = []syntax.Stmt{stmt}
+ }
+ w.stmts(stmts)
+}
+
+func (w *writer) stmts(stmts []syntax.Stmt) {
+ dead := false
+ w.Sync(pkgbits.SyncStmts)
+ for _, stmt := range stmts {
+ if dead {
+ // Any statements after a terminating statement are safe to
+ // omit, at least until the next labeled statement.
+ if _, ok := stmt.(*syntax.LabeledStmt); !ok {
+ continue
+ }
+ }
+ w.stmt1(stmt)
+ dead = w.p.terminates(stmt)
+ }
+ w.Code(stmtEnd)
+ w.Sync(pkgbits.SyncStmtsEnd)
+}
+
+func (w *writer) stmt1(stmt syntax.Stmt) {
+ switch stmt := stmt.(type) {
+ default:
+ w.p.unexpected("statement", stmt)
+
+ case nil, *syntax.EmptyStmt:
+ return
+
+ case *syntax.AssignStmt:
+ switch {
+ case stmt.Rhs == nil:
+ w.Code(stmtIncDec)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+
+ case stmt.Op != 0 && stmt.Op != syntax.Def:
+ w.Code(stmtAssignOp)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+
+ var typ types2.Type
+ if stmt.Op != syntax.Shl && stmt.Op != syntax.Shr {
+ typ = w.p.typeOf(stmt.Lhs)
+ }
+ w.implicitConvExpr(typ, stmt.Rhs)
+
+ default:
+ w.assignStmt(stmt, stmt.Lhs, stmt.Rhs)
+ }
+
+ case *syntax.BlockStmt:
+ w.Code(stmtBlock)
+ w.blockStmt(stmt)
+
+ case *syntax.BranchStmt:
+ w.Code(stmtBranch)
+ w.pos(stmt)
+ w.op(branchOps[stmt.Tok])
+ w.optLabel(stmt.Label)
+
+ case *syntax.CallStmt:
+ w.Code(stmtCall)
+ w.pos(stmt)
+ w.op(callOps[stmt.Tok])
+ w.expr(stmt.Call)
+ if stmt.Tok == syntax.Defer {
+ w.optExpr(stmt.DeferAt)
+ }
+
+ case *syntax.DeclStmt:
+ for _, decl := range stmt.DeclList {
+ w.declStmt(decl)
+ }
+
+ case *syntax.ExprStmt:
+ w.Code(stmtExpr)
+ w.expr(stmt.X)
+
+ case *syntax.ForStmt:
+ w.Code(stmtFor)
+ w.forStmt(stmt)
+
+ case *syntax.IfStmt:
+ w.Code(stmtIf)
+ w.ifStmt(stmt)
+
+ case *syntax.LabeledStmt:
+ w.Code(stmtLabel)
+ w.pos(stmt)
+ w.label(stmt.Label)
+ w.stmt1(stmt.Stmt)
+
+ case *syntax.ReturnStmt:
+ w.Code(stmtReturn)
+ w.pos(stmt)
+
+ resultTypes := w.sig.Results()
+ dstType := func(i int) types2.Type {
+ return resultTypes.At(i).Type()
+ }
+ w.multiExpr(stmt, dstType, syntax.UnpackListExpr(stmt.Results))
+
+ case *syntax.SelectStmt:
+ w.Code(stmtSelect)
+ w.selectStmt(stmt)
+
+ case *syntax.SendStmt:
+ chanType := types2.CoreType(w.p.typeOf(stmt.Chan)).(*types2.Chan)
+
+ w.Code(stmtSend)
+ w.pos(stmt)
+ w.expr(stmt.Chan)
+ w.implicitConvExpr(chanType.Elem(), stmt.Value)
+
+ case *syntax.SwitchStmt:
+ w.Code(stmtSwitch)
+ w.switchStmt(stmt)
+ }
+}
+
+func (w *writer) assignList(expr syntax.Expr) {
+ exprs := syntax.UnpackListExpr(expr)
+ w.Len(len(exprs))
+
+ for _, expr := range exprs {
+ w.assign(expr)
+ }
+}
+
+func (w *writer) assign(expr syntax.Expr) {
+ expr = syntax.Unparen(expr)
+
+ if name, ok := expr.(*syntax.Name); ok {
+ if name.Value == "_" {
+ w.Code(assignBlank)
+ return
+ }
+
+ if obj, ok := w.p.info.Defs[name]; ok {
+ obj := obj.(*types2.Var)
+
+ w.Code(assignDef)
+ w.pos(obj)
+ w.localIdent(obj)
+ w.typ(obj.Type())
+
+ // TODO(mdempsky): Minimize locals index size by deferring
+ // this until the variables actually come into scope.
+ w.addLocal(obj)
+ return
+ }
+ }
+
+ w.Code(assignExpr)
+ w.expr(expr)
+}
+
+func (w *writer) declStmt(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ConstDecl, *syntax.TypeDecl:
+
+ case *syntax.VarDecl:
+ w.assignStmt(decl, namesAsExpr(decl.NameList), decl.Values)
+ }
+}
+
+// assignStmt writes out an assignment for "lhs = rhs".
+func (w *writer) assignStmt(pos poser, lhs0, rhs0 syntax.Expr) {
+ lhs := syntax.UnpackListExpr(lhs0)
+ rhs := syntax.UnpackListExpr(rhs0)
+
+ w.Code(stmtAssign)
+ w.pos(pos)
+
+ // As if w.assignList(lhs0).
+ w.Len(len(lhs))
+ for _, expr := range lhs {
+ w.assign(expr)
+ }
+
+ dstType := func(i int) types2.Type {
+ dst := lhs[i]
+
+ // Finding dstType is somewhat involved, because for VarDecl
+ // statements, the Names are only added to the info.{Defs,Uses}
+ // maps, not to info.Types.
+ if name, ok := syntax.Unparen(dst).(*syntax.Name); ok {
+ if name.Value == "_" {
+ return nil // ok: no implicit conversion
+ } else if def, ok := w.p.info.Defs[name].(*types2.Var); ok {
+ return def.Type()
+ } else if use, ok := w.p.info.Uses[name].(*types2.Var); ok {
+ return use.Type()
+ } else {
+ w.p.fatalf(dst, "cannot find type of destination object: %v", dst)
+ }
+ }
+
+ return w.p.typeOf(dst)
+ }
+
+ w.multiExpr(pos, dstType, rhs)
+}
+
+func (w *writer) blockStmt(stmt *syntax.BlockStmt) {
+ w.Sync(pkgbits.SyncBlockStmt)
+ w.openScope(stmt.Pos())
+ w.stmts(stmt.List)
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) forStmt(stmt *syntax.ForStmt) {
+ w.Sync(pkgbits.SyncForStmt)
+ w.openScope(stmt.Pos())
+
+ if rang, ok := stmt.Init.(*syntax.RangeClause); w.Bool(ok) {
+ w.pos(rang)
+ w.assignList(rang.Lhs)
+ w.expr(rang.X)
+
+ xtyp := w.p.typeOf(rang.X)
+ if _, isMap := types2.CoreType(xtyp).(*types2.Map); isMap {
+ w.rtype(xtyp)
+ }
+ {
+ lhs := syntax.UnpackListExpr(rang.Lhs)
+ assign := func(i int, src types2.Type) {
+ if i >= len(lhs) {
+ return
+ }
+ dst := syntax.Unparen(lhs[i])
+ if name, ok := dst.(*syntax.Name); ok && name.Value == "_" {
+ return
+ }
+
+ var dstType types2.Type
+ if rang.Def {
+ // For `:=` assignments, the LHS names only appear in Defs,
+ // not Types (as used by typeOf).
+ dstType = w.p.info.Defs[dst.(*syntax.Name)].(*types2.Var).Type()
+ } else {
+ dstType = w.p.typeOf(dst)
+ }
+
+ w.convRTTI(src, dstType)
+ }
+
+ keyType, valueType := types2.RangeKeyVal(w.p.typeOf(rang.X))
+ assign(0, keyType)
+ assign(1, valueType)
+ }
+
+ } else {
+ if stmt.Cond != nil && w.p.staticBool(&stmt.Cond) < 0 { // always false
+ stmt.Post = nil
+ stmt.Body.List = nil
+ }
+
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.optExpr(stmt.Cond)
+ w.stmt(stmt.Post)
+ }
+
+ w.blockStmt(stmt.Body)
+ w.Bool(w.distinctVars(stmt))
+ w.closeAnotherScope()
+}
+
+func (w *writer) distinctVars(stmt *syntax.ForStmt) bool {
+ lv := base.Debug.LoopVar
+ fileVersion := w.p.info.FileVersions[stmt.Pos().Base()]
+ is122 := fileVersion == "" || version.Compare(fileVersion, "go1.22") >= 0
+
+ // Turning off loopvar for 1.22 is only possible with loopvarhash=qn
+ //
+ // Debug.LoopVar values to be preserved for 1.21 compatibility are 1 and 2,
+ // which are also set (=1) by GOEXPERIMENT=loopvar. The knobs for turning on
+ // the new, unshared, loopvar behavior apply to versions less than 1.21 because
+ // (1) 1.21 also did that and (2) this is believed to be the likely use case;
+ // anyone checking to see if it affects their code will just run the GOEXPERIMENT
+ // but will not also update all their go.mod files to 1.21.
+ //
+ // -gcflags=-d=loopvar=3 enables logging for 1.22 but does not turn loopvar on for <= 1.21.
+
+ return is122 || lv > 0 && lv != 3
+}
+
+func (w *writer) ifStmt(stmt *syntax.IfStmt) {
+ cond := w.p.staticBool(&stmt.Cond)
+
+ w.Sync(pkgbits.SyncIfStmt)
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.Int(cond)
+ if cond >= 0 {
+ w.blockStmt(stmt.Then)
+ } else {
+ w.pos(stmt.Then.Rbrace)
+ }
+ if cond <= 0 {
+ w.stmt(stmt.Else)
+ }
+ w.closeAnotherScope()
+}
+
+func (w *writer) selectStmt(stmt *syntax.SelectStmt) {
+ w.Sync(pkgbits.SyncSelectStmt)
+
+ w.pos(stmt)
+ w.Len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.stmt(clause.Comm)
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+}
+
+func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
+ w.Sync(pkgbits.SyncSwitchStmt)
+
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+
+ var iface, tagType types2.Type
+ if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.Bool(ok) {
+ iface = w.p.typeOf(guard.X)
+
+ w.pos(guard)
+ if tag := guard.Lhs; w.Bool(tag != nil) {
+ w.pos(tag)
+
+ // Like w.localIdent, but we don't have a types2.Object.
+ w.Sync(pkgbits.SyncLocalIdent)
+ w.pkg(w.p.curpkg)
+ w.String(tag.Value)
+ }
+ w.expr(guard.X)
+ } else {
+ tag := stmt.Tag
+
+ var tagValue constant.Value
+ if tag != nil {
+ tv := w.p.typeAndValue(tag)
+ tagType = tv.Type
+ tagValue = tv.Value
+ } else {
+ tagType = types2.Typ[types2.Bool]
+ tagValue = constant.MakeBool(true)
+ }
+
+ if tagValue != nil {
+ // If the switch tag has a constant value, look for a case
+ // clause that we always branch to.
+ func() {
+ var target *syntax.CaseClause
+ Outer:
+ for _, clause := range stmt.Body {
+ if clause.Cases == nil {
+ target = clause
+ }
+ for _, cas := range syntax.UnpackListExpr(clause.Cases) {
+ tv := w.p.typeAndValue(cas)
+ if tv.Value == nil {
+ return // non-constant case; give up
+ }
+ if constant.Compare(tagValue, token.EQL, tv.Value) {
+ target = clause
+ break Outer
+ }
+ }
+ }
+ // We've found the target clause, if any.
+
+ if target != nil {
+ if hasFallthrough(target.Body) {
+ return // fallthrough is tricky; give up
+ }
+
+ // Rewrite as single "default" case.
+ target.Cases = nil
+ stmt.Body = []*syntax.CaseClause{target}
+ } else {
+ stmt.Body = nil
+ }
+
+ // Clear switch tag (i.e., replace with implicit "true").
+ tag = nil
+ stmt.Tag = nil
+ tagType = types2.Typ[types2.Bool]
+ }()
+ }
+
+ // Walk is going to emit comparisons between the tag value and
+ // each case expression, and we want these comparisons to always
+ // have the same type. If there are any case values that can't be
+ // converted to the tag value's type, then convert everything to
+ // `any` instead.
+ Outer:
+ for _, clause := range stmt.Body {
+ for _, cas := range syntax.UnpackListExpr(clause.Cases) {
+ if casType := w.p.typeOf(cas); !types2.AssignableTo(casType, tagType) {
+ tagType = types2.NewInterfaceType(nil, nil)
+ break Outer
+ }
+ }
+ }
+
+ if w.Bool(tag != nil) {
+ w.implicitConvExpr(tagType, tag)
+ }
+ }
+
+ w.Len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+
+ cases := syntax.UnpackListExpr(clause.Cases)
+ if iface != nil {
+ w.Len(len(cases))
+ for _, cas := range cases {
+ if w.Bool(isNil(w.p, cas)) {
+ continue
+ }
+ w.exprType(iface, cas)
+ }
+ } else {
+ // As if w.exprList(clause.Cases),
+ // but with implicit conversions to tagType.
+
+ w.Sync(pkgbits.SyncExprList)
+ w.Sync(pkgbits.SyncExprs)
+ w.Len(len(cases))
+ for _, cas := range cases {
+ w.implicitConvExpr(tagType, cas)
+ }
+ }
+
+ if obj, ok := w.p.info.Implicits[clause]; ok {
+ // TODO(mdempsky): These pos details are quirkish, but also
+ // necessary so the variable's position is correct for DWARF
+ // scope assignment later. It would probably be better for us to
+ // instead just set the variable's DWARF scoping info earlier so
+ // we can give it the correct position information.
+ pos := clause.Pos()
+ if typs := syntax.UnpackListExpr(clause.Cases); len(typs) != 0 {
+ pos = typeExprEndPos(typs[len(typs)-1])
+ }
+ w.pos(pos)
+
+ obj := obj.(*types2.Var)
+ w.typ(obj.Type())
+ w.addLocal(obj)
+ }
+
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) label(label *syntax.Name) {
+ w.Sync(pkgbits.SyncLabel)
+
+ // TODO(mdempsky): Replace label strings with dense indices.
+ w.String(label.Value)
+}
+
+func (w *writer) optLabel(label *syntax.Name) {
+ w.Sync(pkgbits.SyncOptLabel)
+ if w.Bool(label != nil) {
+ w.label(label)
+ }
+}
+
+// @@@ Expressions
+
+// expr writes the given expression into the function body bitstream.
+func (w *writer) expr(expr syntax.Expr) {
+ base.Assertf(expr != nil, "missing expression")
+
+ expr = syntax.Unparen(expr) // skip parens; unneeded after typecheck
+
+ obj, inst := lookupObj(w.p, expr)
+ targs := inst.TypeArgs
+
+ if tv, ok := w.p.maybeTypeAndValue(expr); ok {
+ if tv.IsRuntimeHelper() {
+ if pkg := obj.Pkg(); pkg != nil && pkg.Name() == "runtime" {
+ objName := obj.Name()
+ w.Code(exprRuntimeBuiltin)
+ w.String(objName)
+ return
+ }
+ }
+
+ if tv.IsType() {
+ w.p.fatalf(expr, "unexpected type expression %v", syntax.String(expr))
+ }
+
+ if tv.Value != nil {
+ w.Code(exprConst)
+ w.pos(expr)
+ typ := idealType(tv)
+ assert(typ != nil)
+ w.typ(typ)
+ w.Value(tv.Value)
+ return
+ }
+
+ if _, isNil := obj.(*types2.Nil); isNil {
+ w.Code(exprZero)
+ w.pos(expr)
+ w.typ(tv.Type)
+ return
+ }
+
+ // With shape types (and particular pointer shaping), we may have
+ // an expression of type "go.shape.*uint8", but need to reshape it
+ // to another shape-identical type to allow use in field
+ // selection, indexing, etc.
+ if typ := tv.Type; !tv.IsBuiltin() && !isTuple(typ) && !isUntyped(typ) {
+ w.Code(exprReshape)
+ w.typ(typ)
+ // fallthrough
+ }
+ }
+
+ if obj != nil {
+ if targs.Len() != 0 {
+ obj := obj.(*types2.Func)
+
+ w.Code(exprFuncInst)
+ w.pos(expr)
+ w.funcInst(obj, targs)
+ return
+ }
+
+ if isGlobal(obj) {
+ w.Code(exprGlobal)
+ w.obj(obj, nil)
+ return
+ }
+
+ obj := obj.(*types2.Var)
+ assert(!obj.IsField())
+
+ w.Code(exprLocal)
+ w.useLocal(expr.Pos(), obj)
+ return
+ }
+
+ switch expr := expr.(type) {
+ default:
+ w.p.unexpected("expression", expr)
+
+ case *syntax.CompositeLit:
+ w.Code(exprCompLit)
+ w.compLit(expr)
+
+ case *syntax.FuncLit:
+ w.Code(exprFuncLit)
+ w.funcLit(expr)
+
+ case *syntax.SelectorExpr:
+ sel, ok := w.p.info.Selections[expr]
+ assert(ok)
+
+ switch sel.Kind() {
+ default:
+ w.p.fatalf(expr, "unexpected selection kind: %v", sel.Kind())
+
+ case types2.FieldVal:
+ w.Code(exprFieldVal)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.selector(sel.Obj())
+
+ case types2.MethodVal:
+ w.Code(exprMethodVal)
+ typ := w.recvExpr(expr, sel)
+ w.pos(expr)
+ w.methodExpr(expr, typ, sel)
+
+ case types2.MethodExpr:
+ w.Code(exprMethodExpr)
+
+ tv := w.p.typeAndValue(expr.X)
+ assert(tv.IsType())
+
+ index := sel.Index()
+ implicits := index[:len(index)-1]
+
+ typ := tv.Type
+ w.typ(typ)
+
+ w.Len(len(implicits))
+ for _, ix := range implicits {
+ w.Len(ix)
+ typ = deref2(typ).Underlying().(*types2.Struct).Field(ix).Type()
+ }
+
+ recv := sel.Obj().(*types2.Func).Type().(*types2.Signature).Recv().Type()
+ if w.Bool(isPtrTo(typ, recv)) { // need deref
+ typ = recv
+ } else if w.Bool(isPtrTo(recv, typ)) { // need addr
+ typ = recv
+ }
+
+ w.pos(expr)
+ w.methodExpr(expr, typ, sel)
+ }
+
+ case *syntax.IndexExpr:
+ _ = w.p.typeOf(expr.Index) // ensure this is an index expression, not an instantiation
+
+ xtyp := w.p.typeOf(expr.X)
+
+ var keyType types2.Type
+ if mapType, ok := types2.CoreType(xtyp).(*types2.Map); ok {
+ keyType = mapType.Key()
+ }
+
+ w.Code(exprIndex)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.implicitConvExpr(keyType, expr.Index)
+ if keyType != nil {
+ w.rtype(xtyp)
+ }
+
+ case *syntax.SliceExpr:
+ w.Code(exprSlice)
+ w.expr(expr.X)
+ w.pos(expr)
+ for _, n := range &expr.Index {
+ w.optExpr(n)
+ }
+
+ case *syntax.AssertExpr:
+ iface := w.p.typeOf(expr.X)
+
+ w.Code(exprAssert)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.exprType(iface, expr.Type)
+ w.rtype(iface)
+
+ case *syntax.Operation:
+ if expr.Y == nil {
+ w.Code(exprUnaryOp)
+ w.op(unOps[expr.Op])
+ w.pos(expr)
+ w.expr(expr.X)
+ break
+ }
+
+ var commonType types2.Type
+ switch expr.Op {
+ case syntax.Shl, syntax.Shr:
+ // ok: operands are allowed to have different types
+ default:
+ xtyp := w.p.typeOf(expr.X)
+ ytyp := w.p.typeOf(expr.Y)
+ switch {
+ case types2.AssignableTo(xtyp, ytyp):
+ commonType = ytyp
+ case types2.AssignableTo(ytyp, xtyp):
+ commonType = xtyp
+ default:
+ w.p.fatalf(expr, "failed to find common type between %v and %v", xtyp, ytyp)
+ }
+ }
+
+ w.Code(exprBinaryOp)
+ w.op(binOps[expr.Op])
+ w.implicitConvExpr(commonType, expr.X)
+ w.pos(expr)
+ w.implicitConvExpr(commonType, expr.Y)
+
+ case *syntax.CallExpr:
+ tv := w.p.typeAndValue(expr.Fun)
+ if tv.IsType() {
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+ w.convertExpr(tv.Type, expr.ArgList[0], false)
+ break
+ }
+
+ var rtype types2.Type
+ if tv.IsBuiltin() {
+ switch obj, _ := lookupObj(w.p, syntax.Unparen(expr.Fun)); obj.Name() {
+ case "make":
+ assert(len(expr.ArgList) >= 1)
+ assert(!expr.HasDots)
+
+ w.Code(exprMake)
+ w.pos(expr)
+ w.exprType(nil, expr.ArgList[0])
+ w.exprs(expr.ArgList[1:])
+
+ typ := w.p.typeOf(expr)
+ switch coreType := types2.CoreType(typ).(type) {
+ default:
+ w.p.fatalf(expr, "unexpected core type: %v", coreType)
+ case *types2.Chan:
+ w.rtype(typ)
+ case *types2.Map:
+ w.rtype(typ)
+ case *types2.Slice:
+ w.rtype(sliceElem(typ))
+ }
+
+ return
+
+ case "new":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.Code(exprNew)
+ w.pos(expr)
+ w.exprType(nil, expr.ArgList[0])
+ return
+
+ case "Sizeof":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.Code(exprSizeof)
+ w.pos(expr)
+ w.typ(w.p.typeOf(expr.ArgList[0]))
+ return
+
+ case "Alignof":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.Code(exprAlignof)
+ w.pos(expr)
+ w.typ(w.p.typeOf(expr.ArgList[0]))
+ return
+
+ case "Offsetof":
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+ selector := syntax.Unparen(expr.ArgList[0]).(*syntax.SelectorExpr)
+ index := w.p.info.Selections[selector].Index()
+
+ w.Code(exprOffsetof)
+ w.pos(expr)
+ w.typ(deref2(w.p.typeOf(selector.X)))
+ w.Len(len(index) - 1)
+ for _, idx := range index {
+ w.Len(idx)
+ }
+ return
+
+ case "append":
+ rtype = sliceElem(w.p.typeOf(expr))
+ case "copy":
+ typ := w.p.typeOf(expr.ArgList[0])
+ if tuple, ok := typ.(*types2.Tuple); ok { // "copy(g())"
+ typ = tuple.At(0).Type()
+ }
+ rtype = sliceElem(typ)
+ case "delete":
+ typ := w.p.typeOf(expr.ArgList[0])
+ if tuple, ok := typ.(*types2.Tuple); ok { // "delete(g())"
+ typ = tuple.At(0).Type()
+ }
+ rtype = typ
+ case "Slice":
+ rtype = sliceElem(w.p.typeOf(expr))
+ }
+ }
+
+ writeFunExpr := func() {
+ fun := syntax.Unparen(expr.Fun)
+
+ if selector, ok := fun.(*syntax.SelectorExpr); ok {
+ if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal {
+ w.Bool(true) // method call
+ typ := w.recvExpr(selector, sel)
+ w.methodExpr(selector, typ, sel)
+ return
+ }
+ }
+
+ w.Bool(false) // not a method call (i.e., normal function call)
+
+ if obj, inst := lookupObj(w.p, fun); w.Bool(obj != nil && inst.TypeArgs.Len() != 0) {
+ obj := obj.(*types2.Func)
+
+ w.pos(fun)
+ w.funcInst(obj, inst.TypeArgs)
+ return
+ }
+
+ w.expr(fun)
+ }
+
+ sigType := types2.CoreType(tv.Type).(*types2.Signature)
+ paramTypes := sigType.Params()
+
+ w.Code(exprCall)
+ writeFunExpr()
+ w.pos(expr)
+
+ paramType := func(i int) types2.Type {
+ if sigType.Variadic() && !expr.HasDots && i >= paramTypes.Len()-1 {
+ return paramTypes.At(paramTypes.Len() - 1).Type().(*types2.Slice).Elem()
+ }
+ return paramTypes.At(i).Type()
+ }
+
+ w.multiExpr(expr, paramType, expr.ArgList)
+ w.Bool(expr.HasDots)
+ if rtype != nil {
+ w.rtype(rtype)
+ }
+ }
+}
+
+func sliceElem(typ types2.Type) types2.Type {
+ return types2.CoreType(typ).(*types2.Slice).Elem()
+}
+
+func (w *writer) optExpr(expr syntax.Expr) {
+ if w.Bool(expr != nil) {
+ w.expr(expr)
+ }
+}
+
+// recvExpr writes out expr.X, but handles any implicit addressing,
+// dereferencing, and field selections appropriate for the method
+// selection.
+func (w *writer) recvExpr(expr *syntax.SelectorExpr, sel *types2.Selection) types2.Type {
+ index := sel.Index()
+ implicits := index[:len(index)-1]
+
+ w.Code(exprRecv)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.Len(len(implicits))
+
+ typ := w.p.typeOf(expr.X)
+ for _, ix := range implicits {
+ typ = deref2(typ).Underlying().(*types2.Struct).Field(ix).Type()
+ w.Len(ix)
+ }
+
+ recv := sel.Obj().(*types2.Func).Type().(*types2.Signature).Recv().Type()
+ if w.Bool(isPtrTo(typ, recv)) { // needs deref
+ typ = recv
+ } else if w.Bool(isPtrTo(recv, typ)) { // needs addr
+ typ = recv
+ }
+
+ return typ
+}
+
+// funcInst writes a reference to an instantiated function.
+func (w *writer) funcInst(obj *types2.Func, targs *types2.TypeList) {
+ info := w.p.objInstIdx(obj, targs, w.dict)
+
+ // Type arguments list contains derived types; we can emit a static
+ // call to the shaped function, but need to dynamically compute the
+ // runtime dictionary pointer.
+ if w.Bool(info.anyDerived()) {
+ w.Len(w.dict.subdictIdx(info))
+ return
+ }
+
+ // Type arguments list is statically known; we can emit a static
+ // call with a statically reference to the respective runtime
+ // dictionary.
+ w.objInfo(info)
+}
+
+// methodExpr writes out a reference to the method selected by
+// expr. sel should be the corresponding types2.Selection, and recv
+// the type produced after any implicit addressing, dereferencing, and
+// field selection. (Note: recv might differ from sel.Obj()'s receiver
+// parameter in the case of interface types, and is needed for
+// handling type parameter methods.)
+func (w *writer) methodExpr(expr *syntax.SelectorExpr, recv types2.Type, sel *types2.Selection) {
+ fun := sel.Obj().(*types2.Func)
+ sig := fun.Type().(*types2.Signature)
+
+ w.typ(recv)
+ w.typ(sig)
+ w.pos(expr)
+ w.selector(fun)
+
+ // Method on a type parameter. These require an indirect call
+ // through the current function's runtime dictionary.
+ if typeParam, ok := types2.Unalias(recv).(*types2.TypeParam); w.Bool(ok) {
+ typeParamIdx := w.dict.typeParamIndex(typeParam)
+ methodInfo := w.p.selectorIdx(fun)
+
+ w.Len(w.dict.typeParamMethodExprIdx(typeParamIdx, methodInfo))
+ return
+ }
+
+ if isInterface(recv) != isInterface(sig.Recv().Type()) {
+ w.p.fatalf(expr, "isInterface inconsistency: %v and %v", recv, sig.Recv().Type())
+ }
+
+ if !isInterface(recv) {
+ if named, ok := types2.Unalias(deref2(recv)).(*types2.Named); ok {
+ obj, targs := splitNamed(named)
+ info := w.p.objInstIdx(obj, targs, w.dict)
+
+ // Method on a derived receiver type. These can be handled by a
+ // static call to the shaped method, but require dynamically
+ // looking up the appropriate dictionary argument in the current
+ // function's runtime dictionary.
+ if w.p.hasImplicitTypeParams(obj) || info.anyDerived() {
+ w.Bool(true) // dynamic subdictionary
+ w.Len(w.dict.subdictIdx(info))
+ return
+ }
+
+ // Method on a fully known receiver type. These can be handled
+ // by a static call to the shaped method, and with a static
+ // reference to the receiver type's dictionary.
+ if targs.Len() != 0 {
+ w.Bool(false) // no dynamic subdictionary
+ w.Bool(true) // static dictionary
+ w.objInfo(info)
+ return
+ }
+ }
+ }
+
+ w.Bool(false) // no dynamic subdictionary
+ w.Bool(false) // no static dictionary
+}
+
+// multiExpr writes a sequence of expressions, where the i'th value is
+// implicitly converted to dstType(i). It also handles when exprs is a
+// single, multi-valued expression (e.g., the multi-valued argument in
+// an f(g()) call, or the RHS operand in a comma-ok assignment).
+func (w *writer) multiExpr(pos poser, dstType func(int) types2.Type, exprs []syntax.Expr) {
+ w.Sync(pkgbits.SyncMultiExpr)
+
+ if len(exprs) == 1 {
+ expr := exprs[0]
+ if tuple, ok := w.p.typeOf(expr).(*types2.Tuple); ok {
+ assert(tuple.Len() > 1)
+ w.Bool(true) // N:1 assignment
+ w.pos(pos)
+ w.expr(expr)
+
+ w.Len(tuple.Len())
+ for i := 0; i < tuple.Len(); i++ {
+ src := tuple.At(i).Type()
+ // TODO(mdempsky): Investigate not writing src here. I think
+ // the reader should be able to infer it from expr anyway.
+ w.typ(src)
+ if dst := dstType(i); w.Bool(dst != nil && !types2.Identical(src, dst)) {
+ if src == nil || dst == nil {
+ w.p.fatalf(pos, "src is %v, dst is %v", src, dst)
+ }
+ if !types2.AssignableTo(src, dst) {
+ w.p.fatalf(pos, "%v is not assignable to %v", src, dst)
+ }
+ w.typ(dst)
+ w.convRTTI(src, dst)
+ }
+ }
+ return
+ }
+ }
+
+ w.Bool(false) // N:N assignment
+ w.Len(len(exprs))
+ for i, expr := range exprs {
+ w.implicitConvExpr(dstType(i), expr)
+ }
+}
+
+// implicitConvExpr is like expr, but if dst is non-nil and different
+// from expr's type, then an implicit conversion operation is inserted
+// at expr's position.
+func (w *writer) implicitConvExpr(dst types2.Type, expr syntax.Expr) {
+ w.convertExpr(dst, expr, true)
+}
+
+func (w *writer) convertExpr(dst types2.Type, expr syntax.Expr, implicit bool) {
+ src := w.p.typeOf(expr)
+
+ // Omit implicit no-op conversions.
+ identical := dst == nil || types2.Identical(src, dst)
+ if implicit && identical {
+ w.expr(expr)
+ return
+ }
+
+ if implicit && !types2.AssignableTo(src, dst) {
+ w.p.fatalf(expr, "%v is not assignable to %v", src, dst)
+ }
+
+ w.Code(exprConvert)
+ w.Bool(implicit)
+ w.typ(dst)
+ w.pos(expr)
+ w.convRTTI(src, dst)
+ w.Bool(isTypeParam(dst))
+ w.Bool(identical)
+ w.expr(expr)
+}
+
+func (w *writer) compLit(lit *syntax.CompositeLit) {
+ typ := w.p.typeOf(lit)
+
+ w.Sync(pkgbits.SyncCompLit)
+ w.pos(lit)
+ w.typ(typ)
+
+ if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ var keyType, elemType types2.Type
+ var structType *types2.Struct
+ switch typ0 := typ; typ := types2.CoreType(typ).(type) {
+ default:
+ w.p.fatalf(lit, "unexpected composite literal type: %v", typ)
+ case *types2.Array:
+ elemType = typ.Elem()
+ case *types2.Map:
+ w.rtype(typ0)
+ keyType, elemType = typ.Key(), typ.Elem()
+ case *types2.Slice:
+ elemType = typ.Elem()
+ case *types2.Struct:
+ structType = typ
+ }
+
+ w.Len(len(lit.ElemList))
+ for i, elem := range lit.ElemList {
+ elemType := elemType
+ if structType != nil {
+ if kv, ok := elem.(*syntax.KeyValueExpr); ok {
+ // use position of expr.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ i = fieldIndex(w.p.info, structType, kv.Key.(*syntax.Name))
+ elem = kv.Value
+ } else {
+ w.pos(elem)
+ }
+ elemType = structType.Field(i).Type()
+ w.Len(i)
+ } else {
+ if kv, ok := elem.(*syntax.KeyValueExpr); w.Bool(ok) {
+ // use position of expr.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.implicitConvExpr(keyType, kv.Key)
+ elem = kv.Value
+ }
+ }
+ w.pos(elem)
+ w.implicitConvExpr(elemType, elem)
+ }
+}
+
+func (w *writer) funcLit(expr *syntax.FuncLit) {
+ sig := w.p.typeOf(expr).(*types2.Signature)
+
+ body, closureVars := w.p.bodyIdx(sig, expr.Body, w.dict)
+
+ w.Sync(pkgbits.SyncFuncLit)
+ w.pos(expr)
+ w.signature(sig)
+
+ w.Len(len(closureVars))
+ for _, cv := range closureVars {
+ w.pos(cv.pos)
+ w.useLocal(cv.pos, cv.var_)
+ }
+
+ w.Reloc(pkgbits.RelocBody, body)
+}
+
+type posVar struct {
+ pos syntax.Pos
+ var_ *types2.Var
+}
+
+func (p posVar) String() string {
+ return p.pos.String() + ":" + p.var_.String()
+}
+
+func (w *writer) exprList(expr syntax.Expr) {
+ w.Sync(pkgbits.SyncExprList)
+ w.exprs(syntax.UnpackListExpr(expr))
+}
+
+func (w *writer) exprs(exprs []syntax.Expr) {
+ w.Sync(pkgbits.SyncExprs)
+ w.Len(len(exprs))
+ for _, expr := range exprs {
+ w.expr(expr)
+ }
+}
+
+// rtype writes information so that the reader can construct an
+// expression of type *runtime._type representing typ.
+func (w *writer) rtype(typ types2.Type) {
+ typ = types2.Default(typ)
+
+ info := w.p.typIdx(typ, w.dict)
+ w.rtypeInfo(info)
+}
+
+func (w *writer) rtypeInfo(info typeInfo) {
+ w.Sync(pkgbits.SyncRType)
+
+ if w.Bool(info.derived) {
+ w.Len(w.dict.rtypeIdx(info))
+ } else {
+ w.typInfo(info)
+ }
+}
+
+// varDictIndex writes out information for populating DictIndex for
+// the ir.Name that will represent obj.
+func (w *writer) varDictIndex(obj *types2.Var) {
+ info := w.p.typIdx(obj.Type(), w.dict)
+ if w.Bool(info.derived) {
+ w.Len(w.dict.rtypeIdx(info))
+ }
+}
+
+func isUntyped(typ types2.Type) bool {
+ basic, ok := types2.Unalias(typ).(*types2.Basic)
+ return ok && basic.Info()&types2.IsUntyped != 0
+}
+
+func isTuple(typ types2.Type) bool {
+ _, ok := typ.(*types2.Tuple)
+ return ok
+}
+
+func (w *writer) itab(typ, iface types2.Type) {
+ typ = types2.Default(typ)
+ iface = types2.Default(iface)
+
+ typInfo := w.p.typIdx(typ, w.dict)
+ ifaceInfo := w.p.typIdx(iface, w.dict)
+
+ w.rtypeInfo(typInfo)
+ w.rtypeInfo(ifaceInfo)
+ if w.Bool(typInfo.derived || ifaceInfo.derived) {
+ w.Len(w.dict.itabIdx(typInfo, ifaceInfo))
+ }
+}
+
+// convRTTI writes information so that the reader can construct
+// expressions for converting from src to dst.
+func (w *writer) convRTTI(src, dst types2.Type) {
+ w.Sync(pkgbits.SyncConvRTTI)
+ w.itab(src, dst)
+}
+
+func (w *writer) exprType(iface types2.Type, typ syntax.Expr) {
+ base.Assertf(iface == nil || isInterface(iface), "%v must be nil or an interface type", iface)
+
+ tv := w.p.typeAndValue(typ)
+ assert(tv.IsType())
+
+ w.Sync(pkgbits.SyncExprType)
+ w.pos(typ)
+
+ if w.Bool(iface != nil && !iface.Underlying().(*types2.Interface).Empty()) {
+ w.itab(tv.Type, iface)
+ } else {
+ w.rtype(tv.Type)
+
+ info := w.p.typIdx(tv.Type, w.dict)
+ w.Bool(info.derived)
+ }
+}
+
+// isInterface reports whether typ is known to be an interface type.
+// If typ is a type parameter, then isInterface reports an internal
+// compiler error instead.
+func isInterface(typ types2.Type) bool {
+ if _, ok := types2.Unalias(typ).(*types2.TypeParam); ok {
+ // typ is a type parameter and may be instantiated as either a
+ // concrete or interface type, so the writer can't depend on
+ // knowing this.
+ base.Fatalf("%v is a type parameter", typ)
+ }
+
+ _, ok := typ.Underlying().(*types2.Interface)
+ return ok
+}
+
+// op writes an Op into the bitstream.
+func (w *writer) op(op ir.Op) {
+ // TODO(mdempsky): Remove in favor of explicit codes? Would make
+ // export data more stable against internal refactorings, but low
+ // priority at the moment.
+ assert(op != 0)
+ w.Sync(pkgbits.SyncOp)
+ w.Len(int(op))
+}
+
+// @@@ Package initialization
+
+// Caution: This code is still clumsy, because toolstash -cmp is
+// particularly sensitive to it.
+
+type typeDeclGen struct {
+ *syntax.TypeDecl
+ gen int
+
+ // Implicit type parameters in scope at this type declaration.
+ implicits []*types2.TypeName
+}
+
+type fileImports struct {
+ importedEmbed, importedUnsafe bool
+}
+
+// declCollector is a visitor type that collects compiler-needed
+// information about declarations that types2 doesn't track.
+//
+// Notably, it maps declared types and functions back to their
+// declaration statement, keeps track of implicit type parameters, and
+// assigns unique type "generation" numbers to local defined types.
+type declCollector struct {
+ pw *pkgWriter
+ typegen *int
+ file *fileImports
+ withinFunc bool
+ implicits []*types2.TypeName
+}
+
+func (c *declCollector) withTParams(obj types2.Object) *declCollector {
+ tparams := objTypeParams(obj)
+ n := tparams.Len()
+ if n == 0 {
+ return c
+ }
+
+ copy := *c
+ copy.implicits = copy.implicits[:len(copy.implicits):len(copy.implicits)]
+ for i := 0; i < n; i++ {
+ copy.implicits = append(copy.implicits, tparams.At(i).Obj())
+ }
+ return &copy
+}
+
+func (c *declCollector) Visit(n syntax.Node) syntax.Visitor {
+ pw := c.pw
+
+ switch n := n.(type) {
+ case *syntax.File:
+ pw.checkPragmas(n.Pragma, ir.GoBuildPragma, false)
+
+ case *syntax.ImportDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ switch pw.info.PkgNameOf(n).Imported().Path() {
+ case "embed":
+ c.file.importedEmbed = true
+ case "unsafe":
+ c.file.importedUnsafe = true
+ }
+
+ case *syntax.ConstDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ case *syntax.FuncDecl:
+ pw.checkPragmas(n.Pragma, funcPragmas, false)
+
+ obj := pw.info.Defs[n.Name].(*types2.Func)
+ pw.funDecls[obj] = n
+
+ return c.withTParams(obj)
+
+ case *syntax.TypeDecl:
+ obj := pw.info.Defs[n.Name].(*types2.TypeName)
+ d := typeDeclGen{TypeDecl: n, implicits: c.implicits}
+
+ if n.Alias {
+ pw.checkPragmas(n.Pragma, 0, false)
+ } else {
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ // Assign a unique ID to function-scoped defined types.
+ if c.withinFunc {
+ *c.typegen++
+ d.gen = *c.typegen
+ }
+ }
+
+ pw.typDecls[obj] = d
+
+ // TODO(mdempsky): Omit? Not strictly necessary; only matters for
+ // type declarations within function literals within parameterized
+ // type declarations, but types2 the function literals will be
+ // constant folded away.
+ return c.withTParams(obj)
+
+ case *syntax.VarDecl:
+ pw.checkPragmas(n.Pragma, 0, true)
+
+ if p, ok := n.Pragma.(*pragmas); ok && len(p.Embeds) > 0 {
+ if err := checkEmbed(n, c.file.importedEmbed, c.withinFunc); err != nil {
+ pw.errorf(p.Embeds[0].Pos, "%s", err)
+ }
+ }
+
+ case *syntax.BlockStmt:
+ if !c.withinFunc {
+ copy := *c
+ copy.withinFunc = true
+ return &copy
+ }
+ }
+
+ return c
+}
+
+func (pw *pkgWriter) collectDecls(noders []*noder) {
+ var typegen int
+ for _, p := range noders {
+ var file fileImports
+
+ syntax.Walk(p.file, &declCollector{
+ pw: pw,
+ typegen: &typegen,
+ file: &file,
+ })
+
+ pw.cgoPragmas = append(pw.cgoPragmas, p.pragcgobuf...)
+
+ for _, l := range p.linknames {
+ if !file.importedUnsafe {
+ pw.errorf(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ continue
+ }
+
+ switch obj := pw.curpkg.Scope().Lookup(l.local).(type) {
+ case *types2.Func, *types2.Var:
+ if _, ok := pw.linknames[obj]; !ok {
+ pw.linknames[obj] = l.remote
+ } else {
+ pw.errorf(l.pos, "duplicate //go:linkname for %s", l.local)
+ }
+
+ default:
+ if types.AllowsGoVersion(1, 18) {
+ pw.errorf(l.pos, "//go:linkname must refer to declared function or variable")
+ }
+ }
+ }
+ }
+}
+
+func (pw *pkgWriter) checkPragmas(p syntax.Pragma, allowed ir.PragmaFlag, embedOK bool) {
+ if p == nil {
+ return
+ }
+ pragma := p.(*pragmas)
+
+ for _, pos := range pragma.Pos {
+ if pos.Flag&^allowed != 0 {
+ pw.errorf(pos.Pos, "misplaced compiler directive")
+ }
+ }
+
+ if !embedOK {
+ for _, e := range pragma.Embeds {
+ pw.errorf(e.Pos, "misplaced go:embed directive")
+ }
+ }
+}
+
+func (w *writer) pkgInit(noders []*noder) {
+ w.Len(len(w.p.cgoPragmas))
+ for _, cgoPragma := range w.p.cgoPragmas {
+ w.Strings(cgoPragma)
+ }
+
+ w.pkgInitOrder()
+
+ w.Sync(pkgbits.SyncDecls)
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ w.pkgDecl(decl)
+ }
+ }
+ w.Code(declEnd)
+
+ w.Sync(pkgbits.SyncEOF)
+}
+
+func (w *writer) pkgInitOrder() {
+ // TODO(mdempsky): Write as a function body instead?
+ w.Len(len(w.p.info.InitOrder))
+ for _, init := range w.p.info.InitOrder {
+ w.Len(len(init.Lhs))
+ for _, v := range init.Lhs {
+ w.obj(v, nil)
+ }
+ w.expr(init.Rhs)
+ }
+}
+
+func (w *writer) pkgDecl(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ImportDecl:
+
+ case *syntax.ConstDecl:
+ w.Code(declOther)
+ w.pkgObjs(decl.NameList...)
+
+ case *syntax.FuncDecl:
+ if decl.Name.Value == "_" {
+ break // skip blank functions
+ }
+
+ obj := w.p.info.Defs[decl.Name].(*types2.Func)
+ sig := obj.Type().(*types2.Signature)
+
+ if sig.RecvTypeParams() != nil || sig.TypeParams() != nil {
+ break // skip generic functions
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.Code(declMethod)
+ w.typ(recvBase(recv))
+ w.selector(obj)
+ break
+ }
+
+ w.Code(declFunc)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.TypeDecl:
+ if len(decl.TParamList) != 0 {
+ break // skip generic type decls
+ }
+
+ if decl.Name.Value == "_" {
+ break // skip blank type decls
+ }
+
+ name := w.p.info.Defs[decl.Name].(*types2.TypeName)
+ // Skip type declarations for interfaces that are only usable as
+ // type parameter bounds.
+ if iface, ok := name.Type().Underlying().(*types2.Interface); ok && !iface.IsMethodSet() {
+ break
+ }
+
+ w.Code(declOther)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.VarDecl:
+ w.Code(declVar)
+ w.pkgObjs(decl.NameList...)
+
+ var embeds []pragmaEmbed
+ if p, ok := decl.Pragma.(*pragmas); ok {
+ embeds = p.Embeds
+ }
+ w.Len(len(embeds))
+ for _, embed := range embeds {
+ w.pos(embed.Pos)
+ w.Strings(embed.Patterns)
+ }
+ }
+}
+
+func (w *writer) pkgObjs(names ...*syntax.Name) {
+ w.Sync(pkgbits.SyncDeclNames)
+ w.Len(len(names))
+
+ for _, name := range names {
+ obj, ok := w.p.info.Defs[name]
+ assert(ok)
+
+ w.Sync(pkgbits.SyncDeclName)
+ w.obj(obj, nil)
+ }
+}
+
+// @@@ Helpers
+
+// staticBool analyzes a boolean expression and reports whether it's
+// always true (positive result), always false (negative result), or
+// unknown (zero).
+//
+// It also simplifies the expression while preserving semantics, if
+// possible.
+func (pw *pkgWriter) staticBool(ep *syntax.Expr) int {
+ if val := pw.typeAndValue(*ep).Value; val != nil {
+ if constant.BoolVal(val) {
+ return +1
+ } else {
+ return -1
+ }
+ }
+
+ if e, ok := (*ep).(*syntax.Operation); ok {
+ switch e.Op {
+ case syntax.Not:
+ return pw.staticBool(&e.X)
+
+ case syntax.AndAnd:
+ x := pw.staticBool(&e.X)
+ if x < 0 {
+ *ep = e.X
+ return x
+ }
+
+ y := pw.staticBool(&e.Y)
+ if x > 0 || y < 0 {
+ if pw.typeAndValue(e.X).Value != nil {
+ *ep = e.Y
+ }
+ return y
+ }
+
+ case syntax.OrOr:
+ x := pw.staticBool(&e.X)
+ if x > 0 {
+ *ep = e.X
+ return x
+ }
+
+ y := pw.staticBool(&e.Y)
+ if x < 0 || y > 0 {
+ if pw.typeAndValue(e.X).Value != nil {
+ *ep = e.Y
+ }
+ return y
+ }
+ }
+ }
+
+ return 0
+}
+
+// hasImplicitTypeParams reports whether obj is a defined type with
+// implicit type parameters (e.g., declared within a generic function
+// or method).
+func (pw *pkgWriter) hasImplicitTypeParams(obj *types2.TypeName) bool {
+ if obj.Pkg() == pw.curpkg {
+ decl, ok := pw.typDecls[obj]
+ assert(ok)
+ if len(decl.implicits) != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// isDefinedType reports whether obj is a defined type.
+func isDefinedType(obj types2.Object) bool {
+ if obj, ok := obj.(*types2.TypeName); ok {
+ return !obj.IsAlias()
+ }
+ return false
+}
+
+// isGlobal reports whether obj was declared at package scope.
+//
+// Caveat: blank objects are not declared.
+func isGlobal(obj types2.Object) bool {
+ return obj.Parent() == obj.Pkg().Scope()
+}
+
+// lookupObj returns the object that expr refers to, if any. If expr
+// is an explicit instantiation of a generic object, then the instance
+// object is returned as well.
+func lookupObj(p *pkgWriter, expr syntax.Expr) (obj types2.Object, inst types2.Instance) {
+ if index, ok := expr.(*syntax.IndexExpr); ok {
+ args := syntax.UnpackListExpr(index.Index)
+ if len(args) == 1 {
+ tv := p.typeAndValue(args[0])
+ if tv.IsValue() {
+ return // normal index expression
+ }
+ }
+
+ expr = index.X
+ }
+
+ // Strip package qualifier, if present.
+ if sel, ok := expr.(*syntax.SelectorExpr); ok {
+ if !isPkgQual(p.info, sel) {
+ return // normal selector expression
+ }
+ expr = sel.Sel
+ }
+
+ if name, ok := expr.(*syntax.Name); ok {
+ obj = p.info.Uses[name]
+ inst = p.info.Instances[name]
+ }
+ return
+}
+
+// isPkgQual reports whether the given selector expression is a
+// package-qualified identifier.
+func isPkgQual(info *types2.Info, sel *syntax.SelectorExpr) bool {
+ if name, ok := sel.X.(*syntax.Name); ok {
+ _, isPkgName := info.Uses[name].(*types2.PkgName)
+ return isPkgName
+ }
+ return false
+}
+
+// isNil reports whether expr is a (possibly parenthesized) reference
+// to the predeclared nil value.
+func isNil(p *pkgWriter, expr syntax.Expr) bool {
+ tv := p.typeAndValue(expr)
+ return tv.IsNil()
+}
+
+// isBuiltin reports whether expr is a (possibly parenthesized)
+// referenced to the specified built-in function.
+func (pw *pkgWriter) isBuiltin(expr syntax.Expr, builtin string) bool {
+ if name, ok := syntax.Unparen(expr).(*syntax.Name); ok && name.Value == builtin {
+ return pw.typeAndValue(name).IsBuiltin()
+ }
+ return false
+}
+
+// recvBase returns the base type for the given receiver parameter.
+func recvBase(recv *types2.Var) *types2.Named {
+ typ := types2.Unalias(recv.Type())
+ if ptr, ok := typ.(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ return typ.(*types2.Named)
+}
+
+// namesAsExpr returns a list of names as a syntax.Expr.
+func namesAsExpr(names []*syntax.Name) syntax.Expr {
+ if len(names) == 1 {
+ return names[0]
+ }
+
+ exprs := make([]syntax.Expr, len(names))
+ for i, name := range names {
+ exprs[i] = name
+ }
+ return &syntax.ListExpr{ElemList: exprs}
+}
+
+// fieldIndex returns the index of the struct field named by key.
+func fieldIndex(info *types2.Info, str *types2.Struct, key *syntax.Name) int {
+ field := info.Uses[key].(*types2.Var)
+
+ for i := 0; i < str.NumFields(); i++ {
+ if str.Field(i) == field {
+ return i
+ }
+ }
+
+ panic(fmt.Sprintf("%s: %v is not a field of %v", key.Pos(), field, str))
+}
+
+// objTypeParams returns the type parameters on the given object.
+func objTypeParams(obj types2.Object) *types2.TypeParamList {
+ switch obj := obj.(type) {
+ case *types2.Func:
+ sig := obj.Type().(*types2.Signature)
+ if sig.Recv() != nil {
+ return sig.RecvTypeParams()
+ }
+ return sig.TypeParams()
+ case *types2.TypeName:
+ if !obj.IsAlias() {
+ return obj.Type().(*types2.Named).TypeParams()
+ }
+ }
+ return nil
+}
+
+// splitNamed decomposes a use of a defined type into its original
+// type definition and the type arguments used to instantiate it.
+func splitNamed(typ *types2.Named) (*types2.TypeName, *types2.TypeList) {
+ base.Assertf(typ.TypeParams().Len() == typ.TypeArgs().Len(), "use of uninstantiated type: %v", typ)
+
+ orig := typ.Origin()
+ base.Assertf(orig.TypeArgs() == nil, "origin %v of %v has type arguments", orig, typ)
+ base.Assertf(typ.Obj() == orig.Obj(), "%v has object %v, but %v has object %v", typ, typ.Obj(), orig, orig.Obj())
+
+ return typ.Obj(), typ.TypeArgs()
+}
+
+func asPragmaFlag(p syntax.Pragma) ir.PragmaFlag {
+ if p == nil {
+ return 0
+ }
+ return p.(*pragmas).Flag
+}
+
+func asWasmImport(p syntax.Pragma) *WasmImport {
+ if p == nil {
+ return nil
+ }
+ return p.(*pragmas).WasmImport
+}
+
+// isPtrTo reports whether from is the type *to.
+func isPtrTo(from, to types2.Type) bool {
+ ptr, ok := types2.Unalias(from).(*types2.Pointer)
+ return ok && types2.Identical(ptr.Elem(), to)
+}
+
+// hasFallthrough reports whether stmts ends in a fallthrough
+// statement.
+func hasFallthrough(stmts []syntax.Stmt) bool {
+ last, ok := lastNonEmptyStmt(stmts).(*syntax.BranchStmt)
+ return ok && last.Tok == syntax.Fallthrough
+}
+
+// lastNonEmptyStmt returns the last non-empty statement in list, if
+// any.
+func lastNonEmptyStmt(stmts []syntax.Stmt) syntax.Stmt {
+ for i := len(stmts) - 1; i >= 0; i-- {
+ stmt := stmts[i]
+ if _, ok := stmt.(*syntax.EmptyStmt); !ok {
+ return stmt
+ }
+ }
+ return nil
+}
+
+// terminates reports whether stmt terminates normal control flow
+// (i.e., does not merely advance to the following statement).
+func (pw *pkgWriter) terminates(stmt syntax.Stmt) bool {
+ switch stmt := stmt.(type) {
+ case *syntax.BranchStmt:
+ if stmt.Tok == syntax.Goto {
+ return true
+ }
+ case *syntax.ReturnStmt:
+ return true
+ case *syntax.ExprStmt:
+ if call, ok := syntax.Unparen(stmt.X).(*syntax.CallExpr); ok {
+ if pw.isBuiltin(call.Fun, "panic") {
+ return true
+ }
+ }
+
+ // The handling of BlockStmt here is approximate, but it serves to
+ // allow dead-code elimination for:
+ //
+ // if true {
+ // return x
+ // }
+ // unreachable
+ case *syntax.IfStmt:
+ cond := pw.staticBool(&stmt.Cond)
+ return (cond < 0 || pw.terminates(stmt.Then)) && (cond > 0 || pw.terminates(stmt.Else))
+ case *syntax.BlockStmt:
+ return pw.terminates(lastNonEmptyStmt(stmt.List))
+ }
+
+ return false
+}
diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go
new file mode 100644
index 0000000..7774467
--- /dev/null
+++ b/src/cmd/compile/internal/objw/objw.go
@@ -0,0 +1,102 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "encoding/binary"
+)
+
+// Uint8 writes an unsigned byte v into s at offset off,
+// and returns the next unused offset (i.e., off+1).
+func Uint8(s *obj.LSym, off int, v uint8) int {
+ return UintN(s, off, uint64(v), 1)
+}
+
+func Uint16(s *obj.LSym, off int, v uint16) int {
+ return UintN(s, off, uint64(v), 2)
+}
+
+func Uint32(s *obj.LSym, off int, v uint32) int {
+ return UintN(s, off, uint64(v), 4)
+}
+
+func Uintptr(s *obj.LSym, off int, v uint64) int {
+ return UintN(s, off, v, types.PtrSize)
+}
+
+// Uvarint writes a varint v into s at offset off,
+// and returns the next unused offset.
+func Uvarint(s *obj.LSym, off int, v uint64) int {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], v)
+ return int(s.WriteBytes(base.Ctxt, int64(off), buf[:n]))
+}
+
+func Bool(s *obj.LSym, off int, v bool) int {
+ w := 0
+ if v {
+ w = 1
+ }
+ return UintN(s, off, uint64(w), 1)
+}
+
+// UintN writes an unsigned integer v of size wid bytes into s at offset off,
+// and returns the next unused offset.
+func UintN(s *obj.LSym, off int, v uint64, wid int) int {
+ if off&(wid-1) != 0 {
+ base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ }
+ s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
+ return off + wid
+}
+
+func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(types.RoundUp(int64(off), int64(types.PtrSize)))
+ s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+ off += types.PtrSize
+ return off
+}
+
+func SymPtrWeak(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(types.RoundUp(int64(off), int64(types.PtrSize)))
+ s.WriteWeakAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+ off += types.PtrSize
+ return off
+}
+
+func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+func Global(s *obj.LSym, width int32, flags int16) {
+ if flags&obj.LOCAL != 0 {
+ s.Set(obj.AttrLocal, true)
+ flags &^= obj.LOCAL
+ }
+ base.Ctxt.Globl(s, int64(width), int(flags))
+}
+
+// BitVec writes the contents of bv into s as sequence of bytes
+// in little-endian order, and returns the next unused offset.
+func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
+ // Runtime reads the bitmaps as byte arrays. Oblige.
+ for j := 0; int32(j) < bv.N; j += 8 {
+ word := bv.B[j/32]
+ off = Uint8(s, off, uint8(word>>(uint(j)%32)))
+ }
+ return off
+}
diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go
new file mode 100644
index 0000000..84fb996
--- /dev/null
+++ b/src/cmd/compile/internal/objw/prog.go
@@ -0,0 +1,214 @@
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "internal/abi"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// NewProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
+func NewProgs(fn *ir.Func, worker int) *Progs {
+ pp := new(Progs)
+ if base.Ctxt.CanReuseProgs() {
+ sz := len(sharedProgArray) / base.Flag.LowerC
+ pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
+ }
+ pp.CurFunc = fn
+
+ // prime the pump
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+
+ pp.Pos = fn.Pos()
+ pp.SetText(fn)
+ // PCDATA tables implicitly start with index -1.
+ pp.PrevLive = -1
+ pp.NextLive = pp.PrevLive
+ pp.NextUnsafe = pp.PrevUnsafe
+ return pp
+}
+
+// Progs accumulates Progs for a function and converts them into machine code.
+type Progs struct {
+ Text *obj.Prog // ATEXT Prog for this function
+ Next *obj.Prog // next Prog
+ PC int64 // virtual PC; count of Progs
+ Pos src.XPos // position to use for new Progs
+ CurFunc *ir.Func // fn these Progs are for
+ Cache []obj.Prog // local progcache
+ CacheIndex int // first free element of progcache
+
+ NextLive StackMapIndex // liveness index for the next Prog
+ PrevLive StackMapIndex // last emitted liveness index
+
+ NextUnsafe bool // unsafe mark for the next Prog
+ PrevUnsafe bool // last emitted unsafe mark
+}
+
+type StackMapIndex int
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare StackMapIndex = -1000
+
+func (s StackMapIndex) StackMapValid() bool {
+ return s != StackMapDontCare
+}
+
+func (pp *Progs) NewProg() *obj.Prog {
+ var p *obj.Prog
+ if pp.CacheIndex < len(pp.Cache) {
+ p = &pp.Cache[pp.CacheIndex]
+ pp.CacheIndex++
+ } else {
+ p = new(obj.Prog)
+ }
+ p.Ctxt = base.Ctxt
+ return p
+}
+
+// Flush converts from pp to machine code.
+func (pp *Progs) Flush() {
+ plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg)
+}
+
+// Free clears pp and any associated resources.
+func (pp *Progs) Free() {
+ if base.Ctxt.CanReuseProgs() {
+ // Clear progs to enable GC and avoid abuse.
+ s := pp.Cache[:pp.CacheIndex]
+ for i := range s {
+ s[i] = obj.Prog{}
+ }
+ }
+ // Clear pp to avoid abuse.
+ *pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+ if pp.NextLive != StackMapDontCare && pp.NextLive != pp.PrevLive {
+ // Emit stack map index change.
+ idx := pp.NextLive
+ pp.PrevLive = idx
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_StackMapIndex)
+ p.To.SetConst(int64(idx))
+ }
+ if pp.NextUnsafe != pp.PrevUnsafe {
+ // Emit unsafe-point marker.
+ pp.PrevUnsafe = pp.NextUnsafe
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_UnsafePoint)
+ if pp.NextUnsafe {
+ p.To.SetConst(abi.UnsafePointUnsafe)
+ } else {
+ p.To.SetConst(abi.UnsafePointSafe)
+ }
+ }
+
+ p := pp.Next
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+ p.Link = pp.Next
+
+ if !pp.Pos.IsKnown() && base.Flag.K != 0 {
+ base.Warn("prog: unknown position (line 0)")
+ }
+
+ p.As = as
+ p.Pos = pp.Pos
+ if pp.Pos.IsStmt() == src.PosIsStmt {
+ // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
+ if LosesStmtMark(as) {
+ return p
+ }
+ pp.Pos = pp.Pos.WithNotStmt()
+ }
+ return p
+}
+
+func (pp *Progs) Clear(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = pp.PC
+ pp.PC++
+}
+
+func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+ q := pp.NewProg()
+ pp.Clear(q)
+ q.As = as
+ q.Pos = p.Pos
+ q.From.Type = ftype
+ q.From.Reg = freg
+ q.From.Offset = foffset
+ q.To.Type = ttype
+ q.To.Reg = treg
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+func (pp *Progs) SetText(fn *ir.Func) {
+ if pp.Text != nil {
+ base.Fatalf("Progs.SetText called twice")
+ }
+ ptxt := pp.Prog(obj.ATEXT)
+ pp.Text = ptxt
+
+ fn.LSym.Func().Text = ptxt
+ ptxt.From.Type = obj.TYPE_MEM
+ ptxt.From.Name = obj.NAME_EXTERN
+ ptxt.From.Sym = fn.LSym
+}
+
+// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF.
+// The attributes from some opcodes are lost in translation.
+// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC.
+// Should try to fix it there.
+func LosesStmtMark(as obj.As) bool {
+ // is_stmt does not work for these; it DOES for ANOP even though that generates no code.
+ return as == obj.APCDATA || as == obj.AFUNCDATA
+}
diff --git a/src/cmd/compile/internal/pgo/internal/graph/graph.go b/src/cmd/compile/internal/pgo/internal/graph/graph.go
new file mode 100644
index 0000000..4d89b1b
--- /dev/null
+++ b/src/cmd/compile/internal/pgo/internal/graph/graph.go
@@ -0,0 +1,520 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package graph represents a pprof profile as a directed graph.
+//
+// This package is a simplified fork of github.com/google/pprof/internal/graph.
+package graph
+
+import (
+ "fmt"
+ "internal/profile"
+ "sort"
+ "strings"
+)
+
+// Options encodes the options for constructing a graph
+type Options struct {
+ SampleValue func(s []int64) int64 // Function to compute the value of a sample
+ SampleMeanDivisor func(s []int64) int64 // Function to compute the divisor for mean graphs, or nil
+
+ DropNegative bool // Drop nodes with overall negative values
+
+ KeptNodes NodeSet // If non-nil, only use nodes in this set
+}
+
+// Nodes is an ordered collection of graph nodes.
+type Nodes []*Node
+
+// Node is an entry on a profiling report. It represents a unique
+// program location.
+type Node struct {
+ // Info describes the source location associated to this node.
+ Info NodeInfo
+
+ // Function represents the function that this node belongs to. On
+ // graphs with sub-function resolution (eg line number or
+ // addresses), two nodes in a NodeMap that are part of the same
+ // function have the same value of Node.Function. If the Node
+ // represents the whole function, it points back to itself.
+ Function *Node
+
+ // Values associated to this node. Flat is exclusive to this node,
+ // Cum includes all descendents.
+ Flat, FlatDiv, Cum, CumDiv int64
+
+ // In and out Contains the nodes immediately reaching or reached by
+ // this node.
+ In, Out EdgeMap
+}
+
+// Graph summarizes a performance profile into a format that is
+// suitable for visualization.
+type Graph struct {
+ Nodes Nodes
+}
+
+// FlatValue returns the exclusive value for this node, computing the
+// mean if a divisor is available.
+func (n *Node) FlatValue() int64 {
+ if n.FlatDiv == 0 {
+ return n.Flat
+ }
+ return n.Flat / n.FlatDiv
+}
+
+// CumValue returns the inclusive value for this node, computing the
+// mean if a divisor is available.
+func (n *Node) CumValue() int64 {
+ if n.CumDiv == 0 {
+ return n.Cum
+ }
+ return n.Cum / n.CumDiv
+}
+
+// AddToEdge increases the weight of an edge between two nodes. If
+// there isn't such an edge one is created.
+func (n *Node) AddToEdge(to *Node, v int64, residual, inline bool) {
+ n.AddToEdgeDiv(to, 0, v, residual, inline)
+}
+
+// AddToEdgeDiv increases the weight of an edge between two nodes. If
+// there isn't such an edge one is created.
+func (n *Node) AddToEdgeDiv(to *Node, dv, v int64, residual, inline bool) {
+ if e := n.Out.FindTo(to); e != nil {
+ e.WeightDiv += dv
+ e.Weight += v
+ if residual {
+ e.Residual = true
+ }
+ if !inline {
+ e.Inline = false
+ }
+ return
+ }
+
+ info := &Edge{Src: n, Dest: to, WeightDiv: dv, Weight: v, Residual: residual, Inline: inline}
+ n.Out.Add(info)
+ to.In.Add(info)
+}
+
+// NodeInfo contains the attributes for a node.
+type NodeInfo struct {
+ Name string
+ Address uint64
+ StartLine, Lineno int
+}
+
+// PrintableName calls the Node's Formatter function with a single space separator.
+func (i *NodeInfo) PrintableName() string {
+ return strings.Join(i.NameComponents(), " ")
+}
+
+// NameComponents returns the components of the printable name to be used for a node.
+func (i *NodeInfo) NameComponents() []string {
+ var name []string
+ if i.Address != 0 {
+ name = append(name, fmt.Sprintf("%016x", i.Address))
+ }
+ if fun := i.Name; fun != "" {
+ name = append(name, fun)
+ }
+
+ switch {
+ case i.Lineno != 0:
+ // User requested line numbers, provide what we have.
+ name = append(name, fmt.Sprintf(":%d", i.Lineno))
+ case i.Name != "":
+ // User requested function name. It was already included.
+ default:
+ // Do not leave it empty if there is no information at all.
+ name = append(name, "<unknown>")
+ }
+ return name
+}
+
+// NodeMap maps from a node info struct to a node. It is used to merge
+// report entries with the same info.
+type NodeMap map[NodeInfo]*Node
+
+// NodeSet is a collection of node info structs.
+type NodeSet map[NodeInfo]bool
+
+// NodePtrSet is a collection of nodes. Trimming a graph or tree requires a set
+// of objects which uniquely identify the nodes to keep. In a graph, NodeInfo
+// works as a unique identifier; however, in a tree multiple nodes may share
+// identical NodeInfos. A *Node does uniquely identify a node so we can use that
+// instead. Though a *Node also uniquely identifies a node in a graph,
+// currently, during trimming, graphs are rebuilt from scratch using only the
+// NodeSet, so there would not be the required context of the initial graph to
+// allow for the use of *Node.
+type NodePtrSet map[*Node]bool
+
+// FindOrInsertNode takes the info for a node and either returns a matching node
+// from the node map if one exists, or adds one to the map if one does not.
+// If kept is non-nil, nodes are only added if they can be located on it.
+func (nm NodeMap) FindOrInsertNode(info NodeInfo, kept NodeSet) *Node {
+ if kept != nil {
+ if _, ok := kept[info]; !ok {
+ return nil
+ }
+ }
+
+ if n, ok := nm[info]; ok {
+ return n
+ }
+
+ n := &Node{
+ Info: info,
+ }
+ nm[info] = n
+ if info.Address == 0 && info.Lineno == 0 {
+ // This node represents the whole function, so point Function
+ // back to itself.
+ n.Function = n
+ return n
+ }
+ // Find a node that represents the whole function.
+ info.Address = 0
+ info.Lineno = 0
+ n.Function = nm.FindOrInsertNode(info, nil)
+ return n
+}
+
+// EdgeMap is used to represent the incoming/outgoing edges from a node.
+type EdgeMap []*Edge
+
+func (em EdgeMap) FindTo(n *Node) *Edge {
+ for _, e := range em {
+ if e.Dest == n {
+ return e
+ }
+ }
+ return nil
+}
+
+func (em *EdgeMap) Add(e *Edge) {
+ *em = append(*em, e)
+}
+
+func (em *EdgeMap) Delete(e *Edge) {
+ for i, edge := range *em {
+ if edge == e {
+ (*em)[i] = (*em)[len(*em)-1]
+ *em = (*em)[:len(*em)-1]
+ return
+ }
+ }
+}
+
+// Edge contains any attributes to be represented about edges in a graph.
+type Edge struct {
+ Src, Dest *Node
+ // The summary weight of the edge
+ Weight, WeightDiv int64
+
+ // residual edges connect nodes that were connected through a
+ // separate node, which has been removed from the report.
+ Residual bool
+ // An inline edge represents a call that was inlined into the caller.
+ Inline bool
+}
+
+// WeightValue returns the weight value for this edge, normalizing if a
+// divisor is available.
+func (e *Edge) WeightValue() int64 {
+ if e.WeightDiv == 0 {
+ return e.Weight
+ }
+ return e.Weight / e.WeightDiv
+}
+
+// NewGraph computes a graph from a profile.
+func NewGraph(prof *profile.Profile, o *Options) *Graph {
+ nodes, locationMap := CreateNodes(prof, o)
+ seenNode := make(map[*Node]bool)
+ seenEdge := make(map[nodePair]bool)
+ for _, sample := range prof.Sample {
+ var w, dw int64
+ w = o.SampleValue(sample.Value)
+ if o.SampleMeanDivisor != nil {
+ dw = o.SampleMeanDivisor(sample.Value)
+ }
+ if dw == 0 && w == 0 {
+ continue
+ }
+ for k := range seenNode {
+ delete(seenNode, k)
+ }
+ for k := range seenEdge {
+ delete(seenEdge, k)
+ }
+ var parent *Node
+ // A residual edge goes over one or more nodes that were not kept.
+ residual := false
+
+ // Group the sample frames, based on a global map.
+ // Count only the last two frames as a call edge. Frames higher up
+ // the stack are unlikely to be repeated calls (e.g. runtime.main
+ // calling main.main). So adding weights to call edges higher up
+ // the stack may be not reflecting the actual call edge weights
+ // in the program. Without a branch profile this is just an
+ // approximation.
+ i := 1
+ if last := len(sample.Location) - 1; last < i {
+ i = last
+ }
+ for ; i >= 0; i-- {
+ l := sample.Location[i]
+ locNodes := locationMap.get(l.ID)
+ for ni := len(locNodes) - 1; ni >= 0; ni-- {
+ n := locNodes[ni]
+ if n == nil {
+ residual = true
+ continue
+ }
+ // Add cum weight to all nodes in stack, avoiding double counting.
+ _, sawNode := seenNode[n]
+ if !sawNode {
+ seenNode[n] = true
+ n.addSample(dw, w, false)
+ }
+ // Update edge weights for all edges in stack, avoiding double counting.
+ if (!sawNode || !seenEdge[nodePair{n, parent}]) && parent != nil && n != parent {
+ seenEdge[nodePair{n, parent}] = true
+ parent.AddToEdgeDiv(n, dw, w, residual, ni != len(locNodes)-1)
+ }
+
+ parent = n
+ residual = false
+ }
+ }
+ if parent != nil && !residual {
+ // Add flat weight to leaf node.
+ parent.addSample(dw, w, true)
+ }
+ }
+
+ return selectNodesForGraph(nodes, o.DropNegative)
+}
+
+func selectNodesForGraph(nodes Nodes, dropNegative bool) *Graph {
+ // Collect nodes into a graph.
+ gNodes := make(Nodes, 0, len(nodes))
+ for _, n := range nodes {
+ if n == nil {
+ continue
+ }
+ if n.Cum == 0 && n.Flat == 0 {
+ continue
+ }
+ if dropNegative && isNegative(n) {
+ continue
+ }
+ gNodes = append(gNodes, n)
+ }
+ return &Graph{gNodes}
+}
+
+type nodePair struct {
+ src, dest *Node
+}
+
+// isNegative returns true if the node is considered as "negative" for the
+// purposes of drop_negative.
+func isNegative(n *Node) bool {
+ switch {
+ case n.Flat < 0:
+ return true
+ case n.Flat == 0 && n.Cum < 0:
+ return true
+ default:
+ return false
+ }
+}
+
+type locationMap struct {
+ s []Nodes // a slice for small sequential IDs
+ m map[uint64]Nodes // fallback for large IDs (unlikely)
+}
+
+func (l *locationMap) add(id uint64, n Nodes) {
+ if id < uint64(len(l.s)) {
+ l.s[id] = n
+ } else {
+ l.m[id] = n
+ }
+}
+
+func (l locationMap) get(id uint64) Nodes {
+ if id < uint64(len(l.s)) {
+ return l.s[id]
+ } else {
+ return l.m[id]
+ }
+}
+
+// CreateNodes creates graph nodes for all locations in a profile. It
+// returns set of all nodes, plus a mapping of each location to the
+// set of corresponding nodes (one per location.Line).
+func CreateNodes(prof *profile.Profile, o *Options) (Nodes, locationMap) {
+ locations := locationMap{make([]Nodes, len(prof.Location)+1), make(map[uint64]Nodes)}
+ nm := make(NodeMap, len(prof.Location))
+ for _, l := range prof.Location {
+ lines := l.Line
+ if len(lines) == 0 {
+ lines = []profile.Line{{}} // Create empty line to include location info.
+ }
+ nodes := make(Nodes, len(lines))
+ for ln := range lines {
+ nodes[ln] = nm.findOrInsertLine(l, lines[ln], o)
+ }
+ locations.add(l.ID, nodes)
+ }
+ return nm.nodes(), locations
+}
+
+func (nm NodeMap) nodes() Nodes {
+ nodes := make(Nodes, 0, len(nm))
+ for _, n := range nm {
+ nodes = append(nodes, n)
+ }
+ return nodes
+}
+
+func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Options) *Node {
+ var objfile string
+ if m := l.Mapping; m != nil && m.File != "" {
+ objfile = m.File
+ }
+
+ if ni := nodeInfo(l, li, objfile, o); ni != nil {
+ return nm.FindOrInsertNode(*ni, o.KeptNodes)
+ }
+ return nil
+}
+
+func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options) *NodeInfo {
+ if line.Function == nil {
+ return &NodeInfo{Address: l.Address}
+ }
+ ni := &NodeInfo{
+ Address: l.Address,
+ Lineno: int(line.Line),
+ Name: line.Function.Name,
+ }
+ ni.StartLine = int(line.Function.StartLine)
+ return ni
+}
+
+// Sum adds the flat and cum values of a set of nodes.
+func (ns Nodes) Sum() (flat int64, cum int64) {
+ for _, n := range ns {
+ flat += n.Flat
+ cum += n.Cum
+ }
+ return
+}
+
+func (n *Node) addSample(dw, w int64, flat bool) {
+ // Update sample value
+ if flat {
+ n.FlatDiv += dw
+ n.Flat += w
+ } else {
+ n.CumDiv += dw
+ n.Cum += w
+ }
+}
+
+// String returns a text representation of a graph, for debugging purposes.
+func (g *Graph) String() string {
+ var s []string
+
+ nodeIndex := make(map[*Node]int, len(g.Nodes))
+
+ for i, n := range g.Nodes {
+ nodeIndex[n] = i + 1
+ }
+
+ for i, n := range g.Nodes {
+ name := n.Info.PrintableName()
+ var in, out []int
+
+ for _, from := range n.In {
+ in = append(in, nodeIndex[from.Src])
+ }
+ for _, to := range n.Out {
+ out = append(out, nodeIndex[to.Dest])
+ }
+ s = append(s, fmt.Sprintf("%d: %s[flat=%d cum=%d] %x -> %v ", i+1, name, n.Flat, n.Cum, in, out))
+ }
+ return strings.Join(s, "\n")
+}
+
+// Sort returns a slice of the edges in the map, in a consistent
+// order. The sort order is first based on the edge weight
+// (higher-to-lower) and then by the node names to avoid flakiness.
+func (em EdgeMap) Sort() []*Edge {
+ el := make(edgeList, 0, len(em))
+ for _, w := range em {
+ el = append(el, w)
+ }
+
+ sort.Sort(el)
+ return el
+}
+
+// Sum returns the total weight for a set of nodes.
+func (em EdgeMap) Sum() int64 {
+ var ret int64
+ for _, edge := range em {
+ ret += edge.Weight
+ }
+ return ret
+}
+
+type edgeList []*Edge
+
+func (el edgeList) Len() int {
+ return len(el)
+}
+
+func (el edgeList) Less(i, j int) bool {
+ if el[i].Weight != el[j].Weight {
+ return abs64(el[i].Weight) > abs64(el[j].Weight)
+ }
+
+ from1 := el[i].Src.Info.PrintableName()
+ from2 := el[j].Src.Info.PrintableName()
+ if from1 != from2 {
+ return from1 < from2
+ }
+
+ to1 := el[i].Dest.Info.PrintableName()
+ to2 := el[j].Dest.Info.PrintableName()
+
+ return to1 < to2
+}
+
+func (el edgeList) Swap(i, j int) {
+ el[i], el[j] = el[j], el[i]
+}
+
+func abs64(i int64) int64 {
+ if i < 0 {
+ return -i
+ }
+ return i
+}
diff --git a/src/cmd/compile/internal/pgo/irgraph.go b/src/cmd/compile/internal/pgo/irgraph.go
new file mode 100644
index 0000000..96485e3
--- /dev/null
+++ b/src/cmd/compile/internal/pgo/irgraph.go
@@ -0,0 +1,603 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// A note on line numbers: when working with line numbers, we always use the
+// binary-visible relative line number. i.e., the line number as adjusted by
+// //line directives (ctxt.InnermostPos(ir.Node.Pos()).RelLine()). Use
+// NodeLineOffset to compute line offsets.
+//
+// If you are thinking, "wait, doesn't that just make things more complex than
+// using the real line number?", then you are 100% correct. Unfortunately,
+// pprof profiles generated by the runtime always contain line numbers as
+// adjusted by //line directives (because that is what we put in pclntab). Thus
+// for the best behavior when attempting to match the source with the profile
+// it makes sense to use the same line number space.
+//
+// Some of the effects of this to keep in mind:
+//
+// - For files without //line directives there is no impact, as RelLine() ==
+// Line().
+// - For functions entirely covered by the same //line directive (i.e., a
+// directive before the function definition and no directives within the
+// function), there should also be no impact, as line offsets within the
+// function should be the same as the real line offsets.
+// - Functions containing //line directives may be impacted. As fake line
+// numbers need not be monotonic, we may compute negative line offsets. We
+// should accept these and attempt to use them for best-effort matching, as
+// these offsets should still match if the source is unchanged, and may
+// continue to match with changed source depending on the impact of the
+// changes on fake line numbers.
+// - Functions containing //line directives may also contain duplicate lines,
+// making it ambiguous which call the profile is referencing. This is a
+// similar problem to multiple calls on a single real line, as we don't
+// currently track column numbers.
+//
+// Long term it would be best to extend pprof profiles to include real line
+// numbers. Until then, we have to live with these complexities. Luckily,
+// //line directives that change line numbers in strange ways should be rare,
+// and failing PGO matching on these files is not too big of a loss.
+
+package pgo
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/pgo/internal/graph"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "errors"
+ "fmt"
+ "internal/profile"
+ "os"
+ "sort"
+)
+
+// IRGraph is a call graph with nodes pointing to IRs of functions and edges
+// carrying weights and callsite information.
+//
+// Nodes for indirect calls may have missing IR (IRNode.AST == nil) if the node
+// is not visible from this package (e.g., not in the transitive deps). Keeping
+// these nodes allows determining the hottest edge from a call even if that
+// callee is not available.
+//
+// TODO(prattmic): Consider merging this data structure with Graph. This is
+// effectively a copy of Graph aggregated to line number and pointing to IR.
+type IRGraph struct {
+ // Nodes of the graph. Each node represents a function, keyed by linker
+ // symbol name.
+ IRNodes map[string]*IRNode
+}
+
+// IRNode represents a node (function) in the IRGraph.
+type IRNode struct {
+ // Pointer to the IR of the Function represented by this node.
+ AST *ir.Func
+ // Linker symbol name of the Function represented by this node.
+ // Populated only if AST == nil.
+ LinkerSymbolName string
+
+ // Set of out-edges in the callgraph. The map uniquely identifies each
+ // edge based on the callsite and callee, for fast lookup.
+ OutEdges map[NamedCallEdge]*IREdge
+}
+
+// Name returns the symbol name of this function.
+func (i *IRNode) Name() string {
+ if i.AST != nil {
+ return ir.LinkFuncName(i.AST)
+ }
+ return i.LinkerSymbolName
+}
+
+// IREdge represents a call edge in the IRGraph with source, destination,
+// weight, callsite, and line number information.
+type IREdge struct {
+ // Source and destination of the edge in IRNode.
+ Src, Dst *IRNode
+ Weight int64
+ CallSiteOffset int // Line offset from function start line.
+}
+
+// NamedCallEdge identifies a call edge by linker symbol names and call site
+// offset.
+type NamedCallEdge struct {
+ CallerName string
+ CalleeName string
+ CallSiteOffset int // Line offset from function start line.
+}
+
+// NamedEdgeMap contains all unique call edges in the profile and their
+// edge weight.
+type NamedEdgeMap struct {
+ Weight map[NamedCallEdge]int64
+
+ // ByWeight lists all keys in Weight, sorted by edge weight.
+ ByWeight []NamedCallEdge
+}
+
+// CallSiteInfo captures call-site information and its caller/callee.
+type CallSiteInfo struct {
+ LineOffset int // Line offset from function start line.
+ Caller *ir.Func
+ Callee *ir.Func
+}
+
+// Profile contains the processed PGO profile and weighted call graph used for
+// PGO optimizations.
+type Profile struct {
+ // Aggregated edge weights across the profile. This helps us determine
+ // the percentage threshold for hot/cold partitioning.
+ TotalWeight int64
+
+ // NamedEdgeMap contains all unique call edges in the profile and their
+ // edge weight.
+ NamedEdgeMap NamedEdgeMap
+
+ // WeightedCG represents the IRGraph built from profile, which we will
+ // update as part of inlining.
+ WeightedCG *IRGraph
+}
+
+// New generates a profile-graph from the profile.
+func New(profileFile string) (*Profile, error) {
+ f, err := os.Open(profileFile)
+ if err != nil {
+ return nil, fmt.Errorf("error opening profile: %w", err)
+ }
+ defer f.Close()
+ p, err := profile.Parse(f)
+ if errors.Is(err, profile.ErrNoData) {
+ // Treat a completely empty file the same as a profile with no
+ // samples: nothing to do.
+ return nil, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("error parsing profile: %w", err)
+ }
+
+ if len(p.Sample) == 0 {
+ // We accept empty profiles, but there is nothing to do.
+ return nil, nil
+ }
+
+ valueIndex := -1
+ for i, s := range p.SampleType {
+ // Samples count is the raw data collected, and CPU nanoseconds is just
+ // a scaled version of it, so either one we can find is fine.
+ if (s.Type == "samples" && s.Unit == "count") ||
+ (s.Type == "cpu" && s.Unit == "nanoseconds") {
+ valueIndex = i
+ break
+ }
+ }
+
+ if valueIndex == -1 {
+ return nil, fmt.Errorf(`profile does not contain a sample index with value/type "samples/count" or cpu/nanoseconds"`)
+ }
+
+ g := graph.NewGraph(p, &graph.Options{
+ SampleValue: func(v []int64) int64 { return v[valueIndex] },
+ })
+
+ namedEdgeMap, totalWeight, err := createNamedEdgeMap(g)
+ if err != nil {
+ return nil, err
+ }
+
+ if totalWeight == 0 {
+ return nil, nil // accept but ignore profile with no samples.
+ }
+
+ // Create package-level call graph with weights from profile and IR.
+ wg := createIRGraph(namedEdgeMap)
+
+ return &Profile{
+ TotalWeight: totalWeight,
+ NamedEdgeMap: namedEdgeMap,
+ WeightedCG: wg,
+ }, nil
+}
+
+// createNamedEdgeMap builds a map of callsite-callee edge weights from the
+// profile-graph.
+//
+// Caller should ignore the profile if totalWeight == 0.
+func createNamedEdgeMap(g *graph.Graph) (edgeMap NamedEdgeMap, totalWeight int64, err error) {
+ seenStartLine := false
+
+ // Process graph and build various node and edge maps which will
+ // be consumed by AST walk.
+ weight := make(map[NamedCallEdge]int64)
+ for _, n := range g.Nodes {
+ seenStartLine = seenStartLine || n.Info.StartLine != 0
+
+ canonicalName := n.Info.Name
+ // Create the key to the nodeMapKey.
+ namedEdge := NamedCallEdge{
+ CallerName: canonicalName,
+ CallSiteOffset: n.Info.Lineno - n.Info.StartLine,
+ }
+
+ for _, e := range n.Out {
+ totalWeight += e.WeightValue()
+ namedEdge.CalleeName = e.Dest.Info.Name
+ // Create new entry or increment existing entry.
+ weight[namedEdge] += e.WeightValue()
+ }
+ }
+
+ if totalWeight == 0 {
+ return NamedEdgeMap{}, 0, nil // accept but ignore profile with no samples.
+ }
+
+ if !seenStartLine {
+ // TODO(prattmic): If Function.start_line is missing we could
+ // fall back to using absolute line numbers, which is better
+ // than nothing.
+ return NamedEdgeMap{}, 0, fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)")
+ }
+
+ byWeight := make([]NamedCallEdge, 0, len(weight))
+ for namedEdge := range weight {
+ byWeight = append(byWeight, namedEdge)
+ }
+ sort.Slice(byWeight, func(i, j int) bool {
+ ei, ej := byWeight[i], byWeight[j]
+ if wi, wj := weight[ei], weight[ej]; wi != wj {
+ return wi > wj // want larger weight first
+ }
+ // same weight, order by name/line number
+ if ei.CallerName != ej.CallerName {
+ return ei.CallerName < ej.CallerName
+ }
+ if ei.CalleeName != ej.CalleeName {
+ return ei.CalleeName < ej.CalleeName
+ }
+ return ei.CallSiteOffset < ej.CallSiteOffset
+ })
+
+ edgeMap = NamedEdgeMap{
+ Weight: weight,
+ ByWeight: byWeight,
+ }
+
+ return edgeMap, totalWeight, nil
+}
+
+// initializeIRGraph builds the IRGraph by visiting all the ir.Func in decl list
+// of a package.
+func createIRGraph(namedEdgeMap NamedEdgeMap) *IRGraph {
+ g := &IRGraph{
+ IRNodes: make(map[string]*IRNode),
+ }
+
+ // Bottomup walk over the function to create IRGraph.
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+ for _, fn := range list {
+ visitIR(fn, namedEdgeMap, g)
+ }
+ })
+
+ // Add additional edges for indirect calls. This must be done second so
+ // that IRNodes is fully populated (see the dummy node TODO in
+ // addIndirectEdges).
+ //
+ // TODO(prattmic): visitIR above populates the graph via direct calls
+ // discovered via the IR. addIndirectEdges populates the graph via
+ // calls discovered via the profile. This combination of opposite
+ // approaches is a bit awkward, particularly because direct calls are
+ // discoverable via the profile as well. Unify these into a single
+ // approach.
+ addIndirectEdges(g, namedEdgeMap)
+
+ return g
+}
+
+// visitIR traverses the body of each ir.Func adds edges to g from ir.Func to
+// any called function in the body.
+func visitIR(fn *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) {
+ name := ir.LinkFuncName(fn)
+ node, ok := g.IRNodes[name]
+ if !ok {
+ node = &IRNode{
+ AST: fn,
+ }
+ g.IRNodes[name] = node
+ }
+
+ // Recursively walk over the body of the function to create IRGraph edges.
+ createIRGraphEdge(fn, node, name, namedEdgeMap, g)
+}
+
+// createIRGraphEdge traverses the nodes in the body of ir.Func and adds edges
+// between the callernode which points to the ir.Func and the nodes in the
+// body.
+func createIRGraphEdge(fn *ir.Func, callernode *IRNode, name string, namedEdgeMap NamedEdgeMap, g *IRGraph) {
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ call := n.(*ir.CallExpr)
+ // Find the callee function from the call site and add the edge.
+ callee := DirectCallee(call.Fun)
+ if callee != nil {
+ addIREdge(callernode, name, n, callee, namedEdgeMap, g)
+ }
+ case ir.OCALLMETH:
+ call := n.(*ir.CallExpr)
+ // Find the callee method from the call site and add the edge.
+ callee := ir.MethodExprName(call.Fun).Func
+ addIREdge(callernode, name, n, callee, namedEdgeMap, g)
+ }
+ })
+}
+
+// NodeLineOffset returns the line offset of n in fn.
+func NodeLineOffset(n ir.Node, fn *ir.Func) int {
+ // See "A note on line numbers" at the top of the file.
+ line := int(base.Ctxt.InnermostPos(n.Pos()).RelLine())
+ startLine := int(base.Ctxt.InnermostPos(fn.Pos()).RelLine())
+ return line - startLine
+}
+
+// addIREdge adds an edge between caller and new node that points to `callee`
+// based on the profile-graph and NodeMap.
+func addIREdge(callerNode *IRNode, callerName string, call ir.Node, callee *ir.Func, namedEdgeMap NamedEdgeMap, g *IRGraph) {
+ calleeName := ir.LinkFuncName(callee)
+ calleeNode, ok := g.IRNodes[calleeName]
+ if !ok {
+ calleeNode = &IRNode{
+ AST: callee,
+ }
+ g.IRNodes[calleeName] = calleeNode
+ }
+
+ namedEdge := NamedCallEdge{
+ CallerName: callerName,
+ CalleeName: calleeName,
+ CallSiteOffset: NodeLineOffset(call, callerNode.AST),
+ }
+
+ // Add edge in the IRGraph from caller to callee.
+ edge := &IREdge{
+ Src: callerNode,
+ Dst: calleeNode,
+ Weight: namedEdgeMap.Weight[namedEdge],
+ CallSiteOffset: namedEdge.CallSiteOffset,
+ }
+
+ if callerNode.OutEdges == nil {
+ callerNode.OutEdges = make(map[NamedCallEdge]*IREdge)
+ }
+ callerNode.OutEdges[namedEdge] = edge
+}
+
+// LookupFunc looks up a function or method in export data. It is expected to
+// be overridden by package noder, to break a dependency cycle.
+var LookupFunc = func(fullName string) (*ir.Func, error) {
+ base.Fatalf("pgo.LookupMethodFunc not overridden")
+ panic("unreachable")
+}
+
+// addIndirectEdges adds indirect call edges found in the profile to the graph,
+// to be used for devirtualization.
+//
+// N.B. despite the name, addIndirectEdges will add any edges discovered via
+// the profile. We don't know for sure that they are indirect, but assume they
+// are since direct calls would already be added. (e.g., direct calls that have
+// been deleted from source since the profile was taken would be added here).
+//
+// TODO(prattmic): Devirtualization runs before inlining, so we can't devirtualize
+// calls inside inlined call bodies. If we did add that, we'd need edges from
+// inlined bodies as well.
+func addIndirectEdges(g *IRGraph, namedEdgeMap NamedEdgeMap) {
+ // g.IRNodes is populated with the set of functions in the local
+ // package build by VisitIR. We want to filter for local functions
+ // below, but we also add unknown callees to IRNodes as we go. So make
+ // an initial copy of IRNodes to recall just the local functions.
+ localNodes := make(map[string]*IRNode, len(g.IRNodes))
+ for k, v := range g.IRNodes {
+ localNodes[k] = v
+ }
+
+ // N.B. We must consider edges in a stable order because export data
+ // lookup order (LookupMethodFunc, below) can impact the export data of
+ // this package, which must be stable across different invocations for
+ // reproducibility.
+ //
+ // The weight ordering of ByWeight is irrelevant, it just happens to be
+ // an ordered list of edges that is already available.
+ for _, key := range namedEdgeMap.ByWeight {
+ weight := namedEdgeMap.Weight[key]
+ // All callers in the local package build were added to IRNodes
+ // in VisitIR. If a caller isn't in the local package build we
+ // can skip adding edges, since we won't be devirtualizing in
+ // them anyway. This keeps the graph smaller.
+ callerNode, ok := localNodes[key.CallerName]
+ if !ok {
+ continue
+ }
+
+ // Already handled this edge?
+ if _, ok := callerNode.OutEdges[key]; ok {
+ continue
+ }
+
+ calleeNode, ok := g.IRNodes[key.CalleeName]
+ if !ok {
+ // IR is missing for this callee. VisitIR populates
+ // IRNodes with all functions discovered via local
+ // package function declarations and calls. This
+ // function may still be available from export data of
+ // a transitive dependency.
+ //
+ // TODO(prattmic): Parameterized types/functions are
+ // not supported.
+ //
+ // TODO(prattmic): This eager lookup during graph load
+ // is simple, but wasteful. We are likely to load many
+ // functions that we never need. We could delay load
+ // until we actually need the method in
+ // devirtualization. Instantiation of generic functions
+ // will likely need to be done at the devirtualization
+ // site, if at all.
+ fn, err := LookupFunc(key.CalleeName)
+ if err == nil {
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("addIndirectEdges: %s found in export data\n", key.CalleeName)
+ }
+ calleeNode = &IRNode{AST: fn}
+
+ // N.B. we could call createIRGraphEdge to add
+ // direct calls in this newly-imported
+ // function's body to the graph. Similarly, we
+ // could add to this function's queue to add
+ // indirect calls. However, those would be
+ // useless given the visit order of inlining,
+ // and the ordering of PGO devirtualization and
+ // inlining. This function can only be used as
+ // an inlined body. We will never do PGO
+ // devirtualization inside an inlined call. Nor
+ // will we perform inlining inside an inlined
+ // call.
+ } else {
+ // Still not found. Most likely this is because
+ // the callee isn't in the transitive deps of
+ // this package.
+ //
+ // Record this call anyway. If this is the hottest,
+ // then we want to skip devirtualization rather than
+ // devirtualizing to the second most common callee.
+ if base.Debug.PGODebug >= 3 {
+ fmt.Printf("addIndirectEdges: %s not found in export data: %v\n", key.CalleeName, err)
+ }
+ calleeNode = &IRNode{LinkerSymbolName: key.CalleeName}
+ }
+
+ // Add dummy node back to IRNodes. We don't need this
+ // directly, but PrintWeightedCallGraphDOT uses these
+ // to print nodes.
+ g.IRNodes[key.CalleeName] = calleeNode
+ }
+ edge := &IREdge{
+ Src: callerNode,
+ Dst: calleeNode,
+ Weight: weight,
+ CallSiteOffset: key.CallSiteOffset,
+ }
+
+ if callerNode.OutEdges == nil {
+ callerNode.OutEdges = make(map[NamedCallEdge]*IREdge)
+ }
+ callerNode.OutEdges[key] = edge
+ }
+}
+
+// WeightInPercentage converts profile weights to a percentage.
+func WeightInPercentage(value int64, total int64) float64 {
+ return (float64(value) / float64(total)) * 100
+}
+
+// PrintWeightedCallGraphDOT prints IRGraph in DOT format.
+func (p *Profile) PrintWeightedCallGraphDOT(edgeThreshold float64) {
+ fmt.Printf("\ndigraph G {\n")
+ fmt.Printf("forcelabels=true;\n")
+
+ // List of functions in this package.
+ funcs := make(map[string]struct{})
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+ for _, f := range list {
+ name := ir.LinkFuncName(f)
+ funcs[name] = struct{}{}
+ }
+ })
+
+ // Determine nodes of DOT.
+ //
+ // Note that ir.Func may be nil for functions not visible from this
+ // package.
+ nodes := make(map[string]*ir.Func)
+ for name := range funcs {
+ if n, ok := p.WeightedCG.IRNodes[name]; ok {
+ for _, e := range n.OutEdges {
+ if _, ok := nodes[e.Src.Name()]; !ok {
+ nodes[e.Src.Name()] = e.Src.AST
+ }
+ if _, ok := nodes[e.Dst.Name()]; !ok {
+ nodes[e.Dst.Name()] = e.Dst.AST
+ }
+ }
+ if _, ok := nodes[n.Name()]; !ok {
+ nodes[n.Name()] = n.AST
+ }
+ }
+ }
+
+ // Print nodes.
+ for name, ast := range nodes {
+ if _, ok := p.WeightedCG.IRNodes[name]; ok {
+ style := "solid"
+ if ast == nil {
+ style = "dashed"
+ }
+
+ if ast != nil && ast.Inl != nil {
+ fmt.Printf("\"%v\" [color=black, style=%s, label=\"%v,inl_cost=%d\"];\n", name, style, name, ast.Inl.Cost)
+ } else {
+ fmt.Printf("\"%v\" [color=black, style=%s, label=\"%v\"];\n", name, style, name)
+ }
+ }
+ }
+ // Print edges.
+ ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) {
+ for _, f := range list {
+ name := ir.LinkFuncName(f)
+ if n, ok := p.WeightedCG.IRNodes[name]; ok {
+ for _, e := range n.OutEdges {
+ style := "solid"
+ if e.Dst.AST == nil {
+ style = "dashed"
+ }
+ color := "black"
+ edgepercent := WeightInPercentage(e.Weight, p.TotalWeight)
+ if edgepercent > edgeThreshold {
+ color = "red"
+ }
+
+ fmt.Printf("edge [color=%s, style=%s];\n", color, style)
+ fmt.Printf("\"%v\" -> \"%v\" [label=\"%.2f\"];\n", n.Name(), e.Dst.Name(), edgepercent)
+ }
+ }
+ }
+ })
+ fmt.Printf("}\n")
+}
+
+// DirectCallee takes a function-typed expression and returns the underlying
+// function that it refers to if statically known. Otherwise, it returns nil.
+//
+// Equivalent to inline.inlCallee without calling CanInline on closures.
+func DirectCallee(fn ir.Node) *ir.Func {
+ fn = ir.StaticValue(fn)
+ switch fn.Op() {
+ case ir.OMETHEXPR:
+ fn := fn.(*ir.SelectorExpr)
+ n := ir.MethodExprName(fn)
+ // Check that receiver type matches fn.X.
+ // TODO(mdempsky): Handle implicit dereference
+ // of pointer receiver argument?
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
+ return nil
+ }
+ return n.Func
+ case ir.ONAME:
+ fn := fn.(*ir.Name)
+ if fn.Class == ir.PFUNC {
+ return fn.Func
+ }
+ case ir.OCLOSURE:
+ fn := fn.(*ir.ClosureExpr)
+ c := fn.Func
+ return c
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
new file mode 100644
index 0000000..9278890
--- /dev/null
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -0,0 +1,148 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkginit
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/noder"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// MakeTask makes an initialization record for the package, if necessary.
+// See runtime/proc.go:initTask for its layout.
+// The 3 tasks for initialization are:
+// 1. Initialize all of the packages the current package depends on.
+// 2. Initialize all the variables that have initializers.
+// 3. Run any init functions.
+func MakeTask() {
+ var deps []*obj.LSym // initTask records for packages the current package depends on
+ var fns []*obj.LSym // functions to call for package initialization
+
+ // Find imported packages with init tasks.
+ for _, pkg := range typecheck.Target.Imports {
+ n, ok := pkg.Lookup(".inittask").Def.(*ir.Name)
+ if !ok {
+ continue
+ }
+ if n.Op() != ir.ONAME || n.Class != ir.PEXTERN {
+ base.Fatalf("bad inittask: %v", n)
+ }
+ deps = append(deps, n.Linksym())
+ }
+ if base.Flag.ASan {
+ // Make an initialization function to call runtime.asanregisterglobals to register an
+ // array of instrumented global variables when -asan is enabled. An instrumented global
+ // variable is described by a structure.
+ // See the _asan_global structure declared in src/runtime/asan/asan.go.
+ //
+ // func init {
+ // var globals []_asan_global {...}
+ // asanregisterglobals(&globals[0], len(globals))
+ // }
+ for _, n := range typecheck.Target.Externs {
+ if canInstrumentGlobal(n) {
+ name := n.Sym().Name
+ InstrumentGlobalsMap[name] = n
+ InstrumentGlobalsSlice = append(InstrumentGlobalsSlice, n)
+ }
+ }
+ ni := len(InstrumentGlobalsMap)
+ if ni != 0 {
+ // Make an init._ function.
+ pos := base.AutogeneratedPos
+ base.Pos = pos
+
+ sym := noder.Renameinit()
+ fnInit := ir.NewFunc(pos, pos, sym, types.NewSignature(nil, nil, nil))
+ typecheck.DeclFunc(fnInit)
+
+ // Get an array of instrumented global variables.
+ globals := instrumentGlobals(fnInit)
+
+ // Call runtime.asanregisterglobals function to poison redzones.
+ // runtime.asanregisterglobals(unsafe.Pointer(&globals[0]), ni)
+ asancall := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("asanregisterglobals"), nil)
+ asancall.Args.Append(typecheck.ConvNop(typecheck.NodAddr(
+ ir.NewIndexExpr(base.Pos, globals, ir.NewInt(base.Pos, 0))), types.Types[types.TUNSAFEPTR]))
+ asancall.Args.Append(typecheck.DefaultLit(ir.NewInt(base.Pos, int64(ni)), types.Types[types.TUINTPTR]))
+
+ fnInit.Body.Append(asancall)
+ typecheck.FinishFuncBody()
+ ir.CurFunc = fnInit
+ typecheck.Stmts(fnInit.Body)
+ ir.CurFunc = nil
+
+ typecheck.Target.Inits = append(typecheck.Target.Inits, fnInit)
+ }
+ }
+
+ // Record user init functions.
+ for _, fn := range typecheck.Target.Inits {
+ if fn.Sym().Name == "init" {
+ // Synthetic init function for initialization of package-scope
+ // variables. We can use staticinit to optimize away static
+ // assignments.
+ s := staticinit.Schedule{
+ Plans: make(map[ir.Node]*staticinit.Plan),
+ Temps: make(map[ir.Node]*ir.Name),
+ }
+ for _, n := range fn.Body {
+ s.StaticInit(n)
+ }
+ fn.Body = s.Out
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ if len(fn.Body) == 0 {
+ fn.Body = []ir.Node{ir.NewBlockStmt(src.NoXPos, nil)}
+ }
+ }
+
+ // Skip init functions with empty bodies.
+ if len(fn.Body) == 1 {
+ if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 {
+ continue
+ }
+ }
+ fns = append(fns, fn.Nname.Linksym())
+ }
+
+ if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Path != "main" && types.LocalPkg.Path != "runtime" {
+ return // nothing to initialize
+ }
+
+ // Make an .inittask structure.
+ sym := typecheck.Lookup(".inittask")
+ task := ir.NewNameAt(base.Pos, sym, types.Types[types.TUINT8]) // fake type
+ task.Class = ir.PEXTERN
+ sym.Def = task
+ lsym := task.Linksym()
+ ot := 0
+ ot = objw.Uint32(lsym, ot, 0) // state: not initialized yet
+ ot = objw.Uint32(lsym, ot, uint32(len(fns)))
+ for _, f := range fns {
+ ot = objw.SymPtr(lsym, ot, f, 0)
+ }
+
+ // Add relocations which tell the linker all of the packages
+ // that this package depends on (and thus, all of the packages
+ // that need to be initialized before this one).
+ for _, d := range deps {
+ r := obj.Addrel(lsym)
+ r.Type = objabi.R_INITORDER
+ r.Sym = d
+ }
+ // An initTask has pointers, but none into the Go heap.
+ // It's not quite read only, the state field must be modifiable.
+ objw.Global(lsym, int32(ot), obj.NOPTR)
+}
diff --git a/src/cmd/compile/internal/pkginit/initAsanGlobals.go b/src/cmd/compile/internal/pkginit/initAsanGlobals.go
new file mode 100644
index 0000000..42db0ea
--- /dev/null
+++ b/src/cmd/compile/internal/pkginit/initAsanGlobals.go
@@ -0,0 +1,236 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkginit
+
+import (
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// instrumentGlobals declares a global array of _asan_global structures and initializes it.
+func instrumentGlobals(fn *ir.Func) *ir.Name {
+ asanGlobalStruct, asanLocationStruct, defStringstruct := createtypes()
+ lname := typecheck.Lookup
+ tconv := typecheck.ConvNop
+ // Make a global array of asanGlobalStruct type.
+ // var asanglobals []asanGlobalStruct
+ arraytype := types.NewArray(asanGlobalStruct, int64(len(InstrumentGlobalsMap)))
+ symG := lname(".asanglobals")
+ globals := ir.NewNameAt(base.Pos, symG, arraytype)
+ globals.Class = ir.PEXTERN
+ symG.Def = globals
+ typecheck.Target.Externs = append(typecheck.Target.Externs, globals)
+ // Make a global array of asanLocationStruct type.
+ // var asanL []asanLocationStruct
+ arraytype = types.NewArray(asanLocationStruct, int64(len(InstrumentGlobalsMap)))
+ symL := lname(".asanL")
+ asanlocation := ir.NewNameAt(base.Pos, symL, arraytype)
+ asanlocation.Class = ir.PEXTERN
+ symL.Def = asanlocation
+ typecheck.Target.Externs = append(typecheck.Target.Externs, asanlocation)
+ // Make three global string variables to pass the global name and module name
+ // and the name of the source file that defines it.
+ // var asanName string
+ // var asanModulename string
+ // var asanFilename string
+ symL = lname(".asanName")
+ asanName := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
+ asanName.Class = ir.PEXTERN
+ symL.Def = asanName
+ typecheck.Target.Externs = append(typecheck.Target.Externs, asanName)
+
+ symL = lname(".asanModulename")
+ asanModulename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
+ asanModulename.Class = ir.PEXTERN
+ symL.Def = asanModulename
+ typecheck.Target.Externs = append(typecheck.Target.Externs, asanModulename)
+
+ symL = lname(".asanFilename")
+ asanFilename := ir.NewNameAt(base.Pos, symL, types.Types[types.TSTRING])
+ asanFilename.Class = ir.PEXTERN
+ symL.Def = asanFilename
+ typecheck.Target.Externs = append(typecheck.Target.Externs, asanFilename)
+
+ var init ir.Nodes
+ var c ir.Node
+ // globals[i].odrIndicator = 0 is the default, no need to set it explicitly here.
+ for i, n := range InstrumentGlobalsSlice {
+ setField := func(f string, val ir.Node, i int) {
+ r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT,
+ ir.NewIndexExpr(base.Pos, globals, ir.NewInt(base.Pos, int64(i))), lname(f)), val)
+ init.Append(typecheck.Stmt(r))
+ }
+ // globals[i].beg = uintptr(unsafe.Pointer(&n))
+ c = tconv(typecheck.NodAddr(n), types.Types[types.TUNSAFEPTR])
+ c = tconv(c, types.Types[types.TUINTPTR])
+ setField("beg", c, i)
+ // Assign globals[i].size.
+ g := n.(*ir.Name)
+ size := g.Type().Size()
+ c = typecheck.DefaultLit(ir.NewInt(base.Pos, size), types.Types[types.TUINTPTR])
+ setField("size", c, i)
+ // Assign globals[i].sizeWithRedzone.
+ rzSize := GetRedzoneSizeForGlobal(size)
+ sizeWithRz := rzSize + size
+ c = typecheck.DefaultLit(ir.NewInt(base.Pos, sizeWithRz), types.Types[types.TUINTPTR])
+ setField("sizeWithRedzone", c, i)
+ // The C string type is terminated by a null character "\0", Go should use three-digit
+ // octal "\000" or two-digit hexadecimal "\x00" to create null terminated string.
+ // asanName = symbol's linkname + "\000"
+ // globals[i].name = (*defString)(unsafe.Pointer(&asanName)).data
+ name := g.Linksym().Name
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, asanName, ir.NewString(base.Pos, name+"\000"))))
+ c = tconv(typecheck.NodAddr(asanName), types.Types[types.TUNSAFEPTR])
+ c = tconv(c, types.NewPtr(defStringstruct))
+ c = ir.NewSelectorExpr(base.Pos, ir.ODOT, c, lname("data"))
+ setField("name", c, i)
+
+ // Set the name of package being compiled as a unique identifier of a module.
+ // asanModulename = pkgName + "\000"
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, asanModulename, ir.NewString(base.Pos, types.LocalPkg.Name+"\000"))))
+ c = tconv(typecheck.NodAddr(asanModulename), types.Types[types.TUNSAFEPTR])
+ c = tconv(c, types.NewPtr(defStringstruct))
+ c = ir.NewSelectorExpr(base.Pos, ir.ODOT, c, lname("data"))
+ setField("moduleName", c, i)
+ // Assign asanL[i].filename, asanL[i].line, asanL[i].column
+ // and assign globals[i].location = uintptr(unsafe.Pointer(&asanL[i]))
+ asanLi := ir.NewIndexExpr(base.Pos, asanlocation, ir.NewInt(base.Pos, int64(i)))
+ filename := ir.NewString(base.Pos, base.Ctxt.PosTable.Pos(n.Pos()).Filename()+"\000")
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, asanFilename, filename)))
+ c = tconv(typecheck.NodAddr(asanFilename), types.Types[types.TUNSAFEPTR])
+ c = tconv(c, types.NewPtr(defStringstruct))
+ c = ir.NewSelectorExpr(base.Pos, ir.ODOT, c, lname("data"))
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, asanLi, lname("filename")), c)))
+ line := ir.NewInt(base.Pos, int64(n.Pos().Line()))
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, asanLi, lname("line")), line)))
+ col := ir.NewInt(base.Pos, int64(n.Pos().Col()))
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, asanLi, lname("column")), col)))
+ c = tconv(typecheck.NodAddr(asanLi), types.Types[types.TUNSAFEPTR])
+ c = tconv(c, types.Types[types.TUINTPTR])
+ setField("sourceLocation", c, i)
+ }
+ fn.Body.Append(init...)
+ return globals
+}
+
+// createtypes creates the asanGlobal, asanLocation and defString struct type.
+// Go compiler does not refer to the C types, we represent the struct field
+// by a uintptr, then use type conversion to make copies of the data.
+// E.g., (*defString)(asanGlobal.name).data to C string.
+//
+// Keep in sync with src/runtime/asan/asan.go.
+// type asanGlobal struct {
+// beg uintptr
+// size uintptr
+// size_with_redzone uintptr
+// name uintptr
+// moduleName uintptr
+// hasDynamicInit uintptr
+// sourceLocation uintptr
+// odrIndicator uintptr
+// }
+//
+// type asanLocation struct {
+// filename uintptr
+// line int32
+// column int32
+// }
+//
+// defString is synthesized struct type meant to capture the underlying
+// implementations of string.
+// type defString struct {
+// data uintptr
+// len uintptr
+// }
+
+func createtypes() (*types.Type, *types.Type, *types.Type) {
+ up := types.Types[types.TUINTPTR]
+ i32 := types.Types[types.TINT32]
+ fname := typecheck.Lookup
+ nxp := src.NoXPos
+ nfield := types.NewField
+ asanGlobal := types.NewStruct([]*types.Field{
+ nfield(nxp, fname("beg"), up),
+ nfield(nxp, fname("size"), up),
+ nfield(nxp, fname("sizeWithRedzone"), up),
+ nfield(nxp, fname("name"), up),
+ nfield(nxp, fname("moduleName"), up),
+ nfield(nxp, fname("hasDynamicInit"), up),
+ nfield(nxp, fname("sourceLocation"), up),
+ nfield(nxp, fname("odrIndicator"), up),
+ })
+ types.CalcSize(asanGlobal)
+
+ asanLocation := types.NewStruct([]*types.Field{
+ nfield(nxp, fname("filename"), up),
+ nfield(nxp, fname("line"), i32),
+ nfield(nxp, fname("column"), i32),
+ })
+ types.CalcSize(asanLocation)
+
+ defString := types.NewStruct([]*types.Field{
+ types.NewField(nxp, fname("data"), up),
+ types.NewField(nxp, fname("len"), up),
+ })
+ types.CalcSize(defString)
+
+ return asanGlobal, asanLocation, defString
+}
+
+// Calculate redzone for globals.
+func GetRedzoneSizeForGlobal(size int64) int64 {
+ maxRZ := int64(1 << 18)
+ minRZ := int64(32)
+ redZone := (size / minRZ / 4) * minRZ
+ switch {
+ case redZone > maxRZ:
+ redZone = maxRZ
+ case redZone < minRZ:
+ redZone = minRZ
+ }
+ // Round up to multiple of minRZ.
+ if size%minRZ != 0 {
+ redZone += minRZ - (size % minRZ)
+ }
+ return redZone
+}
+
+// InstrumentGlobalsMap contains only package-local (and unlinknamed from somewhere else)
+// globals.
+// And the key is the object name. For example, in package p, a global foo would be in this
+// map as "foo".
+// Consider range over maps is nondeterministic, make a slice to hold all the values in the
+// InstrumentGlobalsMap and iterate over the InstrumentGlobalsSlice.
+var InstrumentGlobalsMap = make(map[string]ir.Node)
+var InstrumentGlobalsSlice = make([]ir.Node, 0, 0)
+
+func canInstrumentGlobal(g ir.Node) bool {
+ if g.Op() != ir.ONAME {
+ return false
+ }
+ n := g.(*ir.Name)
+ if n.Class == ir.PFUNC {
+ return false
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+ return false
+ }
+ // Do not instrument any _cgo_ related global variables, because they are declared in C code.
+ if strings.Contains(n.Sym().Name, "cgo") {
+ return false
+ }
+
+ // Do not instrument globals that are linknamed, because their home package will do the work.
+ if n.Sym().Linkname != "" {
+ return false
+ }
+
+ return true
+}
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
new file mode 100644
index 0000000..20fd8ce
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/ppc64"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &ppc64.Linkppc64
+ if buildcfg.GOARCH == "ppc64le" {
+ arch.LinkArch = &ppc64.Linkppc64le
+ }
+ arch.REGSP = ppc64.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
new file mode 100644
index 0000000..4c935cf
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -0,0 +1,54 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.Arch.FixedFrameSize+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
+ } else {
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.Arch.FixedFrameSize+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p.Reg = ppc64.REGRT1
+ p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
+ p1 := p
+ p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // Generate the preferred hardware nop: ori 0,0,0
+ p := pp.Prog(ppc64.AOR)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: 0}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: ppc64.REG_R0}
+ return p
+}
diff --git a/src/cmd/compile/internal/ppc64/opt.go b/src/cmd/compile/internal/ppc64/opt.go
new file mode 100644
index 0000000..4f81aa9
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/opt.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants. These bits let us map between variants.
+const (
+ V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+ V_V = 1 << 1 // xV (affect SO and OV flags)
+)
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
new file mode 100644
index 0000000..d20a31e
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -0,0 +1,2078 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "internal/buildcfg"
+ "math"
+ "strings"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ // flive := b.FlagsLiveAtEnd
+ // if b.Control != nil && b.Control.Type.IsFlags() {
+ // flive = true
+ // }
+ // for i := len(b.Values) - 1; i >= 0; i-- {
+ // v := b.Values[i]
+ // if flive && (v.Op == v.Op == ssa.OpPPC64MOVDconst) {
+ // // The "mark" is any non-nil Aux value.
+ // v.Aux = v
+ // }
+ // if v.Type.IsFlags() {
+ // flive = false
+ // }
+ // for _, a := range v.Args {
+ // if a.Type.IsFlags() {
+ // flive = true
+ // }
+ // }
+ // }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return ppc64.AMOVB
+ } else {
+ return ppc64.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return ppc64.AMOVH
+ } else {
+ return ppc64.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return ppc64.AMOVW
+ } else {
+ return ppc64.AMOVWZ
+ }
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return ppc64.AMOVB
+ case 2:
+ return ppc64.AMOVH
+ case 4:
+ return ppc64.AMOVW
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ rt := obj.TYPE_REG
+ op := ppc64.AMOVD
+
+ if t.IsFloat() {
+ op = ppc64.AFMOVD
+ }
+ p := s.Prog(op)
+ p.From.Type = rt
+ p.From.Reg = x
+ p.To.Type = rt
+ p.To.Reg = y
+ }
+
+ case ssa.OpPPC64LoweredAtomicAnd8,
+ ssa.OpPPC64LoweredAtomicAnd32,
+ ssa.OpPPC64LoweredAtomicOr8,
+ ssa.OpPPC64LoweredAtomicOr32:
+ // LWSYNC
+ // LBAR/LWAR (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0)
+ // BNE -3(PC)
+ ld := ppc64.ALBAR
+ st := ppc64.ASTBCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LBAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ // AND/OR reg1,out
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = ppc64.REGTMP
+ // STBCCC or STWCCC
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = ppc64.REGTMP
+ // BNE retry
+ p3 := s.Prog(ppc64.ABNE)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ case ssa.OpPPC64LoweredAtomicAdd32,
+ ssa.OpPPC64LoweredAtomicAdd64:
+ // LWSYNC
+ // LDAR/LWAR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STDCCC/STWCCC Rout, (Rarg0)
+ // BNE -3(PC)
+ // MOVW Rout,Rout (if Add32)
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // ADD reg1,out
+ p1 := s.Prog(ppc64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Reg = out
+ p1.To.Type = obj.TYPE_REG
+ // STDCCC or STWCCC
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = out
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ // BNE retry
+ p4 := s.Prog(ppc64.ABNE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+
+ // Ensure a 32 bit result
+ if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
+ p5 := s.Prog(ppc64.AMOVWZ)
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = out
+ }
+
+ case ssa.OpPPC64LoweredAtomicExchange32,
+ ssa.OpPPC64LoweredAtomicExchange64:
+ // LWSYNC
+ // LDAR/LWAR (Rarg0), Rout
+ // STDCCC/STWCCC Rout, (Rarg0)
+ // BNE -2(PC)
+ // ISYNC
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicExchange32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // STDCCC or STWCCC
+ p1 := s.Prog(st)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ // BNE retry
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ p2.To.SetTarget(p)
+ // ISYNC
+ pisync := s.Prog(ppc64.AISYNC)
+ pisync.To.Type = obj.TYPE_NONE
+
+ case ssa.OpPPC64LoweredAtomicLoad8,
+ ssa.OpPPC64LoweredAtomicLoad32,
+ ssa.OpPPC64LoweredAtomicLoad64,
+ ssa.OpPPC64LoweredAtomicLoadPtr:
+ // SYNC
+ // MOVB/MOVD/MOVW (Rarg0), Rout
+ // CMP Rout,Rout
+ // BNE 1(PC)
+ // ISYNC
+ ld := ppc64.AMOVD
+ cmp := ppc64.ACMP
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicLoad8:
+ ld = ppc64.AMOVBZ
+ case ssa.OpPPC64LoweredAtomicLoad32:
+ ld = ppc64.AMOVWZ
+ cmp = ppc64.ACMPW
+ }
+ arg0 := v.Args[0].Reg()
+ out := v.Reg0()
+ // SYNC when AuxInt == 1; otherwise, load-acquire
+ if v.AuxInt == 1 {
+ psync := s.Prog(ppc64.ASYNC)
+ psync.To.Type = obj.TYPE_NONE
+ }
+ // Load
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arg0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // CMP
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = out
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ // BNE
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ // ISYNC
+ pisync := s.Prog(ppc64.AISYNC)
+ pisync.To.Type = obj.TYPE_NONE
+ p2.To.SetTarget(pisync)
+
+ case ssa.OpPPC64LoweredAtomicStore8,
+ ssa.OpPPC64LoweredAtomicStore32,
+ ssa.OpPPC64LoweredAtomicStore64:
+ // SYNC or LWSYNC
+ // MOVB/MOVW/MOVD arg1,(arg0)
+ st := ppc64.AMOVD
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicStore8:
+ st = ppc64.AMOVB
+ case ssa.OpPPC64LoweredAtomicStore32:
+ st = ppc64.AMOVW
+ }
+ arg0 := v.Args[0].Reg()
+ arg1 := v.Args[1].Reg()
+ // If AuxInt == 0, LWSYNC (Store-Release), else SYNC
+ // SYNC
+ syncOp := ppc64.ASYNC
+ if v.AuxInt == 0 {
+ syncOp = ppc64.ALWSYNC
+ }
+ psync := s.Prog(syncOp)
+ psync.To.Type = obj.TYPE_NONE
+ // Store
+ p := s.Prog(st)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arg0
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arg1
+
+ case ssa.OpPPC64LoweredAtomicCas64,
+ ssa.OpPPC64LoweredAtomicCas32:
+ // MOVD $0, Rout
+ // LWSYNC
+ // loop:
+ // LDAR (Rarg0), MutexHint, Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE end
+ // STDCCC Rarg2, (Rarg0)
+ // BNE loop
+ // MOVD $1, Rout
+ // end:
+ // LWSYNC // Only for sequential consistency; not required in CasRel.
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ cmp := ppc64.ACMP
+ if v.Op == ssa.OpPPC64LoweredAtomicCas32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ cmp = ppc64.ACMPW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+ // Initialize return value to false
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync1 := s.Prog(ppc64.ALWSYNC)
+ plwsync1.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p0 := s.Prog(ld)
+ p0.From.Type = obj.TYPE_MEM
+ p0.From.Reg = r0
+ p0.To.Type = obj.TYPE_REG
+ p0.To.Reg = ppc64.REGTMP
+ // If it is a Compare-and-Swap-Release operation, set the EH field with
+ // the release hint.
+ if v.AuxInt == 0 {
+ p0.AddRestSourceConst(0)
+ }
+ // CMP reg1,reg2
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Reg = ppc64.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ // BNE done with return value = false
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ // STDCCC or STWCCC
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ // BNE retry
+ p4 := s.Prog(ppc64.ABNE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p0)
+ // return value true
+ p5 := s.Prog(ppc64.AMOVD)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
+ // If the operation is a CAS-Release, then synchronization is not necessary.
+ if v.AuxInt != 0 {
+ plwsync2 := s.Prog(ppc64.ALWSYNC)
+ plwsync2.To.Type = obj.TYPE_NONE
+ p2.To.SetTarget(plwsync2)
+ } else {
+ // done (label)
+ p6 := s.Prog(obj.ANOP)
+ p2.To.SetTarget(p6)
+ }
+
+ case ssa.OpPPC64LoweredPubBarrier:
+ // LWSYNC
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpPPC64LoweredGetClosurePtr:
+ // Closure pointer is R11 (already)
+ ssagen.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpPPC64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64LoweredRound32F, ssa.OpPPC64LoweredRound64F:
+ // input is already rounded
+
+ case ssa.OpLoadReg:
+ loadOp := loadByType(v.Type)
+ p := s.Prog(loadOp)
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ storeOp := storeByType(v.Type)
+ p := s.Prog(storeOp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, ppc64.REGSP, base.Ctxt.Arch.FixedFrameSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+
+ ssagen.CheckArgReg(v)
+
+ case ssa.OpPPC64DIVD:
+ // For now,
+ //
+ // cmp arg1, -1
+ // be ahead
+ // v = arg0 / arg1
+ // b over
+ // ahead: v = - arg0
+ // over: nop
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := s.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := s.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ pbahead.To.SetTarget(p)
+
+ p = s.Prog(obj.ANOP)
+ pbover.To.SetTarget(p)
+
+ case ssa.OpPPC64DIVW:
+ // word-width version of above
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+
+ p := s.Prog(ppc64.ACMPW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := s.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := s.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ pbahead.To.SetTarget(p)
+
+ p = s.Prog(obj.ANOP)
+ pbover.To.SetTarget(p)
+
+ case ssa.OpPPC64CLRLSLWI:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.AddRestSourceConst(ssa.GetPPC64Shiftsh(shifts))
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64CLRLSLDI:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.AddRestSourceConst(ssa.GetPPC64Shiftsh(shifts))
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
+ ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
+ ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
+ ssa.OpPPC64ROTL, ssa.OpPPC64ROTLW,
+ ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU,
+ ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64FCPSGN,
+ ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64NOR, ssa.OpPPC64XOR, ssa.OpPPC64EQV,
+ ssa.OpPPC64MODUD, ssa.OpPPC64MODSD, ssa.OpPPC64MODUW, ssa.OpPPC64MODSW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ADDCC, ssa.OpPPC64ANDCC, ssa.OpPPC64SUBCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC, ssa.OpPPC64NORCC,
+ ssa.OpPPC64ANDNCC:
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpPPC64NEGCC, ssa.OpPPC64CNTLZDCC:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ // Auxint holds encoded rotate + mask
+ case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
+ sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)}
+ p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
+ // Auxint holds mask
+
+ case ssa.OpPPC64RLDICL, ssa.OpPPC64RLDICR:
+ sh, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
+ switch v.Op {
+ case ssa.OpPPC64RLDICL:
+ p.AddRestSourceConst(mb)
+ case ssa.OpPPC64RLDICR:
+ p.AddRestSourceConst(me)
+ }
+ p.Reg = v.Args[0].Reg()
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+
+ case ssa.OpPPC64RLWNM:
+ _, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}
+ p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
+
+ case ssa.OpPPC64MADDLD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ // r = r1*r2 ± r3
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r2
+ p.AddRestSourceReg(r3)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64FMADD, ssa.OpPPC64FMADDS, ssa.OpPPC64FMSUB, ssa.OpPPC64FMSUBS:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ // r = r1*r2 ± r3
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r3
+ p.AddRestSourceReg(r2)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL,
+ ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW,
+ ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS,
+ ssa.OpPPC64FROUND, ssa.OpPPC64CNTTZW, ssa.OpPPC64CNTTZD, ssa.OpPPC64BRH, ssa.OpPPC64BRW, ssa.OpPPC64BRD:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ADDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
+ ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst,
+ ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64ADDC, ssa.OpPPC64ADDE, ssa.OpPPC64SUBC, ssa.OpPPC64SUBE:
+ r := v.Reg0() // CA is the first, implied argument.
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ADDZEzero, ssa.OpPPC64SUBZEzero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64ADDCconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ // Output is a pair, the second is the CA, which is implied.
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpPPC64SUBCconst:
+ p := s.Prog(v.Op.Asm())
+ p.AddRestSourceConst(v.AuxInt)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpPPC64SUBFCconst:
+ p := s.Prog(v.Op.Asm())
+ p.AddRestSourceConst(v.AuxInt)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64ADDCCconst, ssa.OpPPC64ANDCCconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpPPC64MOVDaddr:
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux in MOVDaddr is of unknown type %T", v.Aux)
+ case nil:
+ // If aux offset and aux int are both 0, and the same
+ // input and output regs are used, no instruction
+ // needs to be generated, since it would just be
+ // addi rx, rx, 0.
+ if v.AuxInt != 0 || v.Args[0].Reg() != v.Reg() {
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ }
+
+ case *obj.LSym, ir.Node:
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ ssagen.AddAux(&p.From, v)
+
+ }
+
+ case ssa.OpPPC64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+
+ case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
+ // Shift in register to required size
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+
+ case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload:
+
+ // MOVDload and MOVWload are DS form instructions that are restricted to
+ // offsets that are a multiple of 4. If the offset is not a multiple of 4,
+ // then the address of the symbol to be loaded is computed (base + offset)
+ // and used as the new base register and the offset field in the instruction
+ // can be set to zero.
+
+ // This same problem can happen with gostrings since the final offset is not
+ // known yet, but could be unaligned after the relocation is resolved.
+ // So gostrings are handled the same way.
+
+ // This allows the MOVDload and MOVWload to be generated in more cases and
+ // eliminates some offset and alignment checking in the rules file.
+
+ fromAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ ssagen.AddAux(&fromAddr, v)
+
+ genAddr := false
+
+ switch fromAddr.Name {
+ case obj.NAME_EXTERN, obj.NAME_STATIC:
+ // Special case for a rule combines the bytes of gostring.
+ // The v alignment might seem OK, but we don't want to load it
+ // using an offset because relocation comes later.
+ genAddr = strings.HasPrefix(fromAddr.Sym.Name, "go:string") || v.Type.Alignment()%4 != 0 || fromAddr.Offset%4 != 0
+ default:
+ genAddr = fromAddr.Offset%4 != 0
+ }
+ if genAddr {
+ // Load full address into the temp register.
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ // Load target using temp as base register
+ // and offset zero. Setting NAME_NONE
+ // prevents any extra offsets from being
+ // added.
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ fromAddr.Reg = ppc64.REGTMP
+ // Clear the offset field and other
+ // information that might be used
+ // by the assembler to add to the
+ // final offset value.
+ fromAddr.Offset = 0
+ fromAddr.Name = obj.NAME_NONE
+ fromAddr.Sym = nil
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From = fromAddr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDBRload, ssa.OpPPC64MOVWBRload, ssa.OpPPC64MOVHBRload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDBRstore, ssa.OpPPC64MOVWBRstore, ssa.OpPPC64MOVHBRstore:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpPPC64MOVDloadidx, ssa.OpPPC64MOVWloadidx, ssa.OpPPC64MOVHloadidx, ssa.OpPPC64MOVWZloadidx,
+ ssa.OpPPC64MOVBZloadidx, ssa.OpPPC64MOVHZloadidx, ssa.OpPPC64FMOVDloadidx, ssa.OpPPC64FMOVSloadidx,
+ ssa.OpPPC64MOVDBRloadidx, ssa.OpPPC64MOVWBRloadidx, ssa.OpPPC64MOVHBRloadidx:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.From.Index = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64DCBT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVDstorezero:
+
+ // MOVDstore and MOVDstorezero become DS form instructions that are restricted
+ // to offset values that are a multiple of 4. If the offset field is not a
+ // multiple of 4, then the full address of the store target is computed (base +
+ // offset) and used as the new base register and the offset in the instruction
+ // is set to 0.
+
+ // This allows the MOVDstore and MOVDstorezero to be generated in more cases,
+ // and prevents checking of the offset value and alignment in the rules.
+
+ toAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ ssagen.AddAux(&toAddr, v)
+
+ if toAddr.Offset%4 != 0 {
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ toAddr.Reg = ppc64.REGTMP
+ // Clear the offset field and other
+ // information that might be used
+ // by the assembler to add to the
+ // final offset value.
+ toAddr.Offset = 0
+ toAddr.Name = obj.NAME_NONE
+ toAddr.Sym = nil
+ }
+ p := s.Prog(v.Op.Asm())
+ p.To = toAddr
+ p.From.Type = obj.TYPE_REG
+ if v.Op == ssa.OpPPC64MOVDstorezero {
+ p.From.Reg = ppc64.REGZERO
+ } else {
+ p.From.Reg = v.Args[1].Reg()
+ }
+
+ case ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
+ ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
+ ssa.OpPPC64MOVHBRstoreidx:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Index = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ISEL, ssa.OpPPC64ISELZ:
+ // ISEL AuxInt ? arg0 : arg1
+ // ISELZ is a special case of ISEL where arg1 is implicitly $0.
+ //
+ // AuxInt value indicates conditions 0=LT 1=GT 2=EQ 3=SO 4=GE 5=LE 6=NE 7=NSO.
+ // ISEL accepts a CR bit argument, not a condition as expressed by AuxInt.
+ // Convert the condition to a CR bit argument by the following conversion:
+ //
+ // AuxInt&3 ? arg0 : arg1 for conditions LT, GT, EQ, SO
+ // AuxInt&3 ? arg1 : arg0 for conditions GE, LE, NE, NSO
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ if v.Op == ssa.OpPPC64ISEL {
+ p.AddRestSourceReg(v.Args[1].Reg())
+ } else {
+ p.AddRestSourceReg(ppc64.REG_R0)
+ }
+ // AuxInt values 4,5,6 implemented with reverse operand order from 0,1,2
+ if v.AuxInt > 3 {
+ p.Reg, p.GetFrom3().Reg = p.GetFrom3().Reg, p.Reg
+ }
+ p.From.SetConst(v.AuxInt & 3)
+
+ case ssa.OpPPC64SETBC, ssa.OpPPC64SETBCR:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = int16(ppc64.REG_CR0LT + v.AuxInt)
+
+ case ssa.OpPPC64LoweredQuadZero, ssa.OpPPC64LoweredQuadZeroShort:
+ // The LoweredQuad code generation
+ // generates STXV instructions on
+ // power9. The Short variation is used
+ // if no loop is generated.
+
+ // sizes >= 64 generate a loop as follows:
+
+ // Set up loop counter in CTR, used by BC
+ // XXLXOR clears VS32
+ // XXLXOR VS32,VS32,VS32
+ // MOVD len/64,REG_TMP
+ // MOVD REG_TMP,CTR
+ // loop:
+ // STXV VS32,0(R20)
+ // STXV VS32,16(R20)
+ // STXV VS32,32(R20)
+ // STXV VS32,48(R20)
+ // ADD $64,R20
+ // BC 16, 0, loop
+
+ // Bytes per iteration
+ ctr := v.AuxInt / 64
+
+ // Remainder bytes
+ rem := v.AuxInt % 64
+
+ // Only generate a loop if there is more
+ // than 1 iteration.
+ if ctr > 1 {
+ // Set up VS32 (V0) to hold 0s
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+
+ // Set up CTR loop counter
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Don't generate padding for
+ // loops with few iterations.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // generate 4 STXVs to zero 64 bytes
+ var top *obj.Prog
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ // Save the top of loop
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 16
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 48
+
+ // Increment address for the
+ // 64 bytes just zeroed.
+ p = s.Prog(ppc64.AADD)
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 64
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+
+ // Branch back to top of loop
+ // based on CTR
+ // BC with BO_BCTR generates bdnz
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_CR0LT
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+ }
+ // When ctr == 1 the loop was not generated but
+ // there are at least 64 bytes to clear, so add
+ // that to the remainder to generate the code
+ // to clear those doublewords
+ if ctr == 1 {
+ rem += 64
+ }
+
+ // Clear the remainder starting at offset zero
+ offset := int64(0)
+
+ if rem >= 16 && ctr <= 1 {
+ // If the XXLXOR hasn't already been
+ // generated, do it here to initialize
+ // VS32 (V0) to 0.
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+ }
+ // Generate STXV for 32 or 64
+ // bytes.
+ for rem >= 32 {
+ p := s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset + 16
+ offset += 32
+ rem -= 32
+ }
+ // Generate 16 bytes
+ if rem >= 16 {
+ p := s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ offset += 16
+ rem -= 16
+ }
+
+ // first clear as many doublewords as possible
+ // then clear remaining sizes as available
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroShort:
+
+ // Unaligned data doesn't hurt performance
+ // for these instructions on power8.
+
+ // For sizes >= 64 generate a loop as follows:
+
+ // Set up loop counter in CTR, used by BC
+ // XXLXOR VS32,VS32,VS32
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // MOVD $16,REG_TMP
+ // loop:
+ // STXVD2X VS32,(R0)(R20)
+ // STXVD2X VS32,(R31)(R20)
+ // ADD $32,R20
+ // BC 16, 0, loop
+ //
+ // any remainder is done as described below
+
+ // for sizes < 64 bytes, first clear as many doublewords as possible,
+ // then handle the remainder
+ // MOVD R0,(R20)
+ // MOVD R0,8(R20)
+ // .... etc.
+ //
+ // the remainder bytes are cleared using one or more
+ // of the following instructions with the appropriate
+ // offsets depending which instructions are needed
+ //
+ // MOVW R0,n1(R20) 4 bytes
+ // MOVH R0,n2(R20) 2 bytes
+ // MOVB R0,n3(R20) 1 byte
+ //
+ // 7 bytes: MOVW, MOVH, MOVB
+ // 6 bytes: MOVW, MOVH
+ // 5 bytes: MOVW, MOVB
+ // 3 bytes: MOVH, MOVB
+
+ // each loop iteration does 32 bytes
+ ctr := v.AuxInt / 32
+
+ // remainder bytes
+ rem := v.AuxInt % 32
+
+ // only generate a loop if there is more
+ // than 1 iteration.
+ if ctr > 1 {
+ // Set up VS32 (V0) to hold 0s
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+
+ // Set up CTR loop counter
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Set up R31 to hold index value 16
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ // Don't add padding for alignment
+ // with few loop iterations.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // generate 2 STXVD2Xs to store 16 bytes
+ // when this is a loop then the top must be saved
+ var top *obj.Prog
+ // This is the top of loop
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Index = ppc64.REGZERO
+ // Save the top of loop
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Index = ppc64.REGTMP
+
+ // Increment address for the
+ // 4 doublewords just zeroed.
+ p = s.Prog(ppc64.AADD)
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+
+ // Branch back to top of loop
+ // based on CTR
+ // BC with BO_BCTR generates bdnz
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_CR0LT
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+ }
+
+ // when ctr == 1 the loop was not generated but
+ // there are at least 32 bytes to clear, so add
+ // that to the remainder to generate the code
+ // to clear those doublewords
+ if ctr == 1 {
+ rem += 32
+ }
+
+ // clear the remainder starting at offset zero
+ offset := int64(0)
+
+ // first clear as many doublewords as possible
+ // then clear remaining sizes as available
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveShort:
+
+ bytesPerLoop := int64(32)
+ // This will be used when moving more
+ // than 8 bytes. Moves start with
+ // as many 8 byte moves as possible, then
+ // 4, 2, or 1 byte(s) as remaining. This will
+ // work and be efficient for power8 or later.
+ // If there are 64 or more bytes, then a
+ // loop is generated to move 32 bytes and
+ // update the src and dst addresses on each
+ // iteration. When < 64 bytes, the appropriate
+ // number of moves are generated based on the
+ // size.
+ // When moving >= 64 bytes a loop is used
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // MOVD $16,REG_TMP
+ // top:
+ // LXVD2X (R0)(R21),VS32
+ // LXVD2X (R31)(R21),VS33
+ // ADD $32,R21
+ // STXVD2X VS32,(R0)(R20)
+ // STXVD2X VS33,(R31)(R20)
+ // ADD $32,R20
+ // BC 16,0,top
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R21),R31
+ // MOVD R31,n(R20)
+ // MOVW n1(R21),R31
+ // MOVW R31,n1(R20)
+ // MOVH n2(R21),R31
+ // MOVH R31,n2(R20)
+ // MOVB n3(R21),R31
+ // MOVB R31,n3(R20)
+
+ // Each loop iteration moves 32 bytes
+ ctr := v.AuxInt / bytesPerLoop
+
+ // Remainder after the loop
+ rem := v.AuxInt % bytesPerLoop
+
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+
+ // The set of registers used here, must match the clobbered reg list
+ // in PPC64Ops.go.
+ offset := int64(0)
+
+ // top of the loop
+ var top *obj.Prog
+ // Only generate looping code when loop counter is > 1 for >= 64 bytes
+ if ctr > 1 {
+ // Set up the CTR
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Use REGTMP as index reg
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ // Don't adding padding for
+ // alignment with small iteration
+ // counts.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // Generate 16 byte loads and stores.
+ // Use temp register for index (16)
+ // on the second one.
+
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // increment the src reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = srcReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGZERO
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGTMP
+
+ // increment the dst reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = dstReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+
+ // BC with BO_BCTR generates bdnz to branch on nonzero CTR
+ // to loop top.
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_CR0LT
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+
+ // srcReg and dstReg were incremented in the loop, so
+ // later instructions start with offset 0.
+ offset = int64(0)
+ }
+
+ // No loop was generated for one iteration, so
+ // add 32 bytes to the remainder to move those bytes.
+ if ctr == 1 {
+ rem += bytesPerLoop
+ }
+
+ if rem >= 16 {
+ // Generate 16 byte loads and stores.
+ // Use temp register for index (value 16)
+ // on the second one.
+ p := s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGZERO
+
+ offset = 16
+ rem -= 16
+
+ if rem >= 16 {
+ // Use REGTMP as index reg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGTMP
+
+ offset = 32
+ rem -= 16
+ }
+ }
+
+ // Generate all the remaining load and store pairs, starting with
+ // as many 8 byte moves as possible, then 4, 2, 1.
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVWZ, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ // Load
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+
+ // Store
+ p = s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredQuadMove, ssa.OpPPC64LoweredQuadMoveShort:
+ bytesPerLoop := int64(64)
+ // This is used when moving more
+ // than 8 bytes on power9. Moves start with
+ // as many 8 byte moves as possible, then
+ // 4, 2, or 1 byte(s) as remaining. This will
+ // work and be efficient for power8 or later.
+ // If there are 64 or more bytes, then a
+ // loop is generated to move 32 bytes and
+ // update the src and dst addresses on each
+ // iteration. When < 64 bytes, the appropriate
+ // number of moves are generated based on the
+ // size.
+ // When moving >= 64 bytes a loop is used
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // top:
+ // LXV 0(R21),VS32
+ // LXV 16(R21),VS33
+ // ADD $32,R21
+ // STXV VS32,0(R20)
+ // STXV VS33,16(R20)
+ // ADD $32,R20
+ // BC 16,0,top
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R21),R31
+ // MOVD R31,n(R20)
+ // MOVW n1(R21),R31
+ // MOVW R31,n1(R20)
+ // MOVH n2(R21),R31
+ // MOVH R31,n2(R20)
+ // MOVB n3(R21),R31
+ // MOVB R31,n3(R20)
+
+ // Each loop iteration moves 32 bytes
+ ctr := v.AuxInt / bytesPerLoop
+
+ // Remainder after the loop
+ rem := v.AuxInt % bytesPerLoop
+
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+
+ offset := int64(0)
+
+ // top of the loop
+ var top *obj.Prog
+
+ // Only generate looping code when loop counter is > 1 for >= 64 bytes
+ if ctr > 1 {
+ // Set up the CTR
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+
+ // Generate 16 byte loads and stores.
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 16
+
+ // Generate 16 byte loads and stores.
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 48
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 48
+
+ // increment the src reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = srcReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+
+ // increment the dst reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = dstReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+
+ // BC with BO_BCTR generates bdnz to branch on nonzero CTR
+ // to loop top.
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_CR0LT
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+
+ // srcReg and dstReg were incremented in the loop, so
+ // later instructions start with offset 0.
+ offset = int64(0)
+ }
+
+ // No loop was generated for one iteration, so
+ // add 32 bytes to the remainder to move those bytes.
+ if ctr == 1 {
+ rem += bytesPerLoop
+ }
+ if rem >= 32 {
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = 16
+
+ offset = 32
+ rem -= 32
+ }
+
+ if rem >= 16 {
+ // Generate 16 byte loads and stores.
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ offset += 16
+ rem -= 16
+
+ if rem >= 16 {
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ offset += 16
+ rem -= 16
+ }
+ }
+ // Generate all the remaining load and store pairs, starting with
+ // as many 8 byte moves as possible, then 4, 2, 1.
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVWZ, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ // Load
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+
+ // Store
+ p = s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64CALLstatic:
+ s.Call(v)
+
+ case ssa.OpPPC64CALLtail:
+ s.TailCall(v)
+
+ case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_LR
+
+ if v.Args[0].Reg() != ppc64.REG_R12 {
+ v.Fatalf("Function address for %v should be in R12 %d but is in %d", v.LongString(), ppc64.REG_R12, p.From.Reg)
+ }
+
+ pp := s.Call(v)
+
+ // Convert the call into a blrl with hint this is not a subroutine return.
+ // The full bclrl opcode must be specified when passing a hint.
+ pp.As = ppc64.ABCL
+ pp.From.Type = obj.TYPE_CONST
+ pp.From.Offset = ppc64.BO_ALWAYS
+ pp.Reg = ppc64.REG_CR0LT // The preferred value if BI is ignored.
+ pp.To.Reg = ppc64.REG_LR
+ pp.AddRestSourceConst(1)
+
+ if ppc64.NeedTOCpointer(base.Ctxt) {
+ // When compiling Go into PIC, the function we just
+ // called via pointer might have been implemented in
+ // a separate module and so overwritten the TOC
+ // pointer in R2; reload it.
+ q := s.Prog(ppc64.AMOVD)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Offset = 24
+ q.From.Reg = ppc64.REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = ppc64.REG_R2
+ }
+
+ case ssa.OpPPC64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+
+ case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+
+ case ssa.OpPPC64LoweredNilCheck:
+ if buildcfg.GOOS == "aix" {
+ // CMP Rarg0, R0
+ // BNE 2(PC)
+ // STW R0, 0(R0)
+ // NOP (so the BNE has somewhere to land)
+
+ // CMP Rarg0, R0
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ // BNE 2(PC)
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+
+ // STW R0, 0(R0)
+ // Write at 0 is forbidden and will trigger a SIGSEGV
+ p = s.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = ppc64.REG_R0
+
+ // NOP (so the BNE has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ p2.To.SetTarget(nop)
+
+ } else {
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(ppc64.AMOVBZ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+
+ // These should be resolved by rules and not make it here.
+ case ssa.OpPPC64Equal, ssa.OpPPC64NotEqual, ssa.OpPPC64LessThan, ssa.OpPPC64FLessThan,
+ ssa.OpPPC64LessEqual, ssa.OpPPC64GreaterThan, ssa.OpPPC64FGreaterThan, ssa.OpPPC64GreaterEqual,
+ ssa.OpPPC64FLessEqual, ssa.OpPPC64FGreaterEqual:
+ v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
+ case ssa.OpPPC64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+ asmeq, invasmun bool
+}{
+ ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false},
+ ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false},
+
+ ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false},
+ ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false},
+ ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false},
+
+ // TODO: need to work FP comparisons into block jumps
+ ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, true}, // GE = GT or EQ; !GE = LT or UN
+ ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // LE = LT or EQ; !LE = GT or UN
+ ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R3
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ p = s.Prog(ppc64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
+ ssa.BlockPPC64LT, ssa.BlockPPC64GE,
+ ssa.BlockPPC64LE, ssa.BlockPPC64GT,
+ ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
+ ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ if jmp.invasmun {
+ // TODO: The second branch is probably predict-not-taken since it is for FP unordered
+ s.Br(ppc64.ABVS, b.Succs[1].Block())
+ }
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ if jmp.asmeq {
+ s.Br(ppc64.ABEQ, b.Succs[0].Block())
+ }
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ if jmp.asmeq {
+ s.Br(ppc64.ABEQ, b.Succs[0].Block())
+ }
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ if jmp.invasmun {
+ // TODO: The second branch is probably predict-not-taken since it is for FP unordered
+ s.Br(ppc64.ABVS, b.Succs[1].Block())
+ }
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/rangefunc/rangefunc_test.go b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
new file mode 100644
index 0000000..16856c6
--- /dev/null
+++ b/src/cmd/compile/internal/rangefunc/rangefunc_test.go
@@ -0,0 +1,1297 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build goexperiment.rangefunc
+
+package rangefunc_test
+
+import (
+ "slices"
+ "testing"
+)
+
+type Seq2[T1, T2 any] func(yield func(T1, T2) bool)
+
+// OfSliceIndex returns a Seq over the elements of s. It is equivalent
+// to range s.
+func OfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ if !yield(i, v) {
+ return
+ }
+ }
+ return
+ }
+}
+
+// BadOfSliceIndex is "bad" because it ignores the return value from yield
+// and just keeps on iterating.
+func BadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ yield(i, v)
+ }
+ return
+ }
+}
+
+// VeryBadOfSliceIndex is "very bad" because it ignores the return value from yield
+// and just keeps on iterating, and also wraps that call in a defer-recover so it can
+// keep on trying after the first panic.
+func VeryBadOfSliceIndex[T any, S ~[]T](s S) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ func() {
+ defer func() {
+ recover()
+ }()
+ yield(i, v)
+ }()
+ }
+ return
+ }
+}
+
+// CooperativeBadOfSliceIndex calls the loop body from a goroutine after
+// a ping on a channel, and returns recover()on that same channel.
+func CooperativeBadOfSliceIndex[T any, S ~[]T](s S, proceed chan any) Seq2[int, T] {
+ return func(yield func(int, T) bool) {
+ for i, v := range s {
+ if !yield(i, v) {
+ // if the body breaks, call yield just once in a goroutine
+ go func() {
+ <-proceed
+ defer func() {
+ proceed <- recover()
+ }()
+ yield(0, s[0])
+ }()
+ return
+ }
+ }
+ return
+ }
+}
+
+// TrickyIterator is a type intended to test whether an iterator that
+// calls a yield function after loop exit must inevitably escape the
+// closure; this might be relevant to future checking/optimization.
+type TrickyIterator struct {
+ yield func(int, int) bool
+}
+
+func (ti *TrickyIterator) iterAll(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ ti.yield = yield // Save yield for future abuse
+ for i, v := range s {
+ if !yield(i, v) {
+ return
+ }
+ }
+ return
+ }
+}
+
+func (ti *TrickyIterator) iterOne(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ ti.yield = yield // Save yield for future abuse
+ if len(s) > 0 { // Not in a loop might escape differently
+ yield(0, s[0])
+ }
+ return
+ }
+}
+
+func (ti *TrickyIterator) iterZero(s []int) Seq2[int, int] {
+ return func(yield func(int, int) bool) {
+ ti.yield = yield // Save yield for future abuse
+ // Don't call it at all, maybe it won't escape
+ return
+ }
+}
+
+func (ti *TrickyIterator) fail() {
+ if ti.yield != nil {
+ ti.yield(1, 1)
+ }
+}
+
+// Check wraps the function body passed to iterator forall
+// in code that ensures that it cannot (successfully) be called
+// either after body return false (control flow out of loop) or
+// forall itself returns (the iteration is now done).
+//
+// Note that this can catch errors before the inserted checks.
+func Check[U, V any](forall Seq2[U, V]) Seq2[U, V] {
+ return func(body func(U, V) bool) {
+ ret := true
+ forall(func(u U, v V) bool {
+ if !ret {
+ panic("Checked iterator access after exit")
+ }
+ ret = body(u, v)
+ return ret
+ })
+ ret = false
+ }
+}
+
+func TestCheck(t *testing.T) {
+ i := 0
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+ for _, x := range Check(BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+ i += x
+ if i > 4*9 {
+ break
+ }
+ }
+}
+
+func TestCooperativeBadOfSliceIndex(t *testing.T) {
+ i := 0
+ proceed := make(chan any)
+ for _, x := range CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+ proceed <- true
+ if r := <-proceed; r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ if i != 36 {
+ t.Errorf("Expected i == 36, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+}
+
+func TestCheckCooperativeBadOfSliceIndex(t *testing.T) {
+ i := 0
+ proceed := make(chan any)
+ for _, x := range Check(CooperativeBadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, proceed)) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+ proceed <- true
+ if r := <-proceed; r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ if i != 36 {
+ t.Errorf("Expected i == 36, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+}
+
+func TestTrickyIterAll(t *testing.T) {
+ trickItAll := TrickyIterator{}
+ i := 0
+ for _, x := range trickItAll.iterAll([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ if i != 36 {
+ t.Errorf("Expected i == 36, saw %d instead", i)
+ } else {
+ t.Logf("i = %d", i)
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItAll.fail()
+}
+
+func TestTrickyIterOne(t *testing.T) {
+ trickItOne := TrickyIterator{}
+ i := 0
+ for _, x := range trickItOne.iterOne([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ // Don't care about value, ought to be 36 anyhow.
+ t.Logf("i = %d", i)
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItOne.fail()
+}
+
+func TestTrickyIterZero(t *testing.T) {
+ trickItZero := TrickyIterator{}
+ i := 0
+ for _, x := range trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ // Don't care about value, ought to be 0 anyhow.
+ t.Logf("i = %d", i)
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItZero.fail()
+}
+
+func TestCheckTrickyIterZero(t *testing.T) {
+ trickItZero := TrickyIterator{}
+ i := 0
+ for _, x := range Check(trickItZero.iterZero([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})) {
+ i += x
+ if i >= 36 {
+ break
+ }
+ }
+
+ // Don't care about value, ought to be 0 anyhow.
+ t.Logf("i = %d", i)
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ trickItZero.fail()
+}
+
+// TestBreak1 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak1(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+ if x == -4 {
+ break
+ }
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestBreak2 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak2(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+outer:
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ if x == -4 {
+ break outer
+ }
+
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestContinue should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestContinue(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4}
+outer:
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4}) {
+ result = append(result, x)
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ continue outer
+ }
+ if x == -4 {
+ break outer
+ }
+
+ result = append(result, y)
+ }
+ result = append(result, x-10)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestBreak3 should just work, with well-behaved iterators.
+// (The misbehaving iterator detector should not trigger.)
+func TestBreak3(t *testing.T) {
+ var result []int
+ var expect = []int{100, 10, 2, 4, 200, 10, 2, 4, 20, 2, 4, 300, 10, 2, 4, 20, 2, 4, 30}
+X:
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ Y:
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ if 10*y >= x {
+ break
+ }
+ result = append(result, y)
+ if y == 30 {
+ continue X
+ }
+ Z:
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue Z
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue Y
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestBreak1BadA should end in a panic when the outer-loop's
+// single-level break is ignore by BadOfSliceIndex
+func TestBreak1BadA(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3}
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ if x == -4 {
+ break
+ }
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+}
+
+// TestBreak1BadB should end in a panic, sooner, when the inner-loop's
+// (nested) single-level break is ignored by BadOfSliceIndex
+func TestBreak1BadB(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2} // inner breaks, panics, after before outer appends
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ if x == -4 {
+ break
+ }
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+}
+
+// TestMultiCont0 tests multilevel continue with no bad iterators
+// (it should just work)
+func TestMultiCont0(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4, 2000}
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W // modified to be multilevel
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiCont1 tests multilevel continue with a bad iterator
+// in the outermost loop exited by the continue.
+func TestMultiCont1(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiCont2 tests multilevel continue with a bad iterator
+// in a middle loop exited by the continue.
+func TestMultiCont2(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiCont3 tests multilevel continue with a bad iterator
+// in the innermost loop exited by the continue.
+func TestMultiCont3(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ continue W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak0 tests multilevel break with a bad iterator
+// in the outermost loop exited by the break (the outermost loop).
+func TestMultiBreak0(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range BadOfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak1 tests multilevel break with a bad iterator
+// in an intermediate loop exited by the break.
+func TestMultiBreak1(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak2 tests multilevel break with two bad iterators
+// in intermediate loops exited by the break.
+func TestMultiBreak2(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range BadOfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range BadOfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestMultiBreak3 tests multilevel break with the bad iterator
+// in the innermost loop exited by the break.
+func TestMultiBreak3(t *testing.T) {
+ var result []int
+ var expect = []int{1000, 10, 2, 4}
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Errorf("Wanted to see a failure, result was %v", result)
+ }
+ }()
+
+W:
+ for _, w := range OfSliceIndex([]int{1000, 2000}) {
+ result = append(result, w)
+ if w == 2000 {
+ break
+ }
+ for _, x := range OfSliceIndex([]int{100, 200, 300, 400}) {
+ for _, y := range OfSliceIndex([]int{10, 20, 30, 40}) {
+ result = append(result, y)
+ for _, z := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if z&1 == 1 {
+ continue
+ }
+ result = append(result, z)
+ if z >= 4 {
+ break W
+ }
+ }
+ result = append(result, -y) // should never be executed
+ }
+ result = append(result, x)
+ }
+ }
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// veryBad tests that a loop nest behaves sensibly in the face of a
+// "very bad" iterator. In this case, "sensibly" means that the
+// break out of X still occurs after the very bad iterator finally
+// quits running (the control flow bread crumbs remain.)
+func veryBad(s []int) []int {
+ var result []int
+X:
+ for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+ result = append(result, x)
+
+ for _, y := range VeryBadOfSliceIndex(s) {
+ result = append(result, y)
+ break X
+ }
+ for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+ result = append(result, z)
+ if z == 100 {
+ break
+ }
+ }
+ }
+ return result
+}
+
+// checkVeryBad wraps a "very bad" iterator with Check,
+// demonstrating that the very bad iterator also hides panics
+// thrown by Check.
+func checkVeryBad(s []int) []int {
+ var result []int
+X:
+ for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+ result = append(result, x)
+
+ for _, y := range Check(VeryBadOfSliceIndex(s)) {
+ result = append(result, y)
+ break X
+ }
+ for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+ result = append(result, z)
+ if z == 100 {
+ break
+ }
+ }
+ }
+ return result
+}
+
+// okay is the not-bad version of veryBad.
+// They should behave the same.
+func okay(s []int) []int {
+ var result []int
+X:
+ for _, x := range OfSliceIndex([]int{1, 2, 3}) {
+
+ result = append(result, x)
+
+ for _, y := range OfSliceIndex(s) {
+ result = append(result, y)
+ break X
+ }
+ for _, z := range OfSliceIndex([]int{100, 200, 300}) {
+ result = append(result, z)
+ if z == 100 {
+ break
+ }
+ }
+ }
+ return result
+}
+
+// TestVeryBad1 checks the behavior of an extremely poorly behaved iterator.
+func TestVeryBad1(t *testing.T) {
+ result := veryBad([]int{10, 20, 30, 40, 50}) // odd length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestVeryBad2 checks the behavior of an extremely poorly behaved iterator.
+func TestVeryBad2(t *testing.T) {
+ result := veryBad([]int{10, 20, 30, 40}) // even length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestCheckVeryBad checks the behavior of an extremely poorly behaved iterator,
+// which also suppresses the exceptions from "Check"
+func TestCheckVeryBad(t *testing.T) {
+ result := checkVeryBad([]int{10, 20, 30, 40}) // even length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// TestOk is the nice version of the very bad iterator.
+func TestOk(t *testing.T) {
+ result := okay([]int{10, 20, 30, 40, 50}) // odd length
+ expect := []int{1, 10}
+
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+}
+
+// testBreak1BadDefer checks that defer behaves properly even in
+// the presence of loop bodies panicking out of bad iterators.
+// (i.e., the instrumentation did not break defer in these loops)
+func testBreak1BadDefer(t *testing.T) (result []int) {
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10}
+
+ defer func() {
+ if r := recover(); r != nil {
+ t.Logf("Saw expected panic '%v'", r)
+ if !slices.Equal(expect, result) {
+ t.Errorf("(Inner) Expected %v, got %v", expect, result)
+ }
+ } else {
+ t.Error("Wanted to see a failure")
+ }
+ }()
+
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ break
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
+func TestBreak1BadDefer(t *testing.T) {
+ var result []int
+ var expect = []int{1, 2, -1, 1, 2, -2, 1, 2, -3, -30, -20, -10}
+ result = testBreak1BadDefer(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("(Outer) Expected %v, got %v", expect, result)
+ }
+}
+
+// testReturn1 has no bad iterators.
+func testReturn1(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
+// testReturn2 has an outermost bad iterator
+func testReturn2(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+ return
+}
+
+// testReturn3 has an innermost bad iterator
+func testReturn3(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ return
+ }
+ result = append(result, y)
+ }
+ }
+ return
+}
+
+// TestReturns checks that returns through bad iterators behave properly,
+// for inner and outer bad iterators.
+func TestReturns(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, -10}
+ var err any
+
+ result, err = testReturn1(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+
+ result, err = testReturn2(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+ result, err = testReturn3(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+}
+
+// testGotoA1 tests loop-nest-internal goto, no bad iterators.
+func testGotoA1(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto A
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ A:
+ }
+ return
+}
+
+// testGotoA2 tests loop-nest-internal goto, outer bad iterator.
+func testGotoA2(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto A
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ A:
+ }
+ return
+}
+
+// testGotoA3 tests loop-nest-internal goto, inner bad iterator.
+func testGotoA3(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto A
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ A:
+ }
+ return
+}
+
+func TestGotoA(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, -2, 1, 2, -3, 1, 2, -4, -30, -20, -10}
+ var expect3 = []int{-1, 1, 2, -10} // first goto becomes a panic
+ var err any
+
+ result, err = testGotoA1(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+
+ result, err = testGotoA2(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+ result, err = testGotoA3(t)
+ if !slices.Equal(expect3, result) {
+ t.Errorf("Expected %v, got %v", expect3, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+}
+
+// testGotoB1 tests loop-nest-exiting goto, no bad iterators.
+func testGotoB1(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto B
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+B:
+ result = append(result, 999)
+ return
+}
+
+// testGotoB2 tests loop-nest-exiting goto, outer bad iterator.
+func testGotoB2(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range BadOfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range OfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto B
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+B:
+ result = append(result, 999)
+ return
+}
+
+// testGotoB3 tests loop-nest-exiting goto, inner bad iterator.
+func testGotoB3(t *testing.T) (result []int, err any) {
+ defer func() {
+ err = recover()
+ }()
+ for _, x := range OfSliceIndex([]int{-1, -2, -3, -4, -5}) {
+ result = append(result, x)
+ if x == -4 {
+ break
+ }
+ defer func() {
+ result = append(result, x*10)
+ }()
+ for _, y := range BadOfSliceIndex([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) {
+ if y == 3 {
+ goto B
+ }
+ result = append(result, y)
+ }
+ result = append(result, x)
+ }
+B:
+ result = append(result, 999)
+ return
+}
+
+func TestGotoB(t *testing.T) {
+ var result []int
+ var expect = []int{-1, 1, 2, 999, -10}
+ var expectX = []int{-1, 1, 2, -10}
+ var err any
+
+ result, err = testGotoB1(t)
+ if !slices.Equal(expect, result) {
+ t.Errorf("Expected %v, got %v", expect, result)
+ }
+ if err != nil {
+ t.Errorf("Unexpected error %v", err)
+ }
+
+ result, err = testGotoB2(t)
+ if !slices.Equal(expectX, result) {
+ t.Errorf("Expected %v, got %v", expectX, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+
+ result, err = testGotoB3(t)
+ if !slices.Equal(expectX, result) {
+ t.Errorf("Expected %v, got %v", expectX, result)
+ }
+ if err == nil {
+ t.Errorf("Missing expected error")
+ } else {
+ t.Logf("Saw expected panic '%v'", err)
+ }
+}
diff --git a/src/cmd/compile/internal/rangefunc/rewrite.go b/src/cmd/compile/internal/rangefunc/rewrite.go
new file mode 100644
index 0000000..d439412
--- /dev/null
+++ b/src/cmd/compile/internal/rangefunc/rewrite.go
@@ -0,0 +1,1334 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package rangefunc rewrites range-over-func to code that doesn't use range-over-funcs.
+Rewriting the construct in the front end, before noder, means the functions generated during
+the rewrite are available in a noder-generated representation for inlining by the back end.
+
+# Theory of Operation
+
+The basic idea is to rewrite
+
+ for x := range f {
+ ...
+ }
+
+into
+
+ f(func(x T) bool {
+ ...
+ })
+
+But it's not usually that easy.
+
+# Range variables
+
+For a range not using :=, the assigned variables cannot be function parameters
+in the generated body function. Instead, we allocate fake parameters and
+start the body with an assignment. For example:
+
+ for expr1, expr2 = range f {
+ ...
+ }
+
+becomes
+
+ f(func(#p1 T1, #p2 T2) bool {
+ expr1, expr2 = #p1, #p2
+ ...
+ })
+
+(All the generated variables have a # at the start to signal that they
+are internal variables when looking at the generated code in a
+debugger. Because variables have all been resolved to the specific
+objects they represent, there is no danger of using plain "p1" and
+colliding with a Go variable named "p1"; the # is just nice to have,
+not for correctness.)
+
+It can also happen that there are fewer range variables than function
+arguments, in which case we end up with something like
+
+ f(func(x T1, _ T2) bool {
+ ...
+ })
+
+or
+
+ f(func(#p1 T1, #p2 T2, _ T3) bool {
+ expr1, expr2 = #p1, #p2
+ ...
+ })
+
+# Return
+
+If the body contains a "break", that break turns into "return false",
+to tell f to stop. And if the body contains a "continue", that turns
+into "return true", to tell f to proceed with the next value.
+Those are the easy cases.
+
+If the body contains a return or a break/continue/goto L, then we need
+to rewrite that into code that breaks out of the loop and then
+triggers that control flow. In general we rewrite
+
+ for x := range f {
+ ...
+ }
+
+into
+
+ {
+ var #next int
+ f(func(x T1) bool {
+ ...
+ return true
+ })
+ ... check #next ...
+ }
+
+The variable #next is an integer code that says what to do when f
+returns. Each difficult statement sets #next and then returns false to
+stop f.
+
+A plain "return" rewrites to {#next = -1; return false}.
+The return false breaks the loop. Then when f returns, the "check
+#next" section includes
+
+ if #next == -1 { return }
+
+which causes the return we want.
+
+Return with arguments is more involved. We need somewhere to store the
+arguments while we break out of f, so we add them to the var
+declaration, like:
+
+ {
+ var (
+ #next int
+ #r1 type1
+ #r2 type2
+ )
+ f(func(x T1) bool {
+ ...
+ {
+ // return a, b
+ #r1, #r2 = a, b
+ #next = -2
+ return false
+ }
+ ...
+ return true
+ })
+ if #next == -2 { return #r1, #r2 }
+ }
+
+TODO: What about:
+
+ func f() (x bool) {
+ for range g(&x) {
+ return true
+ }
+ }
+
+ func g(p *bool) func(func() bool) {
+ return func(yield func() bool) {
+ yield()
+ // Is *p true or false here?
+ }
+ }
+
+With this rewrite the "return true" is not visible after yield returns,
+but maybe it should be?
+
+# Checking
+
+To permit checking that an iterator is well-behaved -- that is, that
+it does not call the loop body again after it has returned false or
+after the entire loop has exited (it might retain a copy of the body
+function, or pass it to another goroutine) -- each generated loop has
+its own #exitK flag that is checked before each iteration, and set both
+at any early exit and after the iteration completes.
+
+For example:
+
+ for x := range f {
+ ...
+ if ... { break }
+ ...
+ }
+
+becomes
+
+ {
+ var #exit1 bool
+ f(func(x T1) bool {
+ if #exit1 { runtime.panicrangeexit() }
+ ...
+ if ... { #exit1 = true ; return false }
+ ...
+ return true
+ })
+ #exit1 = true
+ }
+
+# Nested Loops
+
+So far we've only considered a single loop. If a function contains a
+sequence of loops, each can be translated individually. But loops can
+be nested. It would work to translate the innermost loop and then
+translate the loop around it, and so on, except that there'd be a lot
+of rewriting of rewritten code and the overall traversals could end up
+taking time quadratic in the depth of the nesting. To avoid all that,
+we use a single rewriting pass that handles a top-most range-over-func
+loop and all the range-over-func loops it contains at the same time.
+
+If we need to return from inside a doubly-nested loop, the rewrites
+above stay the same, but the check after the inner loop only says
+
+ if #next < 0 { return false }
+
+to stop the outer loop so it can do the actual return. That is,
+
+ for range f {
+ for range g {
+ ...
+ return a, b
+ ...
+ }
+ }
+
+becomes
+
+ {
+ var (
+ #next int
+ #r1 type1
+ #r2 type2
+ )
+ var #exit1 bool
+ f(func() {
+ if #exit1 { runtime.panicrangeexit() }
+ var #exit2 bool
+ g(func() {
+ if #exit2 { runtime.panicrangeexit() }
+ ...
+ {
+ // return a, b
+ #r1, #r2 = a, b
+ #next = -2
+ #exit1, #exit2 = true, true
+ return false
+ }
+ ...
+ return true
+ })
+ #exit2 = true
+ if #next < 0 {
+ return false
+ }
+ return true
+ })
+ #exit1 = true
+ if #next == -2 {
+ return #r1, #r2
+ }
+ }
+
+Note that the #next < 0 after the inner loop handles both kinds of
+return with a single check.
+
+# Labeled break/continue of range-over-func loops
+
+For a labeled break or continue of an outer range-over-func, we
+use positive #next values. Any such labeled break or continue
+really means "do N breaks" or "do N breaks and 1 continue".
+We encode that as perLoopStep*N or perLoopStep*N+1 respectively.
+
+Loops that might need to propagate a labeled break or continue
+add one or both of these to the #next checks:
+
+ if #next >= 2 {
+ #next -= 2
+ return false
+ }
+
+ if #next == 1 {
+ #next = 0
+ return true
+ }
+
+For example
+
+ F: for range f {
+ for range g {
+ for range h {
+ ...
+ break F
+ ...
+ ...
+ continue F
+ ...
+ }
+ }
+ ...
+ }
+
+becomes
+
+ {
+ var #next int
+ var #exit1 bool
+ f(func() {
+ if #exit1 { runtime.panicrangeexit() }
+ var #exit2 bool
+ g(func() {
+ if #exit2 { runtime.panicrangeexit() }
+ var #exit3 bool
+ h(func() {
+ if #exit3 { runtime.panicrangeexit() }
+ ...
+ {
+ // break F
+ #next = 4
+ #exit1, #exit2, #exit3 = true, true, true
+ return false
+ }
+ ...
+ {
+ // continue F
+ #next = 3
+ #exit2, #exit3 = true, true
+ return false
+ }
+ ...
+ return true
+ })
+ #exit3 = true
+ if #next >= 2 {
+ #next -= 2
+ return false
+ }
+ return true
+ })
+ #exit2 = true
+ if #next >= 2 {
+ #next -= 2
+ return false
+ }
+ if #next == 1 {
+ #next = 0
+ return true
+ }
+ ...
+ return true
+ })
+ #exit1 = true
+ }
+
+Note that the post-h checks only consider a break,
+since no generated code tries to continue g.
+
+# Gotos and other labeled break/continue
+
+The final control flow translations are goto and break/continue of a
+non-range-over-func statement. In both cases, we may need to break out
+of one or more range-over-func loops before we can do the actual
+control flow statement. Each such break/continue/goto L statement is
+assigned a unique negative #next value (below -2, since -1 and -2 are
+for the two kinds of return). Then the post-checks for a given loop
+test for the specific codes that refer to labels directly targetable
+from that block. Otherwise, the generic
+
+ if #next < 0 { return false }
+
+check handles stopping the next loop to get one step closer to the label.
+
+For example
+
+ Top: print("start\n")
+ for range f {
+ for range g {
+ ...
+ for range h {
+ ...
+ goto Top
+ ...
+ }
+ }
+ }
+
+becomes
+
+ Top: print("start\n")
+ {
+ var #next int
+ var #exit1 bool
+ f(func() {
+ if #exit1 { runtime.panicrangeexit() }
+ var #exit2 bool
+ g(func() {
+ if #exit2 { runtime.panicrangeexit() }
+ ...
+ var #exit3 bool
+ h(func() {
+ if #exit3 { runtime.panicrangeexit() }
+ ...
+ {
+ // goto Top
+ #next = -3
+ #exit1, #exit2, #exit3 = true, true, true
+ return false
+ }
+ ...
+ return true
+ })
+ #exit3 = true
+ if #next < 0 {
+ return false
+ }
+ return true
+ })
+ #exit2 = true
+ if #next < 0 {
+ return false
+ }
+ return true
+ })
+ #exit1 = true
+ if #next == -3 {
+ #next = 0
+ goto Top
+ }
+ }
+
+Labeled break/continue to non-range-over-funcs are handled the same
+way as goto.
+
+# Defers
+
+The last wrinkle is handling defer statements. If we have
+
+ for range f {
+ defer print("A")
+ }
+
+we cannot rewrite that into
+
+ f(func() {
+ defer print("A")
+ })
+
+because the deferred code will run at the end of the iteration, not
+the end of the containing function. To fix that, the runtime provides
+a special hook that lets us obtain a defer "token" representing the
+outer function and then use it in a later defer to attach the deferred
+code to that outer function.
+
+Normally,
+
+ defer print("A")
+
+compiles to
+
+ runtime.deferproc(func() { print("A") })
+
+This changes in a range-over-func. For example:
+
+ for range f {
+ defer print("A")
+ }
+
+compiles to
+
+ var #defers = runtime.deferrangefunc()
+ f(func() {
+ runtime.deferprocat(func() { print("A") }, #defers)
+ })
+
+For this rewriting phase, we insert the explicit initialization of
+#defers and then attach the #defers variable to the CallStmt
+representing the defer. That variable will be propagated to the
+backend and will cause the backend to compile the defer using
+deferprocat instead of an ordinary deferproc.
+
+TODO: Could call runtime.deferrangefuncend after f.
+*/
+package rangefunc
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "go/constant"
+ "os"
+)
+
+// nopos is the zero syntax.Pos.
+var nopos syntax.Pos
+
+// A rewriter implements rewriting the range-over-funcs in a given function.
+type rewriter struct {
+ pkg *types2.Package
+ info *types2.Info
+ outer *syntax.FuncType
+ body *syntax.BlockStmt
+
+ // References to important types and values.
+ any types2.Object
+ bool types2.Object
+ int types2.Object
+ true types2.Object
+ false types2.Object
+
+ // Branch numbering, computed as needed.
+ branchNext map[branch]int // branch -> #next value
+ labelLoop map[string]*syntax.ForStmt // label -> innermost rangefunc loop it is declared inside (nil for no loop)
+
+ // Stack of nodes being visited.
+ stack []syntax.Node // all nodes
+ forStack []*forLoop // range-over-func loops
+
+ rewritten map[*syntax.ForStmt]syntax.Stmt
+
+ // Declared variables in generated code for outermost loop.
+ declStmt *syntax.DeclStmt
+ nextVar types2.Object
+ retVars []types2.Object
+ defers types2.Object
+ exitVarCount int // exitvars are referenced from their respective loops
+}
+
+// A branch is a single labeled branch.
+type branch struct {
+ tok syntax.Token
+ label string
+}
+
+// A forLoop describes a single range-over-func loop being processed.
+type forLoop struct {
+ nfor *syntax.ForStmt // actual syntax
+ exitFlag *types2.Var // #exit variable for this loop
+ exitFlagDecl *syntax.VarDecl
+
+ checkRet bool // add check for "return" after loop
+ checkRetArgs bool // add check for "return args" after loop
+ checkBreak bool // add check for "break" after loop
+ checkContinue bool // add check for "continue" after loop
+ checkBranch []branch // add check for labeled branch after loop
+}
+
+// Rewrite rewrites all the range-over-funcs in the files.
+func Rewrite(pkg *types2.Package, info *types2.Info, files []*syntax.File) {
+ for _, file := range files {
+ syntax.Inspect(file, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.FuncDecl:
+ rewriteFunc(pkg, info, n.Type, n.Body)
+ return false
+ case *syntax.FuncLit:
+ rewriteFunc(pkg, info, n.Type, n.Body)
+ return false
+ }
+ return true
+ })
+ }
+}
+
+// rewriteFunc rewrites all the range-over-funcs in a single function (a top-level func or a func literal).
+// The typ and body are the function's type and body.
+func rewriteFunc(pkg *types2.Package, info *types2.Info, typ *syntax.FuncType, body *syntax.BlockStmt) {
+ if body == nil {
+ return
+ }
+ r := &rewriter{
+ pkg: pkg,
+ info: info,
+ outer: typ,
+ body: body,
+ }
+ syntax.Inspect(body, r.inspect)
+ if (base.Flag.W != 0) && r.forStack != nil {
+ syntax.Fdump(os.Stderr, body)
+ }
+}
+
+// checkFuncMisuse reports whether to check for misuse of iterator callbacks functions.
+func (r *rewriter) checkFuncMisuse() bool {
+ return base.Debug.RangeFuncCheck != 0
+}
+
+// inspect is a callback for syntax.Inspect that drives the actual rewriting.
+// If it sees a func literal, it kicks off a separate rewrite for that literal.
+// Otherwise, it maintains a stack of range-over-func loops and
+// converts each in turn.
+func (r *rewriter) inspect(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.FuncLit:
+ rewriteFunc(r.pkg, r.info, n.Type, n.Body)
+ return false
+
+ default:
+ // Push n onto stack.
+ r.stack = append(r.stack, n)
+ if nfor, ok := forRangeFunc(n); ok {
+ loop := &forLoop{nfor: nfor}
+ r.forStack = append(r.forStack, loop)
+ r.startLoop(loop)
+ }
+
+ case nil:
+ // n == nil signals that we are done visiting
+ // the top-of-stack node's children. Find it.
+ n = r.stack[len(r.stack)-1]
+
+ // If we are inside a range-over-func,
+ // take this moment to replace any break/continue/goto/return
+ // statements directly contained in this node.
+ // Also replace any converted for statements
+ // with the rewritten block.
+ switch n := n.(type) {
+ case *syntax.BlockStmt:
+ for i, s := range n.List {
+ n.List[i] = r.editStmt(s)
+ }
+ case *syntax.CaseClause:
+ for i, s := range n.Body {
+ n.Body[i] = r.editStmt(s)
+ }
+ case *syntax.CommClause:
+ for i, s := range n.Body {
+ n.Body[i] = r.editStmt(s)
+ }
+ case *syntax.LabeledStmt:
+ n.Stmt = r.editStmt(n.Stmt)
+ }
+
+ // Pop n.
+ if len(r.forStack) > 0 && r.stack[len(r.stack)-1] == r.forStack[len(r.forStack)-1].nfor {
+ r.endLoop(r.forStack[len(r.forStack)-1])
+ r.forStack = r.forStack[:len(r.forStack)-1]
+ }
+ r.stack = r.stack[:len(r.stack)-1]
+ }
+ return true
+}
+
+// startLoop sets up for converting a range-over-func loop.
+func (r *rewriter) startLoop(loop *forLoop) {
+ // For first loop in function, allocate syntax for any, bool, int, true, and false.
+ if r.any == nil {
+ r.any = types2.Universe.Lookup("any")
+ r.bool = types2.Universe.Lookup("bool")
+ r.int = types2.Universe.Lookup("int")
+ r.true = types2.Universe.Lookup("true")
+ r.false = types2.Universe.Lookup("false")
+ r.rewritten = make(map[*syntax.ForStmt]syntax.Stmt)
+ }
+ if r.checkFuncMisuse() {
+ // declare the exit flag for this loop's body
+ loop.exitFlag, loop.exitFlagDecl = r.exitVar(loop.nfor.Pos())
+ }
+}
+
+// editStmt returns the replacement for the statement x,
+// or x itself if it should be left alone.
+// This includes the for loops we are converting,
+// as left in x.rewritten by r.endLoop.
+func (r *rewriter) editStmt(x syntax.Stmt) syntax.Stmt {
+ if x, ok := x.(*syntax.ForStmt); ok {
+ if s := r.rewritten[x]; s != nil {
+ return s
+ }
+ }
+
+ if len(r.forStack) > 0 {
+ switch x := x.(type) {
+ case *syntax.BranchStmt:
+ return r.editBranch(x)
+ case *syntax.CallStmt:
+ if x.Tok == syntax.Defer {
+ return r.editDefer(x)
+ }
+ case *syntax.ReturnStmt:
+ return r.editReturn(x)
+ }
+ }
+
+ return x
+}
+
+// editDefer returns the replacement for the defer statement x.
+// See the "Defers" section in the package doc comment above for more context.
+func (r *rewriter) editDefer(x *syntax.CallStmt) syntax.Stmt {
+ if r.defers == nil {
+ // Declare and initialize the #defers token.
+ init := &syntax.CallExpr{
+ Fun: runtimeSym(r.info, "deferrangefunc"),
+ }
+ tv := syntax.TypeAndValue{Type: r.any.Type()}
+ tv.SetIsValue()
+ init.SetTypeInfo(tv)
+ r.defers = r.declVar("#defers", r.any.Type(), init)
+ }
+
+ // Attach the token as an "extra" argument to the defer.
+ x.DeferAt = r.useVar(r.defers)
+ setPos(x.DeferAt, x.Pos())
+ return x
+}
+
+func (r *rewriter) exitVar(pos syntax.Pos) (*types2.Var, *syntax.VarDecl) {
+ r.exitVarCount++
+
+ name := fmt.Sprintf("#exit%d", r.exitVarCount)
+ typ := r.bool.Type()
+ obj := types2.NewVar(pos, r.pkg, name, typ)
+ n := syntax.NewName(pos, name)
+ setValueType(n, typ)
+ r.info.Defs[n] = obj
+
+ return obj, &syntax.VarDecl{NameList: []*syntax.Name{n}}
+}
+
+// editReturn returns the replacement for the return statement x.
+// See the "Return" section in the package doc comment above for more context.
+func (r *rewriter) editReturn(x *syntax.ReturnStmt) syntax.Stmt {
+ // #next = -1 is return with no arguments; -2 is return with arguments.
+ var next int
+ if x.Results == nil {
+ next = -1
+ r.forStack[0].checkRet = true
+ } else {
+ next = -2
+ r.forStack[0].checkRetArgs = true
+ }
+
+ // Tell the loops along the way to check for a return.
+ for _, loop := range r.forStack[1:] {
+ loop.checkRet = true
+ }
+
+ // Assign results, set #next, and return false.
+ bl := &syntax.BlockStmt{}
+ if x.Results != nil {
+ if r.retVars == nil {
+ for i, a := range r.outer.ResultList {
+ obj := r.declVar(fmt.Sprintf("#r%d", i+1), a.Type.GetTypeInfo().Type, nil)
+ r.retVars = append(r.retVars, obj)
+ }
+ }
+ bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.useList(r.retVars), Rhs: x.Results})
+ }
+ bl.List = append(bl.List, &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)})
+ if r.checkFuncMisuse() {
+ // mark all enclosing loop bodies as exited
+ for i := 0; i < len(r.forStack); i++ {
+ bl.List = append(bl.List, r.setExitedAt(i))
+ }
+ }
+ bl.List = append(bl.List, &syntax.ReturnStmt{Results: r.useVar(r.false)})
+ setPos(bl, x.Pos())
+ return bl
+}
+
+// perLoopStep is part of the encoding of loop-spanning control flow
+// for function range iterators. Each multiple of two encodes a "return false"
+// passing control to an enclosing iterator; a terminal value of 1 encodes
+// "return true" (i.e., local continue) from the body function, and a terminal
+// value of 0 encodes executing the remainder of the body function.
+const perLoopStep = 2
+
+// editBranch returns the replacement for the branch statement x,
+// or x itself if it should be left alone.
+// See the package doc comment above for more context.
+func (r *rewriter) editBranch(x *syntax.BranchStmt) syntax.Stmt {
+ if x.Tok == syntax.Fallthrough {
+ // Fallthrough is unaffected by the rewrite.
+ return x
+ }
+
+ // Find target of break/continue/goto in r.forStack.
+ // (The target may not be in r.forStack at all.)
+ targ := x.Target
+ i := len(r.forStack) - 1
+ if x.Label == nil && r.forStack[i].nfor != targ {
+ // Unlabeled break or continue that's not nfor must be inside nfor. Leave alone.
+ return x
+ }
+ for i >= 0 && r.forStack[i].nfor != targ {
+ i--
+ }
+ // exitFrom is the index of the loop interior to the target of the control flow,
+ // if such a loop exists (it does not if i == len(r.forStack) - 1)
+ exitFrom := i + 1
+
+ // Compute the value to assign to #next and the specific return to use.
+ var next int
+ var ret *syntax.ReturnStmt
+ if x.Tok == syntax.Goto || i < 0 {
+ // goto Label
+ // or break/continue of labeled non-range-over-func loop.
+ // We may be able to leave it alone, or we may have to break
+ // out of one or more nested loops and then use #next to signal
+ // to complete the break/continue/goto.
+ // Figure out which range-over-func loop contains the label.
+ r.computeBranchNext()
+ nfor := r.forStack[len(r.forStack)-1].nfor
+ label := x.Label.Value
+ targ := r.labelLoop[label]
+ if nfor == targ {
+ // Label is in the innermost range-over-func loop; use it directly.
+ return x
+ }
+
+ // Set #next to the code meaning break/continue/goto label.
+ next = r.branchNext[branch{x.Tok, label}]
+
+ // Break out of nested loops up to targ.
+ i := len(r.forStack) - 1
+ for i >= 0 && r.forStack[i].nfor != targ {
+ i--
+ }
+ exitFrom = i + 1
+
+ // Mark loop we exit to get to targ to check for that branch.
+ // When i==-1 that's the outermost func body
+ top := r.forStack[i+1]
+ top.checkBranch = append(top.checkBranch, branch{x.Tok, label})
+
+ // Mark loops along the way to check for a plain return, so they break.
+ for j := i + 2; j < len(r.forStack); j++ {
+ r.forStack[j].checkRet = true
+ }
+
+ // In the innermost loop, use a plain "return false".
+ ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+ } else {
+ // break/continue of labeled range-over-func loop.
+ depth := len(r.forStack) - 1 - i
+
+ // For continue of innermost loop, use "return true".
+ // Otherwise we are breaking the innermost loop, so "return false".
+
+ if depth == 0 && x.Tok == syntax.Continue {
+ ret = &syntax.ReturnStmt{Results: r.useVar(r.true)}
+ setPos(ret, x.Pos())
+ return ret
+ }
+ ret = &syntax.ReturnStmt{Results: r.useVar(r.false)}
+
+ // If this is a simple break, mark this loop as exited and return false.
+ // No adjustments to #next.
+ if depth == 0 {
+ var stmts []syntax.Stmt
+ if r.checkFuncMisuse() {
+ stmts = []syntax.Stmt{r.setExited(), ret}
+ } else {
+ stmts = []syntax.Stmt{ret}
+ }
+ bl := &syntax.BlockStmt{
+ List: stmts,
+ }
+ setPos(bl, x.Pos())
+ return bl
+ }
+
+ // The loop inside the one we are break/continue-ing
+ // needs to make that happen when we break out of it.
+ if x.Tok == syntax.Continue {
+ r.forStack[exitFrom].checkContinue = true
+ } else {
+ exitFrom = i
+ r.forStack[exitFrom].checkBreak = true
+ }
+
+ // The loops along the way just need to break.
+ for j := exitFrom + 1; j < len(r.forStack); j++ {
+ r.forStack[j].checkBreak = true
+ }
+
+ // Set next to break the appropriate number of times;
+ // the final time may be a continue, not a break.
+ next = perLoopStep * depth
+ if x.Tok == syntax.Continue {
+ next--
+ }
+ }
+
+ // Assign #next = next and do the return.
+ as := &syntax.AssignStmt{Lhs: r.next(), Rhs: r.intConst(next)}
+ bl := &syntax.BlockStmt{
+ List: []syntax.Stmt{as},
+ }
+
+ if r.checkFuncMisuse() {
+ // Set #exitK for this loop and those exited by the control flow.
+ for i := exitFrom; i < len(r.forStack); i++ {
+ bl.List = append(bl.List, r.setExitedAt(i))
+ }
+ }
+
+ bl.List = append(bl.List, ret)
+ setPos(bl, x.Pos())
+ return bl
+}
+
+// computeBranchNext computes the branchNext numbering
+// and determines which labels end up inside which range-over-func loop bodies.
+func (r *rewriter) computeBranchNext() {
+ if r.labelLoop != nil {
+ return
+ }
+
+ r.labelLoop = make(map[string]*syntax.ForStmt)
+ r.branchNext = make(map[branch]int)
+
+ var labels []string
+ var stack []syntax.Node
+ var forStack []*syntax.ForStmt
+ forStack = append(forStack, nil)
+ syntax.Inspect(r.body, func(n syntax.Node) bool {
+ if n != nil {
+ stack = append(stack, n)
+ if nfor, ok := forRangeFunc(n); ok {
+ forStack = append(forStack, nfor)
+ }
+ if n, ok := n.(*syntax.LabeledStmt); ok {
+ l := n.Label.Value
+ labels = append(labels, l)
+ f := forStack[len(forStack)-1]
+ r.labelLoop[l] = f
+ }
+ } else {
+ n := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+ if n == forStack[len(forStack)-1] {
+ forStack = forStack[:len(forStack)-1]
+ }
+ }
+ return true
+ })
+
+ // Assign numbers to all the labels we observed.
+ used := -2
+ for _, l := range labels {
+ used -= 3
+ r.branchNext[branch{syntax.Break, l}] = used
+ r.branchNext[branch{syntax.Continue, l}] = used + 1
+ r.branchNext[branch{syntax.Goto, l}] = used + 2
+ }
+}
+
+// endLoop finishes the conversion of a range-over-func loop.
+// We have inspected and rewritten the body of the loop and can now
+// construct the body function and rewrite the for loop into a call
+// bracketed by any declarations and checks it requires.
+func (r *rewriter) endLoop(loop *forLoop) {
+ // Pick apart for range X { ... }
+ nfor := loop.nfor
+ start, end := nfor.Pos(), nfor.Body.Rbrace // start, end position of for loop
+ rclause := nfor.Init.(*syntax.RangeClause)
+ rfunc := types2.CoreType(rclause.X.GetTypeInfo().Type).(*types2.Signature) // type of X - func(func(...)bool)
+ if rfunc.Params().Len() != 1 {
+ base.Fatalf("invalid typecheck of range func")
+ }
+ ftyp := types2.CoreType(rfunc.Params().At(0).Type()).(*types2.Signature) // func(...) bool
+ if ftyp.Results().Len() != 1 {
+ base.Fatalf("invalid typecheck of range func")
+ }
+
+ // Build X(bodyFunc)
+ call := &syntax.ExprStmt{
+ X: &syntax.CallExpr{
+ Fun: rclause.X,
+ ArgList: []syntax.Expr{
+ r.bodyFunc(nfor.Body.List, syntax.UnpackListExpr(rclause.Lhs), rclause.Def, ftyp, start, end),
+ },
+ },
+ }
+ setPos(call, start)
+
+ // Build checks based on #next after X(bodyFunc)
+ checks := r.checks(loop, end)
+
+ // Rewrite for vars := range X { ... } to
+ //
+ // {
+ // r.declStmt
+ // call
+ // checks
+ // }
+ //
+ // The r.declStmt can be added to by this loop or any inner loop
+ // during the creation of r.bodyFunc; it is only emitted in the outermost
+ // converted range loop.
+ block := &syntax.BlockStmt{Rbrace: end}
+ setPos(block, start)
+ if len(r.forStack) == 1 && r.declStmt != nil {
+ setPos(r.declStmt, start)
+ block.List = append(block.List, r.declStmt)
+ }
+
+ // declare the exitFlag here so it has proper scope and zeroing
+ if r.checkFuncMisuse() {
+ exitFlagDecl := &syntax.DeclStmt{DeclList: []syntax.Decl{loop.exitFlagDecl}}
+ block.List = append(block.List, exitFlagDecl)
+ }
+
+ // iteratorFunc(bodyFunc)
+ block.List = append(block.List, call)
+
+ if r.checkFuncMisuse() {
+ // iteratorFunc has exited, mark the exit flag for the body
+ block.List = append(block.List, r.setExited())
+ }
+ block.List = append(block.List, checks...)
+
+ if len(r.forStack) == 1 { // ending an outermost loop
+ r.declStmt = nil
+ r.nextVar = nil
+ r.retVars = nil
+ r.defers = nil
+ }
+
+ r.rewritten[nfor] = block
+}
+
+func (r *rewriter) setExited() *syntax.AssignStmt {
+ return r.setExitedAt(len(r.forStack) - 1)
+}
+
+func (r *rewriter) setExitedAt(index int) *syntax.AssignStmt {
+ loop := r.forStack[index]
+ return &syntax.AssignStmt{
+ Lhs: r.useVar(loop.exitFlag),
+ Rhs: r.useVar(r.true),
+ }
+}
+
+// bodyFunc converts the loop body (control flow has already been updated)
+// to a func literal that can be passed to the range function.
+//
+// vars is the range variables from the range statement.
+// def indicates whether this is a := range statement.
+// ftyp is the type of the function we are creating
+// start and end are the syntax positions to use for new nodes
+// that should be at the start or end of the loop.
+func (r *rewriter) bodyFunc(body []syntax.Stmt, lhs []syntax.Expr, def bool, ftyp *types2.Signature, start, end syntax.Pos) *syntax.FuncLit {
+ // Starting X(bodyFunc); build up bodyFunc first.
+ var params, results []*types2.Var
+ results = append(results, types2.NewVar(start, nil, "", r.bool.Type()))
+ bodyFunc := &syntax.FuncLit{
+ // Note: Type is ignored but needs to be non-nil to avoid panic in syntax.Inspect.
+ Type: &syntax.FuncType{},
+ Body: &syntax.BlockStmt{
+ List: []syntax.Stmt{},
+ Rbrace: end,
+ },
+ }
+ setPos(bodyFunc, start)
+
+ for i := 0; i < ftyp.Params().Len(); i++ {
+ typ := ftyp.Params().At(i).Type()
+ var paramVar *types2.Var
+ if i < len(lhs) && def {
+ // Reuse range variable as parameter.
+ x := lhs[i]
+ paramVar = r.info.Defs[x.(*syntax.Name)].(*types2.Var)
+ } else {
+ // Declare new parameter and assign it to range expression.
+ paramVar = types2.NewVar(start, r.pkg, fmt.Sprintf("#p%d", 1+i), typ)
+ if i < len(lhs) {
+ x := lhs[i]
+ as := &syntax.AssignStmt{Lhs: x, Rhs: r.useVar(paramVar)}
+ as.SetPos(x.Pos())
+ setPos(as.Rhs, x.Pos())
+ bodyFunc.Body.List = append(bodyFunc.Body.List, as)
+ }
+ }
+ params = append(params, paramVar)
+ }
+
+ tv := syntax.TypeAndValue{
+ Type: types2.NewSignatureType(nil, nil, nil,
+ types2.NewTuple(params...),
+ types2.NewTuple(results...),
+ false),
+ }
+ tv.SetIsValue()
+ bodyFunc.SetTypeInfo(tv)
+
+ loop := r.forStack[len(r.forStack)-1]
+
+ if r.checkFuncMisuse() {
+ bodyFunc.Body.List = append(bodyFunc.Body.List, r.assertNotExited(start, loop))
+ }
+
+ // Original loop body (already rewritten by editStmt during inspect).
+ bodyFunc.Body.List = append(bodyFunc.Body.List, body...)
+
+ // return true to continue at end of loop body
+ ret := &syntax.ReturnStmt{Results: r.useVar(r.true)}
+ ret.SetPos(end)
+ bodyFunc.Body.List = append(bodyFunc.Body.List, ret)
+
+ return bodyFunc
+}
+
+// checks returns the post-call checks that need to be done for the given loop.
+func (r *rewriter) checks(loop *forLoop, pos syntax.Pos) []syntax.Stmt {
+ var list []syntax.Stmt
+ if len(loop.checkBranch) > 0 {
+ did := make(map[branch]bool)
+ for _, br := range loop.checkBranch {
+ if did[br] {
+ continue
+ }
+ did[br] = true
+ doBranch := &syntax.BranchStmt{Tok: br.tok, Label: &syntax.Name{Value: br.label}}
+ list = append(list, r.ifNext(syntax.Eql, r.branchNext[br], doBranch))
+ }
+ }
+ if len(r.forStack) == 1 {
+ if loop.checkRetArgs {
+ list = append(list, r.ifNext(syntax.Eql, -2, retStmt(r.useList(r.retVars))))
+ }
+ if loop.checkRet {
+ list = append(list, r.ifNext(syntax.Eql, -1, retStmt(nil)))
+ }
+ } else {
+ if loop.checkRetArgs || loop.checkRet {
+ // Note: next < 0 also handles gotos handled by outer loops.
+ // We set checkRet in that case to trigger this check.
+ list = append(list, r.ifNext(syntax.Lss, 0, retStmt(r.useVar(r.false))))
+ }
+ if loop.checkBreak {
+ list = append(list, r.ifNext(syntax.Geq, perLoopStep, retStmt(r.useVar(r.false))))
+ }
+ if loop.checkContinue {
+ list = append(list, r.ifNext(syntax.Eql, perLoopStep-1, retStmt(r.useVar(r.true))))
+ }
+ }
+
+ for _, j := range list {
+ setPos(j, pos)
+ }
+ return list
+}
+
+// retStmt returns a return statement returning the given return values.
+func retStmt(results syntax.Expr) *syntax.ReturnStmt {
+ return &syntax.ReturnStmt{Results: results}
+}
+
+// ifNext returns the statement:
+//
+// if #next op c { adjust; then }
+//
+// When op is >=, adjust is #next -= c.
+// When op is == and c is not -1 or -2, adjust is #next = 0.
+// Otherwise adjust is omitted.
+func (r *rewriter) ifNext(op syntax.Operator, c int, then syntax.Stmt) syntax.Stmt {
+ nif := &syntax.IfStmt{
+ Cond: &syntax.Operation{Op: op, X: r.next(), Y: r.intConst(c)},
+ Then: &syntax.BlockStmt{
+ List: []syntax.Stmt{then},
+ },
+ }
+ tv := syntax.TypeAndValue{Type: r.bool.Type()}
+ tv.SetIsValue()
+ nif.Cond.SetTypeInfo(tv)
+
+ if op == syntax.Geq {
+ sub := &syntax.AssignStmt{
+ Op: syntax.Sub,
+ Lhs: r.next(),
+ Rhs: r.intConst(c),
+ }
+ nif.Then.List = []syntax.Stmt{sub, then}
+ }
+ if op == syntax.Eql && c != -1 && c != -2 {
+ clr := &syntax.AssignStmt{
+ Lhs: r.next(),
+ Rhs: r.intConst(0),
+ }
+ nif.Then.List = []syntax.Stmt{clr, then}
+ }
+
+ return nif
+}
+
+// setValueType marks x as a value with type typ.
+func setValueType(x syntax.Expr, typ syntax.Type) {
+ tv := syntax.TypeAndValue{Type: typ}
+ tv.SetIsValue()
+ x.SetTypeInfo(tv)
+}
+
+// assertNotExited returns the statement:
+//
+// if #exitK { runtime.panicrangeexit() }
+//
+// where #exitK is the exit guard for loop.
+func (r *rewriter) assertNotExited(start syntax.Pos, loop *forLoop) syntax.Stmt {
+ callPanicExpr := &syntax.CallExpr{
+ Fun: runtimeSym(r.info, "panicrangeexit"),
+ }
+ setValueType(callPanicExpr, nil) // no result type
+
+ callPanic := &syntax.ExprStmt{X: callPanicExpr}
+
+ nif := &syntax.IfStmt{
+ Cond: r.useVar(loop.exitFlag),
+ Then: &syntax.BlockStmt{
+ List: []syntax.Stmt{callPanic},
+ },
+ }
+ setPos(nif, start)
+ return nif
+}
+
+// next returns a reference to the #next variable.
+func (r *rewriter) next() *syntax.Name {
+ if r.nextVar == nil {
+ r.nextVar = r.declVar("#next", r.int.Type(), nil)
+ }
+ return r.useVar(r.nextVar)
+}
+
+// forRangeFunc checks whether n is a range-over-func.
+// If so, it returns n.(*syntax.ForStmt), true.
+// Otherwise it returns nil, false.
+func forRangeFunc(n syntax.Node) (*syntax.ForStmt, bool) {
+ nfor, ok := n.(*syntax.ForStmt)
+ if !ok {
+ return nil, false
+ }
+ nrange, ok := nfor.Init.(*syntax.RangeClause)
+ if !ok {
+ return nil, false
+ }
+ _, ok = types2.CoreType(nrange.X.GetTypeInfo().Type).(*types2.Signature)
+ if !ok {
+ return nil, false
+ }
+ return nfor, true
+}
+
+// intConst returns syntax for an integer literal with the given value.
+func (r *rewriter) intConst(c int) *syntax.BasicLit {
+ lit := &syntax.BasicLit{
+ Value: fmt.Sprint(c),
+ Kind: syntax.IntLit,
+ }
+ tv := syntax.TypeAndValue{Type: r.int.Type(), Value: constant.MakeInt64(int64(c))}
+ tv.SetIsValue()
+ lit.SetTypeInfo(tv)
+ return lit
+}
+
+// useVar returns syntax for a reference to decl, which should be its declaration.
+func (r *rewriter) useVar(obj types2.Object) *syntax.Name {
+ n := syntax.NewName(nopos, obj.Name())
+ tv := syntax.TypeAndValue{Type: obj.Type()}
+ tv.SetIsValue()
+ n.SetTypeInfo(tv)
+ r.info.Uses[n] = obj
+ return n
+}
+
+// useList is useVar for a list of decls.
+func (r *rewriter) useList(vars []types2.Object) syntax.Expr {
+ var new []syntax.Expr
+ for _, obj := range vars {
+ new = append(new, r.useVar(obj))
+ }
+ if len(new) == 1 {
+ return new[0]
+ }
+ return &syntax.ListExpr{ElemList: new}
+}
+
+// declVar declares a variable with a given name type and initializer value.
+func (r *rewriter) declVar(name string, typ types2.Type, init syntax.Expr) *types2.Var {
+ if r.declStmt == nil {
+ r.declStmt = &syntax.DeclStmt{}
+ }
+ stmt := r.declStmt
+ obj := types2.NewVar(stmt.Pos(), r.pkg, name, typ)
+ n := syntax.NewName(stmt.Pos(), name)
+ tv := syntax.TypeAndValue{Type: typ}
+ tv.SetIsValue()
+ n.SetTypeInfo(tv)
+ r.info.Defs[n] = obj
+ stmt.DeclList = append(stmt.DeclList, &syntax.VarDecl{
+ NameList: []*syntax.Name{n},
+ // Note: Type is ignored
+ Values: init,
+ })
+ return obj
+}
+
+// declType declares a type with the given name and type.
+// This is more like "type name = typ" than "type name typ".
+func declType(pos syntax.Pos, name string, typ types2.Type) *syntax.Name {
+ n := syntax.NewName(pos, name)
+ n.SetTypeInfo(syntax.TypeAndValue{Type: typ})
+ return n
+}
+
+// runtimePkg is a fake runtime package that contains what we need to refer to in package runtime.
+var runtimePkg = func() *types2.Package {
+ var nopos syntax.Pos
+ pkg := types2.NewPackage("runtime", "runtime")
+ anyType := types2.Universe.Lookup("any").Type()
+
+ // func deferrangefunc() unsafe.Pointer
+ obj := types2.NewFunc(nopos, pkg, "deferrangefunc", types2.NewSignatureType(nil, nil, nil, nil, types2.NewTuple(types2.NewParam(nopos, pkg, "extra", anyType)), false))
+ pkg.Scope().Insert(obj)
+
+ // func panicrangeexit()
+ obj = types2.NewFunc(nopos, pkg, "panicrangeexit", types2.NewSignatureType(nil, nil, nil, nil, nil, false))
+ pkg.Scope().Insert(obj)
+
+ return pkg
+}()
+
+// runtimeSym returns a reference to a symbol in the fake runtime package.
+func runtimeSym(info *types2.Info, name string) *syntax.Name {
+ obj := runtimePkg.Scope().Lookup(name)
+ n := syntax.NewName(nopos, "runtime."+name)
+ tv := syntax.TypeAndValue{Type: obj.Type()}
+ tv.SetIsValue()
+ tv.SetIsRuntimeHelper()
+ n.SetTypeInfo(tv)
+ info.Uses[n] = obj
+ return n
+}
+
+// setPos walks the top structure of x that has no position assigned
+// and assigns it all to have position pos.
+// When setPos encounters a syntax node with a position assigned,
+// setPos does not look inside that node.
+// setPos only needs to handle syntax we create in this package;
+// all other syntax should have positions assigned already.
+func setPos(x syntax.Node, pos syntax.Pos) {
+ if x == nil {
+ return
+ }
+ syntax.Inspect(x, func(n syntax.Node) bool {
+ if n == nil || n.Pos() != nopos {
+ return false
+ }
+ n.SetPos(pos)
+ switch n := n.(type) {
+ case *syntax.BlockStmt:
+ if n.Rbrace == nopos {
+ n.Rbrace = pos
+ }
+ }
+ return true
+ })
+}
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
new file mode 100644
index 0000000..a0f5522
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -0,0 +1,667 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/compare"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// AlgType returns the fixed-width AMEMxx variants instead of the general
+// AMEM kind when possible.
+func AlgType(t *types.Type) types.AlgKind {
+ a, _ := types.AlgType(t)
+ if a == types.AMEM {
+ if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
+ // For example, we can't treat [2]int16 as an int32 if int32s require
+ // 4-byte alignment. See issue 46283.
+ return a
+ }
+ switch t.Size() {
+ case 0:
+ return types.AMEM0
+ case 1:
+ return types.AMEM8
+ case 2:
+ return types.AMEM16
+ case 4:
+ return types.AMEM32
+ case 8:
+ return types.AMEM64
+ case 16:
+ return types.AMEM128
+ }
+ }
+
+ return a
+}
+
+// genhash returns a symbol which is the closure used to compute
+// the hash of a value of type t.
+// Note: the generated function must match runtime.typehash exactly.
+func genhash(t *types.Type) *obj.LSym {
+ switch AlgType(t) {
+ default:
+ // genhash is only called for types that have equality
+ base.Fatalf("genhash %v", t)
+ case types.AMEM0:
+ return sysClosure("memhash0")
+ case types.AMEM8:
+ return sysClosure("memhash8")
+ case types.AMEM16:
+ return sysClosure("memhash16")
+ case types.AMEM32:
+ return sysClosure("memhash32")
+ case types.AMEM64:
+ return sysClosure("memhash64")
+ case types.AMEM128:
+ return sysClosure("memhash128")
+ case types.ASTRING:
+ return sysClosure("strhash")
+ case types.AINTER:
+ return sysClosure("interhash")
+ case types.ANILINTER:
+ return sysClosure("nilinterhash")
+ case types.AFLOAT32:
+ return sysClosure("f32hash")
+ case types.AFLOAT64:
+ return sysClosure("f64hash")
+ case types.ACPLX64:
+ return sysClosure("c64hash")
+ case types.ACPLX128:
+ return sysClosure("c128hash")
+ case types.AMEM:
+ // For other sizes of plain memory, we build a closure
+ // that calls memhash_varlen. The size of the memory is
+ // encoded in the first slot of the closure.
+ closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Size()))
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ if memhashvarlen == nil {
+ memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
+ }
+ ot := 0
+ ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Size())) // size encoded in closure
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case types.ASPECIAL:
+ break
+ }
+
+ closure := TypeLinksymPrefix(".hashfunc", t)
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+
+ // Generate hash functions for subtypes.
+ // There are cases where we might not use these hashes,
+ // but in that case they will get dead-code eliminated.
+ // (And the closure generated by genhash will also get
+ // dead-code eliminated, as we call the subtype hashers
+ // directly.)
+ switch t.Kind() {
+ case types.TARRAY:
+ genhash(t.Elem())
+ case types.TSTRUCT:
+ for _, f := range t.Fields() {
+ genhash(f.Type)
+ }
+ }
+
+ if base.Flag.LowerR != 0 {
+ fmt.Printf("genhash %v %v\n", closure, t)
+ }
+
+ fn := hashFunc(t)
+
+ // Build closure. It doesn't close over any variables, so
+ // it contains just the function pointer.
+ objw.SymPtr(closure, 0, fn.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+
+ return closure
+}
+
+func hashFunc(t *types.Type) *ir.Func {
+ sym := TypeSymPrefix(".hash", t)
+ if sym.Def != nil {
+ return sym.Def.(*ir.Name).Func
+ }
+
+ pos := base.AutogeneratedPos // less confusing than end of input
+ base.Pos = pos
+
+ // func sym(p *T, h uintptr) uintptr
+ fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
+ []*types.Field{
+ types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
+ types.NewField(pos, typecheck.Lookup("h"), types.Types[types.TUINTPTR]),
+ },
+ []*types.Field{
+ types.NewField(pos, nil, types.Types[types.TUINTPTR]),
+ },
+ ))
+ sym.Def = fn.Nname
+ fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
+
+ typecheck.DeclFunc(fn)
+ np := fn.Dcl[0]
+ nh := fn.Dcl[1]
+
+ switch t.Kind() {
+ case types.TARRAY:
+ // An array of pure memory would be handled by the
+ // standard algorithm, so the element type must not be
+ // pure memory.
+ hashel := hashfor(t.Elem())
+
+ // for i := 0; i < nelem; i++
+ ni := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(base.Pos, 0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(base.Pos, t.NumElem()))
+ post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(base.Pos, 1)))
+ loop := ir.NewForStmt(base.Pos, nil, cond, post, nil, false)
+ loop.PtrInit().Append(init)
+
+ // h = hashel(&p[i], h)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+
+ nx := ir.NewIndexExpr(base.Pos, np, ni)
+ nx.SetBounded(true)
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+
+ fn.Body.Append(loop)
+
+ case types.TSTRUCT:
+ // Walk the struct using memhash for runs of AMEM
+ // and calling specific hash functions for the others.
+ for i, fields := 0, t.Fields(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ // Hash non-memory fields with appropriate hash function.
+ if !compare.IsRegularMemory(f.Type) {
+ hashel := hashfor(f.Type)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+ na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+ i++
+ continue
+ }
+
+ // Otherwise, hash a maximal length run of raw memory.
+ size, next := compare.Memrun(t, i)
+
+ // h = hashel(&p.first, size, h)
+ hashel := hashmem(f.Type)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+ na := typecheck.NodAddr(typecheck.DotField(base.Pos, np, i))
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ call.Args.Append(ir.NewInt(base.Pos, size))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+
+ i = next
+ }
+ }
+
+ r := ir.NewReturnStmt(base.Pos, nil)
+ r.Results.Append(nh)
+ fn.Body.Append(r)
+
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("genhash body", fn.Body)
+ }
+
+ typecheck.FinishFuncBody()
+
+ fn.SetDupok(true)
+
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ fn.SetNilCheckDisabled(true)
+
+ return fn
+}
+
+func runtimeHashFor(name string, t *types.Type) *ir.Name {
+ return typecheck.LookupRuntime(name, t)
+}
+
+// hashfor returns the function to compute the hash of a value of type t.
+func hashfor(t *types.Type) *ir.Name {
+ switch a, _ := types.AlgType(t); a {
+ case types.AMEM:
+ base.Fatalf("hashfor with AMEM type")
+ case types.AINTER:
+ return runtimeHashFor("interhash", t)
+ case types.ANILINTER:
+ return runtimeHashFor("nilinterhash", t)
+ case types.ASTRING:
+ return runtimeHashFor("strhash", t)
+ case types.AFLOAT32:
+ return runtimeHashFor("f32hash", t)
+ case types.AFLOAT64:
+ return runtimeHashFor("f64hash", t)
+ case types.ACPLX64:
+ return runtimeHashFor("c64hash", t)
+ case types.ACPLX128:
+ return runtimeHashFor("c128hash", t)
+ }
+
+ fn := hashFunc(t)
+ return fn.Nname
+}
+
+// sysClosure returns a closure which will call the
+// given runtime function (with no closed-over variables).
+func sysClosure(name string) *obj.LSym {
+ s := typecheck.LookupRuntimeVar(name + "·f")
+ if len(s.P) == 0 {
+ f := typecheck.LookupRuntimeFunc(name)
+ objw.SymPtr(s, 0, f, 0)
+ objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ }
+ return s
+}
+
+// geneq returns a symbol which is the closure used to compute
+// equality for two objects of type t.
+func geneq(t *types.Type) *obj.LSym {
+ switch AlgType(t) {
+ case types.ANOEQ:
+ // The runtime will panic if it tries to compare
+ // a type with a nil equality function.
+ return nil
+ case types.AMEM0:
+ return sysClosure("memequal0")
+ case types.AMEM8:
+ return sysClosure("memequal8")
+ case types.AMEM16:
+ return sysClosure("memequal16")
+ case types.AMEM32:
+ return sysClosure("memequal32")
+ case types.AMEM64:
+ return sysClosure("memequal64")
+ case types.AMEM128:
+ return sysClosure("memequal128")
+ case types.ASTRING:
+ return sysClosure("strequal")
+ case types.AINTER:
+ return sysClosure("interequal")
+ case types.ANILINTER:
+ return sysClosure("nilinterequal")
+ case types.AFLOAT32:
+ return sysClosure("f32equal")
+ case types.AFLOAT64:
+ return sysClosure("f64equal")
+ case types.ACPLX64:
+ return sysClosure("c64equal")
+ case types.ACPLX128:
+ return sysClosure("c128equal")
+ case types.AMEM:
+ // make equality closure. The size of the type
+ // is encoded in the closure.
+ closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Size()))
+ if len(closure.P) != 0 {
+ return closure
+ }
+ if memequalvarlen == nil {
+ memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen")
+ }
+ ot := 0
+ ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Size()))
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case types.ASPECIAL:
+ break
+ }
+
+ closure := TypeLinksymPrefix(".eqfunc", t)
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+
+ if base.Flag.LowerR != 0 {
+ fmt.Printf("geneq %v\n", t)
+ }
+
+ fn := eqFunc(t)
+
+ // Generate a closure which points at the function we just generated.
+ objw.SymPtr(closure, 0, fn.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ return closure
+}
+
+func eqFunc(t *types.Type) *ir.Func {
+ // Autogenerate code for equality of structs and arrays.
+ sym := TypeSymPrefix(".eq", t)
+ if sym.Def != nil {
+ return sym.Def.(*ir.Name).Func
+ }
+
+ pos := base.AutogeneratedPos // less confusing than end of input
+ base.Pos = pos
+
+ // func sym(p, q *T) bool
+ fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
+ []*types.Field{
+ types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
+ types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)),
+ },
+ []*types.Field{
+ types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
+ },
+ ))
+ sym.Def = fn.Nname
+ fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
+
+ typecheck.DeclFunc(fn)
+ np := fn.Dcl[0]
+ nq := fn.Dcl[1]
+ nr := fn.Dcl[2]
+
+ // Label to jump to if an equality test fails.
+ neq := typecheck.AutoLabel(".neq")
+
+ // We reach here only for types that have equality but
+ // cannot be handled by the standard algorithms,
+ // so t must be either an array or a struct.
+ switch t.Kind() {
+ default:
+ base.Fatalf("geneq %v", t)
+
+ case types.TARRAY:
+ nelem := t.NumElem()
+
+ // checkAll generates code to check the equality of all array elements.
+ // If unroll is greater than nelem, checkAll generates:
+ //
+ // if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
+ // } else {
+ // goto neq
+ // }
+ //
+ // And so on.
+ //
+ // Otherwise it generates:
+ //
+ // iterateTo := nelem/unroll*unroll
+ // for i := 0; i < iterateTo; i += unroll {
+ // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
+ // } else {
+ // goto neq
+ // }
+ // }
+ // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
+ // } else {
+ // goto neq
+ // }
+ //
+ checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
+ // checkIdx generates a node to check for equality at index i.
+ checkIdx := func(i ir.Node) ir.Node {
+ // pi := p[i]
+ pi := ir.NewIndexExpr(base.Pos, np, i)
+ pi.SetBounded(true)
+ pi.SetType(t.Elem())
+ // qi := q[i]
+ qi := ir.NewIndexExpr(base.Pos, nq, i)
+ qi.SetBounded(true)
+ qi.SetType(t.Elem())
+ return eq(pi, qi)
+ }
+
+ iterations := nelem / unroll
+ iterateTo := iterations * unroll
+ // If a loop is iterated only once, there shouldn't be any loop at all.
+ if iterations == 1 {
+ iterateTo = 0
+ }
+
+ if iterateTo > 0 {
+ // Generate an unrolled for loop.
+ // for i := 0; i < nelem/unroll*unroll; i += unroll
+ i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo))
+ loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false)
+ loop.PtrInit().Append(init)
+
+ // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
+ // } else {
+ // goto neq
+ // }
+ for j := int64(0); j < unroll; j++ {
+ // if check {} else { goto neq }
+ nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ loop.Body.Append(nif)
+ post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1)))
+ loop.Body.Append(post)
+ }
+
+ fn.Body.Append(loop)
+
+ if nelem == iterateTo {
+ if last {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
+ }
+ return
+ }
+ }
+
+ // Generate remaining checks, if nelem is not a multiple of unroll.
+ if last {
+ // Do last comparison in a different manner.
+ nelem--
+ }
+ // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
+ // } else {
+ // goto neq
+ // }
+ for j := iterateTo; j < nelem; j++ {
+ // if check {} else { goto neq }
+ nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(base.Pos, j)), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(nif)
+ }
+ if last {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(base.Pos, nelem))))
+ }
+ }
+
+ switch t.Elem().Kind() {
+ case types.TSTRING:
+ // Do two loops. First, check that all the lengths match (cheap).
+ // Second, check that all the contents match (expensive).
+ checkAll(3, false, func(pi, qi ir.Node) ir.Node {
+ // Compare lengths.
+ eqlen, _ := compare.EqString(pi, qi)
+ return eqlen
+ })
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+ // Compare contents.
+ _, eqmem := compare.EqString(pi, qi)
+ return eqmem
+ })
+ case types.TFLOAT32, types.TFLOAT64:
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
+ // p[i] == q[i]
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+ })
+ case types.TSTRUCT:
+ isCall := func(n ir.Node) bool {
+ return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
+ }
+ var expr ir.Node
+ var hasCallExprs bool
+ allCallExprs := true
+ and := func(cond ir.Node) {
+ if expr == nil {
+ expr = cond
+ } else {
+ expr = ir.NewLogicalExpr(base.Pos, ir.OANDAND, expr, cond)
+ }
+ }
+
+ var tmpPos src.XPos
+ pi := ir.NewIndexExpr(tmpPos, np, ir.NewInt(tmpPos, 0))
+ pi.SetBounded(true)
+ pi.SetType(t.Elem())
+ qi := ir.NewIndexExpr(tmpPos, nq, ir.NewInt(tmpPos, 0))
+ qi.SetBounded(true)
+ qi.SetType(t.Elem())
+ flatConds, canPanic := compare.EqStruct(t.Elem(), pi, qi)
+ for _, c := range flatConds {
+ if isCall(c) {
+ hasCallExprs = true
+ } else {
+ allCallExprs = false
+ }
+ }
+ if !hasCallExprs || allCallExprs || canPanic {
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+ // p[i] == q[i]
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+ })
+ } else {
+ checkAll(4, false, func(pi, qi ir.Node) ir.Node {
+ expr = nil
+ flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
+ if len(flatConds) == 0 {
+ return ir.NewBool(base.Pos, true)
+ }
+ for _, c := range flatConds {
+ if !isCall(c) {
+ and(c)
+ }
+ }
+ return expr
+ })
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
+ expr = nil
+ flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
+ for _, c := range flatConds {
+ if isCall(c) {
+ and(c)
+ }
+ }
+ return expr
+ })
+ }
+ default:
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+ // p[i] == q[i]
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+ })
+ }
+
+ case types.TSTRUCT:
+ flatConds, _ := compare.EqStruct(t, np, nq)
+ if len(flatConds) == 0 {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
+ } else {
+ for _, c := range flatConds[:len(flatConds)-1] {
+ // if cond {} else { goto neq }
+ n := ir.NewIfStmt(base.Pos, c, nil, nil)
+ n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(n)
+ }
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
+ }
+ }
+
+ // ret:
+ // return
+ ret := typecheck.AutoLabel(".ret")
+ fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
+ fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+
+ // neq:
+ // r = false
+ // return (or goto ret)
+ fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, false)))
+ if compare.EqCanPanic(t) || anyCall(fn) {
+ // Epilogue is large, so share it with the equal case.
+ fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
+ } else {
+ // Epilogue is small, so don't bother sharing.
+ fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+ }
+ // TODO(khr): the epilogue size detection condition above isn't perfect.
+ // We should really do a generic CL that shares epilogues across
+ // the board. See #24936.
+
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("geneq body", fn.Body)
+ }
+
+ typecheck.FinishFuncBody()
+
+ fn.SetDupok(true)
+
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ // Disable checknils while compiling this code.
+ // We are comparing a struct or an array,
+ // neither of which can be nil, and our comparisons
+ // are shallow.
+ fn.SetNilCheckDisabled(true)
+ return fn
+}
+
+// EqFor returns ONAME node represents type t's equal function, and a boolean
+// to indicates whether a length needs to be passed when calling the function.
+func EqFor(t *types.Type) (ir.Node, bool) {
+ switch a, _ := types.AlgType(t); a {
+ case types.AMEM:
+ return typecheck.LookupRuntime("memequal", t, t), true
+ case types.ASPECIAL:
+ fn := eqFunc(t)
+ return fn.Nname, false
+ }
+ base.Fatalf("EqFor %v", t)
+ return nil, false
+}
+
+func anyCall(fn *ir.Func) bool {
+ return ir.Any(fn, func(n ir.Node) bool {
+ // TODO(rsc): No methods?
+ op := n.Op()
+ return op == ir.OCALL || op == ir.OCALLFUNC
+ })
+}
+
+func hashmem(t *types.Type) ir.Node {
+ return typecheck.LookupRuntime("memhash", t)
+}
diff --git a/src/cmd/compile/internal/reflectdata/alg_test.go b/src/cmd/compile/internal/reflectdata/alg_test.go
new file mode 100644
index 0000000..38fb974
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/alg_test.go
@@ -0,0 +1,147 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata_test
+
+import (
+ "testing"
+)
+
+func BenchmarkEqArrayOfStrings5(b *testing.B) {
+ var a [5]string
+ var c [5]string
+
+ for i := 0; i < 5; i++ {
+ a[i] = "aaaa"
+ c[i] = "cccc"
+ }
+
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfStrings64(b *testing.B) {
+ var a [64]string
+ var c [64]string
+
+ for i := 0; i < 64; i++ {
+ a[i] = "aaaa"
+ c[i] = "cccc"
+ }
+
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfStrings1024(b *testing.B) {
+ var a [1024]string
+ var c [1024]string
+
+ for i := 0; i < 1024; i++ {
+ a[i] = "aaaa"
+ c[i] = "cccc"
+ }
+
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfFloats5(b *testing.B) {
+ var a [5]float32
+ var c [5]float32
+
+ for i := 0; i < b.N; i++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfFloats64(b *testing.B) {
+ var a [64]float32
+ var c [64]float32
+
+ for i := 0; i < b.N; i++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfFloats1024(b *testing.B) {
+ var a [1024]float32
+ var c [1024]float32
+
+ for i := 0; i < b.N; i++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfStructsEq(b *testing.B) {
+ type T2 struct {
+ a string
+ b int
+ }
+ const size = 1024
+ var (
+ str1 = "foobar"
+
+ a [size]T2
+ c [size]T2
+ )
+
+ for i := 0; i < size; i++ {
+ a[i].a = str1
+ c[i].a = str1
+ }
+
+ b.ResetTimer()
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+func BenchmarkEqArrayOfStructsNotEq(b *testing.B) {
+ type T2 struct {
+ a string
+ b int
+ }
+ const size = 1024
+ var (
+ str1 = "foobar"
+ str2 = "foobarz"
+
+ a [size]T2
+ c [size]T2
+ )
+
+ for i := 0; i < size; i++ {
+ a[i].a = str1
+ c[i].a = str1
+ }
+ c[len(c)-1].a = str2
+
+ b.ResetTimer()
+ for j := 0; j < b.N; j++ {
+ _ = a == c
+ }
+}
+
+const size = 16
+
+type T1 struct {
+ a [size]byte
+}
+
+func BenchmarkEqStruct(b *testing.B) {
+ x, y := T1{}, T1{}
+ x.a = [size]byte{1, 2, 3, 4, 5, 6, 7, 8}
+ y.a = [size]byte{2, 3, 4, 5, 6, 7, 8, 9}
+
+ for i := 0; i < b.N; i++ {
+ f := x == y
+ if f {
+ println("hello")
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/reflectdata/helpers.go b/src/cmd/compile/internal/reflectdata/helpers.go
new file mode 100644
index 0000000..9ba62d6
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/helpers.go
@@ -0,0 +1,216 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func hasRType(n, rtype ir.Node, fieldName string) bool {
+ if rtype != nil {
+ return true
+ }
+
+ return false
+}
+
+// assertOp asserts that n is an op.
+func assertOp(n ir.Node, op ir.Op) {
+ base.AssertfAt(n.Op() == op, n.Pos(), "want %v, have %v", op, n)
+}
+
+// assertOp2 asserts that n is an op1 or op2.
+func assertOp2(n ir.Node, op1, op2 ir.Op) {
+ base.AssertfAt(n.Op() == op1 || n.Op() == op2, n.Pos(), "want %v or %v, have %v", op1, op2, n)
+}
+
+// kindRType asserts that typ has the given kind, and returns an
+// expression that yields the *runtime._type value representing typ.
+func kindRType(pos src.XPos, typ *types.Type, k types.Kind) ir.Node {
+ base.AssertfAt(typ.Kind() == k, pos, "want %v type, have %v", k, typ)
+ return TypePtrAt(pos, typ)
+}
+
+// mapRType asserts that typ is a map type, and returns an expression
+// that yields the *runtime._type value representing typ.
+func mapRType(pos src.XPos, typ *types.Type) ir.Node {
+ return kindRType(pos, typ, types.TMAP)
+}
+
+// chanRType asserts that typ is a map type, and returns an expression
+// that yields the *runtime._type value representing typ.
+func chanRType(pos src.XPos, typ *types.Type) ir.Node {
+ return kindRType(pos, typ, types.TCHAN)
+}
+
+// sliceElemRType asserts that typ is a slice type, and returns an
+// expression that yields the *runtime._type value representing typ's
+// element type.
+func sliceElemRType(pos src.XPos, typ *types.Type) ir.Node {
+ base.AssertfAt(typ.IsSlice(), pos, "want slice type, have %v", typ)
+ return TypePtrAt(pos, typ.Elem())
+}
+
+// concreteRType asserts that typ is not an interface type, and
+// returns an expression that yields the *runtime._type value
+// representing typ.
+func concreteRType(pos src.XPos, typ *types.Type) ir.Node {
+ base.AssertfAt(!typ.IsInterface(), pos, "want non-interface type, have %v", typ)
+ return TypePtrAt(pos, typ)
+}
+
+// AppendElemRType asserts that n is an "append" operation, and
+// returns an expression that yields the *runtime._type value
+// representing the result slice type's element type.
+func AppendElemRType(pos src.XPos, n *ir.CallExpr) ir.Node {
+ assertOp(n, ir.OAPPEND)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return sliceElemRType(pos, n.Type())
+}
+
+// CompareRType asserts that n is a comparison (== or !=) operation
+// between expressions of interface and non-interface type, and
+// returns an expression that yields the *runtime._type value
+// representing the non-interface type.
+func CompareRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
+ assertOp2(n, ir.OEQ, ir.ONE)
+ base.AssertfAt(n.X.Type().IsInterface() != n.Y.Type().IsInterface(), n.Pos(), "expect mixed interface and non-interface, have %L and %L", n.X, n.Y)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ typ := n.X.Type()
+ if typ.IsInterface() {
+ typ = n.Y.Type()
+ }
+ return concreteRType(pos, typ)
+}
+
+// ConvIfaceTypeWord asserts that n is conversion to interface type,
+// and returns an expression that yields the *runtime._type or
+// *runtime.itab value necessary for implementing the conversion.
+//
+// - *runtime._type for the destination type, for I2I conversions
+// - *runtime.itab, for T2I conversions
+// - *runtime._type for the source type, for T2E conversions
+func ConvIfaceTypeWord(pos src.XPos, n *ir.ConvExpr) ir.Node {
+ assertOp(n, ir.OCONVIFACE)
+ src, dst := n.X.Type(), n.Type()
+ base.AssertfAt(dst.IsInterface(), n.Pos(), "want interface type, have %L", n)
+ if hasRType(n, n.TypeWord, "TypeWord") {
+ return n.TypeWord
+ }
+ if dst.IsEmptyInterface() {
+ return concreteRType(pos, src) // direct eface construction
+ }
+ if !src.IsInterface() {
+ return ITabAddrAt(pos, src, dst) // direct iface construction
+ }
+ return TypePtrAt(pos, dst) // convI2I
+}
+
+// ConvIfaceSrcRType asserts that n is a conversion from
+// non-interface type to interface type, and
+// returns an expression that yields the *runtime._type for copying
+// the convertee value to the heap.
+func ConvIfaceSrcRType(pos src.XPos, n *ir.ConvExpr) ir.Node {
+ assertOp(n, ir.OCONVIFACE)
+ if hasRType(n, n.SrcRType, "SrcRType") {
+ return n.SrcRType
+ }
+ return concreteRType(pos, n.X.Type())
+}
+
+// CopyElemRType asserts that n is a "copy" operation, and returns an
+// expression that yields the *runtime._type value representing the
+// destination slice type's element type.
+func CopyElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
+ assertOp(n, ir.OCOPY)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return sliceElemRType(pos, n.X.Type())
+}
+
+// DeleteMapRType asserts that n is a "delete" operation, and returns
+// an expression that yields the *runtime._type value representing the
+// map type.
+func DeleteMapRType(pos src.XPos, n *ir.CallExpr) ir.Node {
+ assertOp(n, ir.ODELETE)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return mapRType(pos, n.Args[0].Type())
+}
+
+// IndexMapRType asserts that n is a map index operation, and returns
+// an expression that yields the *runtime._type value representing the
+// map type.
+func IndexMapRType(pos src.XPos, n *ir.IndexExpr) ir.Node {
+ assertOp(n, ir.OINDEXMAP)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return mapRType(pos, n.X.Type())
+}
+
+// MakeChanRType asserts that n is a "make" operation for a channel
+// type, and returns an expression that yields the *runtime._type
+// value representing that channel type.
+func MakeChanRType(pos src.XPos, n *ir.MakeExpr) ir.Node {
+ assertOp(n, ir.OMAKECHAN)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return chanRType(pos, n.Type())
+}
+
+// MakeMapRType asserts that n is a "make" operation for a map type,
+// and returns an expression that yields the *runtime._type value
+// representing that map type.
+func MakeMapRType(pos src.XPos, n *ir.MakeExpr) ir.Node {
+ assertOp(n, ir.OMAKEMAP)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return mapRType(pos, n.Type())
+}
+
+// MakeSliceElemRType asserts that n is a "make" operation for a slice
+// type, and returns an expression that yields the *runtime._type
+// value representing that slice type's element type.
+func MakeSliceElemRType(pos src.XPos, n *ir.MakeExpr) ir.Node {
+ assertOp2(n, ir.OMAKESLICE, ir.OMAKESLICECOPY)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return sliceElemRType(pos, n.Type())
+}
+
+// RangeMapRType asserts that n is a "range" loop over a map value,
+// and returns an expression that yields the *runtime._type value
+// representing that map type.
+func RangeMapRType(pos src.XPos, n *ir.RangeStmt) ir.Node {
+ assertOp(n, ir.ORANGE)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return mapRType(pos, n.X.Type())
+}
+
+// UnsafeSliceElemRType asserts that n is an "unsafe.Slice" operation,
+// and returns an expression that yields the *runtime._type value
+// representing the result slice type's element type.
+func UnsafeSliceElemRType(pos src.XPos, n *ir.BinaryExpr) ir.Node {
+ assertOp(n, ir.OUNSAFESLICE)
+ if hasRType(n, n.RType, "RType") {
+ return n.RType
+ }
+ return sliceElemRType(pos, n.Type())
+}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
new file mode 100644
index 0000000..c2407af
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -0,0 +1,1898 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+ "encoding/binary"
+ "fmt"
+ "internal/abi"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/compare"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/rttype"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typebits"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/gcprog"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+type ptabEntry struct {
+ s *types.Sym
+ t *types.Type
+}
+
+// runtime interface and reflection data structures
+var (
+ // protects signatset and signatslice
+ signatmu sync.Mutex
+ // Tracking which types need runtime type descriptor
+ signatset = make(map[*types.Type]struct{})
+ // Queue of types wait to be generated runtime type descriptor
+ signatslice []typeAndStr
+
+ gcsymmu sync.Mutex // protects gcsymset and gcsymslice
+ gcsymset = make(map[*types.Type]struct{})
+)
+
+type typeSig struct {
+ name *types.Sym
+ isym *obj.LSym
+ tsym *obj.LSym
+ type_ *types.Type
+ mtype *types.Type
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type. This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with runtime/map.go.
+//
+// A "bucket" is a "struct" {
+// tophash [BUCKETSIZE]uint8
+// keys [BUCKETSIZE]keyType
+// elems [BUCKETSIZE]elemType
+// overflow *bucket
+// }
+const (
+ BUCKETSIZE = abi.MapBucketCount
+ MAXKEYSIZE = abi.MapMaxKeyBytes
+ MAXELEMSIZE = abi.MapMaxElemBytes
+)
+
+func commonSize() int { return int(rttype.Type.Size()) } // Sizeof(runtime._type{})
+
+func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
+ if t.Sym() == nil && len(methods(t)) == 0 {
+ return 0
+ }
+ return int(rttype.UncommonType.Size())
+}
+
+func makefield(name string, t *types.Type) *types.Field {
+ sym := (*types.Pkg)(nil).Lookup(name)
+ return types.NewField(src.NoXPos, sym, t)
+}
+
+// MapBucketType makes the map bucket type given the type of the map.
+func MapBucketType(t *types.Type) *types.Type {
+ if t.MapType().Bucket != nil {
+ return t.MapType().Bucket
+ }
+
+ keytype := t.Key()
+ elemtype := t.Elem()
+ types.CalcSize(keytype)
+ types.CalcSize(elemtype)
+ if keytype.Size() > MAXKEYSIZE {
+ keytype = types.NewPtr(keytype)
+ }
+ if elemtype.Size() > MAXELEMSIZE {
+ elemtype = types.NewPtr(elemtype)
+ }
+
+ field := make([]*types.Field, 0, 5)
+
+ // The first field is: uint8 topbits[BUCKETSIZE].
+ arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE)
+ field = append(field, makefield("topbits", arr))
+
+ arr = types.NewArray(keytype, BUCKETSIZE)
+ arr.SetNoalg(true)
+ keys := makefield("keys", arr)
+ field = append(field, keys)
+
+ arr = types.NewArray(elemtype, BUCKETSIZE)
+ arr.SetNoalg(true)
+ elems := makefield("elems", arr)
+ field = append(field, elems)
+
+ // If keys and elems have no pointers, the map implementation
+ // can keep a list of overflow pointers on the side so that
+ // buckets can be marked as having no pointers.
+ // Arrange for the bucket to have no pointers by changing
+ // the type of the overflow field to uintptr in this case.
+ // See comment on hmap.overflow in runtime/map.go.
+ otyp := types.Types[types.TUNSAFEPTR]
+ if !elemtype.HasPointers() && !keytype.HasPointers() {
+ otyp = types.Types[types.TUINTPTR]
+ }
+ overflow := makefield("overflow", otyp)
+ field = append(field, overflow)
+
+ // link up fields
+ bucket := types.NewStruct(field[:])
+ bucket.SetNoalg(true)
+ types.CalcSize(bucket)
+
+ // Check invariants that map code depends on.
+ if !types.IsComparable(t.Key()) {
+ base.Fatalf("unsupported map key type for %v", t)
+ }
+ if BUCKETSIZE < 8 {
+ base.Fatalf("bucket size %d too small for proper alignment %d", BUCKETSIZE, 8)
+ }
+ if uint8(keytype.Alignment()) > BUCKETSIZE {
+ base.Fatalf("key align too big for %v", t)
+ }
+ if uint8(elemtype.Alignment()) > BUCKETSIZE {
+ base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, BUCKETSIZE)
+ }
+ if keytype.Size() > MAXKEYSIZE {
+ base.Fatalf("key size too large for %v", t)
+ }
+ if elemtype.Size() > MAXELEMSIZE {
+ base.Fatalf("elem size too large for %v", t)
+ }
+ if t.Key().Size() > MAXKEYSIZE && !keytype.IsPtr() {
+ base.Fatalf("key indirect incorrect for %v", t)
+ }
+ if t.Elem().Size() > MAXELEMSIZE && !elemtype.IsPtr() {
+ base.Fatalf("elem indirect incorrect for %v", t)
+ }
+ if keytype.Size()%keytype.Alignment() != 0 {
+ base.Fatalf("key size not a multiple of key align for %v", t)
+ }
+ if elemtype.Size()%elemtype.Alignment() != 0 {
+ base.Fatalf("elem size not a multiple of elem align for %v", t)
+ }
+ if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
+ base.Fatalf("bucket align not multiple of key align %v", t)
+ }
+ if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
+ base.Fatalf("bucket align not multiple of elem align %v", t)
+ }
+ if keys.Offset%keytype.Alignment() != 0 {
+ base.Fatalf("bad alignment of keys in bmap for %v", t)
+ }
+ if elems.Offset%elemtype.Alignment() != 0 {
+ base.Fatalf("bad alignment of elems in bmap for %v", t)
+ }
+
+ // Double-check that overflow field is final memory in struct,
+ // with no padding at end.
+ if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
+ base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d",
+ t, overflow.Offset, bucket.Size()-int64(types.PtrSize))
+ }
+
+ t.MapType().Bucket = bucket
+
+ bucket.StructType().Map = t
+ return bucket
+}
+
+var hmapType *types.Type
+
+// MapType returns a type interchangeable with runtime.hmap.
+// Make sure this stays in sync with runtime/map.go.
+func MapType() *types.Type {
+ if hmapType != nil {
+ return hmapType
+ }
+
+ // build a struct:
+ // type hmap struct {
+ // count int
+ // flags uint8
+ // B uint8
+ // noverflow uint16
+ // hash0 uint32
+ // buckets unsafe.Pointer
+ // oldbuckets unsafe.Pointer
+ // nevacuate uintptr
+ // extra unsafe.Pointer // *mapextra
+ // }
+ // must match runtime/map.go:hmap.
+ fields := []*types.Field{
+ makefield("count", types.Types[types.TINT]),
+ makefield("flags", types.Types[types.TUINT8]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("noverflow", types.Types[types.TUINT16]),
+ makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP.
+ makefield("oldbuckets", types.Types[types.TUNSAFEPTR]),
+ makefield("nevacuate", types.Types[types.TUINTPTR]),
+ makefield("extra", types.Types[types.TUNSAFEPTR]),
+ }
+
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap"))
+ hmap := types.NewNamed(n)
+ n.SetType(hmap)
+ n.SetTypecheck(1)
+
+ hmap.SetUnderlying(types.NewStruct(fields))
+ types.CalcSize(hmap)
+
+ // The size of hmap should be 48 bytes on 64 bit
+ // and 28 bytes on 32 bit platforms.
+ if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
+ base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
+ }
+
+ hmapType = hmap
+ return hmap
+}
+
+var hiterType *types.Type
+
+// MapIterType returns a type interchangeable with runtime.hiter.
+// Make sure this stays in sync with runtime/map.go.
+func MapIterType() *types.Type {
+ if hiterType != nil {
+ return hiterType
+ }
+
+ hmap := MapType()
+
+ // build a struct:
+ // type hiter struct {
+ // key unsafe.Pointer // *Key
+ // elem unsafe.Pointer // *Elem
+ // t unsafe.Pointer // *MapType
+ // h *hmap
+ // buckets unsafe.Pointer
+ // bptr unsafe.Pointer // *bmap
+ // overflow unsafe.Pointer // *[]*bmap
+ // oldoverflow unsafe.Pointer // *[]*bmap
+ // startBucket uintptr
+ // offset uint8
+ // wrapped bool
+ // B uint8
+ // i uint8
+ // bucket uintptr
+ // checkBucket uintptr
+ // }
+ // must match runtime/map.go:hiter.
+ fields := []*types.Field{
+ makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
+ makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP.
+ makefield("t", types.Types[types.TUNSAFEPTR]),
+ makefield("h", types.NewPtr(hmap)),
+ makefield("buckets", types.Types[types.TUNSAFEPTR]),
+ makefield("bptr", types.Types[types.TUNSAFEPTR]),
+ makefield("overflow", types.Types[types.TUNSAFEPTR]),
+ makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
+ makefield("startBucket", types.Types[types.TUINTPTR]),
+ makefield("offset", types.Types[types.TUINT8]),
+ makefield("wrapped", types.Types[types.TBOOL]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("i", types.Types[types.TUINT8]),
+ makefield("bucket", types.Types[types.TUINTPTR]),
+ makefield("checkBucket", types.Types[types.TUINTPTR]),
+ }
+
+ // build iterator struct holding the above fields
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter"))
+ hiter := types.NewNamed(n)
+ n.SetType(hiter)
+ n.SetTypecheck(1)
+
+ hiter.SetUnderlying(types.NewStruct(fields))
+ types.CalcSize(hiter)
+ if hiter.Size() != int64(12*types.PtrSize) {
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
+ }
+
+ hiterType = hiter
+ return hiter
+}
+
+// methods returns the methods of the non-interface type t, sorted by name.
+// Generates stub functions as needed.
+func methods(t *types.Type) []*typeSig {
+ if t.HasShape() {
+ // Shape types have no methods.
+ return nil
+ }
+ // method type
+ mt := types.ReceiverBaseType(t)
+
+ if mt == nil {
+ return nil
+ }
+ typecheck.CalcMethods(mt)
+
+ // make list of methods for t,
+ // generating code if necessary.
+ var ms []*typeSig
+ for _, f := range mt.AllMethods() {
+ if f.Sym == nil {
+ base.Fatalf("method with no sym on %v", mt)
+ }
+ if !f.IsMethod() {
+ base.Fatalf("non-method on %v method %v %v", mt, f.Sym, f)
+ }
+ if f.Type.Recv() == nil {
+ base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f)
+ }
+ if f.Nointerface() && !t.IsFullyInstantiated() {
+ // Skip creating method wrappers if f is nointerface. But, if
+ // t is an instantiated type, we still have to call
+ // methodWrapper, because methodWrapper generates the actual
+ // generic method on the type as well.
+ continue
+ }
+
+ // get receiver type for this particular method.
+ // if pointer receiver but non-pointer t and
+ // this is not an embedded pointer inside a struct,
+ // method does not apply.
+ if !types.IsMethodApplicable(t, f) {
+ continue
+ }
+
+ sig := &typeSig{
+ name: f.Sym,
+ isym: methodWrapper(t, f, true),
+ tsym: methodWrapper(t, f, false),
+ type_: typecheck.NewMethodType(f.Type, t),
+ mtype: typecheck.NewMethodType(f.Type, nil),
+ }
+ if f.Nointerface() {
+ // In the case of a nointerface method on an instantiated
+ // type, don't actually append the typeSig.
+ continue
+ }
+ ms = append(ms, sig)
+ }
+
+ return ms
+}
+
+// imethods returns the methods of the interface type t, sorted by name.
+func imethods(t *types.Type) []*typeSig {
+ var methods []*typeSig
+ for _, f := range t.AllMethods() {
+ if f.Type.Kind() != types.TFUNC || f.Sym == nil {
+ continue
+ }
+ if f.Sym.IsBlank() {
+ base.Fatalf("unexpected blank symbol in interface method set")
+ }
+ if n := len(methods); n > 0 {
+ last := methods[n-1]
+ if !last.name.Less(f.Sym) {
+ base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
+ }
+ }
+
+ sig := &typeSig{
+ name: f.Sym,
+ mtype: f.Type,
+ type_: typecheck.NewMethodType(f.Type, nil),
+ }
+ methods = append(methods, sig)
+
+ // NOTE(rsc): Perhaps an oversight that
+ // IfaceType.Method is not in the reflect data.
+ // Generate the method body, so that compiled
+ // code can refer to it.
+ methodWrapper(t, f, false)
+ }
+
+ return methods
+}
+
+func dimportpath(p *types.Pkg) {
+ if p.Pathsym != nil {
+ return
+ }
+
+ if p == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+ panic("missing pkgpath")
+ }
+
+ // If we are compiling the runtime package, there are two runtime packages around
+ // -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
+ // both of them, so just produce one for localpkg.
+ if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
+ return
+ }
+
+ s := base.Ctxt.Lookup("type:.importpath." + p.Prefix + ".")
+ ot := dnameData(s, 0, p.Path, "", nil, false, false)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
+ s.Set(obj.AttrContentAddressable, true)
+ p.Pathsym = s
+}
+
+func dgopkgpath(c rttype.Cursor, pkg *types.Pkg) {
+ c = c.Field("Bytes")
+ if pkg == nil {
+ c.WritePtr(nil)
+ return
+ }
+
+ dimportpath(pkg)
+ c.WritePtr(pkg.Pathsym)
+}
+
+// dgopkgpathOff writes an offset relocation to the pkg path symbol to c.
+func dgopkgpathOff(c rttype.Cursor, pkg *types.Pkg) {
+ if pkg == nil {
+ c.WriteInt32(0)
+ return
+ }
+
+ dimportpath(pkg)
+ c.WriteSymPtrOff(pkg.Pathsym, false)
+}
+
+// dnameField dumps a reflect.name for a struct field.
+func dnameField(c rttype.Cursor, spkg *types.Pkg, ft *types.Field) {
+ if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
+ base.Fatalf("package mismatch for %v", ft.Sym)
+ }
+ nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name), ft.Embedded != 0)
+ c.Field("Bytes").WritePtr(nsym)
+}
+
+// dnameData writes the contents of a reflect.name into s at offset ot.
+func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported, embedded bool) int {
+ if len(name) >= 1<<29 {
+ base.Fatalf("name too long: %d %s...", len(name), name[:1024])
+ }
+ if len(tag) >= 1<<29 {
+ base.Fatalf("tag too long: %d %s...", len(tag), tag[:1024])
+ }
+ var nameLen [binary.MaxVarintLen64]byte
+ nameLenLen := binary.PutUvarint(nameLen[:], uint64(len(name)))
+ var tagLen [binary.MaxVarintLen64]byte
+ tagLenLen := binary.PutUvarint(tagLen[:], uint64(len(tag)))
+
+ // Encode name and tag. See reflect/type.go for details.
+ var bits byte
+ l := 1 + nameLenLen + len(name)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += tagLenLen + len(tag)
+ bits |= 1 << 1
+ }
+ if pkg != nil {
+ bits |= 1 << 2
+ }
+ if embedded {
+ bits |= 1 << 3
+ }
+ b := make([]byte, l)
+ b[0] = bits
+ copy(b[1:], nameLen[:nameLenLen])
+ copy(b[1+nameLenLen:], name)
+ if len(tag) > 0 {
+ tb := b[1+nameLenLen+len(name):]
+ copy(tb, tagLen[:tagLenLen])
+ copy(tb[tagLenLen:], tag)
+ }
+
+ ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
+
+ if pkg != nil {
+ c := rttype.NewCursor(s, int64(ot), types.Types[types.TUINT32])
+ dgopkgpathOff(c, pkg)
+ ot += 4
+ }
+
+ return ot
+}
+
+var dnameCount int
+
+// dname creates a reflect.name for a struct field or method.
+func dname(name, tag string, pkg *types.Pkg, exported, embedded bool) *obj.LSym {
+ // Write out data as "type:." to signal two things to the
+ // linker, first that when dynamically linking, the symbol
+ // should be moved to a relro section, and second that the
+ // contents should not be decoded as a type.
+ sname := "type:.namedata."
+ if pkg == nil {
+ // In the common case, share data with other packages.
+ if name == "" {
+ if exported {
+ sname += "-noname-exported." + tag
+ } else {
+ sname += "-noname-unexported." + tag
+ }
+ } else {
+ if exported {
+ sname += name + "." + tag
+ } else {
+ sname += name + "-" + tag
+ }
+ }
+ } else {
+ // TODO(mdempsky): We should be able to share these too (except
+ // maybe when dynamic linking).
+ sname = fmt.Sprintf("%s%s.%d", sname, types.LocalPkg.Prefix, dnameCount)
+ dnameCount++
+ }
+ if embedded {
+ sname += ".embedded"
+ }
+ s := base.Ctxt.Lookup(sname)
+ if len(s.P) > 0 {
+ return s
+ }
+ ot := dnameData(s, 0, name, tag, pkg, exported, embedded)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
+ s.Set(obj.AttrContentAddressable, true)
+ return s
+}
+
+// dextratype dumps the fields of a runtime.uncommontype.
+// dataAdd is the offset in bytes after the header where the
+// backing array of the []method field should be written.
+func dextratype(lsym *obj.LSym, off int64, t *types.Type, dataAdd int) {
+ m := methods(t)
+ if t.Sym() == nil && len(m) == 0 {
+ base.Fatalf("extra requested of type with no extra info %v", t)
+ }
+ noff := types.RoundUp(off, int64(types.PtrSize))
+ if noff != off {
+ base.Fatalf("unexpected alignment in dextratype for %v", t)
+ }
+
+ for _, a := range m {
+ writeType(a.type_)
+ }
+
+ c := rttype.NewCursor(lsym, off, rttype.UncommonType)
+ dgopkgpathOff(c.Field("PkgPath"), typePkg(t))
+
+ dataAdd += uncommonSize(t)
+ mcount := len(m)
+ if mcount != int(uint16(mcount)) {
+ base.Fatalf("too many methods on %v: %d", t, mcount)
+ }
+ xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
+ if dataAdd != int(uint32(dataAdd)) {
+ base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
+ }
+
+ c.Field("Mcount").WriteUint16(uint16(mcount))
+ c.Field("Xcount").WriteUint16(uint16(xcount))
+ c.Field("Moff").WriteUint32(uint32(dataAdd))
+ // Note: there is an unused uint32 field here.
+
+ // Write the backing array for the []method field.
+ array := rttype.NewArrayCursor(lsym, off+int64(dataAdd), rttype.Method, mcount)
+ for i, a := range m {
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != typePkg(t) {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported, false)
+
+ e := array.Elem(i)
+ e.Field("Name").WriteSymPtrOff(nsym, false)
+ dmethodptrOff(e.Field("Mtyp"), writeType(a.mtype))
+ dmethodptrOff(e.Field("Ifn"), a.isym)
+ dmethodptrOff(e.Field("Tfn"), a.tsym)
+ }
+}
+
+func typePkg(t *types.Type) *types.Pkg {
+ tsym := t.Sym()
+ if tsym == nil {
+ switch t.Kind() {
+ case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
+ if t.Elem() != nil {
+ tsym = t.Elem().Sym()
+ }
+ }
+ }
+ if tsym != nil && tsym.Pkg != types.BuiltinPkg {
+ return tsym.Pkg
+ }
+ return nil
+}
+
+func dmethodptrOff(c rttype.Cursor, x *obj.LSym) {
+ c.WriteInt32(0)
+ r := c.Reloc()
+ r.Sym = x
+ r.Type = objabi.R_METHODOFF
+}
+
+var kinds = []int{
+ types.TINT: objabi.KindInt,
+ types.TUINT: objabi.KindUint,
+ types.TINT8: objabi.KindInt8,
+ types.TUINT8: objabi.KindUint8,
+ types.TINT16: objabi.KindInt16,
+ types.TUINT16: objabi.KindUint16,
+ types.TINT32: objabi.KindInt32,
+ types.TUINT32: objabi.KindUint32,
+ types.TINT64: objabi.KindInt64,
+ types.TUINT64: objabi.KindUint64,
+ types.TUINTPTR: objabi.KindUintptr,
+ types.TFLOAT32: objabi.KindFloat32,
+ types.TFLOAT64: objabi.KindFloat64,
+ types.TBOOL: objabi.KindBool,
+ types.TSTRING: objabi.KindString,
+ types.TPTR: objabi.KindPtr,
+ types.TSTRUCT: objabi.KindStruct,
+ types.TINTER: objabi.KindInterface,
+ types.TCHAN: objabi.KindChan,
+ types.TMAP: objabi.KindMap,
+ types.TARRAY: objabi.KindArray,
+ types.TSLICE: objabi.KindSlice,
+ types.TFUNC: objabi.KindFunc,
+ types.TCOMPLEX64: objabi.KindComplex64,
+ types.TCOMPLEX128: objabi.KindComplex128,
+ types.TUNSAFEPTR: objabi.KindUnsafePointer,
+}
+
+var (
+ memhashvarlen *obj.LSym
+ memequalvarlen *obj.LSym
+)
+
+// dcommontype dumps the contents of a reflect.rtype (runtime._type) to c.
+func dcommontype(c rttype.Cursor, t *types.Type) {
+ types.CalcSize(t)
+ eqfunc := geneq(t)
+
+ sptrWeak := true
+ var sptr *obj.LSym
+ if !t.IsPtr() || t.IsPtrElem() {
+ tptr := types.NewPtr(t)
+ if t.Sym() != nil || methods(tptr) != nil {
+ sptrWeak = false
+ }
+ sptr = writeType(tptr)
+ }
+
+ gcsym, useGCProg, ptrdata := dgcsym(t, true)
+ delete(gcsymset, t)
+
+ // ../../../../reflect/type.go:/^type.rtype
+ // actual type structure
+ // type rtype struct {
+ // size uintptr
+ // ptrdata uintptr
+ // hash uint32
+ // tflag tflag
+ // align uint8
+ // fieldAlign uint8
+ // kind uint8
+ // equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // gcdata *byte
+ // str nameOff
+ // ptrToThis typeOff
+ // }
+ c.Field("Size_").WriteUintptr(uint64(t.Size()))
+ c.Field("PtrBytes").WriteUintptr(uint64(ptrdata))
+ c.Field("Hash").WriteUint32(types.TypeHash(t))
+
+ var tflag abi.TFlag
+ if uncommonSize(t) != 0 {
+ tflag |= abi.TFlagUncommon
+ }
+ if t.Sym() != nil && t.Sym().Name != "" {
+ tflag |= abi.TFlagNamed
+ }
+ if compare.IsRegularMemory(t) {
+ tflag |= abi.TFlagRegularMemory
+ }
+
+ exported := false
+ p := t.NameString()
+ // If we're writing out type T,
+ // we are very likely to write out type *T as well.
+ // Use the string "*T"[1:] for "T", so that the two
+ // share storage. This is a cheap way to reduce the
+ // amount of space taken up by reflect strings.
+ if !strings.HasPrefix(p, "*") {
+ p = "*" + p
+ tflag |= abi.TFlagExtraStar
+ if t.Sym() != nil {
+ exported = types.IsExported(t.Sym().Name)
+ }
+ } else {
+ if t.Elem() != nil && t.Elem().Sym() != nil {
+ exported = types.IsExported(t.Elem().Sym().Name)
+ }
+ }
+
+ if tflag != abi.TFlag(uint8(tflag)) {
+ // this should optimize away completely
+ panic("Unexpected change in size of abi.TFlag")
+ }
+ c.Field("TFlag").WriteUint8(uint8(tflag))
+
+ // runtime (and common sense) expects alignment to be a power of two.
+ i := int(uint8(t.Alignment()))
+
+ if i == 0 {
+ i = 1
+ }
+ if i&(i-1) != 0 {
+ base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t)
+ }
+ c.Field("Align_").WriteUint8(uint8(t.Alignment()))
+ c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment()))
+
+ i = kinds[t.Kind()]
+ if types.IsDirectIface(t) {
+ i |= objabi.KindDirectIface
+ }
+ if useGCProg {
+ i |= objabi.KindGCProg
+ }
+ c.Field("Kind_").WriteUint8(uint8(i))
+
+ c.Field("Equal").WritePtr(eqfunc)
+ c.Field("GCData").WritePtr(gcsym)
+
+ nsym := dname(p, "", nil, exported, false)
+ c.Field("Str").WriteSymPtrOff(nsym, false)
+ c.Field("PtrToThis").WriteSymPtrOff(sptr, sptrWeak)
+}
+
+// TrackSym returns the symbol for tracking use of field/method f, assumed
+// to be a member of struct/interface type t.
+func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
+ return base.PkgLinksym("go:track", t.LinkString()+"."+f.Sym.Name, obj.ABI0)
+}
+
+func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
+ p := prefix + "." + t.LinkString()
+ s := types.TypeSymLookup(p)
+
+ // This function is for looking up type-related generated functions
+ // (e.g. eq and hash). Make sure they are indeed generated.
+ signatmu.Lock()
+ NeedRuntimeType(t)
+ signatmu.Unlock()
+
+ //print("algsym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+func TypeSym(t *types.Type) *types.Sym {
+ if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
+ base.Fatalf("TypeSym %v", t)
+ }
+ if t.Kind() == types.TFUNC && t.Recv() != nil {
+ base.Fatalf("misuse of method type: %v", t)
+ }
+ s := types.TypeSym(t)
+ signatmu.Lock()
+ NeedRuntimeType(t)
+ signatmu.Unlock()
+ return s
+}
+
+func TypeLinksymPrefix(prefix string, t *types.Type) *obj.LSym {
+ return TypeSymPrefix(prefix, t).Linksym()
+}
+
+func TypeLinksymLookup(name string) *obj.LSym {
+ return types.TypeSymLookup(name).Linksym()
+}
+
+func TypeLinksym(t *types.Type) *obj.LSym {
+ lsym := TypeSym(t).Linksym()
+ signatmu.Lock()
+ if lsym.Extra == nil {
+ ti := lsym.NewTypeInfo()
+ ti.Type = t
+ }
+ signatmu.Unlock()
+ return lsym
+}
+
+// TypePtrAt returns an expression that evaluates to the
+// *runtime._type value for t.
+func TypePtrAt(pos src.XPos, t *types.Type) *ir.AddrExpr {
+ return typecheck.LinksymAddr(pos, TypeLinksym(t), types.Types[types.TUINT8])
+}
+
+// ITabLsym returns the LSym representing the itab for concrete type typ implementing
+// interface iface. A dummy tab will be created in the unusual case where typ doesn't
+// implement iface. Normally, this wouldn't happen, because the typechecker would
+// have reported a compile-time error. This situation can only happen when the
+// destination type of a type assert or a type in a type switch is parameterized, so
+// it may sometimes, but not always, be a type that can't implement the specified
+// interface.
+func ITabLsym(typ, iface *types.Type) *obj.LSym {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
+ lsym := s.Linksym()
+
+ if !existed {
+ writeITab(lsym, typ, iface, true)
+ }
+ return lsym
+}
+
+// ITabAddrAt returns an expression that evaluates to the
+// *runtime.itab value for concrete type typ implementing interface
+// iface.
+func ITabAddrAt(pos src.XPos, typ, iface *types.Type) *ir.AddrExpr {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
+ lsym := s.Linksym()
+
+ if !existed {
+ writeITab(lsym, typ, iface, false)
+ }
+
+ return typecheck.LinksymAddr(pos, lsym, types.Types[types.TUINT8])
+}
+
+// needkeyupdate reports whether map updates with t as a key
+// need the key to be updated.
+func needkeyupdate(t *types.Type) bool {
+ switch t.Kind() {
+ case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
+ types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
+ return false
+
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0
+ types.TINTER,
+ types.TSTRING: // strings might have smaller backing stores
+ return true
+
+ case types.TARRAY:
+ return needkeyupdate(t.Elem())
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields() {
+ if needkeyupdate(t1.Type) {
+ return true
+ }
+ }
+ return false
+
+ default:
+ base.Fatalf("bad type for map key: %v", t)
+ return true
+ }
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *types.Type) bool {
+ switch t.Kind() {
+ case types.TINTER:
+ return true
+
+ case types.TARRAY:
+ return hashMightPanic(t.Elem())
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields() {
+ if hashMightPanic(t1.Type) {
+ return true
+ }
+ }
+ return false
+
+ default:
+ return false
+ }
+}
+
+// formalType replaces predeclared aliases with real types.
+// They've been separate internally to make error messages
+// better, but we have to merge them in the reflect tables.
+func formalType(t *types.Type) *types.Type {
+ switch t {
+ case types.AnyType, types.ByteType, types.RuneType:
+ return types.Types[t.Kind()]
+ }
+ return t
+}
+
+func writeType(t *types.Type) *obj.LSym {
+ t = formalType(t)
+ if t.IsUntyped() {
+ base.Fatalf("writeType %v", t)
+ }
+
+ s := types.TypeSym(t)
+ lsym := s.Linksym()
+
+ // special case (look for runtime below):
+ // when compiling package runtime,
+ // emit the type structures for int, float, etc.
+ tbase := t
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+ tbase = t.Elem()
+ }
+ if tbase.Kind() == types.TFORW {
+ base.Fatalf("unresolved defined type: %v", tbase)
+ }
+
+ // This is a fake type we generated for our builtin pseudo-runtime
+ // package. We'll emit a description for the real type while
+ // compiling package runtime, so we don't need or want to emit one
+ // from this fake type.
+ if sym := tbase.Sym(); sym != nil && sym.Pkg == ir.Pkgs.Runtime {
+ return lsym
+ }
+
+ if s.Siggen() {
+ return lsym
+ }
+ s.SetSiggen(true)
+
+ if !NeedEmit(tbase) {
+ if i := typecheck.BaseTypeIndex(t); i >= 0 {
+ lsym.Pkg = tbase.Sym().Pkg.Prefix
+ lsym.SymIdx = int32(i)
+ lsym.Set(obj.AttrIndexed, true)
+ }
+
+ // TODO(mdempsky): Investigate whether this still happens.
+ // If we know we don't need to emit code for a type,
+ // we should have a link-symbol index for it.
+ // See also TODO in NeedEmit.
+ return lsym
+ }
+
+ // Type layout Written by Marker
+ // +--------------------------------+ - 0
+ // | abi/internal.Type | dcommontype
+ // +--------------------------------+ - A
+ // | additional type-dependent | code in the switch below
+ // | fields, e.g. |
+ // | abi/internal.ArrayType.Len |
+ // +--------------------------------+ - B
+ // | internal/abi.UncommonType | dextratype
+ // | This section is optional, |
+ // | if type has a name or methods |
+ // +--------------------------------+ - C
+ // | variable-length data | code in the switch below
+ // | referenced by |
+ // | type-dependent fields, e.g. |
+ // | abi/internal.StructType.Fields |
+ // | dataAdd = size of this section |
+ // +--------------------------------+ - D
+ // | method list, if any | dextratype
+ // +--------------------------------+ - E
+
+ // UncommonType section is included if we have a name or a method.
+ extra := t.Sym() != nil || len(methods(t)) != 0
+
+ // Decide the underlying type of the descriptor, and remember
+ // the size we need for variable-length data.
+ var rt *types.Type
+ dataAdd := 0
+ switch t.Kind() {
+ default:
+ rt = rttype.Type
+ case types.TARRAY:
+ rt = rttype.ArrayType
+ case types.TSLICE:
+ rt = rttype.SliceType
+ case types.TCHAN:
+ rt = rttype.ChanType
+ case types.TFUNC:
+ rt = rttype.FuncType
+ dataAdd = (t.NumRecvs() + t.NumParams() + t.NumResults()) * types.PtrSize
+ case types.TINTER:
+ rt = rttype.InterfaceType
+ dataAdd = len(imethods(t)) * int(rttype.IMethod.Size())
+ case types.TMAP:
+ rt = rttype.MapType
+ case types.TPTR:
+ rt = rttype.PtrType
+ // TODO: use rttype.Type for Elem() is ANY?
+ case types.TSTRUCT:
+ rt = rttype.StructType
+ dataAdd = t.NumFields() * int(rttype.StructField.Size())
+ }
+
+ // Compute offsets of each section.
+ B := rt.Size()
+ C := B
+ if extra {
+ C = B + rttype.UncommonType.Size()
+ }
+ D := C + int64(dataAdd)
+ E := D + int64(len(methods(t)))*rttype.Method.Size()
+
+ // Write the runtime._type
+ c := rttype.NewCursor(lsym, 0, rt)
+ if rt == rttype.Type {
+ dcommontype(c, t)
+ } else {
+ dcommontype(c.Field("Type"), t)
+ }
+
+ // Write additional type-specific data
+ // (Both the fixed size and variable-sized sections.)
+ switch t.Kind() {
+ case types.TARRAY:
+ // internal/abi.ArrayType
+ s1 := writeType(t.Elem())
+ t2 := types.NewSlice(t.Elem())
+ s2 := writeType(t2)
+ c.Field("Elem").WritePtr(s1)
+ c.Field("Slice").WritePtr(s2)
+ c.Field("Len").WriteUintptr(uint64(t.NumElem()))
+
+ case types.TSLICE:
+ // internal/abi.SliceType
+ s1 := writeType(t.Elem())
+ c.Field("Elem").WritePtr(s1)
+
+ case types.TCHAN:
+ // internal/abi.ChanType
+ s1 := writeType(t.Elem())
+ c.Field("Elem").WritePtr(s1)
+ c.Field("Dir").WriteInt(int64(t.ChanDir()))
+
+ case types.TFUNC:
+ // internal/abi.FuncType
+ for _, t1 := range t.RecvParamsResults() {
+ writeType(t1.Type)
+ }
+ inCount := t.NumRecvs() + t.NumParams()
+ outCount := t.NumResults()
+ if t.IsVariadic() {
+ outCount |= 1 << 15
+ }
+
+ c.Field("InCount").WriteUint16(uint16(inCount))
+ c.Field("OutCount").WriteUint16(uint16(outCount))
+
+ // Array of rtype pointers follows funcType.
+ typs := t.RecvParamsResults()
+ array := rttype.NewArrayCursor(lsym, C, types.Types[types.TUNSAFEPTR], len(typs))
+ for i, t1 := range typs {
+ array.Elem(i).WritePtr(writeType(t1.Type))
+ }
+
+ case types.TINTER:
+ // internal/abi.InterfaceType
+ m := imethods(t)
+ n := len(m)
+ for _, a := range m {
+ writeType(a.type_)
+ }
+
+ var tpkg *types.Pkg
+ if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
+ tpkg = t.Sym().Pkg
+ }
+ dgopkgpath(c.Field("PkgPath"), tpkg)
+ c.Field("Methods").WriteSlice(lsym, C, int64(n), int64(n))
+
+ array := rttype.NewArrayCursor(lsym, C, rttype.IMethod, n)
+ for i, a := range m {
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != tpkg {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported, false)
+
+ e := array.Elem(i)
+ e.Field("Name").WriteSymPtrOff(nsym, false)
+ e.Field("Typ").WriteSymPtrOff(writeType(a.type_), false)
+ }
+
+ case types.TMAP:
+ // internal/abi.MapType
+ s1 := writeType(t.Key())
+ s2 := writeType(t.Elem())
+ s3 := writeType(MapBucketType(t))
+ hasher := genhash(t.Key())
+
+ c.Field("Key").WritePtr(s1)
+ c.Field("Elem").WritePtr(s2)
+ c.Field("Bucket").WritePtr(s3)
+ c.Field("Hasher").WritePtr(hasher)
+ var flags uint32
+ // Note: flags must match maptype accessors in ../../../../runtime/type.go
+ // and maptype builder in ../../../../reflect/type.go:MapOf.
+ if t.Key().Size() > MAXKEYSIZE {
+ c.Field("KeySize").WriteUint8(uint8(types.PtrSize))
+ flags |= 1 // indirect key
+ } else {
+ c.Field("KeySize").WriteUint8(uint8(t.Key().Size()))
+ }
+
+ if t.Elem().Size() > MAXELEMSIZE {
+ c.Field("ValueSize").WriteUint8(uint8(types.PtrSize))
+ flags |= 2 // indirect value
+ } else {
+ c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size()))
+ }
+ c.Field("BucketSize").WriteUint16(uint16(MapBucketType(t).Size()))
+ if types.IsReflexive(t.Key()) {
+ flags |= 4 // reflexive key
+ }
+ if needkeyupdate(t.Key()) {
+ flags |= 8 // need key update
+ }
+ if hashMightPanic(t.Key()) {
+ flags |= 16 // hash might panic
+ }
+ c.Field("Flags").WriteUint32(flags)
+
+ if u := t.Underlying(); u != t {
+ // If t is a named map type, also keep the underlying map
+ // type live in the binary. This is important to make sure that
+ // a named map and that same map cast to its underlying type via
+ // reflection, use the same hash function. See issue 37716.
+ r := obj.Addrel(lsym)
+ r.Sym = writeType(u)
+ r.Type = objabi.R_KEEP
+ }
+
+ case types.TPTR:
+ // internal/abi.PtrType
+ if t.Elem().Kind() == types.TANY {
+ base.Fatalf("bad pointer base type")
+ }
+
+ s1 := writeType(t.Elem())
+ c.Field("Elem").WritePtr(s1)
+
+ case types.TSTRUCT:
+ // internal/abi.StructType
+ fields := t.Fields()
+ for _, t1 := range fields {
+ writeType(t1.Type)
+ }
+
+ // All non-exported struct field names within a struct
+ // type must originate from a single package. By
+ // identifying and recording that package within the
+ // struct type descriptor, we can omit that
+ // information from the field descriptors.
+ var spkg *types.Pkg
+ for _, f := range fields {
+ if !types.IsExported(f.Sym.Name) {
+ spkg = f.Sym.Pkg
+ break
+ }
+ }
+
+ dgopkgpath(c.Field("PkgPath"), spkg)
+ c.Field("Fields").WriteSlice(lsym, C, int64(len(fields)), int64(len(fields)))
+
+ array := rttype.NewArrayCursor(lsym, C, rttype.StructField, len(fields))
+ for i, f := range fields {
+ e := array.Elem(i)
+ dnameField(e.Field("Name"), spkg, f)
+ e.Field("Typ").WritePtr(writeType(f.Type))
+ e.Field("Offset").WriteUintptr(uint64(f.Offset))
+ }
+ }
+
+ // Write the extra info, if any.
+ if extra {
+ dextratype(lsym, B, t, dataAdd)
+ }
+
+ // Note: DUPOK is required to ensure that we don't end up with more
+ // than one type descriptor for a given type, if the type descriptor
+ // can be defined in multiple packages, that is, unnamed types,
+ // instantiated types and shape types.
+ dupok := 0
+ if tbase.Sym() == nil || tbase.IsFullyInstantiated() || tbase.HasShape() {
+ dupok = obj.DUPOK
+ }
+
+ objw.Global(lsym, int32(E), int16(dupok|obj.RODATA))
+
+ // The linker will leave a table of all the typelinks for
+ // types in the binary, so the runtime can find them.
+ //
+ // When buildmode=shared, all types are in typelinks so the
+ // runtime can deduplicate type pointers.
+ keep := base.Ctxt.Flag_dynlink
+ if !keep && t.Sym() == nil {
+ // For an unnamed type, we only need the link if the type can
+ // be created at run time by reflect.PointerTo and similar
+ // functions. If the type exists in the program, those
+ // functions must return the existing type structure rather
+ // than creating a new one.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
+ keep = true
+ }
+ }
+ // Do not put Noalg types in typelinks. See issue #22605.
+ if types.TypeHasNoAlg(t) {
+ keep = false
+ }
+ lsym.Set(obj.AttrMakeTypelink, keep)
+
+ return lsym
+}
+
+// InterfaceMethodOffset returns the offset of the i-th method in the interface
+// type descriptor, ityp.
+func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
+ // interface type descriptor layout is struct {
+ // _type // commonSize
+ // pkgpath // 1 word
+ // []imethod // 3 words (pointing to [...]imethod below)
+ // uncommontype // uncommonSize
+ // [...]imethod
+ // }
+ // The size of imethod is 8.
+ return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
+}
+
+// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
+func NeedRuntimeType(t *types.Type) {
+ if _, ok := signatset[t]; !ok {
+ signatset[t] = struct{}{}
+ signatslice = append(signatslice, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
+ }
+}
+
+func WriteRuntimeTypes() {
+ // Process signatslice. Use a loop, as writeType adds
+ // entries to signatslice while it is being processed.
+ for len(signatslice) > 0 {
+ signats := signatslice
+ // Sort for reproducible builds.
+ sort.Sort(typesByString(signats))
+ for _, ts := range signats {
+ t := ts.t
+ writeType(t)
+ if t.Sym() != nil {
+ writeType(types.NewPtr(t))
+ }
+ }
+ signatslice = signatslice[len(signats):]
+ }
+}
+
+func WriteGCSymbols() {
+ // Emit GC data symbols.
+ gcsyms := make([]typeAndStr, 0, len(gcsymset))
+ for t := range gcsymset {
+ gcsyms = append(gcsyms, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
+ }
+ sort.Sort(typesByString(gcsyms))
+ for _, ts := range gcsyms {
+ dgcsym(ts.t, true)
+ }
+}
+
+// writeITab writes the itab for concrete type typ implementing interface iface. If
+// allowNonImplement is true, allow the case where typ does not implement iface, and just
+// create a dummy itab with zeroed-out method entries.
+func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
+ // TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
+ // others) to stop clobbering these.
+ oldpos, oldfn := base.Pos, ir.CurFunc
+ defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }()
+
+ if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() {
+ base.Fatalf("writeITab(%v, %v)", typ, iface)
+ }
+
+ sigs := iface.AllMethods()
+ entries := make([]*obj.LSym, 0, len(sigs))
+
+ // both sigs and methods are sorted by name,
+ // so we can find the intersection in a single pass
+ for _, m := range methods(typ) {
+ if m.name == sigs[0].Sym {
+ entries = append(entries, m.isym)
+ if m.isym == nil {
+ panic("NO ISYM")
+ }
+ sigs = sigs[1:]
+ if len(sigs) == 0 {
+ break
+ }
+ }
+ }
+ completeItab := len(sigs) == 0
+ if !allowNonImplement && !completeItab {
+ base.Fatalf("incomplete itab")
+ }
+
+ // dump empty itab symbol into i.sym
+ // type itab struct {
+ // inter *interfacetype
+ // _type *_type
+ // hash uint32 // copy of _type.hash. Used for type switches.
+ // _ [4]byte
+ // fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
+ // }
+ o := objw.SymPtr(lsym, 0, writeType(iface), 0)
+ o = objw.SymPtr(lsym, o, writeType(typ), 0)
+ o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
+ o += 4 // skip unused field
+ if !completeItab {
+ // If typ doesn't implement iface, make method entries be zero.
+ o = objw.Uintptr(lsym, o, 0)
+ entries = entries[:0]
+ }
+ for _, fn := range entries {
+ o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
+ }
+ // Nothing writes static itabs, so they are read only.
+ objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ lsym.Set(obj.AttrContentAddressable, true)
+}
+
+func WritePluginTable() {
+ ptabs := typecheck.Target.PluginExports
+ if len(ptabs) == 0 {
+ return
+ }
+
+ lsym := base.Ctxt.Lookup("go:plugin.tabs")
+ ot := 0
+ for _, p := range ptabs {
+ // Dump ptab symbol into go.pluginsym package.
+ //
+ // type ptab struct {
+ // name nameOff
+ // typ typeOff // pointer to symbol
+ // }
+ nsym := dname(p.Sym().Name, "", nil, true, false)
+ t := p.Type()
+ if p.Class != ir.PFUNC {
+ t = types.NewPtr(t)
+ }
+ tsym := writeType(t)
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = objw.SymPtrOff(lsym, ot, tsym)
+ // Plugin exports symbols as interfaces. Mark their types
+ // as UsedInIface.
+ tsym.Set(obj.AttrUsedInIface, true)
+ }
+ objw.Global(lsym, int32(ot), int16(obj.RODATA))
+
+ lsym = base.Ctxt.Lookup("go:plugin.exports")
+ ot = 0
+ for _, p := range ptabs {
+ ot = objw.SymPtr(lsym, ot, p.Linksym(), 0)
+ }
+ objw.Global(lsym, int32(ot), int16(obj.RODATA))
+}
+
+// writtenByWriteBasicTypes reports whether typ is written by WriteBasicTypes.
+// WriteBasicTypes always writes pointer types; any pointer has been stripped off typ already.
+func writtenByWriteBasicTypes(typ *types.Type) bool {
+ if typ.Sym() == nil && typ.Kind() == types.TFUNC {
+ // func(error) string
+ if typ.NumRecvs() == 0 &&
+ typ.NumParams() == 1 && typ.NumResults() == 1 &&
+ typ.Param(0).Type == types.ErrorType &&
+ typ.Result(0).Type == types.Types[types.TSTRING] {
+ return true
+ }
+ }
+
+ // Now we have left the basic types plus any and error, plus slices of them.
+ // Strip the slice.
+ if typ.Sym() == nil && typ.IsSlice() {
+ typ = typ.Elem()
+ }
+
+ // Basic types.
+ sym := typ.Sym()
+ if sym != nil && (sym.Pkg == types.BuiltinPkg || sym.Pkg == types.UnsafePkg) {
+ return true
+ }
+ // any or error
+ return (sym == nil && typ.IsEmptyInterface()) || typ == types.ErrorType
+}
+
+func WriteBasicTypes() {
+ // do basic types if compiling package runtime.
+ // they have to be in at least one package,
+ // and runtime is always loaded implicitly,
+ // so this is as good as any.
+ // another possible choice would be package main,
+ // but using runtime means fewer copies in object files.
+ // The code here needs to be in sync with writtenByWriteBasicTypes above.
+ if base.Ctxt.Pkgpath != "runtime" {
+ return
+ }
+
+ // Note: always write NewPtr(t) because NeedEmit's caller strips the pointer.
+ var list []*types.Type
+ for i := types.Kind(1); i <= types.TBOOL; i++ {
+ list = append(list, types.Types[i])
+ }
+ list = append(list,
+ types.Types[types.TSTRING],
+ types.Types[types.TUNSAFEPTR],
+ types.AnyType,
+ types.ErrorType)
+ for _, t := range list {
+ writeType(types.NewPtr(t))
+ writeType(types.NewPtr(types.NewSlice(t)))
+ }
+
+ // emit type for func(error) string,
+ // which is the type of an auto-generated wrapper.
+ writeType(types.NewPtr(types.NewSignature(nil, []*types.Field{
+ types.NewField(base.Pos, nil, types.ErrorType),
+ }, []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
+ })))
+}
+
+type typeAndStr struct {
+ t *types.Type
+ short string // "short" here means TypeSymName
+ regular string
+}
+
+type typesByString []typeAndStr
+
+func (a typesByString) Len() int { return len(a) }
+func (a typesByString) Less(i, j int) bool {
+ // put named types before unnamed types
+ if a[i].t.Sym() != nil && a[j].t.Sym() == nil {
+ return true
+ }
+ if a[i].t.Sym() == nil && a[j].t.Sym() != nil {
+ return false
+ }
+
+ if a[i].short != a[j].short {
+ return a[i].short < a[j].short
+ }
+ // When the only difference between the types is whether
+ // they refer to byte or uint8, such as **byte vs **uint8,
+ // the types' NameStrings can be identical.
+ // To preserve deterministic sort ordering, sort these by String().
+ //
+ // TODO(mdempsky): This all seems suspect. Using LinkString would
+ // avoid naming collisions, and there shouldn't be a reason to care
+ // about "byte" vs "uint8": they share the same runtime type
+ // descriptor anyway.
+ if a[i].regular != a[j].regular {
+ return a[i].regular < a[j].regular
+ }
+ // Identical anonymous interfaces defined in different locations
+ // will be equal for the above checks, but different in DWARF output.
+ // Sort by source position to ensure deterministic order.
+ // See issues 27013 and 30202.
+ if a[i].t.Kind() == types.TINTER && len(a[i].t.AllMethods()) > 0 {
+ return a[i].t.AllMethods()[0].Pos.Before(a[j].t.AllMethods()[0].Pos)
+ }
+ return false
+}
+func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
+// which holds 1-bit entries describing where pointers are in a given type.
+// Above this length, the GC information is recorded as a GC program,
+// which can express repetition compactly. In either form, the
+// information is used by the runtime to initialize the heap bitmap,
+// and for large types (like 128 or more words), they are roughly the
+// same speed. GC programs are never much larger and often more
+// compact. (If large arrays are involved, they can be arbitrarily
+// more compact.)
+//
+// The cutoff must be large enough that any allocation large enough to
+// use a GC program is large enough that it does not share heap bitmap
+// bytes with any other objects, allowing the GC program execution to
+// assume an aligned start and not use atomic operations. In the current
+// runtime, this means all malloc size classes larger than the cutoff must
+// be multiples of four words. On 32-bit systems that's 16 bytes, and
+// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
+// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
+// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
+// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
+// must be >= 4.
+//
+// We used to use 16 because the GC programs do have some constant overhead
+// to get started, and processing 128 pointers seems to be enough to
+// amortize that overhead well.
+//
+// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
+// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
+// use bitmaps for objects up to 64 kB in size.
+//
+// Also known to reflect/type.go.
+const maxPtrmaskBytes = 2048
+
+// GCSym returns a data symbol containing GC information for type t, along
+// with a boolean reporting whether the UseGCProg bit should be set in the
+// type kind, and the ptrdata field to record in the reflect type information.
+// GCSym may be called in concurrent backend, so it does not emit the symbol
+// content.
+func GCSym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
+ // Record that we need to emit the GC symbol.
+ gcsymmu.Lock()
+ if _, ok := gcsymset[t]; !ok {
+ gcsymset[t] = struct{}{}
+ }
+ gcsymmu.Unlock()
+
+ return dgcsym(t, false)
+}
+
+// dgcsym returns a data symbol containing GC information for type t, along
+// with a boolean reporting whether the UseGCProg bit should be set in the
+// type kind, and the ptrdata field to record in the reflect type information.
+// When write is true, it writes the symbol data.
+func dgcsym(t *types.Type, write bool) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
+ ptrdata = types.PtrDataSize(t)
+ if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 {
+ lsym = dgcptrmask(t, write)
+ return
+ }
+
+ useGCProg = true
+ lsym, ptrdata = dgcprog(t, write)
+ return
+}
+
+// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
+func dgcptrmask(t *types.Type, write bool) *obj.LSym {
+ // Bytes we need for the ptrmask.
+ n := (types.PtrDataSize(t)/int64(types.PtrSize) + 7) / 8
+ // Runtime wants ptrmasks padded to a multiple of uintptr in size.
+ n = (n + int64(types.PtrSize) - 1) &^ (int64(types.PtrSize) - 1)
+ ptrmask := make([]byte, n)
+ fillptrmask(t, ptrmask)
+ p := fmt.Sprintf("runtime.gcbits.%x", ptrmask)
+
+ lsym := base.Ctxt.Lookup(p)
+ if write && !lsym.OnList() {
+ for i, x := range ptrmask {
+ objw.Uint8(lsym, i, x)
+ }
+ objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ lsym.Set(obj.AttrContentAddressable, true)
+ }
+ return lsym
+}
+
+// fillptrmask fills in ptrmask with 1s corresponding to the
+// word offsets in t that hold pointers.
+// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits.
+func fillptrmask(t *types.Type, ptrmask []byte) {
+ for i := range ptrmask {
+ ptrmask[i] = 0
+ }
+ if !t.HasPointers() {
+ return
+ }
+
+ vec := bitvec.New(8 * int32(len(ptrmask)))
+ typebits.Set(t, 0, vec)
+
+ nptr := types.PtrDataSize(t) / int64(types.PtrSize)
+ for i := int64(0); i < nptr; i++ {
+ if vec.Get(int32(i)) {
+ ptrmask[i/8] |= 1 << (uint(i) % 8)
+ }
+ }
+}
+
+// dgcprog emits and returns the symbol containing a GC program for type t
+// along with the size of the data described by the program (in the range
+// [types.PtrDataSize(t), t.Width]).
+// In practice, the size is types.PtrDataSize(t) except for non-trivial arrays.
+// For non-trivial arrays, the program describes the full t.Width size.
+func dgcprog(t *types.Type, write bool) (*obj.LSym, int64) {
+ types.CalcSize(t)
+ if t.Size() == types.BADWIDTH {
+ base.Fatalf("dgcprog: %v badwidth", t)
+ }
+ lsym := TypeLinksymPrefix(".gcprog", t)
+ var p gcProg
+ p.init(lsym, write)
+ p.emit(t, 0)
+ offset := p.w.BitIndex() * int64(types.PtrSize)
+ p.end()
+ if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Size() {
+ base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Size())
+ }
+ return lsym, offset
+}
+
+type gcProg struct {
+ lsym *obj.LSym
+ symoff int
+ w gcprog.Writer
+ write bool
+}
+
+func (p *gcProg) init(lsym *obj.LSym, write bool) {
+ p.lsym = lsym
+ p.write = write && !lsym.OnList()
+ p.symoff = 4 // first 4 bytes hold program length
+ if !write {
+ p.w.Init(func(byte) {})
+ return
+ }
+ p.w.Init(p.writeByte)
+ if base.Debug.GCProg > 0 {
+ fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
+ p.w.Debug(os.Stderr)
+ }
+}
+
+func (p *gcProg) writeByte(x byte) {
+ p.symoff = objw.Uint8(p.lsym, p.symoff, x)
+}
+
+func (p *gcProg) end() {
+ p.w.End()
+ if !p.write {
+ return
+ }
+ objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
+ objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ p.lsym.Set(obj.AttrContentAddressable, true)
+ if base.Debug.GCProg > 0 {
+ fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
+ }
+}
+
+func (p *gcProg) emit(t *types.Type, offset int64) {
+ types.CalcSize(t)
+ if !t.HasPointers() {
+ return
+ }
+ if t.Size() == int64(types.PtrSize) {
+ p.w.Ptr(offset / int64(types.PtrSize))
+ return
+ }
+ switch t.Kind() {
+ default:
+ base.Fatalf("gcProg.emit: unexpected type %v", t)
+
+ case types.TSTRING:
+ p.w.Ptr(offset / int64(types.PtrSize))
+
+ case types.TINTER:
+ // Note: the first word isn't a pointer. See comment in typebits.Set
+ p.w.Ptr(offset/int64(types.PtrSize) + 1)
+
+ case types.TSLICE:
+ p.w.Ptr(offset / int64(types.PtrSize))
+
+ case types.TARRAY:
+ if t.NumElem() == 0 {
+ // should have been handled by haspointers check above
+ base.Fatalf("gcProg.emit: empty array")
+ }
+
+ // Flatten array-of-array-of-array to just a big array by multiplying counts.
+ count := t.NumElem()
+ elem := t.Elem()
+ for elem.IsArray() {
+ count *= elem.NumElem()
+ elem = elem.Elem()
+ }
+
+ if !p.w.ShouldRepeat(elem.Size()/int64(types.PtrSize), count) {
+ // Cheaper to just emit the bits.
+ for i := int64(0); i < count; i++ {
+ p.emit(elem, offset+i*elem.Size())
+ }
+ return
+ }
+ p.emit(elem, offset)
+ p.w.ZeroUntil((offset + elem.Size()) / int64(types.PtrSize))
+ p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1)
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields() {
+ p.emit(t1.Type, offset+t1.Offset)
+ }
+ }
+}
+
+// ZeroAddr returns the address of a symbol with at least
+// size bytes of zeros.
+func ZeroAddr(size int64) ir.Node {
+ if size >= 1<<31 {
+ base.Fatalf("map elem too big %d", size)
+ }
+ if ZeroSize < size {
+ ZeroSize = size
+ }
+ lsym := base.PkgLinksym("go:map", "zero", obj.ABI0)
+ x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(x))
+}
+
+// NeedEmit reports whether typ is a type that we need to emit code
+// for (e.g., runtime type descriptors, method wrappers).
+func NeedEmit(typ *types.Type) bool {
+ // TODO(mdempsky): Export data should keep track of which anonymous
+ // and instantiated types were emitted, so at least downstream
+ // packages can skip re-emitting them.
+ //
+ // Perhaps we can just generalize the linker-symbol indexing to
+ // track the index of arbitrary types, not just defined types, and
+ // use its presence to detect this. The same idea would work for
+ // instantiated generic functions too.
+
+ switch sym := typ.Sym(); {
+ case writtenByWriteBasicTypes(typ):
+ return base.Ctxt.Pkgpath == "runtime"
+
+ case sym == nil:
+ // Anonymous type; possibly never seen before or ever again.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case sym.Pkg == types.LocalPkg:
+ // Local defined type; our responsibility.
+ return true
+
+ case typ.IsFullyInstantiated():
+ // Instantiated type; possibly instantiated with unique type arguments.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case typ.HasShape():
+ // Shape type; need to emit even though it lives in the .shape package.
+ // TODO: make sure the linker deduplicates them (see dupok in writeType above).
+ return true
+
+ default:
+ // Should have been emitted by an imported package.
+ return false
+ }
+}
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+// func (t T) M() {
+// ...
+// }
+//
+// already exists; this function generates
+//
+// func (u U) M() {
+// u.M()
+// }
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+// rcvr - U
+// method - M func (t T)(), a TFIELD type struct
+//
+// Also wraps methods on instantiated generic types for use in itab entries.
+// For an instantiated generic type G[int], we generate wrappers like:
+// G[int] pointer shaped:
+//
+// func (x G[int]) f(arg) {
+// .inst.G[int].f(dictionary, x, arg)
+// }
+//
+// G[int] not pointer shaped:
+//
+// func (x *G[int]) f(arg) {
+// .inst.G[int].f(dictionary, *x, arg)
+// }
+//
+// These wrappers are always fully stenciled.
+func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
+ if forItab && !types.IsDirectIface(rcvr) {
+ rcvr = rcvr.PtrTo()
+ }
+
+ newnam := ir.MethodSym(rcvr, method.Sym)
+ lsym := newnam.Linksym()
+
+ // Unified IR creates its own wrappers.
+ return lsym
+}
+
+var ZeroSize int64
+
+// MarkTypeUsedInInterface marks that type t is converted to an interface.
+// This information is used in the linker in dead method elimination.
+func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+ if t.HasShape() {
+ // Shape types shouldn't be put in interfaces, so we shouldn't ever get here.
+ base.Fatalf("shape types have no methods %+v", t)
+ }
+ MarkTypeSymUsedInInterface(TypeLinksym(t), from)
+}
+func MarkTypeSymUsedInInterface(tsym *obj.LSym, from *obj.LSym) {
+ // Emit a marker relocation. The linker will know the type is converted
+ // to an interface if "from" is reachable.
+ r := obj.Addrel(from)
+ r.Sym = tsym
+ r.Type = objabi.R_USEIFACE
+}
+
+// MarkUsedIfaceMethod marks that an interface method is used in the current
+// function. n is OCALLINTER node.
+func MarkUsedIfaceMethod(n *ir.CallExpr) {
+ // skip unnamed functions (func _())
+ if ir.CurFunc.LSym == nil {
+ return
+ }
+ dot := n.Fun.(*ir.SelectorExpr)
+ ityp := dot.X.Type()
+ if ityp.HasShape() {
+ // Here we're calling a method on a generic interface. Something like:
+ //
+ // type I[T any] interface { foo() T }
+ // func f[T any](x I[T]) {
+ // ... = x.foo()
+ // }
+ // f[int](...)
+ // f[string](...)
+ //
+ // In this case, in f we're calling foo on a generic interface.
+ // Which method could that be? Normally we could match the method
+ // both by name and by type. But in this case we don't really know
+ // the type of the method we're calling. It could be func()int
+ // or func()string. So we match on just the function name, instead
+ // of both the name and the type used for the non-generic case below.
+ // TODO: instantiations at least know the shape of the instantiated
+ // type, and the linker could do more complicated matching using
+ // some sort of fuzzy shape matching. For now, only use the name
+ // of the method for matching.
+ r := obj.Addrel(ir.CurFunc.LSym)
+ r.Sym = staticdata.StringSymNoCommon(dot.Sel.Name)
+ r.Type = objabi.R_USENAMEDMETHOD
+ return
+ }
+
+ tsym := TypeLinksym(ityp)
+ r := obj.Addrel(ir.CurFunc.LSym)
+ r.Sym = tsym
+ // dot.Offset() is the method index * PtrSize (the offset of code pointer
+ // in itab).
+ midx := dot.Offset() / int64(types.PtrSize)
+ r.Add = InterfaceMethodOffset(ityp, midx)
+ r.Type = objabi.R_USEIFACEMETHOD
+}
+
+func deref(t *types.Type) *types.Type {
+ if t.IsPtr() {
+ return t.Elem()
+ }
+ return t
+}
diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go
new file mode 100644
index 0000000..4244afb
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/galign.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/riscv"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &riscv.LinkRISCV64
+
+ arch.REGSP = riscv.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.Ginsnop = ginsnop
+ arch.ZeroRange = zeroRange
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
new file mode 100644
index 0000000..44488e4
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -0,0 +1,59 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += base.Ctxt.Arch.FixedFrameSize
+
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+ }
+ return p
+ }
+
+ if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_X25, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
+ return p
+ }
+
+ // Loop, zeroing pointer width bytes at a time.
+ // ADD $(off), SP, T0
+ // ADD $(cnt), T0, T1
+ // loop:
+ // MOV ZERO, (T0)
+ // ADD $Widthptr, T0
+ // BNE T0, T1, loop
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+ p.Reg = riscv.REG_T0
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+ loop := p
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = riscv.REG_T1
+ p.To.SetTarget(loop)
+ return p
+}
diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go
new file mode 100644
index 0000000..74bccf8
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/gsubr.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/objw"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // Hardware nop is ADD $0, ZERO
+ p := pp.Prog(riscv.AADD)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = riscv.REG_ZERO
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: riscv.REG_ZERO}
+ return p
+}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
new file mode 100644
index 0000000..2233818
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -0,0 +1,817 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+// ssaRegToReg maps ssa register numbers to obj register numbers.
+var ssaRegToReg = []int16{
+ riscv.REG_X0,
+ // X1 (LR): unused
+ riscv.REG_X2,
+ riscv.REG_X3,
+ riscv.REG_X4,
+ riscv.REG_X5,
+ riscv.REG_X6,
+ riscv.REG_X7,
+ riscv.REG_X8,
+ riscv.REG_X9,
+ riscv.REG_X10,
+ riscv.REG_X11,
+ riscv.REG_X12,
+ riscv.REG_X13,
+ riscv.REG_X14,
+ riscv.REG_X15,
+ riscv.REG_X16,
+ riscv.REG_X17,
+ riscv.REG_X18,
+ riscv.REG_X19,
+ riscv.REG_X20,
+ riscv.REG_X21,
+ riscv.REG_X22,
+ riscv.REG_X23,
+ riscv.REG_X24,
+ riscv.REG_X25,
+ riscv.REG_X26,
+ riscv.REG_X27,
+ riscv.REG_X28,
+ riscv.REG_X29,
+ riscv.REG_X30,
+ riscv.REG_X31,
+ riscv.REG_F0,
+ riscv.REG_F1,
+ riscv.REG_F2,
+ riscv.REG_F3,
+ riscv.REG_F4,
+ riscv.REG_F5,
+ riscv.REG_F6,
+ riscv.REG_F7,
+ riscv.REG_F8,
+ riscv.REG_F9,
+ riscv.REG_F10,
+ riscv.REG_F11,
+ riscv.REG_F12,
+ riscv.REG_F13,
+ riscv.REG_F14,
+ riscv.REG_F15,
+ riscv.REG_F16,
+ riscv.REG_F17,
+ riscv.REG_F18,
+ riscv.REG_F19,
+ riscv.REG_F20,
+ riscv.REG_F21,
+ riscv.REG_F22,
+ riscv.REG_F23,
+ riscv.REG_F24,
+ riscv.REG_F25,
+ riscv.REG_F26,
+ riscv.REG_F27,
+ riscv.REG_F28,
+ riscv.REG_F29,
+ riscv.REG_F30,
+ riscv.REG_F31,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+func loadByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ base.Fatalf("unknown float width for load %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ if t.IsSigned() {
+ return riscv.AMOVB
+ } else {
+ return riscv.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return riscv.AMOVH
+ } else {
+ return riscv.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return riscv.AMOVW
+ } else {
+ return riscv.AMOVWU
+ }
+ case 8:
+ return riscv.AMOV
+ default:
+ base.Fatalf("unknown width for load %d in type %v", width, t)
+ return 0
+ }
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ base.Fatalf("unknown float width for store %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ return riscv.AMOVB
+ case 2:
+ return riscv.AMOVH
+ case 4:
+ return riscv.AMOVW
+ case 8:
+ return riscv.AMOV
+ default:
+ base.Fatalf("unknown width for store %d in type %v", width, t)
+ return 0
+ }
+}
+
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+ switch {
+ case alignment%8 == 0:
+ return riscv.AMOV, 8
+ case alignment%4 == 0:
+ return riscv.AMOVW, 4
+ case alignment%2 == 0:
+ return riscv.AMOVH, 2
+ default:
+ return riscv.AMOVB, 1
+ }
+}
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+// RISC-V has no flags, so this is a no-op.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ s.SetPos(v.Pos)
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpPhi:
+ ssagen.CheckLoweredPhi(v)
+ case ssa.OpCopy, ssa.OpRISCV64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if rs == rd {
+ return
+ }
+ as := riscv.AMOV
+ if v.Type.IsFloat() {
+ as = riscv.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpRISCV64MOVDnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+
+ ssagen.CheckArgReg(v)
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
+ ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
+ a = a.Args[0]
+ }
+ as := v.Op.Asm()
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load and already sign/zero-extended
+ if rs == rd {
+ return
+ }
+ as = riscv.AMOV
+ default:
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
+ ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW,
+ ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
+ ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
+ ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
+ ssa.OpRISCV64REMUW,
+ ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
+ ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
+ ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
+ ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED,
+ ssa.OpRISCV64FSGNJD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpRISCV64LoweredMuluhilo:
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(riscv.AMULHU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(riscv.AMUL)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+ case ssa.OpRISCV64LoweredMuluover:
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(riscv.AMULHU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg1()
+ p1 := s.Prog(riscv.AMUL)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(riscv.ASNEZ)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Reg1()
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg1()
+ case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD,
+ ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
+ ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
+ ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
+ ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
+ ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
+ ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI,
+ ssa.OpRISCV64SLTIU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVaddr:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = riscv.REG_SP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
+ ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
+ ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
+ ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
+ s.Call(v)
+ case ssa.OpRISCV64CALLtail:
+ s.TailCall(v)
+ case ssa.OpRISCV64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+ case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+
+ case ssa.OpRISCV64LoweredAtomicLoad8:
+ s.Prog(riscv.AFENCE)
+ p := s.Prog(riscv.AMOVBU)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(riscv.AFENCE)
+
+ case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
+ as := riscv.ALRW
+ if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
+ as = riscv.ALRD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicStore8:
+ s.Prog(riscv.AFENCE)
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(riscv.AFENCE)
+
+ case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_ZERO
+
+ case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
+ as := riscv.AAMOADDW
+ if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
+ as = riscv.AAMOADDD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_TMP
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_TMP
+ p2.Reg = v.Args[1].Reg()
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
+ // MOV ZERO, Rout
+ // LR (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 3(PC)
+ // SC Rarg2, (Rarg0), Rtmp
+ // BNE Rtmp, ZERO, -3(PC)
+ // MOV $1, Rout
+
+ lr := riscv.ALRW
+ sc := riscv.ASCW
+ if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
+ lr = riscv.ALRD
+ sc = riscv.ASCD
+ }
+
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+
+ p1 := s.Prog(lr)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = riscv.REG_TMP
+
+ p2 := s.Prog(riscv.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.Reg = riscv.REG_TMP
+ p2.To.Type = obj.TYPE_BRANCH
+
+ p3 := s.Prog(sc)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ p3.RegTo2 = riscv.REG_TMP
+
+ p4 := s.Prog(riscv.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = riscv.REG_TMP
+ p4.Reg = riscv.REG_ZERO
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p1)
+
+ p5 := s.Prog(riscv.AMOV)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+
+ p6 := s.Prog(obj.ANOP)
+ p2.To.SetTarget(p6)
+
+ case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_ZERO
+
+ case ssa.OpRISCV64LoweredZero:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov ZERO, (Rarg0)
+ // ADD $sz, Rarg0
+ // BGEU Rarg1, Rarg0, -2(PC)
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.ABGEU)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.Reg = v.Args[0].Reg()
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[1].Reg()
+ p3.To.SetTarget(p)
+
+ case ssa.OpRISCV64LoweredMove:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov (Rarg1), T2
+ // mov T2, (Rarg0)
+ // ADD $sz, Rarg0
+ // ADD $sz, Rarg1
+ // BGEU Rarg2, Rarg0, -4(PC)
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_T2
+
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_T2
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.AADD)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Args[0].Reg()
+
+ p4 := s.Prog(riscv.AADD)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Args[1].Reg()
+
+ p5 := s.Prog(riscv.ABGEU)
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.Reg = v.Args[1].Reg()
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Args[2].Reg()
+ p5.To.SetTarget(p)
+
+ case ssa.OpRISCV64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ // TODO: optimizations. See arm and amd64 LoweredNilCheck.
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_ZERO
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpRISCV64LoweredGetClosurePtr:
+ // Closure pointer is S10 (riscv.REG_CTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpRISCV64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpRISCV64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpRISCV64LoweredPubBarrier:
+ // FENCE
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F:
+ // input is already rounded
+
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+
+ default:
+ v.Fatalf("Unhandled op %v", v.Op)
+ }
+}
+
+var blockBranch = [...]obj.As{
+ ssa.BlockRISCV64BEQ: riscv.ABEQ,
+ ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
+ ssa.BlockRISCV64BGE: riscv.ABGE,
+ ssa.BlockRISCV64BGEU: riscv.ABGEU,
+ ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
+ ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
+ ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
+ ssa.BlockRISCV64BLT: riscv.ABLT,
+ ssa.BlockRISCV64BLTU: riscv.ABLTU,
+ ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
+ ssa.BlockRISCV64BNE: riscv.ABNE,
+ ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ s.SetPos(b.Pos)
+
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in A0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(riscv.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.Reg = riscv.REG_A0
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
+ ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
+ ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
+
+ as := blockBranch[b.Kind]
+ invAs := riscv.InvertBranch(as)
+
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(invAs, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(as, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(as, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(invAs, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ p.From.Type = obj.TYPE_REG
+ switch b.Kind {
+ case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
+ if b.NumControls() != 2 {
+ b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
+ }
+ p.From.Reg = b.Controls[0].Reg()
+ p.Reg = b.Controls[1].Reg()
+
+ case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
+ if b.NumControls() != 1 {
+ b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
+ }
+ p.From.Reg = b.Controls[0].Reg()
+ }
+
+ default:
+ b.Fatalf("Unhandled block: %s", b.LongString())
+ }
+}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/rttype/rttype.go b/src/cmd/compile/internal/rttype/rttype.go
new file mode 100644
index 0000000..cdc399d
--- /dev/null
+++ b/src/cmd/compile/internal/rttype/rttype.go
@@ -0,0 +1,283 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rttype allows the compiler to share type information with
+// the runtime. The shared type information is stored in
+// internal/abi. This package translates those types from the host
+// machine on which the compiler runs to the target machine on which
+// the compiled program will run. In particular, this package handles
+// layout differences between e.g. a 64 bit compiler and 32 bit
+// target.
+package rttype
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "internal/abi"
+ "reflect"
+)
+
+// The type structures shared with the runtime.
+var Type *types.Type
+
+var ArrayType *types.Type
+var ChanType *types.Type
+var FuncType *types.Type
+var InterfaceType *types.Type
+var MapType *types.Type
+var PtrType *types.Type
+var SliceType *types.Type
+var StructType *types.Type
+
+// Types that are parts of the types above.
+var IMethod *types.Type
+var Method *types.Type
+var StructField *types.Type
+var UncommonType *types.Type
+
+// Type switches and asserts
+var InterfaceSwitch *types.Type
+var TypeAssert *types.Type
+
+func Init() {
+ // Note: this has to be called explicitly instead of being
+ // an init function so it runs after the types package has
+ // been properly initialized.
+ Type = fromReflect(reflect.TypeOf(abi.Type{}))
+ ArrayType = fromReflect(reflect.TypeOf(abi.ArrayType{}))
+ ChanType = fromReflect(reflect.TypeOf(abi.ChanType{}))
+ FuncType = fromReflect(reflect.TypeOf(abi.FuncType{}))
+ InterfaceType = fromReflect(reflect.TypeOf(abi.InterfaceType{}))
+ MapType = fromReflect(reflect.TypeOf(abi.MapType{}))
+ PtrType = fromReflect(reflect.TypeOf(abi.PtrType{}))
+ SliceType = fromReflect(reflect.TypeOf(abi.SliceType{}))
+ StructType = fromReflect(reflect.TypeOf(abi.StructType{}))
+
+ IMethod = fromReflect(reflect.TypeOf(abi.Imethod{}))
+ Method = fromReflect(reflect.TypeOf(abi.Method{}))
+ StructField = fromReflect(reflect.TypeOf(abi.StructField{}))
+ UncommonType = fromReflect(reflect.TypeOf(abi.UncommonType{}))
+
+ InterfaceSwitch = fromReflect(reflect.TypeOf(abi.InterfaceSwitch{}))
+ TypeAssert = fromReflect(reflect.TypeOf(abi.TypeAssert{}))
+
+ // Make sure abi functions are correct. These functions are used
+ // by the linker which doesn't have the ability to do type layout,
+ // so we check the functions it uses here.
+ ptrSize := types.PtrSize
+ if got, want := int64(abi.CommonSize(ptrSize)), Type.Size(); got != want {
+ base.Fatalf("abi.CommonSize() == %d, want %d", got, want)
+ }
+ if got, want := int64(abi.StructFieldSize(ptrSize)), StructField.Size(); got != want {
+ base.Fatalf("abi.StructFieldSize() == %d, want %d", got, want)
+ }
+ if got, want := int64(abi.UncommonSize()), UncommonType.Size(); got != want {
+ base.Fatalf("abi.UncommonSize() == %d, want %d", got, want)
+ }
+ if got, want := int64(abi.TFlagOff(ptrSize)), Type.OffsetOf("TFlag"); got != want {
+ base.Fatalf("abi.TFlagOff() == %d, want %d", got, want)
+ }
+}
+
+// fromReflect translates from a host type to the equivalent target type.
+func fromReflect(rt reflect.Type) *types.Type {
+ t := reflectToType(rt)
+ types.CalcSize(t)
+ return t
+}
+
+// reflectToType converts from a reflect.Type (which is a compiler
+// host type) to a *types.Type, which is a target type. The result
+// must be CalcSize'd before using.
+func reflectToType(rt reflect.Type) *types.Type {
+ switch rt.Kind() {
+ case reflect.Bool:
+ return types.Types[types.TBOOL]
+ case reflect.Int:
+ return types.Types[types.TINT]
+ case reflect.Int32:
+ return types.Types[types.TINT32]
+ case reflect.Uint8:
+ return types.Types[types.TUINT8]
+ case reflect.Uint16:
+ return types.Types[types.TUINT16]
+ case reflect.Uint32:
+ return types.Types[types.TUINT32]
+ case reflect.Uintptr:
+ return types.Types[types.TUINTPTR]
+ case reflect.Ptr, reflect.Func, reflect.UnsafePointer:
+ // TODO: there's no mechanism to distinguish different pointer types,
+ // so we treat them all as unsafe.Pointer.
+ return types.Types[types.TUNSAFEPTR]
+ case reflect.Slice:
+ return types.NewSlice(reflectToType(rt.Elem()))
+ case reflect.Array:
+ return types.NewArray(reflectToType(rt.Elem()), int64(rt.Len()))
+ case reflect.Struct:
+ fields := make([]*types.Field, rt.NumField())
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ ft := reflectToType(f.Type)
+ fields[i] = &types.Field{Sym: &types.Sym{Name: f.Name}, Type: ft}
+ }
+ return types.NewStruct(fields)
+ default:
+ base.Fatalf("unhandled kind %s", rt.Kind())
+ return nil
+ }
+}
+
+// A Cursor represents a typed location inside a static variable where we
+// are going to write.
+type Cursor struct {
+ lsym *obj.LSym
+ offset int64
+ typ *types.Type
+}
+
+// NewCursor returns a cursor starting at lsym+off and having type t.
+func NewCursor(lsym *obj.LSym, off int64, t *types.Type) Cursor {
+ return Cursor{lsym: lsym, offset: off, typ: t}
+}
+
+// WritePtr writes a pointer "target" to the component at the location specified by c.
+func (c Cursor) WritePtr(target *obj.LSym) {
+ if c.typ.Kind() != types.TUNSAFEPTR {
+ base.Fatalf("can't write ptr, it has kind %s", c.typ.Kind())
+ }
+ if target == nil {
+ objw.Uintptr(c.lsym, int(c.offset), 0)
+ } else {
+ objw.SymPtr(c.lsym, int(c.offset), target, 0)
+ }
+}
+func (c Cursor) WriteUintptr(val uint64) {
+ if c.typ.Kind() != types.TUINTPTR {
+ base.Fatalf("can't write uintptr, it has kind %s", c.typ.Kind())
+ }
+ objw.Uintptr(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint32(val uint32) {
+ if c.typ.Kind() != types.TUINT32 {
+ base.Fatalf("can't write uint32, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint32(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint16(val uint16) {
+ if c.typ.Kind() != types.TUINT16 {
+ base.Fatalf("can't write uint16, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint16(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteUint8(val uint8) {
+ if c.typ.Kind() != types.TUINT8 {
+ base.Fatalf("can't write uint8, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint8(c.lsym, int(c.offset), val)
+}
+func (c Cursor) WriteInt(val int64) {
+ if c.typ.Kind() != types.TINT {
+ base.Fatalf("can't write int, it has kind %s", c.typ.Kind())
+ }
+ objw.Uintptr(c.lsym, int(c.offset), uint64(val))
+}
+func (c Cursor) WriteInt32(val int32) {
+ if c.typ.Kind() != types.TINT32 {
+ base.Fatalf("can't write int32, it has kind %s", c.typ.Kind())
+ }
+ objw.Uint32(c.lsym, int(c.offset), uint32(val))
+}
+func (c Cursor) WriteBool(val bool) {
+ if c.typ.Kind() != types.TBOOL {
+ base.Fatalf("can't write bool, it has kind %s", c.typ.Kind())
+ }
+ objw.Bool(c.lsym, int(c.offset), val)
+}
+
+// WriteSymPtrOff writes a "pointer" to the given symbol. The symbol
+// is encoded as a uint32 offset from the start of the section.
+func (c Cursor) WriteSymPtrOff(target *obj.LSym, weak bool) {
+ if c.typ.Kind() != types.TINT32 && c.typ.Kind() != types.TUINT32 {
+ base.Fatalf("can't write SymPtr, it has kind %s", c.typ.Kind())
+ }
+ if target == nil {
+ objw.Uint32(c.lsym, int(c.offset), 0)
+ } else if weak {
+ objw.SymPtrWeakOff(c.lsym, int(c.offset), target)
+ } else {
+ objw.SymPtrOff(c.lsym, int(c.offset), target)
+ }
+}
+
+// WriteSlice writes a slice header to c. The pointer is target+off, the len and cap fields are given.
+func (c Cursor) WriteSlice(target *obj.LSym, off, len, cap int64) {
+ if c.typ.Kind() != types.TSLICE {
+ base.Fatalf("can't write slice, it has kind %s", c.typ.Kind())
+ }
+ objw.SymPtr(c.lsym, int(c.offset), target, int(off))
+ objw.Uintptr(c.lsym, int(c.offset)+types.PtrSize, uint64(len))
+ objw.Uintptr(c.lsym, int(c.offset)+2*types.PtrSize, uint64(cap))
+ // TODO: ability to switch len&cap. Maybe not needed here, as every caller
+ // passes the same thing for both?
+ if len != cap {
+ base.Fatalf("len != cap (%d != %d)", len, cap)
+ }
+}
+
+// Reloc adds a relocation from the current cursor position.
+// Reloc fills in Off and Siz fields. Caller should fill in the rest (Type, others).
+func (c Cursor) Reloc() *obj.Reloc {
+ r := obj.Addrel(c.lsym)
+ r.Off = int32(c.offset)
+ r.Siz = uint8(c.typ.Size())
+ return r
+}
+
+// Field selects the field with the given name from the struct pointed to by c.
+func (c Cursor) Field(name string) Cursor {
+ if c.typ.Kind() != types.TSTRUCT {
+ base.Fatalf("can't call Field on non-struct %v", c.typ)
+ }
+ for _, f := range c.typ.Fields() {
+ if f.Sym.Name == name {
+ return Cursor{lsym: c.lsym, offset: c.offset + f.Offset, typ: f.Type}
+ }
+ }
+ base.Fatalf("couldn't find field %s in %v", name, c.typ)
+ return Cursor{}
+}
+
+type ArrayCursor struct {
+ c Cursor // cursor pointing at first element
+ n int // number of elements
+}
+
+// NewArrayCursor returns a cursor starting at lsym+off and having n copies of type t.
+func NewArrayCursor(lsym *obj.LSym, off int64, t *types.Type, n int) ArrayCursor {
+ return ArrayCursor{
+ c: NewCursor(lsym, off, t),
+ n: n,
+ }
+}
+
+// Elem selects element i of the array pointed to by c.
+func (a ArrayCursor) Elem(i int) Cursor {
+ if i < 0 || i >= a.n {
+ base.Fatalf("element index %d out of range [0:%d]", i, a.n)
+ }
+ return Cursor{lsym: a.c.lsym, offset: a.c.offset + int64(i)*a.c.typ.Size(), typ: a.c.typ}
+}
+
+// ModifyArray converts a cursor pointing at a type [k]T to a cursor pointing
+// at a type [n]T.
+// Also returns the size delta, aka (n-k)*sizeof(T).
+func (c Cursor) ModifyArray(n int) (ArrayCursor, int64) {
+ if c.typ.Kind() != types.TARRAY {
+ base.Fatalf("can't call ModifyArray on non-array %v", c.typ)
+ }
+ k := c.typ.NumElem()
+ return ArrayCursor{c: Cursor{lsym: c.lsym, offset: c.offset, typ: c.typ.Elem()}, n: n}, (int64(n) - k) * c.typ.Elem().Size()
+}
diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go
new file mode 100644
index 0000000..d880834
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/galign.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/s390x"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &s390x.Links390x
+ arch.REGSP = s390x.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
new file mode 100644
index 0000000..70e4031
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -0,0 +1,89 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/objw"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// clearLoopCutOff is the (somewhat arbitrary) value above which it is better
+// to have a loop of clear instructions (e.g. XCs) rather than just generating
+// multiple instructions (i.e. loop unrolling).
+// Must be between 256 and 4096.
+const clearLoopCutoff = 1024
+
+// zerorange clears the stack in the given range.
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += base.Ctxt.Arch.FixedFrameSize
+ reg := int16(s390x.REGSP)
+
+ // If the off cannot fit in a 12-bit unsigned displacement then we
+ // need to create a copy of the stack pointer that we can adjust.
+ // We also need to do this if we are going to loop.
+ if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+ p.Reg = int16(s390x.REGSP)
+ reg = s390x.REGRT1
+ off = 0
+ }
+
+ // Generate a loop of large clears.
+ if cnt > clearLoopCutoff {
+ ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
+ p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+ pl := p
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+ p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(pl)
+ cnt = cnt % 256
+ }
+
+ // Generate remaining clear instructions without a loop.
+ for cnt > 0 {
+ n := cnt
+
+ // Can clear at most 256 bytes per instruction.
+ if n > 256 {
+ n = 256
+ }
+
+ switch n {
+ // Handle very small clears with move instructions.
+ case 8, 4, 2, 1:
+ ins := s390x.AMOVB
+ switch n {
+ case 8:
+ ins = s390x.AMOVD
+ case 4:
+ ins = s390x.AMOVW
+ case 2:
+ ins = s390x.AMOVH
+ }
+ p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+
+ // Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
+ default:
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+ }
+
+ cnt -= n
+ off += n
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ return pp.Prog(s390x.ANOPH)
+}
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
new file mode 100644
index 0000000..a97c156
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -0,0 +1,959 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.OpS390XMOVDconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = ssa.AuxMark
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return s390x.AFMOVS
+ case 8:
+ return s390x.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return s390x.AMOVB
+ } else {
+ return s390x.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return s390x.AMOVH
+ } else {
+ return s390x.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return s390x.AMOVW
+ } else {
+ return s390x.AMOVWZ
+ }
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return s390x.AFMOVS
+ case 8:
+ return s390x.AFMOVD
+ }
+ } else {
+ switch width {
+ case 1:
+ return s390x.AMOVB
+ case 2:
+ return s390x.AMOVH
+ case 4:
+ return s390x.AMOVW
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ return s390x.AFMOVD
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return s390x.AMOVB
+ } else {
+ return s390x.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return s390x.AMOVH
+ } else {
+ return s390x.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return s390x.AMOVW
+ } else {
+ return s390x.AMOVWZ
+ }
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// opregreg emits instructions for
+//
+// dest := dest(To) op src(From)
+//
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+// opregregimm emits instructions for
+//
+// dest := src(From) op off
+//
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = off
+ p.Reg = src
+ p.To.Reg = dest
+ p.To.Type = obj.TYPE_REG
+ return p
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpS390XSLD, ssa.OpS390XSLW,
+ ssa.OpS390XSRD, ssa.OpS390XSRW,
+ ssa.OpS390XSRAD, ssa.OpS390XSRAW,
+ ssa.OpS390XRLLG, ssa.OpS390XRLL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ if r2 == s390x.REG_R0 {
+ v.Fatalf("cannot use R0 as shift value %s", v.LongString())
+ }
+ p := opregreg(s, v.Op.Asm(), r, r2)
+ if r != r1 {
+ p.Reg = r1
+ }
+ case ssa.OpS390XRXSBG:
+ r2 := v.Args[1].Reg()
+ i := v.Aux.(s390x.RotateParams)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
+ p.AddRestSourceArgs([]obj.Addr{
+ {Type: obj.TYPE_CONST, Offset: int64(i.End)},
+ {Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
+ {Type: obj.TYPE_REG, Reg: r2},
+ })
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ case ssa.OpS390XRISBGZ:
+ r1 := v.Reg()
+ r2 := v.Args[0].Reg()
+ i := v.Aux.(s390x.RotateParams)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
+ p.AddRestSourceArgs([]obj.Addr{
+ {Type: obj.TYPE_CONST, Offset: int64(i.End)},
+ {Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
+ {Type: obj.TYPE_REG, Reg: r2},
+ })
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
+ case ssa.OpS390XADD, ssa.OpS390XADDW,
+ ssa.OpS390XSUB, ssa.OpS390XSUBW,
+ ssa.OpS390XAND, ssa.OpS390XANDW,
+ ssa.OpS390XOR, ssa.OpS390XORW,
+ ssa.OpS390XXOR, ssa.OpS390XXORW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := opregreg(s, v.Op.Asm(), r, r2)
+ if r != r1 {
+ p.Reg = r1
+ }
+ case ssa.OpS390XADDC:
+ r1 := v.Reg0()
+ r2 := v.Args[0].Reg()
+ r3 := v.Args[1].Reg()
+ if r1 == r2 {
+ r2, r3 = r3, r2
+ }
+ p := opregreg(s, v.Op.Asm(), r1, r2)
+ if r3 != r1 {
+ p.Reg = r3
+ }
+ case ssa.OpS390XSUBC:
+ r1 := v.Reg0()
+ r2 := v.Args[0].Reg()
+ r3 := v.Args[1].Reg()
+ p := opregreg(s, v.Op.Asm(), r1, r3)
+ if r1 != r2 {
+ p.Reg = r2
+ }
+ case ssa.OpS390XADDE, ssa.OpS390XSUBE:
+ r2 := v.Args[1].Reg()
+ opregreg(s, v.Op.Asm(), v.Reg0(), r2)
+ case ssa.OpS390XADDCconst:
+ r1 := v.Reg0()
+ r3 := v.Args[0].Reg()
+ i2 := int64(int16(v.AuxInt))
+ opregregimm(s, v.Op.Asm(), r1, r3, i2)
+ // 2-address opcode arithmetic
+ case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
+ ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
+ ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ case ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
+ ssa.OpS390XFADDS, ssa.OpS390XFADD:
+ opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg())
+ case ssa.OpS390XMLGR:
+ // MLGR Rx R3 -> R2:R3
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ if r1 != s390x.REG_R3 {
+ v.Fatalf("We require the multiplcand to be stored in R3 for MLGR %s", v.LongString())
+ }
+ p := s.Prog(s390x.AMLGR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ p.To.Reg = s390x.REG_R2
+ p.To.Type = obj.TYPE_REG
+ case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
+ ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XFIDBR:
+ switch v.AuxInt {
+ case 0, 1, 3, 4, 5, 6, 7:
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ default:
+ v.Fatalf("invalid FIDBR mask: %v", v.AuxInt)
+ }
+ case ssa.OpS390XCPSDR:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
+ ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
+ ssa.OpS390XMODD, ssa.OpS390XMODW,
+ ssa.OpS390XMODDU, ssa.OpS390XMODWU:
+
+ // TODO(mundaym): use the temp registers every time like x86 does with AX?
+ dividend := v.Args[0].Reg()
+ divisor := v.Args[1].Reg()
+
+ // CPU faults upon signed overflow, which occurs when most
+ // negative int is divided by -1.
+ var j *obj.Prog
+ if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
+ v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
+
+ var c *obj.Prog
+ c = s.Prog(s390x.ACMP)
+ j = s.Prog(s390x.ABEQ)
+
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = divisor
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = divisor
+ p.Reg = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dividend
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := s.Prog(s390x.ABR)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
+ // n * -1 = -n
+ n = s.Prog(s390x.ANEG)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = dividend
+ } else {
+ // n % -1 == 0
+ n = s.Prog(s390x.AXOR)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = dividend
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = dividend
+ }
+
+ j.To.SetTarget(n)
+ j2.To.SetTarget(s.Pc())
+ }
+ case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
+ ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
+ ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
+ ssa.OpS390XORconst, ssa.OpS390XORWconst,
+ ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
+ ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
+ ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
+ ssa.OpS390XRLLconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ if r != r1 {
+ p.Reg = r1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XMOVDaddridx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(s390x.AMOVD)
+ p.From.Scale = 1
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = r
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVDaddr:
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(uint32(v.AuxInt))
+ case ssa.OpS390XMOVDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpS390XADDWload, ssa.OpS390XADDload,
+ ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
+ ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
+ ssa.OpS390XANDWload, ssa.OpS390XANDload,
+ ssa.OpS390XORWload, ssa.OpS390XORload,
+ ssa.OpS390XXORWload, ssa.OpS390XXORload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVDload,
+ ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
+ ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
+ ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
+ ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
+ ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx,
+ ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
+ ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Scale = 1
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
+ ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
+ ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
+ ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
+ ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Scale = 1
+ p.To.Index = i
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
+ ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
+ ssa.OpS390XLDGR, ssa.OpS390XLGDR,
+ ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
+ ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
+ ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR,
+ ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR,
+ ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
+ ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
+ ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.OpS390XCLEAR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpCopy:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpS390XLoweredGetClosurePtr:
+ // Closure pointer is R12 (already)
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
+ // input is already rounded
+ case ssa.OpS390XLoweredGetG:
+ r := v.Reg()
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = s390x.REGG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
+ s.Call(v)
+ case ssa.OpS390XCALLtail:
+ s.TailCall(v)
+ case ssa.OpS390XLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+ case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
+ ssa.OpS390XNEG, ssa.OpS390XNEGW,
+ ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XNOT, ssa.OpS390XNOTW:
+ v.Fatalf("NOT/NOTW generated %s", v.LongString())
+ case ssa.OpS390XSumBytes2, ssa.OpS390XSumBytes4, ssa.OpS390XSumBytes8:
+ v.Fatalf("SumBytes generated %s", v.LongString())
+ case ssa.OpS390XLOCGR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(v.Aux.(s390x.CCMask))
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XFSQRTS, ssa.OpS390XFSQRT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XLTDBR, ssa.OpS390XLTEBR:
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT, ssa.OpS390XFlagOV:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
+ v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XLoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ p := s.Prog(s390x.AMOVBZ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpS390XMVC:
+ vo := v.AuxValAndOff()
+ p := s.Prog(s390x.AMVC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = vo.Val64()
+ p.AddRestSource(obj.Addr{
+ Type: obj.TYPE_MEM,
+ Reg: v.Args[1].Reg(),
+ Offset: vo.Off64(),
+ })
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = vo.Off64()
+ case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
+ ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
+ for i := 2; i < len(v.Args)-1; i++ {
+ if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
+ v.Fatalf("invalid store multiple %s", v.LongString())
+ }
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[len(v.Args)-2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XLoweredMove:
+ // Inputs must be valid pointers to memory,
+ // so adjust arg0 and arg1 as part of the expansion.
+ // arg2 should be src+size,
+ //
+ // mvc: MVC $256, 0(R2), 0(R1)
+ // MOVD $256(R1), R1
+ // MOVD $256(R2), R2
+ // CMP R2, Rarg2
+ // BNE mvc
+ // MVC $rem, 0(R2), 0(R1) // if rem > 0
+ // arg2 is the last address to move in the loop + 256
+ mvc := s.Prog(s390x.AMVC)
+ mvc.From.Type = obj.TYPE_CONST
+ mvc.From.Offset = 256
+ mvc.AddRestSource(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Reg = v.Args[0].Reg()
+
+ for i := 0; i < 2; i++ {
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_ADDR
+ movd.From.Reg = v.Args[i].Reg()
+ movd.From.Offset = 256
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Args[i].Reg()
+ }
+
+ cmpu := s.Prog(s390x.ACMPU)
+ cmpu.From.Reg = v.Args[1].Reg()
+ cmpu.From.Type = obj.TYPE_REG
+ cmpu.To.Reg = v.Args[2].Reg()
+ cmpu.To.Type = obj.TYPE_REG
+
+ bne := s.Prog(s390x.ABLT)
+ bne.To.Type = obj.TYPE_BRANCH
+ bne.To.SetTarget(mvc)
+
+ if v.AuxInt > 0 {
+ mvc := s.Prog(s390x.AMVC)
+ mvc.From.Type = obj.TYPE_CONST
+ mvc.From.Offset = v.AuxInt
+ mvc.AddRestSource(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Reg = v.Args[0].Reg()
+ }
+ case ssa.OpS390XLoweredZero:
+ // Input must be valid pointers to memory,
+ // so adjust arg0 as part of the expansion.
+ // arg1 should be src+size,
+ //
+ // clear: CLEAR $256, 0(R1)
+ // MOVD $256(R1), R1
+ // CMP R1, Rarg1
+ // BNE clear
+ // CLEAR $rem, 0(R1) // if rem > 0
+ // arg1 is the last address to zero in the loop + 256
+ clear := s.Prog(s390x.ACLEAR)
+ clear.From.Type = obj.TYPE_CONST
+ clear.From.Offset = 256
+ clear.To.Type = obj.TYPE_MEM
+ clear.To.Reg = v.Args[0].Reg()
+
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_ADDR
+ movd.From.Reg = v.Args[0].Reg()
+ movd.From.Offset = 256
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Args[0].Reg()
+
+ cmpu := s.Prog(s390x.ACMPU)
+ cmpu.From.Reg = v.Args[0].Reg()
+ cmpu.From.Type = obj.TYPE_REG
+ cmpu.To.Reg = v.Args[1].Reg()
+ cmpu.To.Type = obj.TYPE_REG
+
+ bne := s.Prog(s390x.ABLT)
+ bne.To.Type = obj.TYPE_BRANCH
+ bne.To.SetTarget(clear)
+
+ if v.AuxInt > 0 {
+ clear := s.Prog(s390x.ACLEAR)
+ clear.From.Type = obj.TYPE_CONST
+ clear.From.Offset = v.AuxInt
+ clear.To.Type = obj.TYPE_MEM
+ clear.To.Reg = v.Args[0].Reg()
+ }
+ case ssa.OpS390XMOVBZatomicload, ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XLAN, ssa.OpS390XLAO:
+ // LA(N|O) Ry, TMP, 0(Rx)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = s390x.REGTMP
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = v.Args[0].Reg()
+ case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor:
+ r := v.Args[0].Reg() // clobbered, assumed R1 in comments
+
+ // Round ptr down to nearest multiple of 4.
+ // ANDW $~3, R1
+ ptr := s.Prog(s390x.AANDW)
+ ptr.From.Type = obj.TYPE_CONST
+ ptr.From.Offset = 0xfffffffc
+ ptr.To.Type = obj.TYPE_REG
+ ptr.To.Reg = r
+
+ // Redirect output of LA(N|O) into R1 since it is clobbered anyway.
+ // LA(N|O) Rx, R1, 0(R1)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = r
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = r
+ case ssa.OpS390XLAA, ssa.OpS390XLAAG:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Reg0()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
+ // Convert the flags output of CS{,G} into a bool.
+ // CS{,G} arg1, arg2, arg0
+ // MOVD $0, ret
+ // BNE 2(PC)
+ // MOVD $1, ret
+ // NOP (so the BNE has somewhere to land)
+
+ // CS{,G} arg1, arg2, arg0
+ cs := s.Prog(v.Op.Asm())
+ cs.From.Type = obj.TYPE_REG
+ cs.From.Reg = v.Args[1].Reg() // old
+ cs.Reg = v.Args[2].Reg() // new
+ cs.To.Type = obj.TYPE_MEM
+ cs.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&cs.To, v)
+
+ // MOVD $0, ret
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_CONST
+ movd.From.Offset = 0
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Reg0()
+
+ // BNE 2(PC)
+ bne := s.Prog(s390x.ABNE)
+ bne.To.Type = obj.TYPE_BRANCH
+
+ // MOVD $1, ret
+ movd = s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_CONST
+ movd.From.Offset = 1
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Reg0()
+
+ // NOP (so the BNE has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ bne.To.SetTarget(nop)
+ case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
+ // Loop until the CS{,G} succeeds.
+ // MOV{WZ,D} arg0, ret
+ // cs: CS{,G} ret, arg1, arg0
+ // BNE cs
+
+ // MOV{WZ,D} arg0, ret
+ load := s.Prog(loadByType(v.Type.FieldType(0)))
+ load.From.Type = obj.TYPE_MEM
+ load.From.Reg = v.Args[0].Reg()
+ load.To.Type = obj.TYPE_REG
+ load.To.Reg = v.Reg0()
+ ssagen.AddAux(&load.From, v)
+
+ // CS{,G} ret, arg1, arg0
+ cs := s.Prog(v.Op.Asm())
+ cs.From.Type = obj.TYPE_REG
+ cs.From.Reg = v.Reg0() // old
+ cs.Reg = v.Args[1].Reg() // new
+ cs.To.Type = obj.TYPE_MEM
+ cs.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&cs.To, v)
+
+ // BNE cs
+ bne := s.Prog(s390x.ABNE)
+ bne.To.Type = obj.TYPE_BRANCH
+ bne.To.SetTarget(cs)
+ case ssa.OpS390XSYNC:
+ s.Prog(s390x.ASYNC)
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+func blockAsm(b *ssa.Block) obj.As {
+ switch b.Kind {
+ case ssa.BlockS390XBRC:
+ return s390x.ABRC
+ case ssa.BlockS390XCRJ:
+ return s390x.ACRJ
+ case ssa.BlockS390XCGRJ:
+ return s390x.ACGRJ
+ case ssa.BlockS390XCLRJ:
+ return s390x.ACLRJ
+ case ssa.BlockS390XCLGRJ:
+ return s390x.ACLGRJ
+ case ssa.BlockS390XCIJ:
+ return s390x.ACIJ
+ case ssa.BlockS390XCGIJ:
+ return s390x.ACGIJ
+ case ssa.BlockS390XCLIJ:
+ return s390x.ACLIJ
+ case ssa.BlockS390XCLGIJ:
+ return s390x.ACLGIJ
+ }
+ b.Fatalf("blockAsm not implemented: %s", b.LongString())
+ panic("unreachable")
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ // Handle generic blocks first.
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(s390x.ABR)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ return
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Br(s390x.ACIJ, b.Succs[1].Block())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
+ p.Reg = s390x.REG_R3
+ p.AddRestSourceConst(0)
+ if b.Succs[0].Block() != next {
+ s.Br(s390x.ABR, b.Succs[0].Block())
+ }
+ return
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ return
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ return
+ }
+
+ // Handle s390x-specific blocks. These blocks all have a
+ // condition code mask in the Aux value and 2 successors.
+ succs := [...]*ssa.Block{b.Succs[0].Block(), b.Succs[1].Block()}
+ mask := b.Aux.(s390x.CCMask)
+
+ // TODO: take into account Likely property for forward/backward
+ // branches. We currently can't do this because we don't know
+ // whether a block has already been emitted. In general forward
+ // branches are assumed 'not taken' and backward branches are
+ // assumed 'taken'.
+ if next == succs[0] {
+ succs[0], succs[1] = succs[1], succs[0]
+ mask = mask.Inverse()
+ }
+
+ p := s.Br(blockAsm(b), succs[0])
+ switch b.Kind {
+ case ssa.BlockS390XBRC:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask)
+ case ssa.BlockS390XCGRJ, ssa.BlockS390XCRJ,
+ ssa.BlockS390XCLGRJ, ssa.BlockS390XCLRJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.AddRestSourceReg(b.Controls[1].Reg())
+ case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.AddRestSourceConst(int64(int8(b.AuxInt)))
+ case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.AddRestSourceConst(int64(uint8(b.AuxInt)))
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+ if next != succs[1] {
+ s.Br(s390x.ABR, succs[1])
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/README.md b/src/cmd/compile/internal/ssa/README.md
new file mode 100644
index 0000000..5dc4fbe
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/README.md
@@ -0,0 +1,222 @@
+<!---
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+-->
+
+## Introduction to the Go compiler's SSA backend
+
+This package contains the compiler's Static Single Assignment form component. If
+you're not familiar with SSA, its [Wikipedia
+article](https://en.wikipedia.org/wiki/Static_single_assignment_form) is a good
+starting point.
+
+It is recommended that you first read [cmd/compile/README.md](../../README.md)
+if you are not familiar with the Go compiler already. That document gives an
+overview of the compiler, and explains what is SSA's part and purpose in it.
+
+### Key concepts
+
+The names described below may be loosely related to their Go counterparts, but
+note that they are not equivalent. For example, a Go block statement has a
+variable scope, yet SSA has no notion of variables nor variable scopes.
+
+It may also be surprising that values and blocks are named after their unique
+sequential IDs. They rarely correspond to named entities in the original code,
+such as variables or function parameters. The sequential IDs also allow the
+compiler to avoid maps, and it is always possible to track back the values to Go
+code using debug and position information.
+
+#### Values
+
+Values are the basic building blocks of SSA. Per SSA's very definition, a
+value is defined exactly once, but it may be used any number of times. A value
+mainly consists of a unique identifier, an operator, a type, and some arguments.
+
+An operator or `Op` describes the operation that computes the value. The
+semantics of each operator can be found in `_gen/*Ops.go`. For example, `OpAdd8`
+takes two value arguments holding 8-bit integers and results in their addition.
+Here is a possible SSA representation of the addition of two `uint8` values:
+
+ // var c uint8 = a + b
+ v4 = Add8 <uint8> v2 v3
+
+A value's type will usually be a Go type. For example, the value in the example
+above has a `uint8` type, and a constant boolean value will have a `bool` type.
+However, certain types don't come from Go and are special; below we will cover
+`memory`, the most common of them.
+
+See [value.go](value.go) for more information.
+
+#### Memory types
+
+`memory` represents the global memory state. An `Op` that takes a memory
+argument depends on that memory state, and an `Op` which has the memory type
+impacts the state of memory. This ensures that memory operations are kept in the
+right order. For example:
+
+ // *a = 3
+ // *b = *a
+ v10 = Store <mem> {int} v6 v8 v1
+ v14 = Store <mem> {int} v7 v8 v10
+
+Here, `Store` stores its second argument (of type `int`) into the first argument
+(of type `*int`). The last argument is the memory state; since the second store
+depends on the memory value defined by the first store, the two stores cannot be
+reordered.
+
+See [cmd/compile/internal/types/type.go](../types/type.go) for more information.
+
+#### Blocks
+
+A block represents a basic block in the control flow graph of a function. It is,
+essentially, a list of values that define the operation of this block. Besides
+the list of values, blocks mainly consist of a unique identifier, a kind, and a
+list of successor blocks.
+
+The simplest kind is a `plain` block; it simply hands the control flow to
+another block, thus its successors list contains one block.
+
+Another common block kind is the `exit` block. These have a final value, called
+control value, which must return a memory state. This is necessary for functions
+to return some values, for example - the caller needs some memory state to
+depend on, to ensure that it receives those return values correctly.
+
+The last important block kind we will mention is the `if` block. It has a single
+control value that must be a boolean value, and it has exactly two successor
+blocks. The control flow is handed to the first successor if the bool is true,
+and to the second otherwise.
+
+Here is a sample if-else control flow represented with basic blocks:
+
+ // func(b bool) int {
+ // if b {
+ // return 2
+ // }
+ // return 3
+ // }
+ b1:
+ v1 = InitMem <mem>
+ v2 = SP <uintptr>
+ v5 = Addr <*int> {~r1} v2
+ v6 = Arg <bool> {b}
+ v8 = Const64 <int> [2]
+ v12 = Const64 <int> [3]
+ If v6 -> b2 b3
+ b2: <- b1
+ v10 = VarDef <mem> {~r1} v1
+ v11 = Store <mem> {int} v5 v8 v10
+ Ret v11
+ b3: <- b1
+ v14 = VarDef <mem> {~r1} v1
+ v15 = Store <mem> {int} v5 v12 v14
+ Ret v15
+
+<!---
+TODO: can we come up with a shorter example that still shows the control flow?
+-->
+
+See [block.go](block.go) for more information.
+
+#### Functions
+
+A function represents a function declaration along with its body. It mainly
+consists of a name, a type (its signature), a list of blocks that form its body,
+and the entry block within said list.
+
+When a function is called, the control flow is handed to its entry block. If the
+function terminates, the control flow will eventually reach an exit block, thus
+ending the function call.
+
+Note that a function may have zero or multiple exit blocks, just like a Go
+function can have any number of return points, but it must have exactly one
+entry point block.
+
+Also note that some SSA functions are autogenerated, such as the hash functions
+for each type used as a map key.
+
+For example, this is what an empty function can look like in SSA, with a single
+exit block that returns an uninteresting memory state:
+
+ foo func()
+ b1:
+ v1 = InitMem <mem>
+ Ret v1
+
+See [func.go](func.go) for more information.
+
+### Compiler passes
+
+Having a program in SSA form is not very useful on its own. Its advantage lies
+in how easy it is to write optimizations that modify the program to make it
+better. The way the Go compiler accomplishes this is via a list of passes.
+
+Each pass transforms a SSA function in some way. For example, a dead code
+elimination pass will remove blocks and values that it can prove will never be
+executed, and a nil check elimination pass will remove nil checks which it can
+prove to be redundant.
+
+Compiler passes work on one function at a time, and by default run sequentially
+and exactly once.
+
+The `lower` pass is special; it converts the SSA representation from being
+machine-independent to being machine-dependent. That is, some abstract operators
+are replaced with their non-generic counterparts, potentially reducing or
+increasing the final number of values.
+
+<!---
+TODO: Probably explain here why the ordering of the passes matters, and why some
+passes like deadstore have multiple variants at different stages.
+-->
+
+See the `passes` list defined in [compile.go](compile.go) for more information.
+
+### Playing with SSA
+
+A good way to see and get used to the compiler's SSA in action is via
+`GOSSAFUNC`. For example, to see func `Foo`'s initial SSA form and final
+generated assembly, one can run:
+
+ GOSSAFUNC=Foo go build
+
+The generated `ssa.html` file will also contain the SSA func at each of the
+compile passes, making it easy to see what each pass does to a particular
+program. You can also click on values and blocks to highlight them, to help
+follow the control flow and values.
+
+The value specified in GOSSAFUNC can also be a package-qualified function
+name, e.g.
+
+ GOSSAFUNC=blah.Foo go build
+
+This will match any function named "Foo" within a package whose final
+suffix is "blah" (e.g. something/blah.Foo, anotherthing/extra/blah.Foo).
+
+If non-HTML dumps are needed, append a "+" to the GOSSAFUNC value
+and dumps will be written to stdout:
+
+ GOSSAFUNC=Bar+ go build
+
+<!---
+TODO: need more ideas for this section
+-->
+
+### Hacking on SSA
+
+While most compiler passes are implemented directly in Go code, some others are
+code generated. This is currently done via rewrite rules, which have their own
+syntax and are maintained in `_gen/*.rules`. Simpler optimizations can be written
+easily and quickly this way, but rewrite rules are not suitable for more complex
+optimizations.
+
+To read more on rewrite rules, have a look at the top comments in
+[_gen/generic.rules](_gen/generic.rules) and [_gen/rulegen.go](_gen/rulegen.go).
+
+Similarly, the code to manage operators is also code generated from
+`_gen/*Ops.go`, as it is easier to maintain a few tables than a lot of code.
+After changing the rules or operators, run `go generate cmd/compile/internal/ssa`
+to generate the Go code again.
+
+<!---
+TODO: more tips and info could likely go here
+-->
diff --git a/src/cmd/compile/internal/ssa/TODO b/src/cmd/compile/internal/ssa/TODO
new file mode 100644
index 0000000..f4e4382
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/TODO
@@ -0,0 +1,24 @@
+This is a list of possible improvements to the SSA pass of the compiler.
+
+Optimizations (better compiled code)
+------------------------------------
+- Reduce register pressure in scheduler
+- Make dead store pass inter-block
+- If there are a lot of MOVQ $0, ..., then load
+ 0 into a register and use the register as the source instead.
+- Allow large structs to be SSAable (issue 24416)
+- Allow arrays of length >1 to be SSAable
+- If strings are being passed around without being interpreted (ptr
+ and len fields being accessed) pass them in xmm registers?
+ Same for interfaces?
+- any pointer generated by unsafe arithmetic must be non-nil?
+ (Of course that may not be true in general, but it is for all uses
+ in the runtime, and we can play games with unsafe.)
+
+Optimizations (better compiler)
+-------------------------------
+- Handle signed division overflow and sign extension earlier
+
+Regalloc
+--------
+- Make liveness analysis non-quadratic
diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules
new file mode 100644
index 0000000..d92dddd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/386.rules
@@ -0,0 +1,941 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(Ptr|32|16|8) ...) => (ADDL ...)
+(Add(32|64)F ...) => (ADDS(S|D) ...)
+(Add32carry ...) => (ADDLcarry ...)
+(Add32withcarry ...) => (ADCL ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUBL ...)
+(Sub(32|64)F ...) => (SUBS(S|D) ...)
+(Sub32carry ...) => (SUBLcarry ...)
+(Sub32withcarry ...) => (SBBL ...)
+
+(Mul(32|16|8) ...) => (MULL ...)
+(Mul(32|64)F ...) => (MULS(S|D) ...)
+(Mul32uhilo ...) => (MULLQU ...)
+
+(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul32uover x y)) => (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+
+(Avg32u ...) => (AVGLU ...)
+
+(Div(32|64)F ...) => (DIVS(S|D) ...)
+(Div(32|32u|16|16u) ...) => (DIV(L|LU|W|WU) ...)
+(Div8 x y) => (DIVW (SignExt8to16 x) (SignExt8to16 y))
+(Div8u x y) => (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(Hmul(32|32u) ...) => (HMUL(L|LU) ...)
+
+(Mod(32|32u|16|16u) ...) => (MOD(L|LU|W|WU) ...)
+(Mod8 x y) => (MODW (SignExt8to16 x) (SignExt8to16 y))
+(Mod8u x y) => (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(And(32|16|8) ...) => (ANDL ...)
+(Or(32|16|8) ...) => (ORL ...)
+(Xor(32|16|8) ...) => (XORL ...)
+
+(Neg(32|16|8) ...) => (NEGL ...)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+
+(Com(32|16|8) ...) => (NOTL ...)
+
+// Lowering boolean ops
+(AndB ...) => (ANDL ...)
+(OrB ...) => (ORL ...)
+(Not x) => (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) => (ADDLconst [int32(off)] ptr)
+
+(Bswap32 ...) => (BSWAPL ...)
+(Bswap16 x) => (ROLWconst [8] x)
+
+(Sqrt ...) => (SQRTSD ...)
+(Sqrt32 ...) => (SQRTSS ...)
+
+(Ctz8 x) => (BSFL (ORLconst <typ.UInt32> [0x100] x))
+(Ctz8NonZero ...) => (BSFL ...)
+(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [0x10000] x))
+(Ctz16NonZero ...) => (BSFL ...)
+(Ctz32 ...) => (LoweredCtz32 ...)
+(Ctz32NonZero ...) => (BSFL ...)
+
+// Lowering extension
+(SignExt8to16 ...) => (MOVBLSX ...)
+(SignExt8to32 ...) => (MOVBLSX ...)
+(SignExt16to32 ...) => (MOVWLSX ...)
+
+(ZeroExt8to16 ...) => (MOVBLZX ...)
+(ZeroExt8to32 ...) => (MOVBLZX ...)
+(ZeroExt16to32 ...) => (MOVWLZX ...)
+
+(Signmask x) => (SARLconst x [31])
+(Zeromask <t> x) => (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
+(Slicemask <t> x) => (SARLconst (NEGL <t> x) [31])
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Lowering float-int conversions
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
+(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+
+(Lsh32x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+(Lsh16x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+(Lsh8x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+
+(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [16])))
+(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [8])))
+
+(Rsh32Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRL <t> x y)
+(Rsh16Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRW <t> x y)
+(Rsh8Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRB <t> x y)
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
+
+(Rsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [32])))))
+(Rsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [16])))))
+(Rsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [8])))))
+
+(Rsh32x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARB x y)
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SHLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SARLconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SHRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SHLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SARWconst x [int16(c)])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SHRWconst x [int16(c)])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SHLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SARBconst x [int8(c)])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SHRBconst x [int8(c)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SARLconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SARWconst x [15])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SARBconst x [7])
+
+// rotates
+(RotateLeft32 ...) => (ROLL ...)
+(RotateLeft16 ...) => (ROLW ...)
+(RotateLeft8 ...) => (ROLB ...)
+// constant rotates
+(ROLL x (MOVLconst [c])) => (ROLLconst [c&31] x)
+(ROLW x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x)
+(ROLB x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x)
+
+// Lowering comparisons
+(Less32 x y) => (SETL (CMPL x y))
+(Less16 x y) => (SETL (CMPW x y))
+(Less8 x y) => (SETL (CMPB x y))
+(Less32U x y) => (SETB (CMPL x y))
+(Less16U x y) => (SETB (CMPW x y))
+(Less8U x y) => (SETB (CMPB x y))
+// Use SETGF with reversed operands to dodge NaN case
+(Less64F x y) => (SETGF (UCOMISD y x))
+(Less32F x y) => (SETGF (UCOMISS y x))
+
+(Leq32 x y) => (SETLE (CMPL x y))
+(Leq16 x y) => (SETLE (CMPW x y))
+(Leq8 x y) => (SETLE (CMPB x y))
+(Leq32U x y) => (SETBE (CMPL x y))
+(Leq16U x y) => (SETBE (CMPW x y))
+(Leq8U x y) => (SETBE (CMPB x y))
+// Use SETGEF with reversed operands to dodge NaN case
+(Leq64F x y) => (SETGEF (UCOMISD y x))
+(Leq32F x y) => (SETGEF (UCOMISS y x))
+
+(Eq32 x y) => (SETEQ (CMPL x y))
+(Eq16 x y) => (SETEQ (CMPW x y))
+(Eq8 x y) => (SETEQ (CMPB x y))
+(EqB x y) => (SETEQ (CMPB x y))
+(EqPtr x y) => (SETEQ (CMPL x y))
+(Eq64F x y) => (SETEQF (UCOMISD x y))
+(Eq32F x y) => (SETEQF (UCOMISS x y))
+
+(Neq32 x y) => (SETNE (CMPL x y))
+(Neq16 x y) => (SETNE (CMPW x y))
+(Neq8 x y) => (SETNE (CMPB x y))
+(NeqB x y) => (SETNE (CMPB x y))
+(NeqPtr x y) => (SETNE (CMPL x y))
+(Neq64F x y) => (SETNEF (UCOMISD x y))
+(Neq32F x y) => (SETNEF (UCOMISS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
+
+// Lowering stores
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [8] dst src mem) =>
+ (MOVLstore [4] dst (MOVLload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Adjust moves to be a multiple of 4 bytes.
+(Move [s] dst src mem)
+ && s > 8 && s%4 != 0 =>
+ (Move [s-s%4]
+ (ADDLconst <dst.Type> dst [int32(s%4)])
+ (ADDLconst <src.Type> src [int32(s%4)])
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Medium copying uses a duff device.
+(Move [s] dst src mem)
+ && s > 8 && s <= 4*128 && s%4 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [10*(128-s/4)] dst src mem)
+// 10 and 128 are magic constants. 10 is the number of bytes to encode:
+// MOVL (SI), CX
+// ADDL $4, SI
+// MOVL CX, (DI)
+// ADDL $4, DI
+// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
+
+// Large copying uses REP MOVSL.
+(Move [s] dst src mem) && (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s) =>
+ (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [0] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [0] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%4 != 0 && s > 4 =>
+ (Zero [s-s%4] (ADDLconst destptr [int32(s%4)])
+ (MOVLstoreconst [0] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [8] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [12] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))
+(Zero [16] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,12)] destptr
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))))
+
+// Medium zeroing uses a duff device.
+(Zero [s] destptr mem)
+ && s > 16 && s <= 4*128 && s%4 == 0
+ && !config.noDuffDevice =>
+ (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
+// 1 and 128 are magic constants. 1 is the number of bytes to encode STOSL.
+// 128 is the number of STOSL instructions in duffzero.
+// See src/runtime/duff_386.s:duffzero.
+
+// Large zeroing uses REP STOSQ.
+(Zero [s] destptr mem)
+ && (s > 4*128 || (config.noDuffDevice && s > 16))
+ && s%4 == 0 =>
+ (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
+
+
+// Lowering constants
+(Const8 [c]) => (MOVLconst [int32(c)])
+(Const16 [c]) => (MOVLconst [int32(c)])
+(Const32 ...) => (MOVLconst ...)
+(Const(32|64)F ...) => (MOVS(S|D)const ...)
+(ConstNil) => (MOVLconst [0])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Miscellaneous
+(IsNonNil p) => (SETNE (TESTL p p))
+(IsInBounds idx len) => (SETB (CMPL idx len))
+(IsSliceInBounds idx len) => (SETBE (CMPL idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(Addr {sym} base) => (LEAL {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (LEAL {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (LEAL {sym} base)
+
+// block rewrites
+(If (SETL cmp) yes no) => (LT cmp yes no)
+(If (SETLE cmp) yes no) => (LE cmp yes no)
+(If (SETG cmp) yes no) => (GT cmp yes no)
+(If (SETGE cmp) yes no) => (GE cmp yes no)
+(If (SETEQ cmp) yes no) => (EQ cmp yes no)
+(If (SETNE cmp) yes no) => (NE cmp yes no)
+(If (SETB cmp) yes no) => (ULT cmp yes no)
+(If (SETBE cmp) yes no) => (ULE cmp yes no)
+(If (SETA cmp) yes no) => (UGT cmp yes no)
+(If (SETAE cmp) yes no) => (UGE cmp yes no)
+(If (SETO cmp) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) => (UGT cmp yes no)
+(If (SETGEF cmp) yes no) => (UGE cmp yes no)
+(If (SETEQF cmp) yes no) => (EQF cmp yes no)
+(If (SETNEF cmp) yes no) => (NEF cmp yes no)
+
+(If cond yes no) => (NE (TESTB cond cond) yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
+
+// fold constants into instructions
+(ADDL x (MOVLconst <t> [c])) && !t.IsPtr() => (ADDLconst [c] x)
+(ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x)
+(ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f)
+
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+(SUBLcarry x (MOVLconst [c])) => (SUBLconstcarry [c] x)
+(SBBL x (MOVLconst [c]) f) => (SBBLconst [c] x f)
+
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
+
+(ANDLconst [c] (ANDLconst [d] x)) => (ANDLconst [c & d] x)
+(XORLconst [c] (XORLconst [d] x)) => (XORLconst [c ^ d] x)
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
+
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
+
+(SHLL x (MOVLconst [c])) => (SHLLconst [c&31] x)
+(SHRL x (MOVLconst [c])) => (SHRLconst [c&31] x)
+(SHRW x (MOVLconst [c])) && c&31 < 16 => (SHRWconst [int16(c&31)] x)
+(SHRW _ (MOVLconst [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOVLconst [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOVLconst [c])) && c&31 >= 8 => (MOVLconst [0])
+
+(SARL x (MOVLconst [c])) => (SARLconst [c&31] x)
+(SARW x (MOVLconst [c])) => (SARWconst [int16(min(int64(c&31),15))] x)
+(SARB x (MOVLconst [c])) => (SARBconst [int8(min(int64(c&31),7))] x)
+
+(SARL x (ANDLconst [31] y)) => (SARL x y)
+(SHLL x (ANDLconst [31] y)) => (SHLL x y)
+(SHRL x (ANDLconst [31] y)) => (SHRL x y)
+
+// Constant shift simplifications
+
+(SHLLconst x [0]) => x
+(SHRLconst x [0]) => x
+(SARLconst x [0]) => x
+
+(SHRWconst x [0]) => x
+(SARWconst x [0]) => x
+
+(SHRBconst x [0]) => x
+(SARBconst x [0]) => x
+
+(ROLLconst [0] x) => x
+(ROLWconst [0] x) => x
+(ROLBconst [0] x) => x
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// (SHRW x (MOVLconst [24])), but just in case.
+
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x))
+
+// strength reduction
+// Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf:
+// 1 - addl, shll, leal, negl, subl
+// 3 - imull
+// This limits the rewrites to two instructions.
+// Note that negl always operates in-place,
+// which can require a register-register move
+// to preserve the original value,
+// so it must be used with care.
+(MULLconst [-9] x) => (NEGL (LEAL8 <v.Type> x x))
+(MULLconst [-5] x) => (NEGL (LEAL4 <v.Type> x x))
+(MULLconst [-3] x) => (NEGL (LEAL2 <v.Type> x x))
+(MULLconst [-1] x) => (NEGL x)
+(MULLconst [0] _) => (MOVLconst [0])
+(MULLconst [1] x) => x
+(MULLconst [3] x) => (LEAL2 x x)
+(MULLconst [5] x) => (LEAL4 x x)
+(MULLconst [7] x) => (LEAL2 x (LEAL2 <v.Type> x x))
+(MULLconst [9] x) => (LEAL8 x x)
+(MULLconst [11] x) => (LEAL2 x (LEAL4 <v.Type> x x))
+(MULLconst [13] x) => (LEAL4 x (LEAL2 <v.Type> x x))
+(MULLconst [19] x) => (LEAL2 x (LEAL8 <v.Type> x x))
+(MULLconst [21] x) => (LEAL4 x (LEAL4 <v.Type> x x))
+(MULLconst [25] x) => (LEAL8 x (LEAL2 <v.Type> x x))
+(MULLconst [27] x) => (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+(MULLconst [37] x) => (LEAL4 x (LEAL8 <v.Type> x x))
+(MULLconst [41] x) => (LEAL8 x (LEAL4 <v.Type> x x))
+(MULLconst [45] x) => (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+(MULLconst [73] x) => (LEAL8 x (LEAL8 <v.Type> x x))
+(MULLconst [81] x) => (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+
+(MULLconst [c] x) && isPowerOfTwo32(c+1) && c >= 15 => (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
+
+// combine add/shift into LEAL
+(ADDL x (SHLLconst [3] y)) => (LEAL8 x y)
+(ADDL x (SHLLconst [2] y)) => (LEAL4 x y)
+(ADDL x (SHLLconst [1] y)) => (LEAL2 x y)
+(ADDL x (ADDL y y)) => (LEAL2 x y)
+(ADDL x (ADDL x y)) => (LEAL2 y x)
+
+// combine ADDL/ADDLconst into LEAL1
+(ADDLconst [c] (ADDL x y)) => (LEAL1 [c] x y)
+(ADDL (ADDLconst [c] x) y) => (LEAL1 [c] x y)
+
+// fold ADDL into LEAL
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(ADDLconst [c] x:(SP)) => (LEAL [c] x) // so it is rematerializeable
+(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+
+// fold ADDLconst into LEALx
+(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL1 [c+d] {s} x y)
+(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL2 [c+d] {s} x y)
+(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL4 [c+d] {s} x y)
+(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL8 [c+d] {s} x y)
+(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL1 [c+d] {s} x y)
+(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL2 [c+d] {s} x y)
+(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEAL2 [c+2*d] {s} x y)
+(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL4 [c+d] {s} x y)
+(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEAL4 [c+4*d] {s} x y)
+(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL8 [c+d] {s} x y)
+(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEAL8 [c+8*d] {s} x y)
+
+// fold shifts into LEALx
+(LEAL1 [c] {s} x (SHLLconst [1] y)) => (LEAL2 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [2] y)) => (LEAL4 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [3] y)) => (LEAL8 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [1] y)) => (LEAL4 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [2] y)) => (LEAL8 [c] {s} x y)
+(LEAL4 [c] {s} x (SHLLconst [1] y)) => (LEAL8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLSX x)
+(MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLSX x)
+
+// Fold extensions and ANDs together.
+(MOVBLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
+// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
+
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && valoff1.canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+
+// Fold constants into stores.
+(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {s} ptr mem)
+
+// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+// Note: we turn off this merging for operations on globals when building
+// position-independent code (when Flag_shared is set).
+// PIC needs a spare register to load the PC into. Having the LEAL be
+// a separate instruction gives us that register. Having the LEAL be
+// a separate instruction also allows it to be CSEd (which is good because
+// it compiles to a thunk call).
+(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ && valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+
+// Merge load/store to op
+((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+
+// fold LEALs together
+(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAL into LEAL1
+(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL1 into LEAL
+(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL into LEAL[248]
+(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[248] into LEAL
+(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) =>
+ (LEAL4 [off1+2*off2] {sym} x y)
+(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) =>
+ (LEAL8 [off1+4*off2] {sym} x y)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// Constant comparisons.
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
+
+// Absorb flag constants into branches.
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT_ULT) yes no) => (First no yes)
+(EQ (FlagLT_UGT) yes no) => (First no yes)
+(EQ (FlagGT_ULT) yes no) => (First no yes)
+(EQ (FlagGT_UGT) yes no) => (First no yes)
+
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT_ULT) yes no) => (First yes no)
+(NE (FlagLT_UGT) yes no) => (First yes no)
+(NE (FlagGT_ULT) yes no) => (First yes no)
+(NE (FlagGT_UGT) yes no) => (First yes no)
+
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT_ULT) yes no) => (First yes no)
+(LT (FlagLT_UGT) yes no) => (First yes no)
+(LT (FlagGT_ULT) yes no) => (First no yes)
+(LT (FlagGT_UGT) yes no) => (First no yes)
+
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT_ULT) yes no) => (First yes no)
+(LE (FlagLT_UGT) yes no) => (First yes no)
+(LE (FlagGT_ULT) yes no) => (First no yes)
+(LE (FlagGT_UGT) yes no) => (First no yes)
+
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT_ULT) yes no) => (First no yes)
+(GT (FlagLT_UGT) yes no) => (First no yes)
+(GT (FlagGT_ULT) yes no) => (First yes no)
+(GT (FlagGT_UGT) yes no) => (First yes no)
+
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT_ULT) yes no) => (First no yes)
+(GE (FlagLT_UGT) yes no) => (First no yes)
+(GE (FlagGT_ULT) yes no) => (First yes no)
+(GE (FlagGT_UGT) yes no) => (First yes no)
+
+(ULT (FlagEQ) yes no) => (First no yes)
+(ULT (FlagLT_ULT) yes no) => (First yes no)
+(ULT (FlagLT_UGT) yes no) => (First no yes)
+(ULT (FlagGT_ULT) yes no) => (First yes no)
+(ULT (FlagGT_UGT) yes no) => (First no yes)
+
+(ULE (FlagEQ) yes no) => (First yes no)
+(ULE (FlagLT_ULT) yes no) => (First yes no)
+(ULE (FlagLT_UGT) yes no) => (First no yes)
+(ULE (FlagGT_ULT) yes no) => (First yes no)
+(ULE (FlagGT_UGT) yes no) => (First no yes)
+
+(UGT (FlagEQ) yes no) => (First no yes)
+(UGT (FlagLT_ULT) yes no) => (First no yes)
+(UGT (FlagLT_UGT) yes no) => (First yes no)
+(UGT (FlagGT_ULT) yes no) => (First no yes)
+(UGT (FlagGT_UGT) yes no) => (First yes no)
+
+(UGE (FlagEQ) yes no) => (First yes no)
+(UGE (FlagLT_ULT) yes no) => (First no yes)
+(UGE (FlagLT_UGT) yes no) => (First yes no)
+(UGE (FlagGT_ULT) yes no) => (First no yes)
+(UGE (FlagGT_UGT) yes no) => (First yes no)
+
+// Absorb flag constants into SETxx ops.
+(SETEQ (FlagEQ)) => (MOVLconst [1])
+(SETEQ (FlagLT_ULT)) => (MOVLconst [0])
+(SETEQ (FlagLT_UGT)) => (MOVLconst [0])
+(SETEQ (FlagGT_ULT)) => (MOVLconst [0])
+(SETEQ (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETNE (FlagEQ)) => (MOVLconst [0])
+(SETNE (FlagLT_ULT)) => (MOVLconst [1])
+(SETNE (FlagLT_UGT)) => (MOVLconst [1])
+(SETNE (FlagGT_ULT)) => (MOVLconst [1])
+(SETNE (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETL (FlagEQ)) => (MOVLconst [0])
+(SETL (FlagLT_ULT)) => (MOVLconst [1])
+(SETL (FlagLT_UGT)) => (MOVLconst [1])
+(SETL (FlagGT_ULT)) => (MOVLconst [0])
+(SETL (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETLE (FlagEQ)) => (MOVLconst [1])
+(SETLE (FlagLT_ULT)) => (MOVLconst [1])
+(SETLE (FlagLT_UGT)) => (MOVLconst [1])
+(SETLE (FlagGT_ULT)) => (MOVLconst [0])
+(SETLE (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETG (FlagEQ)) => (MOVLconst [0])
+(SETG (FlagLT_ULT)) => (MOVLconst [0])
+(SETG (FlagLT_UGT)) => (MOVLconst [0])
+(SETG (FlagGT_ULT)) => (MOVLconst [1])
+(SETG (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETGE (FlagEQ)) => (MOVLconst [1])
+(SETGE (FlagLT_ULT)) => (MOVLconst [0])
+(SETGE (FlagLT_UGT)) => (MOVLconst [0])
+(SETGE (FlagGT_ULT)) => (MOVLconst [1])
+(SETGE (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETB (FlagEQ)) => (MOVLconst [0])
+(SETB (FlagLT_ULT)) => (MOVLconst [1])
+(SETB (FlagLT_UGT)) => (MOVLconst [0])
+(SETB (FlagGT_ULT)) => (MOVLconst [1])
+(SETB (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETBE (FlagEQ)) => (MOVLconst [1])
+(SETBE (FlagLT_ULT)) => (MOVLconst [1])
+(SETBE (FlagLT_UGT)) => (MOVLconst [0])
+(SETBE (FlagGT_ULT)) => (MOVLconst [1])
+(SETBE (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETA (FlagEQ)) => (MOVLconst [0])
+(SETA (FlagLT_ULT)) => (MOVLconst [0])
+(SETA (FlagLT_UGT)) => (MOVLconst [1])
+(SETA (FlagGT_ULT)) => (MOVLconst [0])
+(SETA (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETAE (FlagEQ)) => (MOVLconst [1])
+(SETAE (FlagLT_ULT)) => (MOVLconst [0])
+(SETAE (FlagLT_UGT)) => (MOVLconst [1])
+(SETAE (FlagGT_ULT)) => (MOVLconst [0])
+(SETAE (FlagGT_UGT)) => (MOVLconst [1])
+
+// Remove redundant *const ops
+(ADDLconst [c] x) && c==0 => x
+(SUBLconst [c] x) && c==0 => x
+(ANDLconst [c] _) && c==0 => (MOVLconst [0])
+(ANDLconst [c] x) && c==-1 => x
+(ORLconst [c] x) && c==0 => x
+(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
+(XORLconst [c] x) && c==0 => x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(x)==0 => (MOVLconst [0])
+
+// Convert constant subtracts to constant adds
+(SUBLconst [c] x) => (ADDLconst [-c] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SARLconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(SARWconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(SARBconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+
+// generic simplifications
+// TODO: more of this
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBL x x) => (MOVLconst [0])
+(ANDL x x) => x
+(ORL x x) => x
+(XORL x x) => (MOVLconst [0])
+
+// checking AND against 0.
+(CMP(L|W|B)const l:(ANDL x y) [0]) && l.Uses==1 => (TEST(L|W|B) x y)
+(CMPLconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTLconst [c] x)
+(CMPWconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTWconst [int16(c)] x)
+(CMPBconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTBconst [int8(c)] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMP(L|W|B)const x [0]) => (TEST(L|W|B) x x)
+
+// Convert LEAL1 back to ADDL if we can
+(LEAL1 [0] {nil} x y) => (ADDL x y)
+
+// For PIC, break floating-point constant loading into two instructions so we have
+// a register to use for holding the address of the constant pool entry.
+(MOVSSconst [c]) && config.ctxt.Flag_shared => (MOVSSconst2 (MOVSSconst1 [c]))
+(MOVSDconst [c]) && config.ctxt.Flag_shared => (MOVSDconst2 (MOVSDconst1 [c]))
+
+(CMP(L|W|B) l:(MOV(L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(L|W|B)load {sym} [off] ptr x mem)
+(CMP(L|W|B) x l:(MOV(L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(L|W|B)load {sym} [off] ptr x mem))
+
+(CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+ @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/_gen/386Ops.go b/src/cmd/compile/internal/ssa/_gen/386Ops.go
new file mode 100644
index 0000000..7401ac8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/386Ops.go
@@ -0,0 +1,590 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+
+// Suffixes encode the bit width of various instructions.
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+
+// copied from ../../x86/reg.go
+var regNames386 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+
+ // If you add registers, update asyncPreempt in runtime
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNames386) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNames386 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ bx = buildReg("BX")
+ si = buildReg("SI")
+ gp = buildReg("AX CX DX BX BP SI DI")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ callerSave = gp | fp
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax}
+ gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}}
+ gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
+
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
+
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+ )
+
+ var _386ops = []opData{
+ // fp ops
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add
+ {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub
+ {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul
+ {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div
+ {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div
+
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by 4*i
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by 8*i
+
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by 4i store
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store
+
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ // binary ops
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint
+
+ {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates <carry,result> pair
+ {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates <carry,result> pair
+ {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags
+ {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags
+
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "SUBLcarry", argLength: 2, reg: gp21carry, asm: "SUBL", resultInArg0: true}, // arg0-arg1, generates <borrow,result> pair
+ {name: "SUBLconstcarry", argLength: 1, reg: gp11carry, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0-auxint, generates <borrow,result> pair
+ {name: "SBBL", argLength: 3, reg: gp2carry1, asm: "SBBL", resultInArg0: true, clobberFlags: true}, // arg0-arg1-borrow(arg2), where arg2 is flags
+ {name: "SBBLconst", argLength: 2, reg: gp1carry1, asm: "SBBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0-auxint-borrow(arg1), where arg1 is flags
+
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+
+ {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "MULLQU", argLength: 2, reg: gp21mul, commutative: true, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1]
+
+ {name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits
+
+ // For DIVL, DIVW, MODL and MODW, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
+
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
+
+ // compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
+
+ {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
+ // Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount!
+
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7
+
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7
+
+ {name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true}, // 32 bits of arg0 rotate left by arg1
+ {name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true}, // low 16 bits of arg0 rotate left by arg1
+ {name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true}, // low 8 bits of arg0 rotate left by arg1
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+
+ // binary-op with a memory source operand
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ // binary-op with an indexed memory source operand
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "MULLloadidx4", argLength: 4, reg: gp21loadidx, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+
+ // unary ops
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
+
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0
+
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "LoweredCtz32", argLength: 1, reg: gp11, clobberFlags: true}, // arg0 # of low-order zeroes
+
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+ {name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true}, // arg0 swap bytes
+
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
+ {name: "SQRTSS", argLength: 1, reg: fp11, asm: "SQRTSS"}, // sqrt(arg0), float32
+
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
+
+ {name: "MOVBLSX", argLength: 1, reg: gp11, asm: "MOVBLSX"}, // sign extend arg0 from int8 to int32
+ {name: "MOVBLZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int32
+ {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32
+ {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+
+ {name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAL{1,2,4,8} must not have OpSB as either argument.
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+
+ // direct binary-op on memory (read-modify-write)
+ {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+
+ // direct binary-op on indexed memory (read-modify-write)
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) += arg2, arg3=mem
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) &= arg2, arg3=mem
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) |= arg2, arg3=mem
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) ^= arg2, arg3=mem
+
+ // direct binary-op on memory with a constant (read-modify-write)
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ // direct binary-op on indexed memory with a constant (read-modify-write)
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+
+ // indexed loads/stores
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
+ // TODO: sign-extending indexed loads
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ // TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... arg1 ...
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... 2*arg1 ...
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... arg1 ...
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... 4*arg1 ...
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = value to store (will always be zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ // Note: CX is only clobbered when dynamic linking.
+ },
+ faultOnNilArg0: true,
+ },
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 4-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = offset from duffcopy symbol to call
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI CX"), // uses CX as a temporary
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = # of 8-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // (InvertFlags (CMPL a b)) == (CMPL b a)
+ // So if we want (SETL (CMPL a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPL b a))) instead.
+ // Rewrites will convert this to (SETG (CMPL b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+ //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of write barrier slots
+ // It saves all GP registers if necessary, but may clobber others.
+ // Returns a pointer to a write barrier buffer in DI.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave &^ gp, outputs: []regMask{buildReg("DI")}}, clobberFlags: true, aux: "Int64"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, dx, bx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, cx, dx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, ax, cx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned <
+ {name: "FlagGT_ULT"}, // signed > and unsigned >
+
+ // Special ops for PIC floating-point constants.
+ // MOVSXconst1 loads the address of the constant-pool entry into a register.
+ // MOVSXconst2 loads the constant from that address.
+ // MOVSXconst1 returns a pointer, but we type it as uint32 because it can never point to the Go heap.
+ {name: "MOVSSconst1", reg: gp01, typ: "UInt32", aux: "Float32"},
+ {name: "MOVSDconst1", reg: gp01, typ: "UInt32", aux: "Float64"},
+ {name: "MOVSSconst2", argLength: 1, reg: gpfp, asm: "MOVSS"},
+ {name: "MOVSDconst2", argLength: 1, reg: gpfp, asm: "MOVSD"},
+ }
+
+ var _386blocks = []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "OS", controls: 1},
+ {name: "OC", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "EQF", controls: 1},
+ {name: "NEF", controls: 1},
+ {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
+ {name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+ }
+
+ archs = append(archs, arch{
+ name: "386",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../x86/ssa.go",
+ ops: _386ops,
+ blocks: _386blocks,
+ regnames: regNames386,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["BP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/386splitload.rules b/src/cmd/compile/internal/ssa/_gen/386splitload.rules
new file mode 100644
index 0000000..29d4f8c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/386splitload.rules
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See the top of AMD64splitload.rules for discussion of these rules.
+
+(CMP(L|W|B)load {sym} [off] ptr x mem) => (CMP(L|W|B) (MOV(L|W|B)load {sym} [off] ptr mem) x)
+
+(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
new file mode 100644
index 0000000..aac6873
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -0,0 +1,1700 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
+(AddPtr ...) => (ADDQ ...)
+(Add(32|64)F ...) => (ADDS(S|D) ...)
+
+(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
+(SubPtr ...) => (SUBQ ...)
+(Sub(32|64)F ...) => (SUBS(S|D) ...)
+
+(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
+(Mul(32|64)F ...) => (MULS(S|D) ...)
+
+(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
+(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
+
+(Hmul(64|32) ...) => (HMUL(Q|L) ...)
+(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)
+
+(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
+(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
+(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+(Div(32|64)F ...) => (DIVS(S|D) ...)
+
+(Select0 (Add64carry x y c)) =>
+ (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+(Select1 (Add64carry x y c)) =>
+ (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+(Select0 (Sub64borrow x y c)) =>
+ (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+(Select1 (Sub64borrow x y c)) =>
+ (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+
+// Optimize ADCQ and friends
+(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
+(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
+(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
+(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
+(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
+(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
+(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
+(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
+(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
+(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x
+
+
+(Mul64uhilo ...) => (MULQU2 ...)
+(Div128u ...) => (DIVQU2 ...)
+
+(Avg64u ...) => (AVGQU ...)
+
+(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
+(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
+(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+
+(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
+(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
+(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
+(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)
+
+(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+
+// Lowering boolean ops
+(AndB ...) => (ANDL ...)
+(OrB ...) => (ORL ...)
+(Not x) => (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)
+
+// Lowering other arithmetic
+(Ctz64 x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
+(Ctz32 x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz64 <t> x) && buildcfg.GOAMD64 < 3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+(Ctz32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
+(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [1<<16] x))
+(Ctz8 x) => (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))
+
+(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
+(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz8NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz64NonZero x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ x))
+(Ctz32NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
+(Ctz16NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
+(Ctz8NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
+
+// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
+// However, for zero-extended values, we can cheat a bit, and calculate
+// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
+// places the index of the highest set bit where we want it.
+// For GOAMD64>=3, BitLen can be calculated by OperandSize - LZCNT(x).
+(BitLen64 <t> x) && buildcfg.GOAMD64 < 3 => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+(BitLen32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
+(BitLen16 x) && buildcfg.GOAMD64 < 3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
+(BitLen8 x) && buildcfg.GOAMD64 < 3 => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
+(BitLen64 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
+// Use 64-bit version to allow const-fold remove unnecessary arithmetic.
+(BitLen32 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
+(BitLen16 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
+(BitLen8 <t> x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
+
+(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
+(Bswap16 x) => (ROLWconst [8] x)
+
+(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
+(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
+(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))
+
+(Sqrt ...) => (SQRTSD ...)
+(Sqrt32 ...) => (SQRTSS ...)
+
+(RoundToEven x) => (ROUNDSD [0] x)
+(Floor x) => (ROUNDSD [1] x)
+(Ceil x) => (ROUNDSD [2] x)
+(Trunc x) => (ROUNDSD [3] x)
+
+(FMA x y z) => (VFMADD231SD z x y)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to16 ...) => (MOVBQSX ...)
+(SignExt8to32 ...) => (MOVBQSX ...)
+(SignExt8to64 ...) => (MOVBQSX ...)
+(SignExt16to32 ...) => (MOVWQSX ...)
+(SignExt16to64 ...) => (MOVWQSX ...)
+(SignExt32to64 ...) => (MOVLQSX ...)
+
+(ZeroExt8to16 ...) => (MOVBQZX ...)
+(ZeroExt8to32 ...) => (MOVBQZX ...)
+(ZeroExt8to64 ...) => (MOVBQZX ...)
+(ZeroExt16to32 ...) => (MOVWQZX ...)
+(ZeroExt16to64 ...) => (MOVWQZX ...)
+(ZeroExt32to64 ...) => (MOVLQZX ...)
+
+(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
+
+(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Lowering float <-> int
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+(Cvt64to32F ...) => (CVTSQ2SS ...)
+(Cvt64to64F ...) => (CVTSQ2SD ...)
+
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+(Cvt64Fto64 ...) => (CVTTSD2SQ ...)
+
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// Floating-point min is tricky, as the hardware op isn't right for various special
+// cases (-0 and NaN). We use two hardware ops organized just right to make the
+// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207
+// (although that comment isn't exactly right, as the value overwritten is not simulated correctly).
+// t1 = MINSD x, y => incorrect if x==NaN or x==-0,y==+0
+// t2 = MINSD t1, x => fixes x==NaN case
+// res = POR t1, t2 => fixes x==-0,y==+0 case
+// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although
+// it might not produce the same NaN as the input).
+(Min(64|32)F <t> x y) => (POR (MINS(D|S) <t> (MINS(D|S) <t> x y) x) (MINS(D|S) <t> x y))
+// Floating-point max is even trickier. Punt to using min instead.
+// max(x,y) == -min(-x,-y)
+(Max(64|32)F <t> x y) => (Neg(64|32)F <t> (Min(64|32)F <t> (Neg(64|32)F <t> x) (Neg(64|32)F <t> y)))
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
+(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+
+(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
+(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
+
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y)
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
+(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
+(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
+(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
+(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
+
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)
+
+// Lowering integer comparisons
+(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
+(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y))
+(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
+(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))
+
+// Lowering floating point comparisons
+// Note Go assembler gets UCOMISx operand order wrong, but it is right here
+// and the operands are reversed when generating assembly language.
+(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y))
+(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y))
+// Use SETGF/SETGEF with reversed operands to dodge NaN case.
+(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x))
+(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x))
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
+
+// Lowering stores
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVQstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
+(Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem)
+(Move [16] dst src mem) && !config.useSSE =>
+ (MOVQstore [8] dst (MOVQload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+
+(Move [32] dst src mem) =>
+ (Move [16]
+ (OffPtr <dst.Type> dst [16])
+ (OffPtr <src.Type> src [16])
+ (Move [16] dst src mem))
+
+(Move [48] dst src mem) && config.useSSE =>
+ (Move [32]
+ (OffPtr <dst.Type> dst [16])
+ (OffPtr <src.Type> src [16])
+ (Move [16] dst src mem))
+
+(Move [64] dst src mem) && config.useSSE =>
+ (Move [32]
+ (OffPtr <dst.Type> dst [32])
+ (OffPtr <src.Type> src [32])
+ (Move [32] dst src mem))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [9] dst src mem) =>
+ (MOVBstore [8] dst (MOVBload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [10] dst src mem) =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [11] dst src mem) =>
+ (MOVLstore [7] dst (MOVLload [7] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [12] dst src mem) =>
+ (MOVLstore [8] dst (MOVLload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem) && s >= 13 && s <= 15 =>
+ (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+
+// Adjust moves to be a multiple of 16 bytes.
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 <= 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVOstore dst (MOVOload src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVQstore [8] dst (MOVQload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem)))
+
+// Medium copying uses a duff device.
+(Move [s] dst src mem)
+ && s > 64 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [s] dst src mem)
+
+// Large copying uses REP MOVSQ.
+(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) =>
+ (REPMOVSQ dst src (MOVQconst [s/8]) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [16] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [24] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
+(Zero [32] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,24)] destptr
+ (MOVQstoreconst [makeValAndOff(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
+
+(Zero [9] destptr mem) && config.useSSE =>
+ (MOVBstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [10] destptr mem) && config.useSSE =>
+ (MOVWstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [11] destptr mem) && config.useSSE =>
+ (MOVLstoreconst [makeValAndOff(0,7)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [12] destptr mem) && config.useSSE =>
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [s] destptr mem) && s > 12 && s < 16 && config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Adjust zeros to be a multiple of 16 bytes.
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
+ (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
+ (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [16] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [32] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,16)] destptr
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [48] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,32)] destptr
+ (MOVOstoreconst [makeValAndOff(0,16)] destptr
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
+(Zero [64] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,48)] destptr
+ (MOVOstoreconst [makeValAndOff(0,32)] destptr
+ (MOVOstoreconst [makeValAndOff(0,16)] destptr
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
+
+// Medium zeroing uses a duff device.
+(Zero [s] destptr mem)
+ && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [s] destptr mem)
+
+// Large zeroing uses REP STOSQ.
+(Zero [s] destptr mem)
+ && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
+ && s%8 == 0 =>
+ (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
+
+// Lowering constants
+(Const8 [c]) => (MOVLconst [int32(c)])
+(Const16 [c]) => (MOVLconst [int32(c)])
+(Const32 ...) => (MOVLconst ...)
+(Const64 ...) => (MOVQconst ...)
+(Const32F ...) => (MOVSSconst ...)
+(Const64F ...) => (MOVSDconst ...)
+(ConstNil ) => (MOVQconst [0])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Lowering conditional moves
+// If the condition is a SETxx, we can just run a CMOV from the comparison that was
+// setting the flags.
+// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
+ => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
+ => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
+ => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+
+// If the condition does not set the flags, we need to generate a comparison.
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
+ => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
+ => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
+ => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ => (CMOVQNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ => (CMOVLNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ => (CMOVWNE y x (CMPQconst [0] check))
+
+// Absorb InvertFlags
+(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+
+// Absorb constants generated during lower
+(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
+(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
+(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
+(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
+(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
+(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
+(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
+(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
+(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
+(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y
+
+// Miscellaneous
+(IsNonNil p) => (SETNE (TESTQ p p))
+(IsInBounds idx len) => (SETB (CMPQ idx len))
+(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+
+(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
+(Addr {sym} base) => (LEAQ {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (LEAQ {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (LEAQ {sym} base)
+
+(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)
+
+// block rewrites
+(If (SETL cmp) yes no) => (LT cmp yes no)
+(If (SETLE cmp) yes no) => (LE cmp yes no)
+(If (SETG cmp) yes no) => (GT cmp yes no)
+(If (SETGE cmp) yes no) => (GE cmp yes no)
+(If (SETEQ cmp) yes no) => (EQ cmp yes no)
+(If (SETNE cmp) yes no) => (NE cmp yes no)
+(If (SETB cmp) yes no) => (ULT cmp yes no)
+(If (SETBE cmp) yes no) => (ULE cmp yes no)
+(If (SETA cmp) yes no) => (UGT cmp yes no)
+(If (SETAE cmp) yes no) => (UGE cmp yes no)
+(If (SETO cmp) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) => (UGT cmp yes no)
+(If (SETGEF cmp) yes no) => (UGE cmp yes no)
+(If (SETEQF cmp) yes no) => (EQF cmp yes no)
+(If (SETNEF cmp) yes no) => (NEF cmp yes no)
+
+(If cond yes no) => (NE (TESTB cond cond) yes no)
+
+(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
+
+// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
+(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
+(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
+(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
+(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)
+
+// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
+// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
+(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+
+// Atomic exchanges.
+(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
+(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)
+
+// Atomic adds.
+(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
+(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
+(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)
+
+// Atomic compare and swap.
+(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
+(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)
+
+// Atomic memory updates.
+(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
+(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
+(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
+(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// lowering rotates
+(RotateLeft8 ...) => (ROLB ...)
+(RotateLeft16 ...) => (ROLW ...)
+(RotateLeft32 ...) => (ROLL ...)
+(RotateLeft64 ...) => (ROLQ ...)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
+
+// Unsigned comparisons to 0/1
+(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
+(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
+(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
+(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])
+
+// x & 1 != 0 -> x & 1
+(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
+(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
+
+// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
+// Note that BTx instructions use the carry bit, so we need to convert tests for zero flag
+// into tests for carry flags.
+// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
+// mutandis, for UGE and SETAE, and CC and SETCC.
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
+ => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
+ => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
+ => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
+ => (SET(B|AE) (BTLconst [int8(log32(c))] x))
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
+ => (SET(B|AE) (BTQconst [int8(log32(c))] x))
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
+ => (SET(B|AE) (BTQconst [int8(log64(c))] x))
+// SET..store variant
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+
+// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
+// and further combining shifts.
+(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
+(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x)
+(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
+(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
+(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x)
+(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x)
+
+// Rewrite a & 1 != 1 into a & 1 == 0.
+// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
+(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
+(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
+(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
+(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
+
+// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
+// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in
+// the constant field of the OR/XOR instruction. See issue 61694.
+((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x)
+
+// Recognize bit clearing: a &^= 1<<b
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
+(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
+// Note: only convert AND to BTR if the constant wouldn't fit in
+// the constant field of the AND instruction. See issue 61694.
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x)
+
+// Special-case bit patterns on first/last bit.
+// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
+// for instance:
+// x & 0xFFFF0000 -> (x >> 16) << 16
+// x & 0x80000000 -> (x >> 31) << 31
+//
+// In case the mask is just one bit (like second example above), it conflicts
+// with the above rules to detect bit-testing / bit-clearing of first/last bit.
+// We thus special-case them, by detecting the shift patterns.
+
+// Special case resetting first/last bit
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
+ => (AND(L|Q)const [-2] x)
+(SHRLconst [1] (SHLLconst [1] x))
+ => (ANDLconst [0x7fffffff] x)
+(SHRQconst [1] (SHLQconst [1] x))
+ => (BTRQconst [63] x)
+
+// Special case testing first/last bit (with double-shift generated by generic.rules)
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
+
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
+
+// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
+
+// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
+(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x)
+(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x)
+(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x)
+(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x)
+
+// Fold boolean negation into SETcc.
+(XORLconst [1] (SETNE x)) => (SETEQ x)
+(XORLconst [1] (SETEQ x)) => (SETNE x)
+(XORLconst [1] (SETL x)) => (SETGE x)
+(XORLconst [1] (SETGE x)) => (SETL x)
+(XORLconst [1] (SETLE x)) => (SETG x)
+(XORLconst [1] (SETG x)) => (SETLE x)
+(XORLconst [1] (SETB x)) => (SETAE x)
+(XORLconst [1] (SETAE x)) => (SETB x)
+(XORLconst [1] (SETBE x)) => (SETA x)
+(XORLconst [1] (SETA x)) => (SETBE x)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
+
+// Disabled because it interferes with the pattern match above and makes worse code.
+// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
+// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
+
+// fold constants into instructions
+(ADDQ x (MOVQconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x)
+(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
+(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+
+(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
+(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+
+(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+
+(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
+
+(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
+(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
+(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x)
+
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
+(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)
+
+(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
+(ORQ x (MOVLconst [c])) => (ORQconst [c] x)
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+
+(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
+
+(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
+(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
+
+(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
+(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
+(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
+(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])
+
+(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
+(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
+(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
+(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)
+
+// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
+((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+
+((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+
+((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+
+((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+
+// rotate left negative = rotate right
+(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
+(ROLL x (NEG(Q|L) y)) => (RORL x y)
+(ROLW x (NEG(Q|L) y)) => (RORW x y)
+(ROLB x (NEG(Q|L) y)) => (RORB x y)
+
+// rotate right negative = rotate left
+(RORQ x (NEG(Q|L) y)) => (ROLQ x y)
+(RORL x (NEG(Q|L) y)) => (ROLL x y)
+(RORW x (NEG(Q|L) y)) => (ROLW x y)
+(RORB x (NEG(Q|L) y)) => (ROLB x y)
+
+// rotate by constants
+(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
+(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
+(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
+(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)
+
+(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
+(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
+(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
+(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)
+
+// Constant shift simplifications
+((SHLQ|SHRQ|SARQ)const x [0]) => x
+((SHLL|SHRL|SARL)const x [0]) => x
+((SHRW|SARW)const x [0]) => x
+((SHRB|SARB)const x [0]) => x
+((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x
+
+// Multi-register shifts
+(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
+(ORQ (SH(R|L)XQ lo bits) (SH(L|R)XQ hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// (SHRW x (MOVLconst [24])), but just in case.
+
+(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
+(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
+
+// Using MOVZX instead of AND is cheaper.
+(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)
+(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
+// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
+// Commenting out for now, because it also can't trigger because of the is32bit guard on the
+// ANDQconst lowering-rule, above, prevents 0xFFFFFFFF from matching (for the same reason)
+// Using an alternate form of this rule segfaults some binaries because of
+// adverse interactions with other passes.
+// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)
+
+// strength reduction
+// Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf:
+// 1 - addq, shlq, leaq, negq, subq
+// 3 - imulq
+// This limits the rewrites to two instructions.
+// Note that negq always operates in-place,
+// which can require a register-register move
+// to preserve the original value,
+// so it must be used with care.
+(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [-1] x) => (NEG(Q|L) x)
+(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
+(MUL(Q|L)const [ 1] x) => x
+(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x)
+(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x)
+(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x)
+(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 <v.Type> x x) (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 <v.Type> x x) (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
+
+(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const <v.Type> [int8(log32(c-8))] x) x)
+(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 <v.Type> x x))
+
+// combine add/shift into LEAQ/LEAL
+(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
+(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
+(ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y)
+(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y)
+(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x)
+
+// combine ADDQ/ADDQconst into LEAQ1/LEAL1
+(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
+(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
+(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x)
+
+// fold ADDQ/ADDL into LEAQ/LEAL
+(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
+(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
+(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
+(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
+
+// fold ADDQconst/ADDLconst into LEAQx/LEALx
+(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
+(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
+(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
+(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
+(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
+(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
+(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
+(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)
+
+// fold shifts into LEAQx/LEALx
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y)
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
+(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y)
+(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
+(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
+
+(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
+(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
+(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
+(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
+(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
+(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
+(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
+(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
+(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
+(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+
+(MOVLQZX x) && zeroUpper32Bits(x,3) => x
+(MOVWQZX x) && zeroUpper48Bits(x,3) => x
+(MOVBQZX x) && zeroUpper56Bits(x,3) => x
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
+(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
+(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
+(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
+
+// Fold extensions and ANDs together.
+(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
+(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
+(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
+
+// Don't extend before storing
+(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now
+// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem)
+(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
+((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+
+// Fold constants into stores.
+(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
+ (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
+ (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+
+// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
+ (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+
+((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+// fold LEAQs together
+(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAQ into LEAQ1
+(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ1 into LEAQ
+(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ into LEAQ[248]
+(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ[248] into LEAQ
+(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
+ (LEAQ4 [off1+2*off2] {sym1} x y)
+(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
+ (LEAQ8 [off1+4*off2] {sym1} x y)
+// TODO: more?
+
+// Lower LEAQ2/4/8 when the offset is a constant
+(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
+ (LEAQ [off+int32(scale)*2] {sym} x)
+(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
+ (LEAQ [off+int32(scale)*4] {sym} x)
+(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
+ (LEAQ [off+int32(scale)*8] {sym} x)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// Constant comparisons.
+(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
+
+// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
+// In theory this applies to any of the simplifications above,
+// but CMPQ is the only one I've actually seen occur.
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
+(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
+
+// TESTQ c c sets flags like CMPQ c 0.
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
+(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT)
+
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBQcarrymask (FlagEQ)) => (MOVQconst [0])
+(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
+(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
+(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
+(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
+
+// Absorb flag constants into branches.
+((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no)
+((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes)
+((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
+((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
+((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
+((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
+((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
+((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
+((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
+((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)
+
+// Absorb flag constants into SETxx ops.
+((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1])
+((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0])
+((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1])
+((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0])
+((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1])
+((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0])
+((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1])
+((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0])
+((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1])
+((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+// Remove redundant *const ops
+(ADDQconst [0] x) => x
+(ADDLconst [c] x) && c==0 => x
+(SUBQconst [0] x) => x
+(SUBLconst [c] x) && c==0 => x
+(ANDQconst [0] _) => (MOVQconst [0])
+(ANDLconst [c] _) && c==0 => (MOVLconst [0])
+(ANDQconst [-1] x) => x
+(ANDLconst [c] x) && c==-1 => x
+(ORQconst [0] x) => x
+(ORLconst [c] x) && c==0 => x
+(ORQconst [-1] _) => (MOVQconst [-1])
+(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
+(XORQconst [0] x) => x
+(XORLconst [c] x) && c==0 => x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0])
+
+// Remove redundant ops
+// Not in generic rules, because they may appear after lowering e. g. Slicemask
+(NEG(Q|L) (NEG(Q|L) x)) => x
+(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
+
+// Convert constant subtracts to constant adds
+(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
+(SUBLconst [c] x) => (ADDLconst [-c] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
+(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
+(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
+(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
+(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
+(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
+(NEGQ (MOVQconst [c])) => (MOVQconst [-c])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
+(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
+(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
+
+// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
+// but we can still constant-fold.
+// In theory this applies to any of the simplifications above,
+// but ORQ is the only one I've actually seen occur.
+(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
+
+// generic simplifications
+// TODO: more of this
+(ADDQ x (NEGQ y)) => (SUBQ x y)
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBQ x x) => (MOVQconst [0])
+(SUBL x x) => (MOVLconst [0])
+(ANDQ x x) => x
+(ANDL x x) => x
+(ORQ x x) => x
+(ORL x x) => x
+(XORQ x x) => (MOVQconst [0])
+(XORL x x) => (MOVLconst [0])
+
+(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
+(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
+(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
+
+// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
+(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
+(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
+
+// checking AND against 0.
+(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
+(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
+(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
+(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
+(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
+(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
+(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
+(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
+
+// Convert TESTx to TESTxconst if possible.
+(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
+(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
+(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
+(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMPQconst x [0]) => (TESTQ x x)
+(CMPLconst x [0]) => (TESTL x x)
+(CMPWconst x [0]) => (TESTW x x)
+(CMPBconst x [0]) => (TESTB x x)
+(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
+(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
+(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
+(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
+
+// Convert LEAQ1 back to ADDQ if we can
+(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
+
+(MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
+ && config.useSSE
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
+ && a.Val() == 0
+ && c.Val() == 0
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
+(MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
+ && config.useSSE
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off()))
+ && a.Val() == 0
+ && c.Val() == 0
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
+
+// Merge load and op
+// TODO: add indexed variants?
+((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
+((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) =>
+ (BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+// Merge ADDQconst and LEAQ into atomic loads.
+(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
+(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+
+// Merge ADDQconst and LEAQ into atomic stores.
+(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XCHGQ [off1+off2] {sym} val ptr mem)
+(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XCHGL [off1+off2] {sym} val ptr mem)
+(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+
+// Merge ADDQconst into atomic adds.
+// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
+(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XADDQlock [off1+off2] {sym} val ptr mem)
+(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XADDLlock [off1+off2] {sym} val ptr mem)
+
+// Merge ADDQconst into atomic compare and swaps.
+// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions.
+(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+
+// We don't need the conditional move if we know the arg of BSF is not zero.
+(CMOVQEQ x _ (Select1 (BS(F|R)Q (ORQconst [c] _)))) && c != 0 => x
+// Extension is unnecessary for trailing zeros.
+(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
+(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
+
+// Redundant sign/zero extensions
+// Note: see issue 21963. We have to make sure we use the right type on
+// the resulting extension (the outer type, not the inner type).
+(MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
+(MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
+(MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
+
+(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
+ ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
+ ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+// float <-> int register moves, with no conversion.
+// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
+(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
+(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
+(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val)
+(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val)
+
+// Other load-like ops.
+(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
+(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
+(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
+(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
+(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
+(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
+( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
+( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
+(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
+(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
+
+(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
+(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
+(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
+(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
+(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
+(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
+
+// Redirect stores to use the other register set.
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
+(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
+(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem)
+(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem)
+
+// Load args directly into the register class where it will be used.
+// We do this by just modifying the type of the Arg.
+(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+
+// LEAQ is rematerializeable, so this helps to avoid register spill.
+// See issue 22947 for details
+(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
+
+// HMULx is commutative, but its first argument must go in AX.
+// If possible, put a rematerializeable value in the first argument slot,
+// to reduce the odds that another value will be have to spilled
+// specifically to free up AX.
+(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x)
+(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
+
+// Fold loads into compares
+// Note: these may be undone by the flagalloc pass.
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
+(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
+
+(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+
+(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
+ && l == l2
+ && l.Uses == 2
+ && clobber(l) =>
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)
+
+// Convert ANDload to MOVload when we can do the AND in a containing TEST op.
+// Only do when it's within the same block, so we don't have flags live across basic block boundaries.
+// See issue 44228.
+(TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
+ (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
+ (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
+
+// Arch-specific inlining for small or disjoint runtime.memmove
+// Match post-lowering calls, memory version.
+(SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
+ && sc.Val64() >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, sc.Val64(), config)
+ && clobber(s1, s2, s3, call)
+ => (Move [sc.Val64()] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
+
+// Prefetch instructions
+(PrefetchCache ...) => (PrefetchT0 ...)
+(PrefetchCacheStreamed ...) => (PrefetchNTA ...)
+
+// CPUID feature: BMI1.
+(AND(Q|L) x (NOT(Q|L) y)) && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y)
+(AND(Q|L) x (NEG(Q|L) x)) && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x)
+(XOR(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x)
+(AND(Q|L) <t> x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (Select0 <t> (BLSR(Q|L) x))
+// eliminate TEST instruction in classical "isPowerOfTwo" check
+(SETEQ (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (SETEQ (Select1 <types.TypeFlags> blsr))
+(CMOVQEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
+(CMOVLEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
+(EQ (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (EQ (Select1 <types.TypeFlags> blsr) yes no)
+(SETNE (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (SETNE (Select1 <types.TypeFlags> blsr))
+(CMOVQNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
+(CMOVLNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
+(NE (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (NE (Select1 <types.TypeFlags> blsr) yes no)
+
+(BSWAP(Q|L) (BSWAP(Q|L) p)) => p
+
+// CPUID feature: MOVBE.
+(MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
+(MOVBE(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 => (MOV(Q|L)store [i] {s} p w mem)
+(BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem)) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => @x.Block (MOVBE(Q|L)load [i] {s} p mem)
+(BSWAP(Q|L) x:(MOVBE(Q|L)load [i] {s} p mem)) && x.Uses == 1 => @x.Block (MOV(Q|L)load [i] {s} p mem)
+(MOVWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBEWstore [i] {s} p w mem)
+(MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 => (MOVWstore [i] {s} p w mem)
+
+(SAR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
+(SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
+(SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
+
+((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
new file mode 100644
index 0000000..6061719
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go
@@ -0,0 +1,1167 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+// - All SymOff opcodes require their offset to fit in an int32.
+
+// Suffixes encode the bit width of various instructions.
+// Q (quad word) = 64 bit
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+// D (double) = 64 bit float
+// S (single) = 32 bit float
+
+// copied from ../../amd64/reg.go
+var regNamesAMD64 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "g", // a.k.a. R14
+ "R15",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+ "X8",
+ "X9",
+ "X10",
+ "X11",
+ "X12",
+ "X13",
+ "X14",
+ "X15", // constant 0 in ABIInternal
+
+ // If you add registers, update asyncPreempt in runtime
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesAMD64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesAMD64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ bx = buildReg("BX")
+ gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15")
+ g = buildReg("g")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14")
+ x15 = buildReg("X15")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ gpspsbg = gpspsb | g
+ callerSave = gp | fp | g // runtime.setg (and anything calling it) may clobber g
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp31shift = regInfo{inputs: []regMask{gp, gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp0flagsLoad = regInfo{inputs: []regMask{gpspsbg, 0}}
+ gp1flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+ gp2flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp1flags1flags = regInfo{inputs: []regMask{gp, 0}, outputs: []regMask{gp, 0}}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+
+ gpload = regInfo{inputs: []regMask{gpspsbg, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsbg, gpsp, 0}, outputs: gponly}
+ gp21shxload = regInfo{inputs: []regMask{gpspsbg, gp, 0}, outputs: gponly}
+ gp21shxloadidx = regInfo{inputs: []regMask{gpspsbg, gpsp, gp, 0}, outputs: gponly}
+
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsbg, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+ gpstorexchg = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: []regMask{gp}}
+ cmpxchg = regInfo{inputs: []regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
+ fp21load = regInfo{inputs: []regMask{fp, gpspsbg, 0}, outputs: fponly}
+ fp21loadidx = regInfo{inputs: []regMask{fp, gpspsbg, gpspsb, 0}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+
+ prefreg = regInfo{inputs: []regMask{gpspsbg}}
+ )
+
+ var AMD64ops = []opData{
+ // {ADD,SUB,MUL,DIV}Sx: floating-point arithmetic
+ // x==S for float32, x==D for float64
+ // computes arg0 OP arg1
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true},
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true},
+ {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true},
+ {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true},
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true},
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true},
+ {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true},
+ {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true},
+
+ // MOVSxload: floating-point loads
+ // x==S for float32, x==D for float64
+ // load from arg0+auxint+aux, arg1 = mem
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // MOVSxconst: floatint-point constants
+ // x==S for float32, x==D for float64
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true},
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true},
+
+ // MOVSxloadidx: floating-point indexed loads
+ // x==S for float32, x==D for float64
+ // load from arg0 + scale*arg1+auxint+aux, arg2 = mem
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Read"},
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Read"},
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Read"},
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Read"},
+
+ // MOVSxstore: floating-point stores
+ // x==S for float32, x==D for float64
+ // does *(arg0+auxint+aux) = arg1, arg2 = mem
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"},
+
+ // MOVSxstoreidx: floating-point indexed stores
+ // x==S for float32, x==D for float64
+ // does *(arg0+scale*arg1+auxint+aux) = arg2, arg3 = mem
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Write"},
+
+ // {ADD,SUB,MUL,DIV}Sxload: floating-point load / op combo
+ // x==S for float32, x==D for float64
+ // computes arg0 OP *(arg1+auxint+aux), arg2=mem
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"},
+
+ // {ADD,SUB,MUL,DIV}Sxloadidx: floating-point indexed load / op combo
+ // x==S for float32, x==D for float64
+ // computes arg0 OP *(arg1+scale*arg2+auxint+aux), arg3=mem
+ {name: "ADDSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "ADDSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "ADDSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "ADDSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "SUBSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "SUBSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "SUBSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "SUBSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "MULSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "MULSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "MULSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "MULSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "DIVSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "DIVSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "DIVSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+ {name: "DIVSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"},
+
+ // {ADD,SUB,MUL,DIV,AND,OR,XOR}x: binary integer ops
+ // unadorned versions compute arg0 OP arg1
+ // const versions compute arg0 OP auxint (auxint is a sign-extended 32-bit value)
+ // constmodify versions compute *(arg0+ValAndOff(AuxInt).Off().aux) OP= ValAndOff(AuxInt).Val(), arg1 = mem
+ // x==L operations zero the upper 4 bytes of the destination register (not meaningful for constmodify versions).
+ {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true},
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true},
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true},
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true},
+ {name: "ADDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+
+ {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true},
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true},
+ {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true},
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true},
+
+ {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true},
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true},
+ {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true},
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true},
+
+ // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true},
+ // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). Returns uint64(x), and flags set to overflow if uint64(x) != x.
+ {name: "MULQU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt64,Flags)", asm: "MULQ", commutative: true, clobberFlags: true},
+
+ // HMULx[U]: computes the high bits of an integer multiply.
+ // computes arg0 * arg1 >> (x==L?32:64)
+ // The multiply is unsigned for the U versions, signed for the non-U versions.
+ // HMULx[U] are intentionally not marked as commutative, even though they are.
+ // This is because they have asymmetric register requirements.
+ // There are rewrite rules to try to place arguments in preferable slots.
+ {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true},
+ {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true},
+ {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true},
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true},
+
+ // (arg0 + arg1) / 2 as unsigned, all 64 result bits
+ {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true},
+
+ // DIVx[U] computes [arg0 / arg1, arg0 % arg1]
+ // For signed versions, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", aux: "Bool", clobberFlags: true},
+ {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", aux: "Bool", clobberFlags: true},
+ {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", aux: "Bool", clobberFlags: true},
+ {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true},
+ {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true},
+ {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true},
+
+ // computes -arg0, flags set for 0-arg0.
+ {name: "NEGLflags", argLength: 1, reg: gp11flags, typ: "(UInt32,Flags)", asm: "NEGL", resultInArg0: true},
+
+ // The following 4 add opcodes return the low 64 bits of the sum in the first result and
+ // the carry (the 65th bit) in the carry flag.
+ {name: "ADDQcarry", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDQ", commutative: true, resultInArg0: true}, // r = arg0+arg1
+ {name: "ADCQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", commutative: true, resultInArg0: true}, // r = arg0+arg1+carry(arg2)
+ {name: "ADDQconstcarry", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint
+ {name: "ADCQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint+carry(arg1)
+
+ // The following 4 add opcodes return the low 64 bits of the difference in the first result and
+ // the borrow (if the result is negative) in the carry flag.
+ {name: "SUBQborrow", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBQ", resultInArg0: true}, // r = arg0-arg1
+ {name: "SBBQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", resultInArg0: true}, // r = arg0-(arg1+carry(arg2))
+ {name: "SUBQconstborrow", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "SUBQ", aux: "Int32", resultInArg0: true}, // r = arg0-auxint
+ {name: "SBBQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", aux: "Int32", resultInArg0: true}, // r = arg0-(auxint+carry(arg1))
+
+ {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+
+ {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ // CMPx: compare arg0 to arg1.
+ {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"},
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"},
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"},
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"},
+
+ // CMPxconst: compare arg0 to auxint.
+ {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"},
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"},
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"},
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"},
+
+ // CMPxload: compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
+ {name: "CMPQload", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // CMPxconstload: compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
+ {name: "CMPQconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPQ", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // CMPxloadidx: compare *(arg0+N*arg1+auxint+aux) to arg2 (in that order). arg3=mem.
+ {name: "CMPQloadidx8", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 8, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPQloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLloadidx4", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 4, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWloadidx2", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 2, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPBloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+
+ // CMPxconstloadidx: compare *(arg0+N*arg1+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg2=mem.
+ {name: "CMPQconstloadidx8", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 8, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPQconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLconstloadidx4", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 4, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWconstloadidx2", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 2, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPBconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+
+ // UCOMISx: floating-point compare arg0 to arg1
+ // x==S for float32, x==D for float64
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"},
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"},
+
+ // bit test/set/clear operations
+ {name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0%32 in arg1 is set
+ {name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0%64 in arg1 is set
+ {name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg1%32 in arg0
+ {name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg1%64 in arg0
+ {name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg1%32 in arg0
+ {name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg1%64 in arg0
+ {name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg1%32 in arg0
+ {name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0
+ {name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
+ {name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
+ {name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 31 <= auxint < 64
+ {name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 31 <= auxint < 64
+ {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 31 <= auxint < 64
+
+ // BT[SRC]Qconstmodify
+ //
+ // S: set bit
+ // R: reset (clear) bit
+ // C: complement bit
+ //
+ // Apply operation to bit ValAndOff(AuxInt).Val() in the 64 bits at
+ // memory address arg0+ValAndOff(AuxInt).Off()+aux
+ // Bit index must be in range (31-63).
+ // (We use OR/AND/XOR for thinner targets and lower bit indexes.)
+ // arg1=mem, returns mem
+ //
+ // Note that there aren't non-const versions of these instructions.
+ // Well, there are such instructions, but they are slow and weird so we don't use them.
+ {name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+
+ // TESTx: compare (arg0 & arg1) to 0
+ {name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"},
+ {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"},
+ {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"},
+ {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"},
+
+ // TESTxconst: compare (arg0 & auxint) to 0
+ {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"},
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"},
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"},
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"},
+
+ // S{HL, HR, AR}x: shift operations
+ // SHL: shift left
+ // SHR: shift right logical (0s are shifted in from beyond the word size)
+ // SAR: shift right arithmetic (sign bit is shifted in from beyond the word size)
+ // arg0 is the value being shifted
+ // arg1 is the amount to shift, interpreted mod (Q=64,L=32,W=32,B=32)
+ // (Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount!)
+ // For *const versions, use auxint instead of arg1 as the shift amount. auxint must be in the range 0 to (Q=63,L=31,W=15,B=7) inclusive.
+ {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true},
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true},
+ {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true, clobberFlags: true},
+
+ {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true},
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true},
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true},
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true},
+ {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true},
+
+ {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true},
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true},
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true},
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true},
+ {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true},
+
+ // unsigned arg0 >> arg2, shifting in bits from arg1 (==(arg1<<64+arg0)>>arg2, keeping low 64 bits), shift amount is mod 64
+ {name: "SHRDQ", argLength: 3, reg: gp31shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true},
+ // unsigned arg0 << arg2, shifting in bits from arg1 (==(arg0<<64+arg1)<<arg2, keeping high 64 bits), shift amount is mod 64
+ {name: "SHLDQ", argLength: 3, reg: gp31shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true},
+
+ // RO{L,R}x: rotate instructions
+ // computes arg0 rotate (L=left,R=right) arg1 bits.
+ // Bits are rotated within the low (Q=64,L=32,W=16,B=8) bits of the register.
+ // For *const versions use auxint instead of arg1 as the rotate amount. auxint must be in the range 0 to (Q=63,L=31,W=15,B=7) inclusive.
+ // x==L versions zero the upper 32 bits of the destination register.
+ // x==W and x==B versions leave the upper bits unspecified.
+ {name: "ROLQ", argLength: 2, reg: gp21shift, asm: "ROLQ", resultInArg0: true, clobberFlags: true},
+ {name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true},
+ {name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true},
+ {name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true},
+ {name: "RORQ", argLength: 2, reg: gp21shift, asm: "RORQ", resultInArg0: true, clobberFlags: true},
+ {name: "RORL", argLength: 2, reg: gp21shift, asm: "RORL", resultInArg0: true, clobberFlags: true},
+ {name: "RORW", argLength: 2, reg: gp21shift, asm: "RORW", resultInArg0: true, clobberFlags: true},
+ {name: "RORB", argLength: 2, reg: gp21shift, asm: "RORB", resultInArg0: true, clobberFlags: true},
+ {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int8", resultInArg0: true, clobberFlags: true},
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true},
+
+ // [ADD,SUB,AND,OR]xload: integer load/op combo
+ // L = int32, Q = int64
+ // x==L operations zero the upper 4 bytes of the destination register.
+ // computes arg0 op *(arg1+auxint+aux), arg2=mem
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "ADDQload", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "SUBQload", argLength: 3, reg: gp21load, asm: "SUBQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "ANDQload", argLength: 3, reg: gp21load, asm: "ANDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "ORQload", argLength: 3, reg: gp21load, asm: "ORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "XORQload", argLength: 3, reg: gp21load, asm: "XORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"},
+
+ // integer indexed load/op combo
+ // L = int32, Q = int64
+ // L operations zero the upper 4 bytes of the destination register.
+ // computes arg0 op *(arg1+scale*arg2+auxint+aux), arg3=mem
+ {name: "ADDLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ADDLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ADDQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ADDQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ADDQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ADDQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "SUBLloadidx1", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "SUBLloadidx8", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "SUBQloadidx1", argLength: 4, reg: gp21loadidx, asm: "SUBQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "SUBQloadidx8", argLength: 4, reg: gp21loadidx, asm: "SUBQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ANDLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ANDLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ANDQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ANDQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ANDQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ANDQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ORLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ORLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ORQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ORQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "ORQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ORQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "XORLloadidx1", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "XORLloadidx8", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "XORQloadidx1", argLength: 4, reg: gp21loadidx, asm: "XORQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+ {name: "XORQloadidx8", argLength: 4, reg: gp21loadidx, asm: "XORQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"},
+
+ // direct binary op on memory (read-modify-write)
+ // L = int32, Q = int64
+ // does *(arg0+auxint+aux) op= arg1, arg2=mem
+ {name: "ADDQmodify", argLength: 3, reg: gpstore, asm: "ADDQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "SUBQmodify", argLength: 3, reg: gpstore, asm: "SUBQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "ANDQmodify", argLength: 3, reg: gpstore, asm: "ANDQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "ORQmodify", argLength: 3, reg: gpstore, asm: "ORQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "XORQmodify", argLength: 3, reg: gpstore, asm: "XORQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+ {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"},
+
+ // indexed direct binary op on memory.
+ // does *(arg0+scale*arg1+auxint+aux) op= arg2, arg3=mem
+ {name: "ADDQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ADDQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ADDQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "SUBQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "SUBQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "SUBQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "SUBQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ANDQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ANDQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ORQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ORQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "XORQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "XORQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "SUBLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "SUBLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+
+ // indexed direct binary op on memory with constant argument.
+ // does *(arg0+scale*arg1+ValAndOff(AuxInt).Off()+aux) op= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ADDQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ADDQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ANDQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ANDQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ORQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ORQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "XORQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "XORQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ADDLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ANDLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "ORLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+ {name: "XORLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"},
+
+ // {NEG,NOT}x: unary ops
+ // computes [NEG:-,NOT:^]arg0
+ // L = int32, Q = int64
+ // L operations zero the upper 4 bytes of the destination register.
+ {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true, clobberFlags: true},
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true},
+ {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true},
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true},
+
+ // BS{F,R}Q returns a tuple [result, flags]
+ // result is undefined if the input is zero.
+ // flags are set to "equal" if the input is zero, "not equal" otherwise.
+ // BS{F,R}L returns only the result.
+ {name: "BSFQ", argLength: 1, reg: gp11flags, asm: "BSFQ", typ: "(UInt64,Flags)"}, // # of low-order zeroes in 64-bit arg
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", typ: "UInt32", clobberFlags: true}, // # of low-order zeroes in 32-bit arg
+ {name: "BSRQ", argLength: 1, reg: gp11flags, asm: "BSRQ", typ: "(UInt64,Flags)"}, // # of high-order zeroes in 64-bit arg
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", typ: "UInt32", clobberFlags: true}, // # of high-order zeroes in 32-bit arg
+
+ // CMOV instructions: 64, 32 and 16-bit sizes.
+ // if arg2 encodes a true result, return arg1, else arg0
+ {name: "CMOVQEQ", argLength: 3, reg: gp21, asm: "CMOVQEQ", resultInArg0: true},
+ {name: "CMOVQNE", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQLT", argLength: 3, reg: gp21, asm: "CMOVQLT", resultInArg0: true},
+ {name: "CMOVQGT", argLength: 3, reg: gp21, asm: "CMOVQGT", resultInArg0: true},
+ {name: "CMOVQLE", argLength: 3, reg: gp21, asm: "CMOVQLE", resultInArg0: true},
+ {name: "CMOVQGE", argLength: 3, reg: gp21, asm: "CMOVQGE", resultInArg0: true},
+ {name: "CMOVQLS", argLength: 3, reg: gp21, asm: "CMOVQLS", resultInArg0: true},
+ {name: "CMOVQHI", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQCC", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVQCS", argLength: 3, reg: gp21, asm: "CMOVQCS", resultInArg0: true},
+
+ {name: "CMOVLEQ", argLength: 3, reg: gp21, asm: "CMOVLEQ", resultInArg0: true},
+ {name: "CMOVLNE", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLLT", argLength: 3, reg: gp21, asm: "CMOVLLT", resultInArg0: true},
+ {name: "CMOVLGT", argLength: 3, reg: gp21, asm: "CMOVLGT", resultInArg0: true},
+ {name: "CMOVLLE", argLength: 3, reg: gp21, asm: "CMOVLLE", resultInArg0: true},
+ {name: "CMOVLGE", argLength: 3, reg: gp21, asm: "CMOVLGE", resultInArg0: true},
+ {name: "CMOVLLS", argLength: 3, reg: gp21, asm: "CMOVLLS", resultInArg0: true},
+ {name: "CMOVLHI", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLCC", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVLCS", argLength: 3, reg: gp21, asm: "CMOVLCS", resultInArg0: true},
+
+ {name: "CMOVWEQ", argLength: 3, reg: gp21, asm: "CMOVWEQ", resultInArg0: true},
+ {name: "CMOVWNE", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWLT", argLength: 3, reg: gp21, asm: "CMOVWLT", resultInArg0: true},
+ {name: "CMOVWGT", argLength: 3, reg: gp21, asm: "CMOVWGT", resultInArg0: true},
+ {name: "CMOVWLE", argLength: 3, reg: gp21, asm: "CMOVWLE", resultInArg0: true},
+ {name: "CMOVWGE", argLength: 3, reg: gp21, asm: "CMOVWGE", resultInArg0: true},
+ {name: "CMOVWLS", argLength: 3, reg: gp21, asm: "CMOVWLS", resultInArg0: true},
+ {name: "CMOVWHI", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWCC", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
+ {name: "CMOVWCS", argLength: 3, reg: gp21, asm: "CMOVWCS", resultInArg0: true},
+
+ // CMOV with floating point instructions. We need separate pseudo-op to handle
+ // InvertFlags correctly, and to generate special code that handles NaN (unordered flag).
+ // NOTE: the fact that CMOV*EQF here is marked to generate CMOV*NE is not a bug. See
+ // code generation in amd64/ssa.go.
+ {name: "CMOVQEQF", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true, needIntTemp: true},
+ {name: "CMOVQNEF", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQGTF", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQGEF", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVLEQF", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true, needIntTemp: true},
+ {name: "CMOVLNEF", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLGTF", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLGEF", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVWEQF", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true, needIntTemp: true},
+ {name: "CMOVWNEF", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWGTF", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWGEF", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
+
+ // BSWAPx swaps the low-order (L=4,Q=8) bytes of arg0.
+ // Q: abcdefgh -> hgfedcba
+ // L: abcdefgh -> 0000hgfe (L zeros the upper 4 bytes)
+ {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true},
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true},
+
+ // POPCNTx counts the number of set bits in the low-order (L=32,Q=64) bits of arg0.
+ // POPCNTx instructions are only guaranteed to be available if GOAMD64>=v2.
+ // For GOAMD64<v2, any use must be preceded by a successful runtime check of runtime.x86HasPOPCNT.
+ {name: "POPCNTQ", argLength: 1, reg: gp11, asm: "POPCNTQ", clobberFlags: true},
+ {name: "POPCNTL", argLength: 1, reg: gp11, asm: "POPCNTL", clobberFlags: true},
+
+ // SQRTSx computes sqrt(arg0)
+ // S = float32, D = float64
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"},
+ {name: "SQRTSS", argLength: 1, reg: fp11, asm: "SQRTSS"},
+
+ // ROUNDSD rounds arg0 to an integer depending on auxint
+ // 0 means math.RoundToEven, 1 means math.Floor, 2 math.Ceil, 3 math.Trunc
+ // (The result is still a float64.)
+ // ROUNDSD instruction is only guaraneteed to be available if GOAMD64>=v2.
+ // For GOAMD64<v2, any use must be preceded by a successful check of runtime.x86HasSSE41.
+ {name: "ROUNDSD", argLength: 1, reg: fp11, aux: "Int8", asm: "ROUNDSD"},
+
+ // VFMADD231SD only exists on platforms with the FMA3 instruction set.
+ // Any use must be preceded by a successful check of runtime.support_fma.
+ {name: "VFMADD231SD", argLength: 3, reg: fp31, resultInArg0: true, asm: "VFMADD231SD"},
+
+ // Note that these operations don't exactly match the semantics of Go's
+ // builtin min. In particular, these aren't commutative, because on various
+ // special cases the 2nd argument is preferred.
+ {name: "MINSD", argLength: 2, reg: fp21, resultInArg0: true, asm: "MINSD"}, // min(arg0,arg1)
+ {name: "MINSS", argLength: 2, reg: fp21, resultInArg0: true, asm: "MINSS"}, // min(arg0,arg1)
+
+ {name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear.
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
+ // Variants that store result to memory
+ {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETEQstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETEQ", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract == condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETNEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETNE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract != condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETLstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETLEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETGstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETGEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETBstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETBEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETAstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETHI", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+ {name: "SETAEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCC", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem
+
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
+ {name: "SETEQF", argLength: 1, reg: flagsgp, asm: "SETEQ", clobberFlags: true, needIntTemp: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgp, asm: "SETNE", clobberFlags: true, needIntTemp: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
+
+ {name: "MOVBQSX", argLength: 1, reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBQZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int64
+ {name: "MOVWQSX", argLength: 1, reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64
+ {name: "MOVWQZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int64
+ {name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
+ {name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVL"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
+ {name: "CVTTSD2SQ", argLength: 1, reg: fpgp, asm: "CVTTSD2SQ"}, // convert float64 to int64
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32
+ {name: "CVTTSS2SQ", argLength: 1, reg: fpgp, asm: "CVTTSS2SQ"}, // convert float32 to int64
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64
+ {name: "CVTSQ2SS", argLength: 1, reg: gpfp, asm: "CVTSQ2SS"}, // convert int64 to float32
+ {name: "CVTSQ2SD", argLength: 1, reg: gpfp, asm: "CVTSQ2SD"}, // convert int64 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ // Move values between int and float registers, with no conversion.
+ // TODO: should we have generic versions of these?
+ {name: "MOVQi2f", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits from int to float reg
+ {name: "MOVQf2i", argLength: 1, reg: fpgp, typ: "UInt64"}, // move 64 bits from float to int reg
+ {name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg
+ {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs (for float negation).
+ {name: "POR", argLength: 2, reg: fp21, asm: "POR", commutative: true, resultInArg0: true}, // inclusive or, applied to X regs (for float min/max).
+
+ {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAW", argLength: 1, reg: gp11sb, asm: "LEAW", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+
+ // LEAxn computes arg0 + n*arg1 + auxint + aux
+ // x==L zeroes the upper 4 bytes.
+ {name: "LEAQ1", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAW1", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAQ2", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAW2", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAQ4", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAW4", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAQ8", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAW8", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAx{1,2,4,8} must not have OpSB as either argument.
+
+ // MOVxload: loads
+ // Load (Q=8,L=4,W=2,B=1) bytes from (arg0+auxint+aux), arg1=mem.
+ // "+auxint+aux" == add auxint and the offset of the symbol in aux (if any) to the effective address
+ // Standard versions zero extend the result. SX versions sign extend the result.
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"},
+
+ // MOVxstore: stores
+ // Store (Q=8,L=4,W=2,B=1) low bytes of arg1.
+ // Does *(arg0+auxint+aux) = arg1, arg2=mem.
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+
+ // MOVOload/store: 16 byte load/store
+ // These operations are only used to move data around: there is no *O arithmetic, for example.
+ {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"}, // load 16 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
+
+ // MOVxloadidx: indexed loads
+ // load (Q=8,L=4,W=2,B=1) bytes from (arg0+scale*arg1+auxint+aux), arg2=mem.
+ // Results are zero-extended. (TODO: sign-extending indexed loads)
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", scale: 1, aux: "SymOff", typ: "UInt8", symEffect: "Read"},
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", scale: 1, aux: "SymOff", typ: "UInt16", symEffect: "Read"},
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", scale: 2, aux: "SymOff", typ: "UInt16", symEffect: "Read"},
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"},
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"},
+ {name: "MOVLloadidx8", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"},
+ {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"},
+ {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"},
+
+ // MOVxstoreidx: indexed stores
+ // Store (Q=8,L=4,W=2,B=1) low bytes of arg2.
+ // Does *(arg0+scale*arg1+auxint+aux) = arg2, arg3=mem.
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", scale: 2, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 4, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVLstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 8, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", symEffect: "Write"},
+ {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", scale: 8, aux: "SymOff", symEffect: "Write"},
+
+ // TODO: add size-mismatched indexed loads/stores, like MOVBstoreidx4?
+
+ // MOVxstoreconst: constant stores
+ // Store (O=16,Q=8,L=4,W=2,B=1) constant bytes.
+ // Does *(arg0+ValAndOff(AuxInt).Off()+aux) = ValAndOff(AuxInt).Val(), arg1=mem.
+ // O version can only store the constant 0.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVQstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+ {name: "MOVOstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVUPS", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},
+
+ // MOVxstoreconstidx: constant indexed stores
+ // Store (Q=8,L=4,W=2,B=1) constant bytes.
+ // Does *(arg0+scale*arg1+ValAndOff(AuxInt).Off()+aux) = ValAndOff(AuxInt).Val(), arg2=mem.
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", scale: 2, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", scale: 4, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+ {name: "MOVQstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+ {name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", scale: 8, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"},
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = mem
+ // auxint = # of bytes to zero
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI")},
+ clobbers: buildReg("DI"),
+ },
+ faultOnNilArg0: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 8-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSQ",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // With a register ABI, the actual register info for these instructions (i.e., what is used in regalloc) is augmented with per-call-site bindings of additional arguments to specific in and out registers.
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = # of bytes to copy, must be multiple of 16
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI X0"), // uses X0 as a temporary
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = # of 8-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSQ",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // (InvertFlags (CMPQ a b)) == (CMPQ b a)
+ // So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPQ b a))) instead.
+ // Rewrites will convert this to (SETG (CMPQ b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+ //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // LoweredWB invokes runtime.gcWriteBarrier{auxint}. arg0=mem, auxint=# of buffer entries needed.
+ // It saves all GP registers if necessary, but may clobber others.
+ // Returns a pointer to a write barrier buffer in R11.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave &^ (gp | g), outputs: []regMask{buildReg("R11")}}, clobberFlags: true, aux: "Int64"},
+
+ {name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned >
+ {name: "FlagGT_ULT"}, // signed > and unsigned <
+
+ // Atomic loads. These are just normal loads but return <value,memory> tuples
+ // so they can be properly ordered with other loads.
+ // load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBatomicload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVLatomicload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVQatomicload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // Atomic stores and exchanges. Stores use XCHG to get the right memory ordering semantics.
+ // store arg0 to arg1+auxint+aux, arg2=mem.
+ // These ops return a tuple of <old contents of *(arg1+auxint+aux), memory>.
+ // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
+ {name: "XCHGB", argLength: 3, reg: gpstorexchg, asm: "XCHGB", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Atomic adds.
+ // *(arg1+auxint+aux) += arg0. arg2=mem.
+ // Returns a tuple of <old contents of *(arg1+auxint+aux), memory>.
+ // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
+ {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+
+ // Compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *(arg0+auxint+aux) == arg1 {
+ // *(arg0+auxint+aux) = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // Note that these instructions also return the old value in AX, but we ignore it.
+ // TODO: have these return flags instead of bool. The current system generates:
+ // CMPXCHGQ ...
+ // SETEQ AX
+ // CMPB AX, $0
+ // JNE ...
+ // instead of just
+ // CMPXCHGQ ...
+ // JEQ ...
+ // but we can't do that because memory-using ops can't generate flags yet
+ // (flagalloc wants to move flag-generating instructions around).
+ {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Atomic memory updates.
+ {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+ {name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+
+ // Prefetch instructions
+ // Do prefetch arg0 address. arg0=addr, arg1=memory. Instruction variant selects locality hint
+ {name: "PrefetchT0", argLength: 2, reg: prefreg, asm: "PREFETCHT0", hasSideEffects: true},
+ {name: "PrefetchNTA", argLength: 2, reg: prefreg, asm: "PREFETCHNTA", hasSideEffects: true},
+
+ // CPUID feature: BMI1.
+ {name: "ANDNQ", argLength: 2, reg: gp21, asm: "ANDNQ", clobberFlags: true}, // arg0 &^ arg1
+ {name: "ANDNL", argLength: 2, reg: gp21, asm: "ANDNL", clobberFlags: true}, // arg0 &^ arg1
+ {name: "BLSIQ", argLength: 1, reg: gp11, asm: "BLSIQ", clobberFlags: true}, // arg0 & -arg0
+ {name: "BLSIL", argLength: 1, reg: gp11, asm: "BLSIL", clobberFlags: true}, // arg0 & -arg0
+ {name: "BLSMSKQ", argLength: 1, reg: gp11, asm: "BLSMSKQ", clobberFlags: true}, // arg0 ^ (arg0 - 1)
+ {name: "BLSMSKL", argLength: 1, reg: gp11, asm: "BLSMSKL", clobberFlags: true}, // arg0 ^ (arg0 - 1)
+ {name: "BLSRQ", argLength: 1, reg: gp11flags, asm: "BLSRQ", typ: "(UInt64,Flags)"}, // arg0 & (arg0 - 1)
+ {name: "BLSRL", argLength: 1, reg: gp11flags, asm: "BLSRL", typ: "(UInt32,Flags)"}, // arg0 & (arg0 - 1)
+ // count the number of trailing zero bits, prefer TZCNTQ over BSFQ, as TZCNTQ(0)==64
+ // and BSFQ(0) is undefined. Same for TZCNTL(0)==32
+ {name: "TZCNTQ", argLength: 1, reg: gp11, asm: "TZCNTQ", clobberFlags: true},
+ {name: "TZCNTL", argLength: 1, reg: gp11, asm: "TZCNTL", clobberFlags: true},
+
+ // CPUID feature: LZCNT.
+ // count the number of leading zero bits.
+ {name: "LZCNTQ", argLength: 1, reg: gp11, asm: "LZCNTQ", typ: "UInt64", clobberFlags: true},
+ {name: "LZCNTL", argLength: 1, reg: gp11, asm: "LZCNTL", typ: "UInt32", clobberFlags: true},
+
+ // CPUID feature: MOVBE
+ // MOVBEWload does not satisfy zero extended, so only use MOVBEWstore
+ {name: "MOVBEWstore", argLength: 3, reg: gpstore, asm: "MOVBEW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVBELload", argLength: 2, reg: gpload, asm: "MOVBEL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBELstore", argLength: 3, reg: gpstore, asm: "MOVBEL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVBEQload", argLength: 2, reg: gpload, asm: "MOVBEQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 8 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVBEQstore", argLength: 3, reg: gpstore, asm: "MOVBEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ // indexed MOVBE loads
+ {name: "MOVBELloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBEL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBELloadidx4", argLength: 3, reg: gploadidx, asm: "MOVBEL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBELloadidx8", argLength: 3, reg: gploadidx, asm: "MOVBEL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+8*arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBEQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBEQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load and swap 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVBEQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVBEQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load and swap 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
+ // indexed MOVBE stores
+ {name: "MOVBEWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEW", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVBEWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVBEW", scale: 2, aux: "SymOff", symEffect: "Write"}, // swap and store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVBELstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEL", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVBELstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVBEL", scale: 4, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ {name: "MOVBELstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVBEL", scale: 8, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+ {name: "MOVBEQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEQ", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVBEQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVBEQ", scale: 8, aux: "SymOff", symEffect: "Write"}, // swap and store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+
+ // CPUID feature: BMI2.
+ {name: "SARXQ", argLength: 2, reg: gp21, asm: "SARXQ"}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SARXL", argLength: 2, reg: gp21, asm: "SARXL"}, // signed int32(arg0) >> arg1, shift amount is mod 32
+ {name: "SHLXQ", argLength: 2, reg: gp21, asm: "SHLXQ"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SHLXL", argLength: 2, reg: gp21, asm: "SHLXL"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHRXQ", argLength: 2, reg: gp21, asm: "SHRXQ"}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SHRXL", argLength: 2, reg: gp21, asm: "SHRXL"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 32
+
+ {name: "SARXLload", argLength: 3, reg: gp21shxload, asm: "SARXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 32
+ {name: "SARXQload", argLength: 3, reg: gp21shxload, asm: "SARXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 64
+ {name: "SHLXLload", argLength: 3, reg: gp21shxload, asm: "SHLXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+auxint+aux) << arg1, arg2=mem, shift amount is mod 32
+ {name: "SHLXQload", argLength: 3, reg: gp21shxload, asm: "SHLXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+auxint+aux) << arg1, arg2=mem, shift amount is mod 64
+ {name: "SHRXLload", argLength: 3, reg: gp21shxload, asm: "SHRXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 32
+ {name: "SHRXQload", argLength: 3, reg: gp21shxload, asm: "SHRXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 64
+
+ {name: "SARXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SARXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+4*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SARXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SARXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SARXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64
+ {name: "SARXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SARXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64
+ {name: "SHLXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+1*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32
+ {name: "SHLXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+4*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32
+ {name: "SHLXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+8*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32
+ {name: "SHLXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHLXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+1*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 64
+ {name: "SHLXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHLXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+8*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 64
+ {name: "SHRXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHRXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SHRXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SHRXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+4*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SHRXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHRXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32
+ {name: "SHRXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHRXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64
+ {name: "SHRXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHRXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64
+ }
+
+ var AMD64blocks = []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "OS", controls: 1},
+ {name: "OC", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "EQF", controls: 1},
+ {name: "NEF", controls: 1},
+ {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
+ {name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+
+ // JUMPTABLE implements jump tables.
+ // Aux is the symbol (an *obj.LSym) for the jump table.
+ // control[0] is the index into the jump table.
+ // control[1] is the address of the jump table (the address of the symbol stored in Aux).
+ {name: "JUMPTABLE", controls: 2, aux: "Sym"},
+ }
+
+ archs = append(archs, arch{
+ name: "AMD64",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../amd64/ssa.go",
+ ops: AMD64ops,
+ blocks: AMD64blocks,
+ regnames: regNamesAMD64,
+ ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11",
+ ParamFloatRegNames: "X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14",
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: x15,
+ framepointerreg: int8(num["BP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules b/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules
new file mode 100644
index 0000000..a1e63d6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Prefer SARX/SHLX/SHRX instruction because it has less register restriction on the shift input.
+(SAR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SARX(Q|L) x y)
+(SHL(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHLX(Q|L) x y)
+(SHR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHRX(Q|L) x y)
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules b/src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules
new file mode 100644
index 0000000..dd8f8ac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules
@@ -0,0 +1,45 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules used by flagalloc and addressingmodes to
+// split a flag-generating merged load op into separate load and op.
+// Unlike with the other rules files, not all of these
+// rules will be applied to all values.
+// Rather, flagalloc will request for rules to be applied
+// to a particular problematic value.
+// These are often the exact inverse of rules in AMD64.rules,
+// only with the conditions removed.
+//
+// For addressingmodes, certain single instructions are slower than the two instruction
+// split generated here (which is different from the inputs to addressingmodes).
+// For example:
+// (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
+
+(CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x)
+
+(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off()] ptr mem) x)
+
+(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
+
+(CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x)
+(CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
+(CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
+(CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
+
+(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off()] ptr idx mem) x)
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x)
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x)
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x)
+
+(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()])
+
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules
new file mode 100644
index 0000000..ed0ed80
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules
@@ -0,0 +1,1475 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+(Add32carry ...) => (ADDS ...)
+(Add32withcarry ...) => (ADC ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+(Sub32carry ...) => (SUBS ...)
+(Sub32withcarry ...) => (SBC ...)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Hmul(32|32u) ...) => (HMU(L|LU) ...)
+(Mul32uhilo ...) => (MULLU ...)
+
+(Div32 x y) =>
+ (SUB (XOR <typ.UInt32> // negate the result if one operand is negative
+ (Select0 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
+(Div32u x y) => (Select0 <typ.UInt32> (CALLudiv x y))
+(Div16 x y) => (Div32 (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (Div32 (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod32 x y) =>
+ (SUB (XOR <typ.UInt32> // negate the result if x is negative
+ (Select1 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask x)) (Signmask x))
+(Mod32u x y) => (Select1 <typ.UInt32> (CALLudiv x y))
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+// (x + y) / 2 with x>=y -> (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(32|16|8) x) => (RSBconst [0] x)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(32|16|8) ...) => (MVN ...)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+(Abs ...) => (ABSD ...)
+
+// TODO: optimize this for ARMv5 and ARMv6
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz16NonZero ...) => (Ctz32 ...)
+(Ctz8NonZero ...) => (Ctz32 ...)
+
+// count trailing zero for ARMv5 and ARMv6
+// 32 - CLZ(x&-x - 1)
+(Ctz32 <t> x) && buildcfg.GOARM.Version<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
+(Ctz16 <t> x) && buildcfg.GOARM.Version<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
+(Ctz8 <t> x) && buildcfg.GOARM.Version<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
+
+// count trailing zero for ARMv7
+(Ctz32 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+
+// bit length
+(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
+
+// byte swap for ARMv5
+// let (a, b, c, d) be the bytes of x from high to low
+// t1 = x right rotate 16 bits -- (c, d, a, b )
+// t2 = x ^ t1 -- (a^c, b^d, a^c, b^d)
+// t3 = t2 &^ 0xff0000 -- (a^c, 0, a^c, b^d)
+// t4 = t3 >> 8 -- (0, a^c, 0, a^c)
+// t5 = x right rotate 8 bits -- (d, a, b, c )
+// result = t4 ^ t5 -- (d, c, b, a )
+// using shifted ops this can be done in 4 instructions.
+(Bswap32 <t> x) && buildcfg.GOARM.Version==5 =>
+ (XOR <t>
+ (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
+ (SRRconst <t> x [8]))
+
+// byte swap for ARMv6 and above
+(Bswap32 x) && buildcfg.GOARM.Version>=6 => (REV x)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// shifts
+// hardware instruction uses only the low byte of the shift
+// we compare to 256 to ensure Go semantics for large shifts
+(Lsh32x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh32x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Lsh16x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh16x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Lsh8x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh8x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Rsh32Ux32 x y) => (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) => (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh32Ux8 x y) => (SRL x (ZeroExt8to32 y))
+
+(Rsh16Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh16Ux8 x y) => (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh8Ux8 x y) => (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Rsh32x32 x y) => (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) => (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh32x8 x y) => (SRA x (ZeroExt8to32 y))
+
+(Rsh16x32 x y) => (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) => (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8x32 x y) => (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) => (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh8x8 x y) => (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+
+// constants
+(Const(8|16|32) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVWconst [0])
+(ConstBool [t]) => (MOVWconst [b2i32(t)])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Slicemask <t> x) => (SRAconst (RSBconst <t> [0] x) [31])
+
+// float <-> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt32Uto32F ...) => (MOVWUF ...)
+(Cvt32Uto64F ...) => (MOVWUD ...)
+(Cvt32Fto32 ...) => (MOVFW ...)
+(Cvt64Fto32 ...) => (MOVDW ...)
+(Cvt32Fto32U ...) => (MOVFWU ...)
+(Cvt64Fto32U ...) => (MOVDWU ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// fused-multiply-add
+(FMA x y z) => (FMULAD z x y)
+
+// comparisons
+(Eq8 x y) => (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq(32|64)F x y) => (Equal (CMP(F|D) x y))
+
+(Neq8 x y) => (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq(32|64)F x y) => (NotEqual (CMP(F|D) x y))
+
+(Less8 x y) => (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMP x y))
+(Less(32|64)F x y) => (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMP x y))
+(Leq(32|64)F x y) => (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMP x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVWaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)
+
+// zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+
+// Medium zeroing uses a duff device
+// 4 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%4 == 0 && s > 4 && s <= 512
+ && t.Alignment()%4 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
+
+// Large zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
+ (MOVWconst [0])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+
+// Medium move uses a duff device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%4 == 0 && s > 4 && s <= 512
+ && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (128 - s/4)] dst src mem)
+
+// Large move uses a loop
+(Move [s] {t} dst src mem)
+ && ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+
+(If cond yes no) => (NE (CMPconst [0] cond) yes no)
+
+// Absorb boolean tests into block
+(NE (CMPconst [0] (Equal cc)) yes no) => (EQ cc yes no)
+(NE (CMPconst [0] (NotEqual cc)) yes no) => (NE cc yes no)
+(NE (CMPconst [0] (LessThan cc)) yes no) => (LT cc yes no)
+(NE (CMPconst [0] (LessThanU cc)) yes no) => (ULT cc yes no)
+(NE (CMPconst [0] (LessEqual cc)) yes no) => (LE cc yes no)
+(NE (CMPconst [0] (LessEqualU cc)) yes no) => (ULE cc yes no)
+(NE (CMPconst [0] (GreaterThan cc)) yes no) => (GT cc yes no)
+(NE (CMPconst [0] (GreaterThanU cc)) yes no) => (UGT cc yes no)
+(NE (CMPconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no)
+(NE (CMPconst [0] (GreaterEqualU cc)) yes no) => (UGE cc yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// Optimizations
+
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
+(SUBconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off2-off1] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBload [off1-off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBUload [off1-off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHload [off1-off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHUload [off1-off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVWload [off1-off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVFload [off1-off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVDload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVDload [off1-off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVBstore [off1-off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVHstore [off1-off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVWstore [off1-off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVFstore [off1-off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHreg x)
+
+// fold constant into arithmetic ops
+(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
+(SUB (MOVWconst [c]) x) => (RSBconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(RSB (MOVWconst [c]) x) => (SUBconst [c] x)
+(RSB x (MOVWconst [c])) => (RSBconst [c] x)
+
+(ADDS x (MOVWconst [c])) => (ADDSconst [c] x)
+(SUBS x (MOVWconst [c])) => (SUBSconst [c] x)
+
+(ADC (MOVWconst [c]) x flags) => (ADCconst [c] x flags)
+(SBC (MOVWconst [c]) x flags) => (RSCconst [c] x flags)
+(SBC x (MOVWconst [c]) flags) => (SBCconst [c] x flags)
+
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(BIC x (MOVWconst [c])) => (BICconst [c] x)
+
+(SLL x (MOVWconst [c])) && 0 <= c && c < 32 => (SLLconst x [c])
+(SRL x (MOVWconst [c])) && 0 <= c && c < 32 => (SRLconst x [c])
+(SRA x (MOVWconst [c])) && 0 <= c && c < 32 => (SRAconst x [c])
+
+(CMP x (MOVWconst [c])) => (CMPconst [c] x)
+(CMP (MOVWconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMN x (MOVWconst [c])) => (CMNconst [c] x)
+(TST x (MOVWconst [c])) => (TSTconst [c] x)
+(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
+
+(SRR x (MOVWconst [c])) => (SRRconst x [c&31])
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
+
+// don't extend after proper load
+// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting instruction
+// MOVWnop doesn't emit instruction, only for ensuring the type.
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+
+// TODO: we should be able to get rid of MOVWnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
+// mul by constant
+(MUL x (MOVWconst [c])) && int32(c) == -1 => (RSBconst [0] x)
+(MUL _ (MOVWconst [0])) => (MOVWconst [0])
+(MUL x (MOVWconst [1])) => x
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c) => (SLLconst [int32(log32(c))] x)
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c-1) && c >= 3 => (ADDshiftLL x x [int32(log32(c-1))])
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c+1) && c >= 7 => (RSBshiftLL x x [int32(log32(c+1))])
+(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo32(c/3) => (SLLconst [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo32(c/5) => (SLLconst [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo32(c/7) => (SLLconst [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3]))
+(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo32(c/9) => (SLLconst [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3]))
+
+(MULA x (MOVWconst [c]) a) && c == -1 => (SUB a x)
+(MULA _ (MOVWconst [0]) a) => a
+(MULA x (MOVWconst [1]) a) => (ADD x a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULA (MOVWconst [c]) x a) && c == -1 => (SUB a x)
+(MULA (MOVWconst [0]) _ a) => a
+(MULA (MOVWconst [1]) x a) => (ADD x a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULS x (MOVWconst [c]) a) && c == -1 => (ADD a x)
+(MULS _ (MOVWconst [0]) a) => a
+(MULS x (MOVWconst [1]) a) => (RSB x a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULS x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULS x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULS x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULS x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULS (MOVWconst [c]) x a) && c == -1 => (ADD a x)
+(MULS (MOVWconst [0]) _ a) => a
+(MULS (MOVWconst [1]) x a) => (RSB x a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULS (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULS (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULS (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULS (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+// div by constant
+(Select0 (CALLudiv x (MOVWconst [1]))) => x
+(Select1 (CALLudiv _ (MOVWconst [1]))) => (MOVWconst [0])
+(Select0 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (SRLconst [int32(log32(c))] x)
+(Select1 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (ANDconst [c-1] x)
+
+// constant comparisons
+(CMPconst (MOVWconst [x]) [y]) => (FlagConstant [subFlags32(x,y)])
+(CMNconst (MOVWconst [x]) [y]) => (FlagConstant [addFlags32(x,y)])
+(TSTconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x&y)])
+(TEQconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x^y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagConstant [subFlags32(0, 1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVWconst [b2i32(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+
+// absorb flag constants into conditional instructions
+(CMOVWLSconst _ (FlagConstant [fc]) [c]) && fc.ule() => (MOVWconst [c])
+(CMOVWLSconst x (FlagConstant [fc]) [c]) && fc.ugt() => x
+
+(CMOVWHSconst _ (FlagConstant [fc]) [c]) && fc.uge() => (MOVWconst [c])
+(CMOVWHSconst x (FlagConstant [fc]) [c]) && fc.ult() => x
+
+(CMOVWLSconst x (InvertFlags flags) [c]) => (CMOVWHSconst x flags [c])
+(CMOVWHSconst x (InvertFlags flags) [c]) => (CMOVWLSconst x flags [c])
+
+(SRAcond x _ (FlagConstant [fc])) && fc.uge() => (SRAconst x [31])
+(SRAcond x y (FlagConstant [fc])) && fc.ult() => (SRA x y)
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [c] x) && int32(c)==-1 => x
+(ORconst [0] x) => x
+(ORconst [c] _) && int32(c)==-1 => (MOVWconst [-1])
+(XORconst [0] x) => x
+(BICconst [0] x) => x
+(BICconst [c] _) && int32(c)==-1 => (MOVWconst [0])
+
+// generic constant folding
+(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (SUBconst [-c] x)
+(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
+(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(ADDconst [c] (RSBconst [d] x)) => (RSBconst [c+d] x)
+(ADCconst [c] (ADDconst [d] x) flags) => (ADCconst [c+d] x flags)
+(ADCconst [c] (SUBconst [d] x) flags) => (ADCconst [c-d] x flags)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SUBconst [c] (RSBconst [d] x)) => (RSBconst [-c+d] x)
+(SBCconst [c] (ADDconst [d] x) flags) => (SBCconst [c-d] x flags)
+(SBCconst [c] (SUBconst [d] x) flags) => (SBCconst [c+d] x flags)
+(RSBconst [c] (MOVWconst [d])) => (MOVWconst [c-d])
+(RSBconst [c] (RSBconst [d] x)) => (ADDconst [c-d] x)
+(RSBconst [c] (ADDconst [d] x)) => (RSBconst [c-d] x)
+(RSBconst [c] (SUBconst [d] x)) => (RSBconst [c+d] x)
+(RSCconst [c] (ADDconst [d] x) flags) => (RSCconst [c-d] x flags)
+(RSCconst [c] (SUBconst [d] x) flags) => (RSCconst [c+d] x flags)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint64(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint64(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint64(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(MULA (MOVWconst [c]) (MOVWconst [d]) a) => (ADDconst [c*d] a)
+(MULS (MOVWconst [c]) (MOVWconst [d]) a) => (SUBconst [c*d] a)
+(Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(BICconst [c] (MOVWconst [d])) => (MOVWconst [d&^c])
+(BICconst [c] (BICconst [d] x)) => (BICconst [c|d] x)
+(MVN (MOVWconst [c])) => (MOVWconst [^c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
+// BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - Width - LSB) >> (32 - Width)
+(BFX [c] (MOVWconst [d])) => (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))])
+(BFXU [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))])
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) => (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) => (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) => (ADDshiftRA x y [c])
+(ADD x (SLL y z)) => (ADDshiftLLreg x y z)
+(ADD x (SRL y z)) => (ADDshiftRLreg x y z)
+(ADD x (SRA y z)) => (ADDshiftRAreg x y z)
+(ADC x (SLLconst [c] y) flags) => (ADCshiftLL x y [c] flags)
+(ADC x (SRLconst [c] y) flags) => (ADCshiftRL x y [c] flags)
+(ADC x (SRAconst [c] y) flags) => (ADCshiftRA x y [c] flags)
+(ADC x (SLL y z) flags) => (ADCshiftLLreg x y z flags)
+(ADC x (SRL y z) flags) => (ADCshiftRLreg x y z flags)
+(ADC x (SRA y z) flags) => (ADCshiftRAreg x y z flags)
+(ADDS x (SLLconst [c] y)) => (ADDSshiftLL x y [c])
+(ADDS x (SRLconst [c] y)) => (ADDSshiftRL x y [c])
+(ADDS x (SRAconst [c] y)) => (ADDSshiftRA x y [c])
+(ADDS x (SLL y z)) => (ADDSshiftLLreg x y z)
+(ADDS x (SRL y z)) => (ADDSshiftRLreg x y z)
+(ADDS x (SRA y z)) => (ADDSshiftRAreg x y z)
+(SUB x (SLLconst [c] y)) => (SUBshiftLL x y [c])
+(SUB (SLLconst [c] y) x) => (RSBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) => (SUBshiftRL x y [c])
+(SUB (SRLconst [c] y) x) => (RSBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) => (SUBshiftRA x y [c])
+(SUB (SRAconst [c] y) x) => (RSBshiftRA x y [c])
+(SUB x (SLL y z)) => (SUBshiftLLreg x y z)
+(SUB (SLL y z) x) => (RSBshiftLLreg x y z)
+(SUB x (SRL y z)) => (SUBshiftRLreg x y z)
+(SUB (SRL y z) x) => (RSBshiftRLreg x y z)
+(SUB x (SRA y z)) => (SUBshiftRAreg x y z)
+(SUB (SRA y z) x) => (RSBshiftRAreg x y z)
+(SBC x (SLLconst [c] y) flags) => (SBCshiftLL x y [c] flags)
+(SBC (SLLconst [c] y) x flags) => (RSCshiftLL x y [c] flags)
+(SBC x (SRLconst [c] y) flags) => (SBCshiftRL x y [c] flags)
+(SBC (SRLconst [c] y) x flags) => (RSCshiftRL x y [c] flags)
+(SBC x (SRAconst [c] y) flags) => (SBCshiftRA x y [c] flags)
+(SBC (SRAconst [c] y) x flags) => (RSCshiftRA x y [c] flags)
+(SBC x (SLL y z) flags) => (SBCshiftLLreg x y z flags)
+(SBC (SLL y z) x flags) => (RSCshiftLLreg x y z flags)
+(SBC x (SRL y z) flags) => (SBCshiftRLreg x y z flags)
+(SBC (SRL y z) x flags) => (RSCshiftRLreg x y z flags)
+(SBC x (SRA y z) flags) => (SBCshiftRAreg x y z flags)
+(SBC (SRA y z) x flags) => (RSCshiftRAreg x y z flags)
+(SUBS x (SLLconst [c] y)) => (SUBSshiftLL x y [c])
+(SUBS (SLLconst [c] y) x) => (RSBSshiftLL x y [c])
+(SUBS x (SRLconst [c] y)) => (SUBSshiftRL x y [c])
+(SUBS (SRLconst [c] y) x) => (RSBSshiftRL x y [c])
+(SUBS x (SRAconst [c] y)) => (SUBSshiftRA x y [c])
+(SUBS (SRAconst [c] y) x) => (RSBSshiftRA x y [c])
+(SUBS x (SLL y z)) => (SUBSshiftLLreg x y z)
+(SUBS (SLL y z) x) => (RSBSshiftLLreg x y z)
+(SUBS x (SRL y z)) => (SUBSshiftRLreg x y z)
+(SUBS (SRL y z) x) => (RSBSshiftRLreg x y z)
+(SUBS x (SRA y z)) => (SUBSshiftRAreg x y z)
+(SUBS (SRA y z) x) => (RSBSshiftRAreg x y z)
+(RSB x (SLLconst [c] y)) => (RSBshiftLL x y [c])
+(RSB (SLLconst [c] y) x) => (SUBshiftLL x y [c])
+(RSB x (SRLconst [c] y)) => (RSBshiftRL x y [c])
+(RSB (SRLconst [c] y) x) => (SUBshiftRL x y [c])
+(RSB x (SRAconst [c] y)) => (RSBshiftRA x y [c])
+(RSB (SRAconst [c] y) x) => (SUBshiftRA x y [c])
+(RSB x (SLL y z)) => (RSBshiftLLreg x y z)
+(RSB (SLL y z) x) => (SUBshiftLLreg x y z)
+(RSB x (SRL y z)) => (RSBshiftRLreg x y z)
+(RSB (SRL y z) x) => (SUBshiftRLreg x y z)
+(RSB x (SRA y z)) => (RSBshiftRAreg x y z)
+(RSB (SRA y z) x) => (SUBshiftRAreg x y z)
+(AND x (SLLconst [c] y)) => (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) => (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) => (ANDshiftRA x y [c])
+(AND x (SLL y z)) => (ANDshiftLLreg x y z)
+(AND x (SRL y z)) => (ANDshiftRLreg x y z)
+(AND x (SRA y z)) => (ANDshiftRAreg x y z)
+(OR x (SLLconst [c] y)) => (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) => (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) => (ORshiftRA x y [c])
+(OR x (SLL y z)) => (ORshiftLLreg x y z)
+(OR x (SRL y z)) => (ORshiftRLreg x y z)
+(OR x (SRA y z)) => (ORshiftRAreg x y z)
+(XOR x (SLLconst [c] y)) => (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) => (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) => (XORshiftRA x y [c])
+(XOR x (SRRconst [c] y)) => (XORshiftRR x y [c])
+(XOR x (SLL y z)) => (XORshiftLLreg x y z)
+(XOR x (SRL y z)) => (XORshiftRLreg x y z)
+(XOR x (SRA y z)) => (XORshiftRAreg x y z)
+(BIC x (SLLconst [c] y)) => (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) => (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) => (BICshiftRA x y [c])
+(BIC x (SLL y z)) => (BICshiftLLreg x y z)
+(BIC x (SRL y z)) => (BICshiftRLreg x y z)
+(BIC x (SRA y z)) => (BICshiftRAreg x y z)
+(MVN (SLLconst [c] x)) => (MVNshiftLL x [c])
+(MVN (SRLconst [c] x)) => (MVNshiftRL x [c])
+(MVN (SRAconst [c] x)) => (MVNshiftRA x [c])
+(MVN (SLL x y)) => (MVNshiftLLreg x y)
+(MVN (SRL x y)) => (MVNshiftRLreg x y)
+(MVN (SRA x y)) => (MVNshiftRAreg x y)
+
+(CMP x (SLLconst [c] y)) => (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) => (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) => (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) => (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) => (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) => (InvertFlags (CMPshiftRA x y [c]))
+(CMP x (SLL y z)) => (CMPshiftLLreg x y z)
+(CMP (SLL y z) x) => (InvertFlags (CMPshiftLLreg x y z))
+(CMP x (SRL y z)) => (CMPshiftRLreg x y z)
+(CMP (SRL y z) x) => (InvertFlags (CMPshiftRLreg x y z))
+(CMP x (SRA y z)) => (CMPshiftRAreg x y z)
+(CMP (SRA y z) x) => (InvertFlags (CMPshiftRAreg x y z))
+(TST x (SLLconst [c] y)) => (TSTshiftLL x y [c])
+(TST x (SRLconst [c] y)) => (TSTshiftRL x y [c])
+(TST x (SRAconst [c] y)) => (TSTshiftRA x y [c])
+(TST x (SLL y z)) => (TSTshiftLLreg x y z)
+(TST x (SRL y z)) => (TSTshiftRLreg x y z)
+(TST x (SRA y z)) => (TSTshiftRAreg x y z)
+(TEQ x (SLLconst [c] y)) => (TEQshiftLL x y [c])
+(TEQ x (SRLconst [c] y)) => (TEQshiftRL x y [c])
+(TEQ x (SRAconst [c] y)) => (TEQshiftRA x y [c])
+(TEQ x (SLL y z)) => (TEQshiftLLreg x y z)
+(TEQ x (SRL y z)) => (TEQshiftRLreg x y z)
+(TEQ x (SRA y z)) => (TEQshiftRAreg x y z)
+(CMN x (SLLconst [c] y)) => (CMNshiftLL x y [c])
+(CMN x (SRLconst [c] y)) => (CMNshiftRL x y [c])
+(CMN x (SRAconst [c] y)) => (CMNshiftRA x y [c])
+(CMN x (SLL y z)) => (CMNshiftLLreg x y z)
+(CMN x (SRL y z)) => (CMNshiftRLreg x y z)
+(CMN x (SRA y z)) => (CMNshiftRAreg x y z)
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVWconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVWconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVWconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ADCshiftLL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+(ADCshiftRL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+(ADCshiftRA (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+(ADDSshiftLL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SLLconst <x.Type> x [d]))
+(ADDSshiftRL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRLconst <x.Type> x [d]))
+(ADDSshiftRA (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRAconst <x.Type> x [d]))
+(SUBshiftLL (MOVWconst [c]) x [d]) => (RSBconst [c] (SLLconst <x.Type> x [d]))
+(SUBshiftRL (MOVWconst [c]) x [d]) => (RSBconst [c] (SRLconst <x.Type> x [d]))
+(SUBshiftRA (MOVWconst [c]) x [d]) => (RSBconst [c] (SRAconst <x.Type> x [d]))
+(SBCshiftLL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+(SBCshiftRL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+(SBCshiftRA (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+(SUBSshiftLL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SLLconst <x.Type> x [d]))
+(SUBSshiftRL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRLconst <x.Type> x [d]))
+(SUBSshiftRA (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRAconst <x.Type> x [d]))
+(RSBshiftLL (MOVWconst [c]) x [d]) => (SUBconst [c] (SLLconst <x.Type> x [d]))
+(RSBshiftRL (MOVWconst [c]) x [d]) => (SUBconst [c] (SRLconst <x.Type> x [d]))
+(RSBshiftRA (MOVWconst [c]) x [d]) => (SUBconst [c] (SRAconst <x.Type> x [d]))
+(RSCshiftLL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+(RSCshiftRL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+(RSCshiftRA (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+(RSBSshiftLL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SLLconst <x.Type> x [d]))
+(RSBSshiftRL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRLconst <x.Type> x [d]))
+(RSBSshiftRA (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVWconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVWconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVWconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVWconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVWconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVWconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVWconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVWconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVWconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftRR (MOVWconst [c]) x [d]) => (XORconst [c] (SRRconst <x.Type> x [d]))
+(CMPshiftLL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(TSTshiftLL (MOVWconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVWconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVWconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+(TEQshiftLL (MOVWconst [c]) x [d]) => (TEQconst [c] (SLLconst <x.Type> x [d]))
+(TEQshiftRL (MOVWconst [c]) x [d]) => (TEQconst [c] (SRLconst <x.Type> x [d]))
+(TEQshiftRA (MOVWconst [c]) x [d]) => (TEQconst [c] (SRAconst <x.Type> x [d]))
+(CMNshiftLL (MOVWconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVWconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVWconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+
+(ADDshiftLLreg (MOVWconst [c]) x y) => (ADDconst [c] (SLL <x.Type> x y))
+(ADDshiftRLreg (MOVWconst [c]) x y) => (ADDconst [c] (SRL <x.Type> x y))
+(ADDshiftRAreg (MOVWconst [c]) x y) => (ADDconst [c] (SRA <x.Type> x y))
+(ADCshiftLLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SLL <x.Type> x y) flags)
+(ADCshiftRLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRL <x.Type> x y) flags)
+(ADCshiftRAreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRA <x.Type> x y) flags)
+(ADDSshiftLLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SLL <x.Type> x y))
+(ADDSshiftRLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRL <x.Type> x y))
+(ADDSshiftRAreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRA <x.Type> x y))
+(SUBshiftLLreg (MOVWconst [c]) x y) => (RSBconst [c] (SLL <x.Type> x y))
+(SUBshiftRLreg (MOVWconst [c]) x y) => (RSBconst [c] (SRL <x.Type> x y))
+(SUBshiftRAreg (MOVWconst [c]) x y) => (RSBconst [c] (SRA <x.Type> x y))
+(SBCshiftLLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SLL <x.Type> x y) flags)
+(SBCshiftRLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRL <x.Type> x y) flags)
+(SBCshiftRAreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRA <x.Type> x y) flags)
+(SUBSshiftLLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SLL <x.Type> x y))
+(SUBSshiftRLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRL <x.Type> x y))
+(SUBSshiftRAreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRA <x.Type> x y))
+(RSBshiftLLreg (MOVWconst [c]) x y) => (SUBconst [c] (SLL <x.Type> x y))
+(RSBshiftRLreg (MOVWconst [c]) x y) => (SUBconst [c] (SRL <x.Type> x y))
+(RSBshiftRAreg (MOVWconst [c]) x y) => (SUBconst [c] (SRA <x.Type> x y))
+(RSCshiftLLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SLL <x.Type> x y) flags)
+(RSCshiftRLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRL <x.Type> x y) flags)
+(RSCshiftRAreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRA <x.Type> x y) flags)
+(RSBSshiftLLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SLL <x.Type> x y))
+(RSBSshiftRLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRL <x.Type> x y))
+(RSBSshiftRAreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRA <x.Type> x y))
+(ANDshiftLLreg (MOVWconst [c]) x y) => (ANDconst [c] (SLL <x.Type> x y))
+(ANDshiftRLreg (MOVWconst [c]) x y) => (ANDconst [c] (SRL <x.Type> x y))
+(ANDshiftRAreg (MOVWconst [c]) x y) => (ANDconst [c] (SRA <x.Type> x y))
+(ORshiftLLreg (MOVWconst [c]) x y) => (ORconst [c] (SLL <x.Type> x y))
+(ORshiftRLreg (MOVWconst [c]) x y) => (ORconst [c] (SRL <x.Type> x y))
+(ORshiftRAreg (MOVWconst [c]) x y) => (ORconst [c] (SRA <x.Type> x y))
+(XORshiftLLreg (MOVWconst [c]) x y) => (XORconst [c] (SLL <x.Type> x y))
+(XORshiftRLreg (MOVWconst [c]) x y) => (XORconst [c] (SRL <x.Type> x y))
+(XORshiftRAreg (MOVWconst [c]) x y) => (XORconst [c] (SRA <x.Type> x y))
+(CMPshiftLLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+(CMPshiftRLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+(CMPshiftRAreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+(TSTshiftLLreg (MOVWconst [c]) x y) => (TSTconst [c] (SLL <x.Type> x y))
+(TSTshiftRLreg (MOVWconst [c]) x y) => (TSTconst [c] (SRL <x.Type> x y))
+(TSTshiftRAreg (MOVWconst [c]) x y) => (TSTconst [c] (SRA <x.Type> x y))
+(TEQshiftLLreg (MOVWconst [c]) x y) => (TEQconst [c] (SLL <x.Type> x y))
+(TEQshiftRLreg (MOVWconst [c]) x y) => (TEQconst [c] (SRL <x.Type> x y))
+(TEQshiftRAreg (MOVWconst [c]) x y) => (TEQconst [c] (SRA <x.Type> x y))
+(CMNshiftLLreg (MOVWconst [c]) x y) => (CMNconst [c] (SLL <x.Type> x y))
+(CMNshiftRLreg (MOVWconst [c]) x y) => (CMNconst [c] (SRL <x.Type> x y))
+(CMNshiftRAreg (MOVWconst [c]) x y) => (CMNconst [c] (SRA <x.Type> x y))
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVWconst [c]) [d]) => (ADDconst x [c<<uint64(d)])
+(ADDshiftRL x (MOVWconst [c]) [d]) => (ADDconst x [int32(uint32(c)>>uint64(d))])
+(ADDshiftRA x (MOVWconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(ADCshiftLL x (MOVWconst [c]) [d] flags) => (ADCconst x [c<<uint64(d)] flags)
+(ADCshiftRL x (MOVWconst [c]) [d] flags) => (ADCconst x [int32(uint32(c)>>uint64(d))] flags)
+(ADCshiftRA x (MOVWconst [c]) [d] flags) => (ADCconst x [c>>uint64(d)] flags)
+(ADDSshiftLL x (MOVWconst [c]) [d]) => (ADDSconst x [c<<uint64(d)])
+(ADDSshiftRL x (MOVWconst [c]) [d]) => (ADDSconst x [int32(uint32(c)>>uint64(d))])
+(ADDSshiftRA x (MOVWconst [c]) [d]) => (ADDSconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVWconst [c]) [d]) => (SUBconst x [c<<uint64(d)])
+(SUBshiftRL x (MOVWconst [c]) [d]) => (SUBconst x [int32(uint32(c)>>uint64(d))])
+(SUBshiftRA x (MOVWconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(SBCshiftLL x (MOVWconst [c]) [d] flags) => (SBCconst x [c<<uint64(d)] flags)
+(SBCshiftRL x (MOVWconst [c]) [d] flags) => (SBCconst x [int32(uint32(c)>>uint64(d))] flags)
+(SBCshiftRA x (MOVWconst [c]) [d] flags) => (SBCconst x [c>>uint64(d)] flags)
+(SUBSshiftLL x (MOVWconst [c]) [d]) => (SUBSconst x [c<<uint64(d)])
+(SUBSshiftRL x (MOVWconst [c]) [d]) => (SUBSconst x [int32(uint32(c)>>uint64(d))])
+(SUBSshiftRA x (MOVWconst [c]) [d]) => (SUBSconst x [c>>uint64(d)])
+(RSBshiftLL x (MOVWconst [c]) [d]) => (RSBconst x [c<<uint64(d)])
+(RSBshiftRL x (MOVWconst [c]) [d]) => (RSBconst x [int32(uint32(c)>>uint64(d))])
+(RSBshiftRA x (MOVWconst [c]) [d]) => (RSBconst x [c>>uint64(d)])
+(RSCshiftLL x (MOVWconst [c]) [d] flags) => (RSCconst x [c<<uint64(d)] flags)
+(RSCshiftRL x (MOVWconst [c]) [d] flags) => (RSCconst x [int32(uint32(c)>>uint64(d))] flags)
+(RSCshiftRA x (MOVWconst [c]) [d] flags) => (RSCconst x [c>>uint64(d)] flags)
+(RSBSshiftLL x (MOVWconst [c]) [d]) => (RSBSconst x [c<<uint64(d)])
+(RSBSshiftRL x (MOVWconst [c]) [d]) => (RSBSconst x [int32(uint32(c)>>uint64(d))])
+(RSBSshiftRA x (MOVWconst [c]) [d]) => (RSBSconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVWconst [c]) [d]) => (ANDconst x [c<<uint64(d)])
+(ANDshiftRL x (MOVWconst [c]) [d]) => (ANDconst x [int32(uint32(c)>>uint64(d))])
+(ANDshiftRA x (MOVWconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL x (MOVWconst [c]) [d]) => (ORconst x [c<<uint64(d)])
+(ORshiftRL x (MOVWconst [c]) [d]) => (ORconst x [int32(uint32(c)>>uint64(d))])
+(ORshiftRA x (MOVWconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(XORshiftLL x (MOVWconst [c]) [d]) => (XORconst x [c<<uint64(d)])
+(XORshiftRL x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d))])
+(XORshiftRA x (MOVWconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(XORshiftRR x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
+(BICshiftLL x (MOVWconst [c]) [d]) => (BICconst x [c<<uint64(d)])
+(BICshiftRL x (MOVWconst [c]) [d]) => (BICconst x [int32(uint32(c)>>uint64(d))])
+(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)])
+(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c<<uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) => (MOVWconst [^int32(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) => (MOVWconst [int32(c)>>uint64(d)])
+(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c<<uint64(d)])
+(CMPshiftRL x (MOVWconst [c]) [d]) => (CMPconst x [int32(uint32(c)>>uint64(d))])
+(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVWconst [c]) [d]) => (TSTconst x [c<<uint64(d)])
+(TSTshiftRL x (MOVWconst [c]) [d]) => (TSTconst x [int32(uint32(c)>>uint64(d))])
+(TSTshiftRA x (MOVWconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+(TEQshiftLL x (MOVWconst [c]) [d]) => (TEQconst x [c<<uint64(d)])
+(TEQshiftRL x (MOVWconst [c]) [d]) => (TEQconst x [int32(uint32(c)>>uint64(d))])
+(TEQshiftRA x (MOVWconst [c]) [d]) => (TEQconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVWconst [c]) [d]) => (CMNconst x [c<<uint64(d)])
+(CMNshiftRL x (MOVWconst [c]) [d]) => (CMNconst x [int32(uint32(c)>>uint64(d))])
+(CMNshiftRA x (MOVWconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+
+(ADDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRA x y [c])
+(TSTshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftLL x y [c])
+(TSTshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRL x y [c])
+(TSTshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRA x y [c])
+(TEQshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftLL x y [c])
+(TEQshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRL x y [c])
+(TEQshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRA x y [c])
+(CMNshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftLL x y [c])
+(CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c])
+(CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c])
+
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft32 x y) => (SRR x (RSBconst [0] <y.Type> y))
+
+// ((x>>8) | (x<<8)) -> (REV16 x), the type of x is uint16, "|" can also be "^" or "+".
+// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
+// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM.Version>=6 => (REV16 x)
+
+// use indexed loads and stores
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBloadidx ptr idx mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVBstoreidx ptr idx val mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVHstoreidx ptr idx val mem)
+
+// constant folding in indexed loads and stores
+(MOVWloadidx ptr (MOVWconst [c]) mem) => (MOVWload [c] ptr mem)
+(MOVWloadidx (MOVWconst [c]) ptr mem) => (MOVWload [c] ptr mem)
+(MOVBloadidx ptr (MOVWconst [c]) mem) => (MOVBload [c] ptr mem)
+(MOVBloadidx (MOVWconst [c]) ptr mem) => (MOVBload [c] ptr mem)
+(MOVBUloadidx ptr (MOVWconst [c]) mem) => (MOVBUload [c] ptr mem)
+(MOVBUloadidx (MOVWconst [c]) ptr mem) => (MOVBUload [c] ptr mem)
+(MOVHUloadidx ptr (MOVWconst [c]) mem) => (MOVHUload [c] ptr mem)
+(MOVHUloadidx (MOVWconst [c]) ptr mem) => (MOVHUload [c] ptr mem)
+(MOVHloadidx ptr (MOVWconst [c]) mem) => (MOVHload [c] ptr mem)
+(MOVHloadidx (MOVWconst [c]) ptr mem) => (MOVHload [c] ptr mem)
+
+(MOVWstoreidx ptr (MOVWconst [c]) val mem) => (MOVWstore [c] ptr val mem)
+(MOVWstoreidx (MOVWconst [c]) ptr val mem) => (MOVWstore [c] ptr val mem)
+(MOVBstoreidx ptr (MOVWconst [c]) val mem) => (MOVBstore [c] ptr val mem)
+(MOVBstoreidx (MOVWconst [c]) ptr val mem) => (MOVBstore [c] ptr val mem)
+(MOVHstoreidx ptr (MOVWconst [c]) val mem) => (MOVHstore [c] ptr val mem)
+(MOVHstoreidx (MOVWconst [c]) ptr val mem) => (MOVHstore [c] ptr val mem)
+
+(MOVWloadidx ptr (SLLconst idx [c]) mem) => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx (SLLconst idx [c]) ptr mem) => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx ptr (SRLconst idx [c]) mem) => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx (SRLconst idx [c]) ptr mem) => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx ptr (SRAconst idx [c]) mem) => (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWloadidx (SRAconst idx [c]) ptr mem) => (MOVWloadshiftRA ptr idx [c] mem)
+
+(MOVWstoreidx ptr (SLLconst idx [c]) val mem) => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx (SLLconst idx [c]) ptr val mem) => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRLconst idx [c]) val mem) => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx (SRLconst idx [c]) ptr val mem) => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRAconst idx [c]) val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstoreidx (SRAconst idx [c]) ptr val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
+
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem)
+
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem)
+
+// generic simplifications
+(ADD x (RSBconst [0] y)) => (SUB x y)
+(ADD <t> (RSBconst [c] x) (RSBconst [d] y)) => (RSBconst [c+d] (ADD <t> x y))
+(SUB x x) => (MOVWconst [0])
+(RSB x x) => (MOVWconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
+(BIC x x) => (MOVWconst [0])
+
+(ADD (MUL x y) a) => (MULA x y a)
+(SUB a (MUL x y)) && buildcfg.GOARM.Version == 7 => (MULS x y a)
+(RSB (MUL x y) a) && buildcfg.GOARM.Version == 7 => (MULS x y a)
+
+(NEGF (MULF x y)) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
+(NMULF (NEGF x) y) => (MULF x y)
+(NMULD (NEGD x) y) => (MULD x y)
+
+// the result will overwrite the addend, since they are in the same register
+(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
+
+(AND x (MVN y)) => (BIC x y)
+
+// simplification with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(AND x (MVNshiftLL y [c])) => (BICshiftLL x y [c])
+(AND x (MVNshiftRL y [c])) => (BICshiftRL x y [c])
+(AND x (MVNshiftRA y [c])) => (BICshiftRA x y [c])
+
+// floating point optimizations
+(CMPF x (MOVFconst [0])) => (CMPF0 x)
+(CMPD x (MOVDconst [0])) => (CMPD0 x)
+
+// bit extraction
+(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
+
+// comparison simplification
+((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854
+((EQ|NE) (CMN x (RSBconst [0] y))) => ((EQ|NE) (CMP x y)) // sense of carry bit not preserved; see also #50864
+(EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no)
+(EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
+(EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no)
+(EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (NE (CMP x y) yes no)
+(NE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (NE (CMP a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (NE (CMPconst [c] x) yes no)
+(NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (EQ (CMN x y) yes no)
+(EQ (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (EQ (CMN a (MUL <x.Type> x y)) yes no)
+(EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (EQ (CMNconst [c] x) yes no)
+(EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (NE (CMN x y) yes no)
+(NE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (NE (CMN a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (NE (CMNconst [c] x) yes no)
+(NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (EQ (TST x y) yes no)
+(EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (EQ (TSTconst [c] x) yes no)
+(EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (NE (TST x y) yes no)
+(NE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (NE (TSTconst [c] x) yes no)
+(NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (EQ (TEQ x y) yes no)
+(EQ (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (EQ (TEQconst [c] x) yes no)
+(EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (NE (TEQ x y) yes no)
+(NE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (NE (TEQconst [c] x) yes no)
+(NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LTnoov (CMP x y) yes no)
+(LT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMPconst [c] x) yes no)
+(LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LEnoov (CMP x y) yes no)
+(LE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMPconst [c] x) yes no)
+(LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LTnoov (CMN x y) yes no)
+(LT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMNconst [c] x) yes no)
+(LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LEnoov (CMN x y) yes no)
+(LE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMNconst [c] x) yes no)
+(LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LTnoov (TST x y) yes no)
+(LT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TSTconst [c] x) yes no)
+(LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LEnoov (TST x y) yes no)
+(LE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TSTconst [c] x) yes no)
+(LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LTnoov (TEQ x y) yes no)
+(LT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TEQconst [c] x) yes no)
+(LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LEnoov (TEQ x y) yes no)
+(LE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TEQconst [c] x) yes no)
+(LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GTnoov (CMP x y) yes no)
+(GT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMPconst [c] x) yes no)
+(GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GEnoov (CMP x y) yes no)
+(GE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMPconst [c] x) yes no)
+(GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GTnoov (CMN x y) yes no)
+(GT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMNconst [c] x) yes no)
+(GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GEnoov (CMN x y) yes no)
+(GE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMNconst [c] x) yes no)
+(GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GTnoov (TST x y) yes no)
+(GT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TSTconst [c] x) yes no)
+(GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GEnoov (TST x y) yes no)
+(GE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TSTconst [c] x) yes no)
+(GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GTnoov (TEQ x y) yes no)
+(GT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TEQconst [c] x) yes no)
+(GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GEnoov (TEQ x y) yes no)
+(GE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TEQconst [c] x) yes no)
+(GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRAreg x y z) yes no)
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
new file mode 100644
index 0000000..c5ee028
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -0,0 +1,1999 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (FADD(S|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (FSUB(S|D) ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul(32|16|8) ...) => (MULW ...)
+(Mul(32|64)F ...) => (FMUL(S|D) ...)
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (UMULH ...)
+(Hmul32 x y) => (SRAconst (MULL <typ.Int64> x y) [32])
+(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
+(Select0 (Mul64uhilo x y)) => (UMULH x y)
+(Select1 (Mul64uhilo x y)) => (MUL x y)
+
+(Div64 [false] x y) => (DIV x y)
+(Div32 [false] x y) => (DIVW x y)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div64u ...) => (UDIV ...)
+(Div32u ...) => (UDIVW ...)
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Mod64 x y) => (MOD x y)
+(Mod32 x y) => (MODW x y)
+(Mod64u ...) => (UMOD ...)
+(Mod32u ...) => (UMODW ...)
+(Mod(16|8) x y) => (MODW (SignExt(16|8)to32 x) (SignExt(16|8)to32 y))
+(Mod(16|8)u x y) => (UMODW (ZeroExt(16|8)to32 x) (ZeroExt(16|8)to32 y))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(32|64)F ...) => (FNEG(S|D) ...)
+(Com(64|32|16|8) ...) => (MVN ...)
+
+// math package intrinsics
+(Abs ...) => (FABSD ...)
+(Sqrt ...) => (FSQRTD ...)
+(Ceil ...) => (FRINTPD ...)
+(Floor ...) => (FRINTMD ...)
+(Round ...) => (FRINTAD ...)
+(RoundToEven ...) => (FRINTND ...)
+(Trunc ...) => (FRINTZD ...)
+(FMA x y z) => (FMADDD z x y)
+
+(Sqrt32 ...) => (FSQRTS ...)
+
+(Min(64|32)F ...) => (FMIN(D|S) ...)
+(Max(64|32)F ...) => (FMAX(D|S) ...)
+
+// lowering rotates
+// we do rotate detection in generic rules, if the following rules need to be changed, check generic rules first.
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft8 <t> x y) => (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft16 <t> x y) => (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
+(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
+(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
+
+(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)
+
+(Ctz64 <t> x) => (CLZ (RBIT <t> x))
+(Ctz32 <t> x) => (CLZW (RBITW <t> x))
+(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+
+(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
+
+// Load args directly into the register class where it will be used.
+(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+
+// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set.
+(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
+(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
+(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
+(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
+
+// float <=> int register moves, with no conversion.
+// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val)
+(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
+(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val)
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+
+(Bswap64 ...) => (REV ...)
+(Bswap32 ...) => (REVW ...)
+(Bswap16 ...) => (REV16W ...)
+
+(BitRev64 ...) => (RBIT ...)
+(BitRev32 ...) => (RBITW ...)
+(BitRev16 x) => (SRLconst [48] (RBIT <typ.UInt64> x))
+(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))
+
+// In fact, UMOD will be translated into UREM instruction, and UREM is originally translated into
+// UDIV and MSUB instructions. But if there is already an identical UDIV instruction just before or
+// after UREM (case like quo, rem := z/y, z%y), then the second UDIV instruction becomes redundant.
+// The purpose of this rule is to have this extra UDIV instruction removed in CSE pass.
+(UMOD <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
+
+// 64-bit addition with carry.
+(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
+(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+
+// 64-bit subtraction with borrowing.
+(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XOR (MOVDconst [1]) x)
+
+// shifts
+// hardware instruction uses only the low 6 bits of the shift
+// we compare to 64 to ensure Go semantics for large shifts
+// Rules about rotates with non-const shift are based on the following rules,
+// if the following rules change, please also modify the rules based on them.
+
+// check shiftIsBounded first, if shift value is proved to be valid then we
+// can do the shift directly.
+// left shift
+(Lsh(64|32|16|8)x64 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
+(Lsh(64|32|16|8)x32 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
+(Lsh(64|32|16|8)x16 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
+(Lsh(64|32|16|8)x8 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
+
+// signed right shift
+(Rsh64x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> x y)
+(Rsh32x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) y)
+(Rsh16x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) y)
+(Rsh8x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) y)
+
+// unsigned right shift
+(Rsh64Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> x y)
+(Rsh32Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt32to64 x) y)
+(Rsh16Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt16to64 x) y)
+(Rsh8Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt8to64 x) y)
+
+// shift value may be out of range, use CMP + CSEL instead
+(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh64x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh64Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
+
+(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh64x(32|16|8) x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
+
+(Rsh32x64 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
+
+(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
+
+(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F [val]) => (FMOV(S|D)const [float64(val)])
+(ConstNil) => (MOVDconst [0])
+(ConstBool [t]) => (MOVDconst [b2i(t)])
+
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (SCVTFWS ...)
+(Cvt32to64F ...) => (SCVTFWD ...)
+(Cvt64to32F ...) => (SCVTFS ...)
+(Cvt64to64F ...) => (SCVTFD ...)
+(Cvt32Uto32F ...) => (UCVTFWS ...)
+(Cvt32Uto64F ...) => (UCVTFWD ...)
+(Cvt64Uto32F ...) => (UCVTFS ...)
+(Cvt64Uto64F ...) => (UCVTFD ...)
+(Cvt32Fto32 ...) => (FCVTZSSW ...)
+(Cvt64Fto32 ...) => (FCVTZSDW ...)
+(Cvt32Fto64 ...) => (FCVTZSS ...)
+(Cvt64Fto64 ...) => (FCVTZSD ...)
+(Cvt32Fto32U ...) => (FCVTZUSW ...)
+(Cvt64Fto32U ...) => (FCVTZUDW ...)
+(Cvt32Fto64U ...) => (FCVTZUS ...)
+(Cvt64Fto64U ...) => (FCVTZUD ...)
+(Cvt32Fto64F ...) => (FCVTSD ...)
+(Cvt64Fto32F ...) => (FCVTDS ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (LoweredRound32F ...)
+(Round64F ...) => (LoweredRound64F ...)
+
+// comparisons
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPS x y))
+(Eq64F x y) => (Equal (FCMPD x y))
+
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq(32|64)F x y) => (NotEqual (FCMP(S|D) x y))
+
+(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+
+// Set condition flags for floating-point comparisons "x < y"
+// and "x <= y". Because if either or both of the operands are
+// NaNs, all three of (x < y), (x == y) and (x > y) are false,
+// and ARM Manual says FCMP instruction sets PSTATE.<N,Z,C,V>
+// of this case to (0, 0, 1, 1).
+(Less32F x y) => (LessThanF (FCMPS x y))
+(Less64F x y) => (LessThanF (FCMPD x y))
+
+// For an unsigned integer x, the following rules are useful when combining branch
+// 0 < x => x != 0
+// x <= 0 => x == 0
+// x < 1 => x == 0
+// 1 <= x => x != 0
+(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
+(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero)
+(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0]))
+(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)
+
+(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMPW x y))
+(Less64U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+
+// Refer to the comments for op Less64F above.
+(Leq32F x y) => (LessEqualF (FCMPS x y))
+(Leq64F x y) => (LessEqualF (FCMPD x y))
+
+(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMPW x y))
+(Leq64U x y) => (LessEqualU (CMP x y))
+
+// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
+(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
+(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
+(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
+(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))
+
+// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
+(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
+(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [off] ptr)
+
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [5] ptr mem) =>
+ (MOVBstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [6] ptr mem) =>
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [7] ptr mem) =>
+ (MOVWstore [3] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
+(Zero [9] ptr mem) =>
+ (MOVBstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [10] ptr mem) =>
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [11] ptr mem) =>
+ (MOVDstore [3] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [12] ptr mem) =>
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [13] ptr mem) =>
+ (MOVDstore [5] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [14] ptr mem) =>
+ (MOVDstore [6] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [15] ptr mem) =>
+ (MOVDstore [7] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [16] ptr mem) =>
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+
+(Zero [32] ptr mem) =>
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+
+(Zero [48] ptr mem) =>
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+
+(Zero [64] ptr mem) =>
+ (STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+
+// strip off fractional word zeroing
+(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
+ (Zero [8]
+ (OffPtr <ptr.Type> ptr [s-8])
+ (Zero [s-s%16] ptr mem))
+(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
+ (Zero [16]
+ (OffPtr <ptr.Type> ptr [s-16])
+ (Zero [s-s%16] ptr mem))
+
+// medium zeroing uses a duff device
+// 4, 16, and 64 are magic constants, see runtime/mkduff.go
+(Zero [s] ptr mem)
+ && s%16 == 0 && s > 64 && s <= 16*64
+ && !config.noDuffDevice =>
+ (DUFFZERO [4 * (64 - s/16)] ptr mem)
+
+// large zeroing uses a loop
+(Zero [s] ptr mem)
+ && s%16 == 0 && (s > 16*64 || config.noDuffDevice) =>
+ (LoweredZero
+ ptr
+ (ADDconst <ptr.Type> [s-16] ptr)
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVWstore [3] dst (MOVWUload [3] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+(Move [9] dst src mem) =>
+ (MOVBstore [8] dst (MOVBUload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [10] dst src mem) =>
+ (MOVHstore [8] dst (MOVHUload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [11] dst src mem) =>
+ (MOVDstore [3] dst (MOVDload [3] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [12] dst src mem) =>
+ (MOVWstore [8] dst (MOVWUload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [13] dst src mem) =>
+ (MOVDstore [5] dst (MOVDload [5] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [14] dst src mem) =>
+ (MOVDstore [6] dst (MOVDload [6] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [15] dst src mem) =>
+ (MOVDstore [7] dst (MOVDload [7] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [16] dst src mem) =>
+ (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)
+(Move [32] dst src mem) =>
+ (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
+ (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
+(Move [48] dst src mem) =>
+ (STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
+ (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
+ (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
+(Move [64] dst src mem) =>
+ (STP [48] dst (Select0 <typ.UInt64> (LDP [48] src mem)) (Select1 <typ.UInt64> (LDP [48] src mem))
+ (STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
+ (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
+ (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
+
+(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i] ptr mem)
+(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i-8] ptr mem)
+
+// strip off fractional word move
+(Move [s] dst src mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
+ (Move [8]
+ (OffPtr <dst.Type> dst [s-8])
+ (OffPtr <src.Type> src [s-8])
+ (Move [s-s%16] dst src mem))
+(Move [s] dst src mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
+ (Move [16]
+ (OffPtr <dst.Type> dst [s-16])
+ (OffPtr <src.Type> src [s-16])
+ (Move [s-s%16] dst src mem))
+
+// medium move uses a duff device
+(Move [s] dst src mem)
+ && s > 64 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (64 - s/16)] dst src mem)
+// 8 is the number of bytes to encode:
+//
+// LDP.P 16(R16), (R26, R27)
+// STP.P (R26, R27), 16(R17)
+//
+// 64 is number of these blocks. See runtime/duff_arm64.s:duffcopy
+
+// large move uses a loop
+(Move [s] dst src mem)
+ && s%16 == 0 && (s > 16*64 || config.noDuffDevice)
+ && logLargeCopy(v, s) =>
+ (LoweredMove
+ dst
+ src
+ (ADDconst <src.Type> src [s-16])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(If (LessThanF cc) yes no) => (FLT cc yes no)
+(If (LessEqualF cc) yes no) => (FLE cc yes no)
+(If (GreaterThanF cc) yes no) => (FGT cc yes no)
+(If (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (TBNZ [0] cond yes no)
+
+(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
+
+// atomic intrinsics
+// Note: these ops do not accept offset.
+(AtomicLoad8 ...) => (LDARB ...)
+(AtomicLoad32 ...) => (LDARW ...)
+(AtomicLoad64 ...) => (LDAR ...)
+(AtomicLoadPtr ...) => (LDAR ...)
+
+(AtomicStore8 ...) => (STLRB ...)
+(AtomicStore32 ...) => (STLRW ...)
+(AtomicStore64 ...) => (STLR ...)
+(AtomicStorePtrNoWB ...) => (STLR ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
+
+(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
+(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...)
+(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)
+
+// Currently the updated value is not used, but we need a register to temporarily hold it.
+(AtomicAnd(8|32) ptr val mem) => (Select1 (LoweredAtomicAnd(8|32) ptr val mem))
+(AtomicOr(8|32) ptr val mem) => (Select1 (LoweredAtomicOr(8|32) ptr val mem))
+(AtomicAnd(8|32)Variant ptr val mem) => (Select1 (LoweredAtomicAnd(8|32)Variant ptr val mem))
+(AtomicOr(8|32)Variant ptr val mem) => (Select1 (LoweredAtomicOr(8|32)Variant ptr val mem))
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// Publication barrier (0xe is ST option)
+(PubBarrier mem) => (DMB [0xe] mem)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NZ (Equal cc) yes no) => (EQ cc yes no)
+(NZ (NotEqual cc) yes no) => (NE cc yes no)
+(NZ (LessThan cc) yes no) => (LT cc yes no)
+(NZ (LessThanU cc) yes no) => (ULT cc yes no)
+(NZ (LessEqual cc) yes no) => (LE cc yes no)
+(NZ (LessEqualU cc) yes no) => (ULE cc yes no)
+(NZ (GreaterThan cc) yes no) => (GT cc yes no)
+(NZ (GreaterThanU cc) yes no) => (UGT cc yes no)
+(NZ (GreaterEqual cc) yes no) => (GE cc yes no)
+(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(NZ (LessThanF cc) yes no) => (FLT cc yes no)
+(NZ (LessEqualF cc) yes no) => (FLE cc yes no)
+(NZ (GreaterThanF cc) yes no) => (FGT cc yes no)
+(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(TBNZ [0] (Equal cc) yes no) => (EQ cc yes no)
+(TBNZ [0] (NotEqual cc) yes no) => (NE cc yes no)
+(TBNZ [0] (LessThan cc) yes no) => (LT cc yes no)
+(TBNZ [0] (LessThanU cc) yes no) => (ULT cc yes no)
+(TBNZ [0] (LessEqual cc) yes no) => (LE cc yes no)
+(TBNZ [0] (LessEqualU cc) yes no) => (ULE cc yes no)
+(TBNZ [0] (GreaterThan cc) yes no) => (GT cc yes no)
+(TBNZ [0] (GreaterThanU cc) yes no) => (UGT cc yes no)
+(TBNZ [0] (GreaterEqual cc) yes no) => (GE cc yes no)
+(TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(TBNZ [0] (LessThanF cc) yes no) => (FLT cc yes no)
+(TBNZ [0] (LessEqualF cc) yes no) => (FLE cc yes no)
+(TBNZ [0] (GreaterThanF cc) yes no) => (FGT cc yes no)
+(TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst [c] y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTWconst [int32(c)] y) yes no)
+
+// For conditional instructions such as CSET, CSEL.
+((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] z:(AND x y))) && z.Uses == 1 =>
+ ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TST x y))
+((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
+ ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTWconst [int32(c)] y))
+((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] z:(AND x y))) && z.Uses == 1 =>
+ ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTW x y))
+((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
+ ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTconst [c] y))
+
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNconst [c] y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNWconst [int32(c)] y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW x y) yes no)
+
+// CMP(x,-y) -> CMN(x,y) is only valid for unordered comparison, if y can be -1<<63
+((EQ|NE) (CMP x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMN x y) yes no)
+((Equal|NotEqual) (CMP x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMN x y))
+
+// CMPW(x,-y) -> CMNW(x,y) is only valid for unordered comparison, if y can be -1<<31
+((EQ|NE) (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMNW x y) yes no)
+((Equal|NotEqual) (CMPW x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMNW x y))
+
+// For conditional instructions such as CSET, CSEL.
+// TODO: add support for LE, GT, overflow needs to be considered.
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNconst [c] y))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNWconst [int32(c)] y))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN x y))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW x y))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MADD a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN a (MUL <x.Type> x y)))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MSUB a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMP a (MUL <x.Type> x y)))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MADDW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW a (MULW <x.Type> x y)))
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MSUBW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMPW a (MULW <x.Type> x y)))
+
+((CMPconst|CMNconst) [c] y) && c < 0 && c != -1<<63 => ((CMNconst|CMPconst) [-c] y)
+((CMPWconst|CMNWconst) [c] y) && c < 0 && c != -1<<31 => ((CMNWconst|CMPWconst) [-c] y)
+
+((EQ|NE) (CMPconst [0] x) yes no) => ((Z|NZ) x yes no)
+((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no)
+
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN a (MUL <x.Type> x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP a (MUL <x.Type> x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW <x.Type> x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMPW a (MULW <x.Type> x y)) yes no)
+
+// Absorb bit-tests into block
+(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+
+// Test sign-bit for signed comparisons against zero
+(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no)
+(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no)
+(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
+(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no)
+
+// fold offset into address
+(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
+ (MOVDaddr [int32(off1)+off2] {sym} ptr)
+
+// fold address into load/store.
+// Do not fold global variable access in -dynlink mode, where it will
+// be rewritten to use the GOT via REGTMP, which currently cannot handle
+// large offset.
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVDload [off1+int32(off2)] {sym} ptr mem)
+(LDP [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (LDP [off1+int32(off2)] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed load
+(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
+(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
+(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
+(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
+(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)
+
+(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+
+// shifted register indexed load
+(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
+(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
+(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
+(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
+(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
+(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem)
+(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
+(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr mem)
+(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
+(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem)
+
+(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem)
+(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem)
+(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem)
+(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem)
+(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem)
+(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem)
+(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem)
+(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed store
+(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
+(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
+(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
+(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)
+(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
+(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
+(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
+(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
+(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
+(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
+(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
+(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
+(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
+(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)
+
+// shifted register indexed store
+(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
+(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
+(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)
+
+(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem)
+(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem)
+(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem)
+(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem)
+(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem)
+(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem)
+(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem)
+(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) => (MOVQstorezero [off] {sym} ptr mem)
+
+// register indexed store zero
+(MOVDstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem)
+(MOVBstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem)
+(MOVDstoreidx ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx ptr idx mem)
+(MOVWstoreidx ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx ptr idx mem)
+(MOVHstoreidx ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx ptr idx mem)
+(MOVBstoreidx ptr idx (MOVDconst [0]) mem) => (MOVBstorezeroidx ptr idx mem)
+(MOVDstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDstorezero [int32(c)] ptr mem)
+(MOVDstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVDstorezero [int32(c)] idx mem)
+(MOVWstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem)
+(MOVWstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVWstorezero [int32(c)] idx mem)
+(MOVHstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem)
+(MOVHstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem)
+(MOVBstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem)
+(MOVBstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem)
+
+// shifted register indexed store zero
+(MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx ptr (SLLconst [3] idx) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx ptr (SLLconst [2] idx) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx ptr (SLLconst [1] idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx ptr (ADD idx idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx (SLLconst [3] idx) ptr mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx (SLLconst [2] idx) ptr mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx (SLLconst [1] idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx (ADD idx idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDstorezero [int32(c<<3)] ptr mem)
+(MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWstorezero [int32(c<<2)] ptr mem)
+(MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHstorezero [int32(c<<1)] ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+// these seem to have bad interaction with other rules, resulting in slower code
+//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
+//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
+//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
+//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
+//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
+//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
+//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(LDP [off] {sym} ptr (STP [off2] {sym2} ptr2 x y _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x y
+
+(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+
+(MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+
+(MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx4 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx4 _ _ _)) => (MOVDreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+
+// if a register move has only 1 use, just use the same register without emitting instruction
+// MOVDnop doesn't emit instruction, only for ensuring the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// TODO: we should be able to get rid of MOVDnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+
+// fold constant into arithmetic ops
+(ADD x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
+(SUB x (MOVDconst [c])) => (SUBconst [c] x)
+(AND x (MOVDconst [c])) => (ANDconst [c] x)
+(OR x (MOVDconst [c])) => (ORconst [c] x)
+(XOR x (MOVDconst [c])) => (XORconst [c] x)
+(TST x (MOVDconst [c])) => (TSTconst [c] x)
+(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
+(CMN x (MOVDconst [c])) => (CMNconst [c] x)
+(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
+(BIC x (MOVDconst [c])) => (ANDconst [^c] x)
+(EON x (MOVDconst [c])) => (XORconst [^c] x)
+(ORN x (MOVDconst [c])) => (ORconst [^c] x)
+
+(SLL x (MOVDconst [c])) => (SLLconst x [c&63])
+(SRL x (MOVDconst [c])) => (SRLconst x [c&63])
+(SRA x (MOVDconst [c])) => (SRAconst x [c&63])
+(SLL x (ANDconst [63] y)) => (SLL x y)
+(SRL x (ANDconst [63] y)) => (SRL x y)
+(SRA x (ANDconst [63] y)) => (SRA x y)
+
+(CMP x (MOVDconst [c])) => (CMPconst [c] x)
+(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
+(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
+
+(ROR x (MOVDconst [c])) => (RORconst x [c&63])
+(RORW x (MOVDconst [c])) => (RORWconst x [c&31])
+
+(ADDSflags x (MOVDconst [c])) => (ADDSconstflags [c] x)
+
+(ADDconst [c] y) && c < 0 => (SUBconst [-c] y)
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
+
+// mul-neg => mneg
+(NEG (MUL x y)) => (MNEG x y)
+(NEG (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
+(MUL (NEG x) y) => (MNEG x y)
+(MULW (NEG x) y) => (MNEGW x y)
+
+// madd/msub
+(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+
+(ADD a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(SUB a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
+
+// optimize ADCSflags, SBCSflags and friends
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)
+
+// mul by constant
+(MUL x (MOVDconst [-1])) => (NEG x)
+(MUL _ (MOVDconst [0])) => (MOVDconst [0])
+(MUL x (MOVDconst [1])) => x
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+(MULW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (NEG <x.Type> x))
+(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+// mneg by constant
+(MNEG x (MOVDconst [-1])) => x
+(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
+(MNEG x (MOVDconst [1])) => (NEG x)
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
+(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
+
+
+(MADD a x (MOVDconst [-1])) => (SUB a x)
+(MADD a _ (MOVDconst [0])) => a
+(MADD a x (MOVDconst [1])) => (ADD a x)
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADD a (MOVDconst [-1]) x) => (SUB a x)
+(MADD a (MOVDconst [0]) _) => a
+(MADD a (MOVDconst [1]) x) => (ADD a x)
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
+(MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+
+(MSUB a x (MOVDconst [-1])) => (ADD a x)
+(MSUB a _ (MOVDconst [0])) => a
+(MSUB a x (MOVDconst [1])) => (SUB a x)
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a (MOVDconst [-1]) x) => (ADD a x)
+(MSUB a (MOVDconst [0]) _) => a
+(MSUB a (MOVDconst [1]) x) => (SUB a x)
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+
+// div by constant
+(UDIV x (MOVDconst [1])) => x
+(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
+(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
+(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
+(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
+(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVDconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVDconst [0])
+(BIC x x) => (MOVDconst [0])
+(EON x x) => (MOVDconst [-1])
+(ORN x x) => (MOVDconst [-1])
+(AND x (MVN y)) => (BIC x y)
+(XOR x (MVN y)) => (EON x y)
+(OR x (MVN y)) => (ORN x y)
+(MVN (XOR x y)) => (EON x y)
+(NEG (NEG x)) => x
+
+(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
+(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
+(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
+(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
+(CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag)
+(CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag)
+(CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag)
+(CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag)
+(CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag)
+(CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag)
+
+(SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
+(SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (MVN x)
+
+// generic constant folding
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
+(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
+(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
+(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
+(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
+(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
+(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
+(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
+(MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
+(MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
+(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
+(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
+(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
+(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
+(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
+(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(MVN (MOVDconst [c])) => (MOVDconst [^c])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+(MOVDreg (MOVDconst [c])) => (MOVDconst [c])
+
+// constant comparisons
+(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
+(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
+(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
+(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
+(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
+(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+(Z (MOVDconst [0]) yes no) => (First yes no)
+(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
+(NZ (MOVDconst [0]) yes no) => (First no yes)
+(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
+(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
+(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
+(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb InvertFlags into conditional instructions
+(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp)
+(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp)
+(CSETM [cc] (InvertFlags cmp)) => (CSETM [arm64Invert(cc)] cmp)
+(CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp)
+(CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp)
+(CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+(LessThanF (InvertFlags x)) => (GreaterThanF x)
+(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
+(GreaterThanF (InvertFlags x)) => (LessThanF x)
+(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
+(LessThanNoov (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
+(GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
+
+// Boolean-generating instructions (NOTE: NOT all boolean Values) always
+// zero upper bit of the register; no need to zero-extend
+(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
+
+// Don't bother extending if we're not using the higher bits.
+(MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
+(MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
+(MOV(W|WU)reg x) && v.Type.Size() <= 4 => x
+
+// omit unsign extension
+(MOVWUreg x) && zeroUpper32Bits(x, 3) => x
+
+// omit sign extension
+(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
+(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
+(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
+
+// absorb flag constants into conditional instructions
+(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
+(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
+(CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y)
+(CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y)
+(CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y)
+(CSETM [cc] flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1])
+(CSETM [cc] flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
+
+// absorb flags back into boolean CSEL
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL [boolval.Op] x y flagArg(boolval))
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL0 [boolval.Op] x flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+
+// absorb shifts into ops
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
+(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
+(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
+(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
+(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
+(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
+(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
+(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
+(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
+(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
+(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
+(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
+(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
+(OR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO x0 y [c])
+(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
+(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
+(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
+(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
+(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
+(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
+(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
+(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
+(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
+(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
+(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
+(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
+(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
+(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
+(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
+(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
+(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
+(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
+(TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
+(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftRO (MOVDconst [c]) x [d]) => (ORconst [c] (RORconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))
+
+// constant folding in *shift ops
+(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
+(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
+(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
+(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(ORshiftRO x (MOVDconst [c]) [d]) => (ORconst x [rotateRight64(c, d)])
+(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
+(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
+(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
+(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
+(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
+(ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst x [^rotateRight64(c, d)])
+(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
+(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
+(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
+(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+(TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])
+
+// simplification with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ANDshiftRO y:(RORconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftRO y:(RORconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
+(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
+
+// rev16w | rev16
+// ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
+
+// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ && uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ => (REV16W x)
+
+// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ && (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ => (REV16 x)
+
+// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ && (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ => (REV16 (ANDconst <x.Type> [0xffffffff] x))
+
+// Extract from reg pair
+(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+
+// Rewrite special pairs of shifts to AND.
+// On ARM64 the bitmask can fit into an instruction.
+(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
+(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
+
+// Special case setting bit as 1. An example is math.Copysign(c,-1)
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)
+
+// If the shift amount is larger than the datasize(32, 16, 8), we can optimize to constant 0.
+(MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0])
+(MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0])
+(MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0])
+
+// After zero extension, the upper (64-datasize(32|16|8)) bits are zero, we can optimiza to constant 0.
+(SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0])
+(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
+(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])
+
+// bitfield ops
+
+// sbfiz
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// int64(x << lc)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+// int64(x) << lc
+(SLLconst [lc] (MOVWreg x)) => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+(SLLconst [lc] (MOVHreg x)) => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+(SLLconst [lc] (MOVBreg x)) => (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+
+// sbfx
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// int64(x) >> rc
+(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
+// merge sbfx and sign-extension into sbfx
+(MOVWreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (SBFX [bfc] x)
+(MOVHreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (SBFX [bfc] x)
+(MOVBreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (SBFX [bfc] x)
+
+// sbfiz/sbfx combinations: merge shifts into bitfield ops
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// ubfiz
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// uint64(x) << lc
+(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+// uint64(x << lc)
+(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
+
+// merge ANDconst into ubfiz
+// (x & ac) << sc
+(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+// (x << sc) & ac
+(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+
+// ubfx
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// uint64(x) >> rc
+(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
+// uint64(x >> rc)
+(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
+(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
+(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
+// merge ANDconst into ubfx
+// (x >> sc) & ac
+(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+// (x & ac) >> sc
+(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+// merge ANDconst and ubfx into ubfx
+(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
+ (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
+(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0) =>
+ (UBFX [bfc] x)
+// merge ubfx and zerso-extension into ubfx
+(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
+(MOVHUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (UBFX [bfc] x)
+(MOVBUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (UBFX [bfc] x)
+
+// ubfiz/ubfx combinations: merge shifts into bitfield ops
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) >> c2) >> c3
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) << c2) >> c3
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// bfi
+(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ && ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ => (BFI [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ && lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ => (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+// bfxil
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ => (BFXIL [bfc] y x)
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+ => (BFXIL [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
+ => (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+
+// FP simplification
+(FNEGS (FMULS x y)) => (FNMULS x y)
+(FNEGD (FMULD x y)) => (FNMULD x y)
+(FMULS (FNEGS x) y) => (FNMULS x y)
+(FMULD (FNEGD x) y) => (FNMULD x y)
+(FNEGS (FNMULS x y)) => (FMULS x y)
+(FNEGD (FNMULD x y)) => (FMULD x y)
+(FNMULS (FNEGS x) y) => (FMULS x y)
+(FNMULD (FNEGD x) y) => (FMULD x y)
+
+(FADDS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y)
+(FADDD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y)
+(FSUBS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y)
+(FSUBD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y)
+(FSUBS (FMULS x y) a) && a.Block.Func.useFMA(v) => (FNMSUBS a x y)
+(FSUBD (FMULD x y) a) && a.Block.Func.useFMA(v) => (FNMSUBD a x y)
+(FADDS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y)
+(FADDD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y)
+(FSUBS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y)
+(FSUBD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y)
+(FSUBS (FNMULS x y) a) && a.Block.Func.useFMA(v) => (FNMADDS a x y)
+(FSUBD (FNMULD x y) a) && a.Block.Func.useFMA(v) => (FNMADDD a x y)
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+
+// Prefetch instructions (aux is option: 0 - PLDL1KEEP; 1 - PLDL1STRM)
+(PrefetchCache addr mem) => (PRFM [0] addr mem)
+(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem)
+
+// Arch-specific inlining for small or disjoint runtime.memmove
+(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(s1, s2, s3, call)
+ => (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
+
+((REV|REVW) ((REV|REVW) p)) => p
+
+// runtime/internal/math.MulUintptr intrinsics
+
+(Select0 (Mul64uover x y)) => (MUL x y)
+(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
new file mode 100644
index 0000000..5a98aa0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go
@@ -0,0 +1,803 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R27).
+// - All 32-bit Ops will zero the upper 32 bits of the destination register.
+
+// Suffixes encode the bit width of various instructions.
+// D (double word) = 64 bit
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64
+// Be careful when hand coding regmasks.
+var regNamesARM64 = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18", // platform register, not used
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ // R27 = REGTMP not used in regalloc
+ "g", // aka R28
+ "R29", // frame pointer, not used
+ "R30", // aka REGLINK
+ "SP", // aka R31
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r0 = buildReg("R0")
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp0flags1 = regInfo{inputs: []regMask{0}, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11flags = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21nog = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gpg, gpg, gpg}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpload2 = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gpg, gpg}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ fpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ prefreg = regInfo{inputs: []regMask{gpspsbg}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCS", commutative: true}, // arg0+arg1+carry, set flags.
+ {name: "ADCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "ADC"}, // ZR+ZR+carry
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "ADDSconstflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDS", aux: "Int64"}, // arg0+auxint, set flags.
+ {name: "ADDSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDS", commutative: true}, // arg0+arg1, set flags.
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64"}, // arg0 - auxInt
+ {name: "SBCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBCS"}, // arg0-(arg1+borrowing), set flags.
+ {name: "SUBSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBS"}, // arg0 - arg1, set flags.
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true}, // arg0 * arg1, 32-bit
+ {name: "MNEG", argLength: 2, reg: gp21, asm: "MNEG", commutative: true}, // -arg0 * arg1
+ {name: "MNEGW", argLength: 2, reg: gp21, asm: "MNEGW", commutative: true}, // -arg0 * arg1, 32-bit
+ {name: "MULH", argLength: 2, reg: gp21, asm: "SMULH", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "UMULH", argLength: 2, reg: gp21, asm: "UMULH", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true}, // arg0 * arg1, signed, 32-bit mult results in 64-bit
+ {name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true}, // arg0 * arg1, unsigned, 32-bit mult results in 64-bit
+ {name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed
+ {name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"}, // arg0 / arg1, unsigned
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit
+ {name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"}, // arg0 / arg1, unsigned, 32 bit
+ {name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed
+ {name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned
+ {name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit
+ {name: "UMODW", argLength: 2, reg: gp21, asm: "UREMW"}, // arg0 % arg1, unsigned, 32 bit
+
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0 + arg1
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0 - arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0 * arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true}, // arg0 * arg1
+ {name: "FNMULS", argLength: 2, reg: fp21, asm: "FNMULS", commutative: true}, // -(arg0 * arg1)
+ {name: "FNMULD", argLength: 2, reg: fp21, asm: "FNMULD", commutative: true}, // -(arg0 * arg1)
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0 / arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int64"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "EON", argLength: 2, reg: gp21, asm: "EON"}, // arg0 ^ ^arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0 | ^arg1
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "NEGSflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "NEGS"}, // -arg0, set flags.
+ {name: "NGCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "NGC"}, // -1 if borrowing, 0 otherwise.
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD"}, // abs(arg0), float64
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32
+ {name: "FMIND", argLength: 2, reg: fp21, asm: "FMIND"}, // min(arg0, arg1)
+ {name: "FMINS", argLength: 2, reg: fp21, asm: "FMINS"}, // min(arg0, arg1)
+ {name: "FMAXD", argLength: 2, reg: fp21, asm: "FMAXD"}, // max(arg0, arg1)
+ {name: "FMAXS", argLength: 2, reg: fp21, asm: "FMAXS"}, // max(arg0, arg1)
+ {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // byte reverse, 64-bit
+ {name: "REVW", argLength: 1, reg: gp11, asm: "REVW"}, // byte reverse, 32-bit
+ {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // byte reverse in each 16-bit halfword, 64-bit
+ {name: "REV16W", argLength: 1, reg: gp11, asm: "REV16W"}, // byte reverse in each 16-bit halfword, 32-bit
+ {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // bit reverse, 64-bit
+ {name: "RBITW", argLength: 1, reg: gp11, asm: "RBITW"}, // bit reverse, 32-bit
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero, 64-bit
+ {name: "CLZW", argLength: 1, reg: gp11, asm: "CLZW"}, // count leading zero, 32-bit
+ {name: "VCNT", argLength: 1, reg: fp11, asm: "VCNT"}, // count set bits for each 8-bit unit and store the result in each 8-bit unit
+ {name: "VUADDLV", argLength: 1, reg: fp11, asm: "VUADDLV"}, // unsigned sum of eight bytes in a 64-bit value, zero extended to 64-bit.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ // 3-operand, the addend comes first
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // +arg0 + (arg1 * arg2)
+ {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD"}, // +arg0 + (arg1 * arg2)
+ {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS"}, // -arg0 - (arg1 * arg2)
+ {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD"}, // -arg0 - (arg1 * arg2)
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // +arg0 - (arg1 * arg2)
+ {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD"}, // +arg0 - (arg1 * arg2)
+ {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS"}, // -arg0 + (arg1 * arg2)
+ {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD"}, // -arg0 + (arg1 * arg2)
+ {name: "MADD", argLength: 3, reg: gp31, asm: "MADD"}, // +arg0 + (arg1 * arg2)
+ {name: "MADDW", argLength: 3, reg: gp31, asm: "MADDW"}, // +arg0 + (arg1 * arg2), 32-bit
+ {name: "MSUB", argLength: 3, reg: gp31, asm: "MSUB"}, // +arg0 - (arg1 * arg2)
+ {name: "MSUBW", argLength: 3, reg: gp31, asm: "MSUBW"}, // +arg0 - (arg1 * arg2), 32-bit
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "LSL"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt, auxInt should be in the range 0 to 63.
+ {name: "SRL", argLength: 2, reg: gp21, asm: "LSR"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned, auxInt should be in the range 0 to 63.
+ {name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed, auxInt should be in the range 0 to 63.
+ {name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits
+ {name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits
+ {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits, auxInt should be in the range 0 to 63.
+ {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits, auxInt should be in the range 0 to 31.
+ {name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt, auxInt should be in the range 0 to 63.
+ {name: "EXTRWconst", argLength: 2, reg: gp21, asm: "EXTRW", aux: "Int64"}, // extract 32 bits from arg0[31:0]:arg1[31:0] starting at lsb auxInt and zero top 32 bits, auxInt should be in the range 0 to 31.
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<63
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags", commutative: true}, // arg0 compare to -arg1, 32 bit, provided arg1 is not 1<<31
+ {name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int64", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TSTW", argLength: 2, reg: gp2flags, asm: "TSTW", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0, 32 bit
+ {name: "TSTWconst", argLength: 1, reg: gp1flags, asm: "TSTW", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0, 32 bit
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+ {name: "FCMPS0", argLength: 1, reg: fp1flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "FCMPD0", argLength: 1, reg: fp1flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // shifted ops
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRO", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRO", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRO", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRO", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftRO", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "EONshiftRL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftRO", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRO", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<<auxInt) compare to 0, auxInt should be in the range 0 to 63.
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRO", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1 ROR auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+
+ // bitfield ops
+ // for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
+ // insert low width bits of arg1 into the result starting at bit lsb, copy other bits from arg0
+ {name: "BFI", argLength: 2, reg: gp21nog, asm: "BFI", aux: "ARM64BitField", resultInArg0: true},
+ // extract width bits of arg1 starting at bit lsb and insert at low end of result, copy other bits from arg0
+ {name: "BFXIL", argLength: 2, reg: gp21nog, asm: "BFXIL", aux: "ARM64BitField", resultInArg0: true},
+ // insert low width bits of arg0 into the result starting at bit lsb, bits to the left of the inserted bit field are set to the high/sign bit of the inserted bit field, bits to the right are zeroed
+ {name: "SBFIZ", argLength: 1, reg: gp11, asm: "SBFIZ", aux: "ARM64BitField"},
+ // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are set to the high/sign bit of the extracted bitfield
+ {name: "SBFX", argLength: 1, reg: gp11, asm: "SBFX", aux: "ARM64BitField"},
+ // insert low width bits of arg0 into the result starting at bit lsb, bits to the left and right of the inserted bit field are zeroed
+ {name: "UBFIZ", argLength: 1, reg: gp11, asm: "UBFIZ", aux: "ARM64BitField"},
+ // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are zeroed
+ {name: "UBFX", argLength: 1, reg: gp11, asm: "UBFX", aux: "ARM64BitField"},
+
+ // moves
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "UInt64", rematerializeable: true}, // 64 bits from auxint
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVS", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVD", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "LDP", argLength: 2, reg: gpload2, aux: "SymOff", asm: "LDP", typ: "(UInt64,UInt64)", faultOnNilArg0: true, symEffect: "Read"}, // load from ptr = arg0 + auxInt + aux, returns the tuple <*(*uint64)ptr, *(*uint64)(ptr+8)>. arg1=mem.
+ {name: "FMOVSload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVS", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ // register indexed load
+ {name: "MOVDloadidx", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit dword from arg0 + arg1, arg2 = mem.
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVWUloadidx", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load 8-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load 8-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "FMOVSloadidx", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1, arg2=mem.
+ {name: "FMOVDloadidx", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1, arg2=mem.
+
+ // shifted register indexed load
+ {name: "MOVHloadidx2", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit half-word from arg0 + arg1*2, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVHUloadidx2", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit half-word from arg0 + arg1*2, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVWloadidx4", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1*4, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVWUloadidx4", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1*4, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVDloadidx8", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit double-word from arg0 + arg1*8, arg2 = mem.
+ {name: "FMOVSloadidx4", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1*4, arg2 = mem.
+ {name: "FMOVDloadidx8", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1*8, arg2 = mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "STP", argLength: 4, reg: gpstore2, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of arg1 and arg2 to arg0 + auxInt + aux. arg3=mem.
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ // register indexed store
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstore2, asm: "MOVB", typ: "Mem"}, // store 1 byte of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1, arg3=mem.
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1, arg3=mem.
+
+ // shifted register indexed store
+ {name: "MOVHstoreidx2", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1*2, arg3 = mem.
+ {name: "MOVWstoreidx4", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1*4, arg3 = mem.
+ {name: "MOVDstoreidx8", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1*8, arg3 = mem.
+ {name: "FMOVSstoreidx4", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1*4, arg3=mem.
+ {name: "FMOVDstoreidx8", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1*8, arg3=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVQstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+
+ // register indexed store zero
+ {name: "MOVBstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVHstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVWstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVDstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1, arg2 = mem.
+
+ // shifted register indexed store zero
+ {name: "MOVHstorezeroidx2", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1*2, arg2 = mem.
+ {name: "MOVWstorezeroidx4", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1*4, arg2 = mem.
+ {name: "MOVDstorezeroidx8", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1*8, arg2 = mem.
+
+ {name: "FMOVDgpfp", argLength: 1, reg: gpfp, asm: "FMOVD"}, // move int64 to float64 (no conversion)
+ {name: "FMOVDfpgp", argLength: 1, reg: fpgp, asm: "FMOVD"}, // move float64 to int64 (no conversion)
+ {name: "FMOVSgpfp", argLength: 1, reg: gpfp, asm: "FMOVS"}, // move 32bits from int to float reg (no conversion)
+ {name: "FMOVSfpgp", argLength: 1, reg: fpgp, asm: "FMOVS"}, // move 32bits from float to int reg, zero extend (no conversion)
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"}, // move from arg0
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"}, // int32 -> float32
+ {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"}, // int32 -> float64
+ {name: "UCVTFWS", argLength: 1, reg: gpfp, asm: "UCVTFWS"}, // uint32 -> float32
+ {name: "UCVTFWD", argLength: 1, reg: gpfp, asm: "UCVTFWD"}, // uint32 -> float64
+ {name: "SCVTFS", argLength: 1, reg: gpfp, asm: "SCVTFS"}, // int64 -> float32
+ {name: "SCVTFD", argLength: 1, reg: gpfp, asm: "SCVTFD"}, // int64 -> float64
+ {name: "UCVTFS", argLength: 1, reg: gpfp, asm: "UCVTFS"}, // uint64 -> float32
+ {name: "UCVTFD", argLength: 1, reg: gpfp, asm: "UCVTFD"}, // uint64 -> float64
+ {name: "FCVTZSSW", argLength: 1, reg: fpgp, asm: "FCVTZSSW"}, // float32 -> int32
+ {name: "FCVTZSDW", argLength: 1, reg: fpgp, asm: "FCVTZSDW"}, // float64 -> int32
+ {name: "FCVTZUSW", argLength: 1, reg: fpgp, asm: "FCVTZUSW"}, // float32 -> uint32
+ {name: "FCVTZUDW", argLength: 1, reg: fpgp, asm: "FCVTZUDW"}, // float64 -> uint32
+ {name: "FCVTZSS", argLength: 1, reg: fpgp, asm: "FCVTZSS"}, // float32 -> int64
+ {name: "FCVTZSD", argLength: 1, reg: fpgp, asm: "FCVTZSD"}, // float64 -> int64
+ {name: "FCVTZUS", argLength: 1, reg: fpgp, asm: "FCVTZUS"}, // float32 -> uint64
+ {name: "FCVTZUD", argLength: 1, reg: fpgp, asm: "FCVTZUD"}, // float64 -> uint64
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD"}, // float32 -> float64
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS"}, // float64 -> float32
+
+ // floating-point round to integral
+ {name: "FRINTAD", argLength: 1, reg: fp11, asm: "FRINTAD"},
+ {name: "FRINTMD", argLength: 1, reg: fp11, asm: "FRINTMD"},
+ {name: "FRINTND", argLength: 1, reg: fp11, asm: "FRINTND"},
+ {name: "FRINTPD", argLength: 1, reg: fp11, asm: "FRINTPD"},
+ {name: "FRINTZD", argLength: 1, reg: fp11, asm: "FRINTZD"},
+
+ // conditional instructions; auxint is
+ // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.)
+ {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1
+ {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0
+ {name: "CSINC", argLength: 3, reg: gp2flags1, asm: "CSINC", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 + 1
+ {name: "CSINV", argLength: 3, reg: gp2flags1, asm: "CSINV", aux: "CCop"}, // auxint(flags) ? arg0 : ^arg1
+ {name: "CSNEG", argLength: 3, reg: gp2flags1, asm: "CSNEG", aux: "CCop"}, // auxint(flags) ? arg0 : -arg1
+ {name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
+
+ // function calls
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+ {name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y false otherwise.
+ {name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y false otherwise.
+ {name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y false otherwise.
+ {name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise.
+ {name: "NotLessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y || x is unordered with y, false otherwise.
+ {name: "NotLessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y || x is unordered with y, false otherwise.
+ {name: "NotGreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y || x is unordered with y, false otherwise.
+ {name: "NotGreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y || x is unordered with y, false otherwise.
+ {name: "LessThanNoov", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y but without honoring overflow, false otherwise.
+ {name: "GreaterEqualNoov", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y but without honoring overflow, false otherwise.
+
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R20 changed as side effect
+ // R16 and R17 may be clobbered by linker trampoline.
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R16 R17 R20 R30"),
+ },
+ faultOnNilArg0: true,
+ unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+ },
+
+ // large zeroing
+ // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg1 = address of the last 16-byte unit to zero
+ // arg2 = mem
+ // returns mem
+ // STP.P (ZR,ZR), 16(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // Note: the-end-of-the-memory may be not a valid pointer. it's a problem if it is spilled.
+ // the-end-of-the-memory - 16 is with the area to zero, ok to spill.
+ {
+ name: "LoweredZero",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R16"), gp},
+ clobbers: buildReg("R16"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ // R20, R21 changed as side effect
+ // R16 and R17 may be clobbered by linker trampoline.
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R21"), buildReg("R20")},
+ clobbers: buildReg("R16 R17 R20 R21 R26 R30"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // large move
+ // arg0 = address of dst memory (in R17 aka arm64.REGRT2, changed as side effect)
+ // arg1 = address of src memory (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // LDP.P 16(R16), (R25, Rtmp)
+ // STP.P (R25, Rtmp), 16(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // Note: the-end-of-src may be not a valid pointer. it's a problem if it is spilled.
+ // the-end-of-src - 16 is within the area to copy, ok to spill.
+ {
+ name: "LoweredMove",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R17"), buildReg("R16"), gp &^ buildReg("R25")},
+ clobbers: buildReg("R16 R17 R25"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R26 (arm64.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // Constant flag value.
+ // Note: there's an "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // This op is for temporary use by rewrite rules. It
+ // cannot appear in the generated assembly.
+ {name: "FlagConstant", aux: "FlagConstant"},
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // atomic loads.
+ // load from arg0. arg1=mem. auxint must be zero.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LDAR", argLength: 2, reg: gpload, asm: "LDAR", faultOnNilArg0: true},
+ {name: "LDARB", argLength: 2, reg: gpload, asm: "LDARB", faultOnNilArg0: true},
+ {name: "LDARW", argLength: 2, reg: gpload, asm: "LDARW", faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory. auxint must be zero.
+ {name: "STLRB", argLength: 3, reg: gpstore, asm: "STLRB", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "STLR", argLength: 3, reg: gpstore, asm: "STLR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "STLRW", argLength: 3, reg: gpstore, asm: "STLRW", faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // STLXR Rarg1, (Rarg0), Rtmp
+ // CBNZ Rtmp, -2(PC)
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic exchange variant.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
+ // SWPALD Rarg1, (Rarg0), Rout
+ {name: "LoweredAtomicExchange64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add variant.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDADDAL (Rarg0), Rarg1, Rout
+ // ADD Rarg1, Rout
+ {name: "LoweredAtomicAdd64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAdd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // LDAXR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STLXR Rarg2, (Rarg0), Rtmp
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap variant.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // MOV Rarg1, Rtmp
+ // CASAL Rtmp, (Rarg0), Rarg2
+ // CMP Rarg1, Rtmp
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas32Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // AND/OR Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or variant.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // AND:
+ // MNV Rarg1, Rtemp
+ // LDANDALB Rtemp, (Rarg0), Rout
+ // AND Rarg1, Rout
+ // OR:
+ // LDORALB Rarg1, (Rarg0), Rout
+ // ORR Rarg1, Rout
+ {name: "LoweredAtomicAnd8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers R30 (LR) because it's a call.
+ // R16 and R17 may be clobbered by linker trampoline.
+ // Returns a pointer to a write barrier buffer in R25.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Prefetch instruction
+ // Do prefetch arg0 address with option aux. arg0=addr, arg1=memory, aux=option.
+ {name: "PRFM", argLength: 2, aux: "Int64", reg: prefreg, asm: "PRFM", hasSideEffects: true},
+
+ // Publication barrier
+ {name: "DMB", argLength: 1, aux: "Int64", asm: "DMB", hasSideEffects: true}, // Do data barrier. arg0=memory, aux=option.
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "Z", controls: 1}, // Control == 0 (take a register instead of flags)
+ {name: "NZ", controls: 1}, // Control != 0
+ {name: "ZW", controls: 1}, // Control == 0, 32-bit
+ {name: "NZW", controls: 1}, // Control != 0, 32-bit
+ {name: "TBZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) == 0
+ {name: "TBNZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) != 0
+ {name: "FLT", controls: 1},
+ {name: "FLE", controls: 1},
+ {name: "FGT", controls: 1},
+ {name: "FGE", controls: 1},
+ {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow
+ {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow
+ {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow
+ {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow
+
+ // JUMPTABLE implements jump tables.
+ // Aux is the symbol (an *obj.LSym) for the jump table.
+ // control[0] is the index into the jump table.
+ // control[1] is the address of the jump table (the address of the symbol stored in Aux).
+ {name: "JUMPTABLE", controls: 2, aux: "Sym"},
+ }
+
+ archs = append(archs, arch{
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R30"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules b/src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules
new file mode 100644
index 0000000..d0c2099
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules used by the laterLower pass.
+// These are often the exact inverse of rules in ARM64.rules.
+
+(ADDconst [c] x) && !isARM64addcon(c) => (ADD x (MOVDconst [c]))
+(SUBconst [c] x) && !isARM64addcon(c) => (SUB x (MOVDconst [c]))
+(ANDconst [c] x) && !isARM64bitcon(uint64(c)) => (AND x (MOVDconst [c]))
+(ORconst [c] x) && !isARM64bitcon(uint64(c)) => (OR x (MOVDconst [c]))
+(XORconst [c] x) && !isARM64bitcon(uint64(c)) => (XOR x (MOVDconst [c]))
+(TSTconst [c] x) && !isARM64bitcon(uint64(c)) => (TST x (MOVDconst [c]))
+(TSTWconst [c] x) && !isARM64bitcon(uint64(c)|uint64(c)<<32) => (TSTW x (MOVDconst [int64(c)]))
+
+(CMPconst [c] x) && !isARM64addcon(c) => (CMP x (MOVDconst [c]))
+(CMPWconst [c] x) && !isARM64addcon(int64(c)) => (CMPW x (MOVDconst [int64(c)]))
+(CMNconst [c] x) && !isARM64addcon(c) => (CMN x (MOVDconst [c]))
+(CMNWconst [c] x) && !isARM64addcon(int64(c)) => (CMNW x (MOVDconst [int64(c)]))
+
+(ADDSconstflags [c] x) && !isARM64addcon(c) => (ADDSflags x (MOVDconst [c]))
diff --git a/src/cmd/compile/internal/ssa/_gen/ARMOps.go b/src/cmd/compile/internal/ssa/_gen/ARMOps.go
new file mode 100644
index 0000000..39d2469
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/ARMOps.go
@@ -0,0 +1,600 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R11).
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+var regNamesARM = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "g", // aka R10
+ "R11", // tmp
+ "R12",
+ "SP", // aka R13
+ "R14", // link
+ "R15", // pc
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15", // tmp
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r0 = buildReg("R0")
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}}
+ gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
+ gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32"}, // arg0 - auxInt
+ {name: "RSB", argLength: 2, reg: gp21, asm: "RSB"}, // arg1 - arg0
+ {name: "RSBconst", argLength: 1, reg: gp11, asm: "RSB", aux: "Int32"}, // auxInt - arg0
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+
+ // udiv runtime call for soft division
+ // output0 = arg0/arg1, output1 = arg0%arg1
+ // see ../../../../../runtime/vlop_arm.s
+ {
+ name: "CALLudiv",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ outputs: []regMask{buildReg("R0"), buildReg("R1")},
+ clobbers: buildReg("R2 R3 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
+ },
+ clobberFlags: true,
+ typ: "(UInt32,UInt32)",
+ call: false, // TODO(mdempsky): Should this be true?
+ },
+
+ {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
+ {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
+ {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
+ {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
+ {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
+ {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
+ {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
+ {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
+ {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
+ {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
+
+ {name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1
+ {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2
+ {name: "MULS", argLength: 3, reg: gp31, asm: "MULS"}, // arg2 - arg0 * arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "NMULF", argLength: 2, reg: fp21, asm: "NMULF", commutative: true}, // -(arg0 * arg1)
+ {name: "NMULD", argLength: 2, reg: fp21, asm: "NMULD", commutative: true}, // -(arg0 * arg1)
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "MULAF", argLength: 3, reg: fp31, asm: "MULAF", resultInArg0: true}, // arg0 + (arg1 * arg2)
+ {name: "MULAD", argLength: 3, reg: fp31, asm: "MULAD", resultInArg0: true}, // arg0 + (arg1 * arg2)
+ {name: "MULSF", argLength: 3, reg: fp31, asm: "MULSF", resultInArg0: true}, // arg0 - (arg1 * arg2)
+ {name: "MULSD", argLength: 3, reg: fp31, asm: "MULSD", resultInArg0: true}, // arg0 - (arg1 * arg2)
+
+ // FMULAD only exists on platforms with the VFPv4 instruction set.
+ // Any use must be preceded by a successful check of runtime.arm_support_vfpv4.
+ {name: "FMULAD", argLength: 3, reg: fp31, asm: "FMULAD", resultInArg0: true}, // arg0 + (arg1 * arg2)
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int32"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt
+
+ // bit extraction, AuxInt = Width<<8 | LSB
+ {name: "BFX", argLength: 1, reg: gp11, asm: "BFX", aux: "Int32"}, // extract W bits from bit L in arg0, then signed extend
+ {name: "BFXU", argLength: 1, reg: gp11, asm: "BFXU", aux: "Int32"}, // extract W bits from bit L in arg0, then unsigned extend
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+ {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64
+
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero
+ {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // reverse byte order
+ {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // reverse byte order in 16-bit halfwords
+ {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // reverse bit order
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, 0 <= auxInt < 32
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned, 0 <= auxInt < 32
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, 0 <= auxInt < 32
+ {name: "SRR", argLength: 2, reg: gp21}, // arg0 right rotate by arg1 bits
+ {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits, 0 <= auxInt < 32
+
+ // auxInt for all of these satisfy 0 <= auxInt < 32
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0
+ {name: "RSBshiftRL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift
+ {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "XORshiftRR", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ (arg1 right rotate by auxInt)
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift
+
+ {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<<auxInt + carry, arg2=flags
+ {name: "ADCshiftRL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, unsigned shift, arg2=flags
+ {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags
+ {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<<auxInt - carry, arg2=flags
+ {name: "SBCshiftRL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, unsigned shift, arg2=flags
+ {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags
+ {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<<auxInt - arg0 - carry, arg2=flags
+ {name: "RSCshiftRL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, unsigned shift, arg2=flags
+ {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags
+
+ {name: "ADDSshiftLL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt, set carry flag
+ {name: "ADDSshiftRL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift, set carry flag
+ {name: "ADDSshiftRA", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag
+ {name: "SUBSshiftLL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt, set carry flag
+ {name: "SUBSshiftRL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift, set carry flag
+ {name: "SUBSshiftRA", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag
+ {name: "RSBSshiftLL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0, set carry flag
+ {name: "RSBSshiftRL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRA", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag
+
+ {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<<arg2
+ {name: "ADDshiftRLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift
+ {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift
+ {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<<arg2
+ {name: "SUBshiftRLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift
+ {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift
+ {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<<arg2 - arg0
+ {name: "RSBshiftRLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift
+ {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift
+ {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<<arg2)
+ {name: "ANDshiftRLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), unsigned shift
+ {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift
+ {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1<<arg2
+ {name: "ORshiftRLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, unsigned shift
+ {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, signed shift
+ {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<<arg2
+ {name: "XORshiftRLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, unsigned shift
+ {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift
+ {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<<arg2)
+ {name: "BICshiftRLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), unsigned shift
+ {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift
+ {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<<arg1)
+ {name: "MVNshiftRLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), unsigned shift
+ {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift
+
+ {name: "ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<<arg2 + carry, arg3=flags
+ {name: "ADCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, unsigned shift, arg3=flags
+ {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags
+ {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<<arg2 - carry, arg3=flags
+ {name: "SBCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, unsigned shift, arg3=flags
+ {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags
+ {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<<arg2 - arg0 - carry, arg3=flags
+ {name: "RSCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, unsigned shift, arg3=flags
+ {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags
+
+ {name: "ADDSshiftLLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1<<arg2, set carry flag
+ {name: "ADDSshiftRLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift, set carry flag
+ {name: "ADDSshiftRAreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag
+ {name: "SUBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1<<arg2, set carry flag
+ {name: "SUBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift, set carry flag
+ {name: "SUBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag
+ {name: "RSBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1<<arg2 - arg0, set carry flag
+ {name: "RSBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<63
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TEQ", argLength: 2, reg: gp2flags, asm: "TEQ", typ: "Flags", commutative: true}, // arg0 ^ arg1 compare to 0
+ {name: "TEQconst", argLength: 1, reg: gp1flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ auxInt compare to 0
+ {name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1<<auxInt)
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), unsigned shift
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), signed shift
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1<<auxInt) compare to 0
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, signed shift
+ {name: "TEQshiftLL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1<<auxInt) compare to 0
+ {name: "TEQshiftRL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TEQshiftRA", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, signed shift
+
+ {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2
+ {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift
+ {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift
+ {name: "CMNshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1<<arg2) compare to 0
+ {name: "CMNshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, unsigned shift
+ {name: "CMNshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, signed shift
+ {name: "TSTshiftLLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1<<arg2) compare to 0
+ {name: "TSTshiftRLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, unsigned shift
+ {name: "TSTshiftRAreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, signed shift
+ {name: "TEQshiftLLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1<<arg2) compare to 0
+ {name: "TEQshiftRLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, unsigned shift
+ {name: "TEQshiftRAreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, signed shift
+
+ {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "UInt32"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1<<auxInt. arg2=mem
+ {name: "MOVWloadshiftRL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, unsigned shift. arg2=mem
+ {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem
+ {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load from arg0 + arg1. arg2=mem
+
+ {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1<<auxInt. arg3=mem
+ {name: "MOVWstoreshiftRL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, unsigned shift. arg3=mem
+ {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem
+ {name: "MOVBstoreidx", argLength: 4, reg: gp2store, asm: "MOVB", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVHstoreidx", argLength: 4, reg: gp2store, asm: "MOVH", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVWUF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // uint32 -> float32, set U bit in the instruction
+ {name: "MOVWUD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // uint32 -> float64, set U bit in the instruction
+ {name: "MOVFW", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> int32
+ {name: "MOVDW", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> int32
+ {name: "MOVFWU", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> uint32, set U bit in the instruction
+ {name: "MOVDWU", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> uint32, set U bit in the instruction
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // conditional instructions, for lowering shifts
+ {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags
+ {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags
+ {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+
+ // duffzero (must be 4-byte aligned)
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = value to store (always zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ clobbers: buildReg("R1 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
+ },
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy (must be 4-byte aligned)
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R0 R1 R2 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = value to store (always zero)
+ // arg3 = mem
+ // returns mem
+ // MOVW.P Rarg2, 4(R1)
+ // CMP R1, Rarg1
+ // BLE -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp, gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP R1, Rarg2
+ // BLE -3(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R7 (arm.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r0, r1}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+
+ // Constant flag value.
+ // Note: there's an "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // This op is for temporary use by rewrite rules. It
+ // cannot appear in the generated assembly.
+ {name: "FlagConstant", aux: "FlagConstant"},
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers R14 (LR) because it's a call, and R12 which is linker trampoline scratch register.
+ // Returns a pointer to a write barrier buffer in R8.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R12 R14"), outputs: []regMask{buildReg("R8")}}, clobberFlags: true, aux: "Int64"}}
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow
+ {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow
+ {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow
+ {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow
+ }
+
+ archs = append(archs, arch{
+ name: "ARM",
+ pkg: "cmd/internal/obj/arm",
+ genfile: "../../arm/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R14"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
new file mode 100644
index 0000000..2af9519
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -0,0 +1,664 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Mul(64|32|16|8) ...) => (MULV ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Select0 (Mul64uhilo x y)) => (MULHVU x y)
+(Select1 (Mul64uhilo x y)) => (MULV x y)
+(Select0 (Mul64uover x y)) => (MULV x y)
+(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))
+
+(Hmul64 ...) => (MULHV ...)
+(Hmul64u ...) => (MULHVU ...)
+(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
+(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
+
+(Div64 x y) => (DIVV x y)
+(Div64u ...) => (DIVVU ...)
+(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y))
+(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y))
+(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y))
+(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod64 x y) => (REMV x y)
+(Mod64u ...) => (REMVU ...)
+(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y))
+(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y))
+(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y))
+(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))
+
+(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
+(Select1 <t> (Add64carry x y c)) =>
+ (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+
+(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
+(Select1 <t> (Sub64borrow x y c)) =>
+ (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// shifts
+// hardware instruction uses only the low 6 bits of the shift
+// we compare to 64 to ensure Go semantics for large shifts
+(Lsh64x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh64x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh32x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh16x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Lsh8x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh64Ux32 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh32Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh16Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+(Rsh8Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+// rotates
+(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+(RotateLeft32 x y) => (ROTR x (NEGV <y.Type> y))
+(RotateLeft64 x y) => (ROTRV x (NEGV <y.Type> y))
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEGV ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVVconst [0])
+(ConstBool [t]) => (MOVVconst [int64(b2i(t))])
+
+(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt64to32F ...) => (MOVVF ...)
+(Cvt64to64F ...) => (MOVVD ...)
+(Cvt32Fto32 ...) => (TRUNCFW ...)
+(Cvt64Fto32 ...) => (TRUNCDW ...)
+(Cvt32Fto64 ...) => (TRUNCFV ...)
+(Cvt64Fto64 ...) => (TRUNCDV ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
+(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
+(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
+(Less64 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+(Less64U x y) => (SGTU y x)
+
+(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDVconst [off] ptr)
+
+(Addr {sym} base) => (MOVVaddr {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVVconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVVconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVVconst [0])
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore ptr (MOVVconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVVconst [0])
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVVconst [0])
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] ptr (MOVVconst [0])
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+
+// medium zeroing uses a duff device
+// 8, and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s > 24 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore dst (MOVVload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] dst (MOVVload [16] src mem)
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem)))
+
+// medium move uses a duff device
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+// 16 and 128 are magic constants. 16 is the number of bytes to encode:
+// MOVV (R1), R23
+// ADDV $8, R1
+// MOVV R23, (R2)
+// ADDV $8, R2
+// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy.
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
+
+(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE (MOVBUreg <typ.UInt64> cond) yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
+(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+(MOVBUreg x:((SGT|SGTU) _ _)) => x
+
+// fold offset into address
+(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+
+// fold address into load/store
+// Do not fold global variable access in -dynlink mode, where it will be rewritten
+// to use the GOT via REGTMP, which currently cannot handle large offset.
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)
+
+(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)
+
+(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem)
+
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+
+(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
+ (MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
+(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting instruction
+// MOVVnop doesn't emit instruction, only for ensuring the type.
+(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
+
+// fold constant into arithmetic ops
+(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
+(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
+(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
+(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
+(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
+(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
+
+(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
+(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
+(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
+(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
+(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31])
+(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])
+
+(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
+(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)
+
+// mul by constant
+(MULV x (MOVVconst [-1])) => (NEGV x)
+(MULV _ (MOVVconst [0])) => (MOVVconst [0])
+(MULV x (MOVVconst [1])) => x
+(MULV x (MOVVconst [c])) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
+
+// div by constant
+(DIVVU x (MOVVconst [1])) => x
+(DIVVU x (MOVVconst [c])) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
+(REMVU _ (MOVVconst [1])) => (MOVVconst [0]) // mod
+(REMVU x (MOVVconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
+
+// generic simplifications
+(ADDV x (NEGV y)) => (SUBV x y)
+(SUBV x x) => (MOVVconst [0])
+(SUBV (MOVVconst [0]) x) => (NEGV x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVVconst [0])
+
+// remove redundant *const ops
+(ADDVconst [0] x) => x
+(SUBVconst [0] x) => x
+(ANDconst [0] _) => (MOVVconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVVconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0])
+(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0])
+(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0])
+(MASKEQZ x (MOVVconst [c])) && c != 0 => x
+
+// generic constant folding
+(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
+(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d])
+(DIVV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d])
+(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
+(REMV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d]) // mod
+(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) => (MOVVconst [-c])
+(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
+(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
+(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
+(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
+(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
+(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
+
+// constant comparisons
+(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
+(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+
+// absorb constants into branches
+(EQ (MOVVconst [0]) yes no) => (First yes no)
+(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVVconst [0]) yes no) => (First no yes)
+(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+
+// SGT/SGTU with known outcomes.
+(SGT x x) => (MOVVconst [0])
+(SGTU x x) => (MOVVconst [0])
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
new file mode 100644
index 0000000..3fbf5be
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go
@@ -0,0 +1,486 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// V (vlong) = 64 bit
+// WU (word) = 32 bit unsigned
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64
+// Be careful when hand coding regmasks.
+var regNamesLOONG64 = []string{
+ "R0", // constant 0
+ "R1",
+ "SP", // aka R3
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "g", // aka R22
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ "R27",
+ "R28",
+ "R29",
+ // R30 is REGTMP not used in regalloc
+ "R31",
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesLOONG64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesLOONG64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R22 is g, R30 is REGTMP
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R20")
+ r2 = buildReg("R21")
+ r3 = buildReg("R23")
+ r4 = buildReg("R24")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
+ {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
+ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
+ {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
+
+ {name: "MULV", argLength: 2, reg: gp21, asm: "MULV", commutative: true, typ: "Int64"}, // arg0 * arg1
+ {name: "MULHV", argLength: 2, reg: gp21, asm: "MULHV", commutative: true, typ: "Int64"}, // (arg0 * arg1) >> 64, signed
+ {name: "MULHVU", argLength: 2, reg: gp21, asm: "MULHVU", commutative: true, typ: "UInt64"}, // (arg0 * arg1) >> 64, unsigned
+ {name: "DIVV", argLength: 2, reg: gp21, asm: "DIVV", typ: "Int64"}, // arg0 / arg1, signed
+ {name: "DIVVU", argLength: 2, reg: gp21, asm: "DIVVU", typ: "UInt64"}, // arg0 / arg1, unsigned
+ {name: "REMV", argLength: 2, reg: gp21, asm: "REMV", typ: "Int64"}, // arg0 / arg1, signed
+ {name: "REMVU", argLength: 2, reg: gp21, asm: "REMVU", typ: "UInt64"}, // arg0 / arg1, unsigned
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
+
+ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+
+ {name: "MASKEQZ", argLength: 2, reg: gp21, asm: "MASKEQZ"}, // returns 0 if arg1 == 0, otherwise returns arg0
+ {name: "MASKNEZ", argLength: 2, reg: gp21, asm: "MASKNEZ"}, // returns 0 if arg1 != 0, otherwise returns arg0
+
+ // shifts
+ {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt
+ {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned
+ {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed
+ {name: "ROTR", argLength: 2, reg: gp21, asm: "ROTR"}, // arg0 right rotate by (arg1 mod 32) bits
+ {name: "ROTRV", argLength: 2, reg: gp21, asm: "ROTRV"}, // arg0 right rotate by (arg1 mod 64) bits
+ {name: "ROTRconst", argLength: 1, reg: gp11, asm: "ROTR", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits, auxInt should be in the range 0 to 31.
+ {name: "ROTRVconst", argLength: 1, reg: gp11, asm: "ROTRV", aux: "Int64"}, // arg0 right rotate by auxInt bits, auxInt should be in the range 0 to 63.
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem.
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0
+
+ {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32
+ {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64
+ {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R20 aka loong64.REGRT1 changed as side effect
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20 R1"),
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R21"), buildReg("R20")},
+ clobbers: buildReg("R20 R21 R1"),
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R20, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // MOVx R0, (R20)
+ // ADDV $sz, R20
+ // BGEU Rarg1, R20, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), gp},
+ clobbers: buildReg("R20"),
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // MOVx (R20), Rtmp
+ // MOVx Rtmp, (R21)
+ // ADDV $sz, R20
+ // ADDV $sz, R21
+ // BGEU Rarg2, R20, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R21"), buildReg("R20"), gp},
+ clobbers: buildReg("R20 R21"),
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ // store zero to arg0. arg1=mem. returns memory.
+ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // DBAR
+ // LL (Rarg0), Rout
+ // MOVV Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // DBAR
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // DBAR
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // DBAR
+ // ADDV Rarg1, Rout
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ // *arg0 += auxint. arg1=mem. returns <new content of *arg0, memory>. auxint is 32-bit.
+ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // DBAR
+ // MOVV $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // DBAR
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (loong64.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R29")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers R1 (LR) because it's a call
+ // and R30 (REGTMP).
+ // Returns a pointer to a write barrier buffer in R29.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R1"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "LOONG64",
+ pkg: "cmd/internal/obj/loong64",
+ genfile: "../../loong64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesLOONG64,
+ // TODO: support register ABI on loong64
+ ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R1"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
new file mode 100644
index 0000000..d6ae010
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
@@ -0,0 +1,716 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Select0 (Add32carry <t> x y)) => (ADD <t.FieldType(0)> x y)
+(Select1 (Add32carry <t> x y)) => (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+(Add32withcarry <t> x y c) => (ADD c (ADD <t> x y))
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Select0 (Sub32carry <t> x y)) => (SUB <t.FieldType(0)> x y)
+(Select1 (Sub32carry <t> x y)) => (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+(Sub32withcarry <t> x y c) => (SUB (SUB <t> x y) c)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+
+(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y))
+(Mul32uhilo ...) => (MULTU ...)
+
+(Div32 x y) => (Select1 (DIV x y))
+(Div32u x y) => (Select1 (DIVU x y))
+(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod32 x y) => (Select0 (DIV x y))
+(Mod32u x y) => (Select0 (DIVU x y))
+(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+
+// math package intrinsics
+(Abs ...) => (ABSD ...)
+
+// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+
+// shifts
+// hardware instruction uses only the low 5 bits of the shift
+// we compare to 32 to ensure Go semantics for large shifts
+(Lsh32x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh32x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh32x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Lsh16x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh16x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh16x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Lsh8x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh8x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh8x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh32Ux32 <t> x y) => (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh32Ux16 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh32Ux8 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh16Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh16Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh16Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh8Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh8Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh8Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh32x32 x y) => (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh32x16 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+(Rsh8x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh8x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+// rotates
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVWconst [c])) => (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+
+// unary ops
+(Neg(32|16|8) ...) => (NEG ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(32|16|8) x) => (NORconst [0] x)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+
+// TODO: optimize this case?
+(Ctz32NonZero ...) => (Ctz32 ...)
+
+// count trailing zero
+// 32 - CLZ(x&-x - 1)
+(Ctz32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
+
+// bit length
+(BitLen32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> x))
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// constants
+(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F ...) => (MOV(F|D)const ...)
+(ConstNil) => (MOVWconst [0])
+(ConstBool [t]) => (MOVWconst [b2i32(t)])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (NEG (SGTU x (MOVWconst [0])))
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [31])
+
+// float-int conversion
+(Cvt32to(32|64)F ...) => (MOVW(F|D) ...)
+(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (SGTUconst [1] (XOR x y))
+(EqPtr x y) => (SGTUconst [1] (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x))
+(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x))
+(Less32 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+(Less32U x y) => (SGTU y x)
+
+(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (XORconst [1] (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (XORconst [1] (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVWaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)
+
+// float <=> int register moves, with no conversion.
+// These come up when compiling math.{Float32bits, Float32frombits}.
+(MOVWload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (MOVWfpgp val)
+(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)
+
+// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set.
+(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
+(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
+
+// zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVWconst [0])
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [12] ptr (MOVWconst [0])
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem))))
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 16 || t.Alignment()%4 != 0) =>
+ (LoweredZero [int32(t.Alignment())]
+ ptr
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [12] dst (MOVWload [12] src mem)
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))))
+
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) =>
+ (LoweredMove [int32(t.Alignment())]
+ dst
+ src
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...)
+
+(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...)
+
+(AtomicExchange32 ...) => (LoweredAtomicExchange ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd ...)
+
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
+(AtomicOr8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
+(AtomicAnd8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr)))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))))) mem)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
+(AtomicOr8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
+(AtomicAnd8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr))))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))))) mem)
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd ...)
+(AtomicOr32 ...) => (LoweredAtomicOr ...)
+
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVWconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XORconst [1] (SGTU idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE cond yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTzero _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUzero _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTzero _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTUzero x) yes no) => (NE x yes no)
+(EQ (SGTUzero x) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGTzero x) yes no) => (GTZ x yes no)
+(EQ (SGTzero x) yes no) => (LEZ x yes no)
+
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDstore [off1+off2] {sym} ptr val mem)
+
+(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting instruction
+// MOVWnop doesn't emit instruction, only for ensuring the type.
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+
+// TODO: we should be able to get rid of MOVWnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
+// fold constant into arithmetic ops
+(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(NOR x (MOVWconst [c])) => (NORconst [c] x)
+
+(SLL x (MOVWconst [c])) => (SLLconst x [c&31])
+(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
+(SRA x (MOVWconst [c])) => (SRAconst x [c&31])
+
+(SGT (MOVWconst [c]) x) => (SGTconst [c] x)
+(SGTU (MOVWconst [c]) x) => (SGTUconst [c] x)
+(SGT x (MOVWconst [0])) => (SGTzero x)
+(SGTU x (MOVWconst [0])) => (SGTUzero x)
+
+// mul with constant
+(Select1 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select0 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [1]) x )) => x
+(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
+(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
+
+(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
+(MUL (MOVWconst [1]) x ) => x
+(MUL (MOVWconst [-1]) x ) => (NEG x)
+(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVWconst [0])
+(SUB (MOVWconst [0]) x) => (NEG x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
+
+// miscellaneous patterns generated by dec64
+(AND (SGTUconst [1] x) (SGTUconst [1] y)) => (SGTUconst [1] (OR <x.Type> x y))
+(OR (SGTUzero x) (SGTUzero y)) => (SGTUzero (OR <x.Type> x y))
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVWconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+
+// generic constant folding
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint32(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint32(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint32(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)*uint32(d))])
+(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c/d])
+(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c%d])
+(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
+(NEG (MOVWconst [c])) => (MOVWconst [-c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
+
+// constant comparisons
+(SGTconst [c] (MOVWconst [d])) && c > d => (MOVWconst [1])
+(SGTconst [c] (MOVWconst [d])) && c <= d => (MOVWconst [0])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) => (MOVWconst [1])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) => (MOVWconst [0])
+(SGTzero (MOVWconst [d])) && d > 0 => (MOVWconst [1])
+(SGTzero (MOVWconst [d])) && d <= 0 => (MOVWconst [0])
+(SGTUzero (MOVWconst [d])) && d != 0 => (MOVWconst [1])
+(SGTUzero (MOVWconst [d])) && d == 0 => (MOVWconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVWconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVWconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVWconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVWconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVWconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (SRLconst _ [d])) && 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+
+// absorb constants into branches
+(EQ (MOVWconst [0]) yes no) => (First yes no)
+(EQ (MOVWconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVWconst [0]) yes no) => (First no yes)
+(NE (MOVWconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVWconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVWconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVWconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVWconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVWconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVWconst [c]) yes no) && c < 0 => (First no yes)
+
+// conditional move
+(CMOVZ _ f (MOVWconst [0])) => f
+(CMOVZ a _ (MOVWconst [c])) && c!=0 => a
+(CMOVZzero _ (MOVWconst [0])) => (MOVWconst [0])
+(CMOVZzero a (MOVWconst [c])) && c!=0 => a
+(CMOVZ a (MOVWconst [0]) c) => (CMOVZzero a c)
+
+// atomic
+(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) => (LoweredAtomicStorezero ptr mem)
+(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(int64(c)) => (LoweredAtomicAddconst [c] ptr mem)
+
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
new file mode 100644
index 0000000..cabc7c6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
@@ -0,0 +1,817 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Mul64uhilo ...) => (MULVU ...)
+(Select0 (Mul64uover x y)) => (Select1 <typ.UInt64> (MULVU x y))
+(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
+
+(Hmul64 x y) => (Select0 (MULV x y))
+(Hmul64u x y) => (Select0 (MULVU x y))
+(Hmul32 x y) => (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) => (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+
+(Div64 x y) => (Select1 (DIVV x y))
+(Div64u x y) => (Select1 (DIVVU x y))
+(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod64 x y) => (Select0 (DIVV x y))
+(Mod64u x y) => (Select0 (DIVVU x y))
+(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
+(Select1 <t> (Add64carry x y c)) =>
+ (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+
+(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
+(Select1 <t> (Sub64borrow x y c)) =>
+ (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+
+// math package intrinsics
+(Abs ...) => (ABSD ...)
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// shifts
+// hardware instruction uses only the low 6 bits of the shift
+// we compare to 64 to ensure Go semantics for large shifts
+(Lsh64x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+// rotates
+(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+(RotateLeft32 <t> x (MOVVconst [c])) => (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+(RotateLeft64 <t> x (MOVVconst [c])) => (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEGV ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVVconst [0])
+(ConstBool [t]) => (MOVVconst [int64(b2i(t))])
+
+(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt64to32F ...) => (MOVVF ...)
+(Cvt64to64F ...) => (MOVVD ...)
+(Cvt32Fto32 ...) => (TRUNCFW ...)
+(Cvt64Fto32 ...) => (TRUNCDW ...)
+(Cvt32Fto64 ...) => (TRUNCFV ...)
+(Cvt64Fto64 ...) => (TRUNCDV ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
+(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
+(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
+(Less64 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+(Less64U x y) => (SGTU y x)
+
+(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDVconst [off] ptr)
+
+(Addr {sym} base) => (MOVVaddr {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVVconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVVconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVVconst [0])
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore ptr (MOVVconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVVconst [0])
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVVconst [0])
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] ptr (MOVVconst [0])
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+
+// medium zeroing uses a duff device
+// 8, and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s > 24 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore dst (MOVVload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] dst (MOVVload [16] src mem)
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem)))
+
+// float <=> int register moves, with no conversion.
+// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
+(MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
+(MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
+(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
+(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)
+
+// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set.
+(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
+(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
+(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
+(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
+
+// medium move uses a duff device
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+// 16 and 128 are magic constants. 16 is the number of bytes to encode:
+// MOVV (R1), R23
+// ADDV $8, R1
+// MOVV R23, (R2)
+// ADDV $8, R2
+// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy.
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
+
+(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3,uint32(val) << ((ptr & 3) * 8))
+(AtomicOr8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicOr32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
+ (SLLV <typ.UInt32> (ZeroExt8to32 val)
+ (SLLVconst <typ.UInt64> [3]
+ (ANDconst <typ.UInt64> [3] ptr))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
+(AtomicAnd8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
+ (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val)
+ (SLLVconst <typ.UInt64> [3]
+ (ANDconst <typ.UInt64> [3] ptr)))
+ (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
+ (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
+ (ANDconst <typ.UInt64> [3] ptr))))) mem)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
+(AtomicOr8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicOr32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
+ (SLLV <typ.UInt32> (ZeroExt8to32 val)
+ (SLLVconst <typ.UInt64> [3]
+ (ANDconst <typ.UInt64> [3]
+ (XORconst <typ.UInt64> [3] ptr)))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
+(AtomicAnd8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
+ (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val)
+ (SLLVconst <typ.UInt64> [3]
+ (ANDconst <typ.UInt64> [3]
+ (XORconst <typ.UInt64> [3] ptr))))
+ (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
+ (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
+ (ANDconst <typ.UInt64> [3]
+ (XORconst <typ.UInt64> [3] ptr)))))) mem)
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE cond yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
+(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+
+// fold offset into address
+(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDload [off1+int32(off2)] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting instruction
+// MOVVnop doesn't emit instruction, only for ensuring the type.
+(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
+
+// TODO: we should be able to get rid of MOVVnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVVnop (MOVVconst [c])) => (MOVVconst [c])
+
+// fold constant into arithmetic ops
+(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
+(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
+(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
+(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
+(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
+(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
+
+(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
+(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
+(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
+(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
+
+(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
+(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)
+
+// mul by constant
+(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
+(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
+(Select1 (MULVU x (MOVVconst [1]))) => x
+(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
+
+// div by constant
+(Select1 (DIVVU x (MOVVconst [1]))) => x
+(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
+(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
+(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
+
+// generic simplifications
+(ADDV x (NEGV y)) => (SUBV x y)
+(SUBV x x) => (MOVVconst [0])
+(SUBV (MOVVconst [0]) x) => (NEGV x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVVconst [0])
+
+// remove redundant *const ops
+(ADDVconst [0] x) => x
+(SUBVconst [0] x) => x
+(ANDconst [0] _) => (MOVVconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVVconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+
+// generic constant folding
+(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
+(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
+(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d])
+(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
+(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod
+(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) => (MOVVconst [-c])
+(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
+(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
+(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
+(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
+(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
+(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
+(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
+(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)
+
+// constant comparisons
+(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
+(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+
+// absorb constants into branches
+(EQ (MOVVconst [0]) yes no) => (First yes no)
+(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVVconst [0]) yes no) => (First no yes)
+(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+
+// SGT/SGTU with known outcomes.
+(SGT x x) => (MOVVconst [0])
+(SGTU x x) => (MOVVconst [0])
+
+// fold readonly sym load
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go
new file mode 100644
index 0000000..08cab89
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go
@@ -0,0 +1,501 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// V (vlong) = 64 bit
+// WU (word) = 32 bit unsigned
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64
+// Be careful when hand coding regmasks.
+var regNamesMIPS64 = []string{
+ "R0", // constant 0
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ // R23 = REGTMP not used in regalloc
+ "R24",
+ "R25",
+ // R26 reserved by kernel
+ // R27 reserved by kernel
+ // R28 = REGSB not used in regalloc
+ "SP", // aka R29
+ "g", // aka R30
+ "R31", // aka REGLINK
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ "HI", // high bits of multiplication
+ "LO", // low bits of multiplication
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesMIPS64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesMIPS64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ lo = buildReg("LO")
+ hi = buildReg("HI")
+ callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ //fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
+ {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
+ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
+ {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
+ {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed, results hi,lo
+ {name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true, typ: "(UInt64,UInt64)"}, // arg0 * arg1, unsigned, results hi,lo
+ {name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV", typ: "(Int64,Int64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+ {name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU", typ: "(UInt64,UInt64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
+
+ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+
+ // shifts
+ {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt
+ {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned
+ {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem.
+
+ // moves (no conversion)
+ {name: "MOVWfpgp", argLength: 1, reg: fpgp, asm: "MOVW"}, // move float32 to int32 (no conversion). MIPS64 will perform sign-extend to 64-bit by default
+ {name: "MOVWgpfp", argLength: 1, reg: gpfp, asm: "MOVW"}, // move int32 to float32 (no conversion). MIPS64 will perform sign-extend to 64-bit by default
+ {name: "MOVVfpgp", argLength: 1, reg: fpgp, asm: "MOVV"}, // move float64 to int64 (no conversion).
+ {name: "MOVVgpfp", argLength: 1, reg: gpfp, asm: "MOVV"}, // move int64 to float64 (no conversion).
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0
+
+ {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32
+ {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64
+ {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R1 aka mips.REGRT1 changed as side effect
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ clobbers: buildReg("R1 R31"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R1 R2 R31"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBV $8, R1
+ // MOVV R0, 8(R1)
+ // ADDV $8, R1
+ // BNE Rarg1, R1, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBV $8, R1
+ // MOVV 8(R1), Rtmp
+ // MOVV Rtmp, (R2)
+ // ADDV $8, R1
+ // ADDV $8, R2
+ // BNE Rarg2, R1, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory.
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ // store zero to arg0. arg1=mem. returns memory.
+ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // MOVV Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV Rarg1, Rout
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ // *arg0 += auxint. arg1=mem. returns <new content of *arg0, memory>. auxint is 32-bit.
+ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // MOVV $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (mips.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers R31 (LR) because it's a call
+ // and R23 (REGTMP).
+ // Returns a pointer to a write barrier buffer in R25.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R31"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "MIPS64",
+ pkg: "cmd/internal/obj/mips",
+ genfile: "../../mips64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesMIPS64,
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: hi | lo,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R31"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPSOps.go b/src/cmd/compile/internal/ssa/_gen/MIPSOps.go
new file mode 100644
index 0000000..5964bb7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/MIPSOps.go
@@ -0,0 +1,447 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Unused portions of AuxInt are filled by sign-extending the used portion.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64
+// Be careful when hand coding regmasks.
+var regNamesMIPS = []string{
+ "R0", // constant 0
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ //REGTMP
+ "R24",
+ "R25",
+ // R26 reserved by kernel
+ // R27 reserved by kernel
+ "R28",
+ "SP", // aka R29
+ "g", // aka R30
+ "R31", // REGLINK
+
+ // odd FP registers contain high parts of 64-bit FP values
+ "F0",
+ "F2",
+ "F4",
+ "F6",
+ "F8",
+ "F10",
+ "F12",
+ "F14",
+ "F16",
+ "F18",
+ "F20",
+ "F22",
+ "F24",
+ "F26",
+ "F28",
+ "F30",
+
+ "HI", // high bits of multiplication
+ "LO", // low bits of multiplication
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesMIPS) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesMIPS {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30")
+ lo = buildReg("LO")
+ hi = buildReg("HI")
+ callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ r5 = buildReg("R5")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADDU", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADDU", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUBU"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUBU", aux: "Int32"}, // arg0 - auxInt
+ {name: "MUL", argLength: 2, reg: regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}, clobbers: hi | lo}, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULT", argLength: 2, reg: gp2hilo, asm: "MUL", commutative: true, typ: "(Int32,Int32)"}, // arg0 * arg1, signed, results hi,lo
+ {name: "MULTU", argLength: 2, reg: gp2hilo, asm: "MULU", commutative: true, typ: "(UInt32,UInt32)"}, // arg0 * arg1, unsigned, results hi,lo
+ {name: "DIV", argLength: 2, reg: gp2hilo, asm: "DIV", typ: "(Int32,Int32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+ {name: "DIVU", argLength: 2, reg: gp2hilo, asm: "DIVU", typ: "(UInt32,UInt32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt32"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", typ: "UInt32"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int32"}, // ^(arg0 | auxInt)
+
+ {name: "NEG", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, shift amount must be 0 through 31 inclusive
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 32
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, shift amount must be 0 through 31 inclusive
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 32
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, shift amount must be 0 through 31 inclusive
+
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"},
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTzero", argLength: 1, reg: gp11, asm: "SGT", typ: "Bool"}, // 1 if arg0 > 0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+ {name: "SGTUzero", argLength: 1, reg: gp11, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > 0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float32", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+
+ // moves (no conversion)
+ {name: "MOVWfpgp", argLength: 1, reg: fpgp, asm: "MOVW"}, // move float32 to int32 (no conversion)
+ {name: "MOVWgpfp", argLength: 1, reg: gpfp, asm: "MOVW"}, // move int32 to float32 (no conversion)
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ // conditional move on zero (returns arg1 if arg2 is 0, otherwise arg0)
+ // order of parameters is reversed so we can use resultInArg0 (OpCMOVZ result arg1 arg2-> CMOVZ arg2reg, arg1reg, resultReg)
+ {name: "CMOVZ", argLength: 3, reg: gp31, asm: "CMOVZ", resultInArg0: true},
+ {name: "CMOVZzero", argLength: 2, reg: regInfo{inputs: []regMask{gp, gpg}, outputs: []regMask{gp}}, asm: "CMOVZ", resultInArg0: true},
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // atomic ops
+
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ // SYNC
+ // MOV(B|W) (Rarg0), Rout
+ // SYNC
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // store arg1 to arg0. arg2=mem. returns memory.
+ // SYNC
+ // MOV(B|W) Rarg1, (Rarg0)
+ // SYNC
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // MOVW Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicExchange", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU Rarg1, Rout
+ {name: "LoweredAtomicAdd", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // MOVW $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVW Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ {name: "LoweredAtomicCas", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory.
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicAnd", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBU $4, R1
+ // MOVW R0, 4(R1)
+ // ADDU $4, R1
+ // BNE Rarg1, R1, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int32",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp},
+ clobbers: buildReg("R1"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBU $4, R1
+ // MOVW 4(R1), Rtmp
+ // MOVW Rtmp, (R2)
+ // ADDU $4, R1
+ // ADDU $4, R2
+ // BNE Rarg2, R1, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int32",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (mips.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers R31 (LR) because it's a call
+ // and R23 (REGTMP).
+ // Returns a pointer to a write barrier buffer in R25.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R31"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r3, r4}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "MIPS",
+ pkg: "cmd/internal/obj/mips",
+ genfile: "../../mips/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesMIPS,
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: hi | lo,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R31"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
new file mode 100644
index 0000000..c9cd34b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -0,0 +1,1018 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GOPPC64 values indicate power8, power9, etc.
+// That means the code is compiled for that target,
+// and will not run on earlier targets.
+//
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add64F ...) => (FADD ...)
+(Add32F ...) => (FADDS ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUB ...)
+
+// Combine 64 bit integer multiply and adds
+(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
+
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod64 x y) && buildcfg.GOPPC64 >=9 => (MODSD x y)
+(Mod64 x y) && buildcfg.GOPPC64 <=8 => (SUB x (MULLD y (DIVD x y)))
+(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
+(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
+(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
+(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
+(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
+(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Select0 (Mul64uhilo x y)) => (MULHDU x y)
+(Select1 (Mul64uhilo x y)) => (MULLD x y)
+
+(Div64 [false] x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (DIVWU ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
+
+(Mul(32|64)F ...) => ((FMULS|FMUL) ...)
+
+(Div(32|64)F ...) => ((FDIVS|FDIV) ...)
+
+// Lowering float <=> int
+(Cvt32to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD (SignExt32to64 x)))
+(Cvt64to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD x))
+
+(Cvt32Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
+(Cvt64Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
+
+(Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
+(Cvt64Fto32F ...) => (FRSP ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+(Sqrt ...) => (FSQRT ...)
+(Sqrt32 ...) => (FSQRTS ...)
+(Floor ...) => (FFLOOR ...)
+(Ceil ...) => (FCEIL ...)
+(Trunc ...) => (FTRUNC ...)
+(Round ...) => (FROUND ...)
+(Copysign x y) => (FCPSGN y x)
+(Abs ...) => (FABS ...)
+(FMA ...) => (FMADD ...)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
+
+(Trunc(16|32|64)to8 <t> x) && t.IsSigned() => (MOVBreg x)
+(Trunc(16|32|64)to8 x) => (MOVBZreg x)
+(Trunc(32|64)to16 <t> x) && t.IsSigned() => (MOVHreg x)
+(Trunc(32|64)to16 x) => (MOVHZreg x)
+(Trunc64to32 <t> x) && t.IsSigned() => (MOVWreg x)
+(Trunc64to32 x) => (MOVWZreg x)
+
+// Lowering constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [t]) => (MOVDconst [b2i(t)])
+
+// Carrying addition.
+(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1]))))
+(Select1 (Add64carry x y c)) => (ADDZEzero (Select1 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1])))))
+// Fold initial carry bit if 0.
+(ADDE x y (Select1 <typ.UInt64> (ADDCconst (MOVDconst [0]) [-1]))) => (ADDC x y)
+// Fold transfer of CA -> GPR -> CA. Note 2 uses when feeding into a chained Add64carry.
+(Select1 (ADDCconst n:(ADDZEzero x) [-1])) && n.Uses <= 2 => x
+
+// Borrowing subtraction.
+(Select0 (Sub64borrow x y c)) => (Select0 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))
+(Select1 (Sub64borrow x y c)) => (NEG (SUBZEzero (Select1 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))))
+// Fold initial borrow bit if 0.
+(SUBE x y (Select1 <typ.UInt64> (SUBCconst (MOVDconst [0]) [0]))) => (SUBC x y)
+// Fold transfer of CA -> GPR -> CA. Note 2 uses when feeding into a chained Sub64borrow.
+(Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0])) && n.Uses <= 2 => x
+
+// Constant folding
+(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
+(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
+(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
+(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
+(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
+
+// Rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft(32|64) ...) => ((ROTLW|ROTL) ...)
+
+// Constant rotate generation
+(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
+(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
+
+// Combine rotate and mask operations
+(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+
+// Note, any rotated word bitmask is still a valid word bitmask.
+(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+
+(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+
+(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+
+// Merge shift right + shift left and clear left (e.g for a table lookup)
+(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
+// The following reduction shows up frequently too. e.g b[(x>>14)&0xFF]
+(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+
+// large constant signed right shift, we leave the sign bit
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 => (SRAWconst (SignExt8to32 x) [63])
+
+// constant shifts
+((Lsh64|Rsh64|Rsh64U)x64 x (MOVDconst [c])) && uint64(c) < 64 => (S(L|RA|R)Dconst x [c])
+((Lsh32|Rsh32|Rsh32U)x64 x (MOVDconst [c])) && uint64(c) < 32 => (S(L|RA|R)Wconst x [c])
+((Rsh16|Rsh16U)x64 x (MOVDconst [c])) && uint64(c) < 16 => (SR(AW|W)const ((Sign|Zero)Ext16to32 x) [c])
+(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
+((Rsh8|Rsh8U)x64 x (MOVDconst [c])) && uint64(c) < 8 => (SR(AW|W)const ((Sign|Zero)Ext8to32 x) [c])
+(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
+
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD (MOVBreg x) y)
+
+// Unbounded shifts. Go shifts saturate to 0 or -1 when shifting beyond the number of
+// bits in a type, PPC64 shifts do not (see the ISA for details).
+//
+// Note, y is always non-negative.
+//
+// Note, ISELZ is intentionally not used in lower. Where possible, ISEL is converted to ISELZ in late lower
+// after all the ISEL folding rules have been exercised.
+
+((Rsh64U|Lsh64)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+((Rsh64U|Lsh64)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+((Rsh64U|Lsh64)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+((Rsh64U|Lsh64)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+(Rsh64x(64|32) <t> x y) => (ISEL [0] (SRAD <t> x y) (SRADconst <t> x [63]) (CMP(U|WU)const y [64]))
+(Rsh64x16 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+(Rsh64x8 <t> x y) => (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+
+((Rsh32U|Lsh32)x64 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPUconst y [32]))
+((Rsh32U|Lsh32)x32 <t> x y) => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0]) (CMPWUconst y [32]))
+((Rsh32U|Lsh32)x16 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+((Rsh32U|Lsh32)x8 <t> x y) => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+(Rsh32x(64|32) <t> x y) => (ISEL [0] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMP(U|WU)const y [32]))
+(Rsh32x16 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+(Rsh32x8 <t> x y) => (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+
+((Rsh16U|Lsh16)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16]))
+((Rsh16U|Lsh16)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16]))
+((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+((Rsh16U|Lsh16)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+(Rsh16x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMP(U|WU)const y [16]))
+(Rsh16x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+(Rsh16x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+
+((Rsh8U|Lsh8)x64 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8]))
+((Rsh8U|Lsh8)x32 <t> x y) => (ISEL [0] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8]))
+((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+((Rsh8U|Lsh8)x8 <t> x y) => (ISEL [2] (S(R|L)D <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+(Rsh8x(64|32) <t> x y) => (ISEL [0] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMP(U|WU)const y [8]))
+(Rsh8x16 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+(Rsh8x8 <t> x y) => (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+
+// Catch bounded shifts in situations like foo<<uint(shift&63) which might not be caught by the prove pass.
+(CMP(U|WU)const [d] (Select0 (ANDCCconst z [c]))) && uint64(d) > uint64(c) => (FlagLT)
+
+(ORN x (MOVDconst [-1])) => x
+
+(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
+(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
+
+(Addr {sym} base) => (MOVDaddr {sym} [0] base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
+(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
+(MOVDaddr {sym} [n] p:(ADD x y)) && sym == nil && n == 0 => p
+(MOVDaddr {sym} [n] ptr) && sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) => ptr
+
+// TODO: optimize these cases?
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz64NonZero ...) => (Ctz64 ...)
+
+(Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+(Ctz64 x) => (CNTTZD x)
+(Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+(Ctz32 x) => (CNTTZW (MOVWZreg x))
+(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
+
+(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
+(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
+
+(PopCount64 ...) => (POPCNTD ...)
+(PopCount(32|16|8) x) => (POPCNT(W|W|B) (MOV(W|H|B)Zreg x))
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(64|32)F ...) => (FNEG ...)
+
+(Com(64|32|16|8) x) => (NOR x x)
+
+// Lowering boolean ops
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(Not x) => (XORconst [1] x)
+
+// Merge logical operations
+(AND x (NOR y y)) => (ANDN x y)
+(OR x (NOR y y)) => (ORN x y)
+
+// Lowering comparisons
+(EqB x y) => (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+// Sign extension dependence on operand sign sets up for sign/zero-extension elision later
+(Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
+(Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
+(Eq(32|64|Ptr) x y) => (Equal ((CMPW|CMP|CMP) x y))
+(Eq(32|64)F x y) => (Equal (FCMPU x y))
+
+(NeqB ...) => (XOR ...)
+// Like Eq8 and Eq16, prefer sign extension likely to enable later elision.
+(Neq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (NotEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
+(Neq(8|16) x y) => (NotEqual (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
+(Neq(32|64|Ptr) x y) => (NotEqual ((CMPW|CMP|CMP) x y))
+(Neq(32|64)F x y) => (NotEqual (FCMPU x y))
+
+(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
+(Less(32|64) x y) => (LessThan ((CMPW|CMP) x y))
+(Less(32|64)F x y) => (FLessThan (FCMPU x y))
+
+(Less(8|16)U x y) => (LessThan (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
+(Less(32|64)U x y) => (LessThan ((CMPWU|CMPU) x y))
+
+(Leq(8|16) x y) => (LessEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
+(Leq(32|64) x y) => (LessEqual ((CMPW|CMP) x y))
+(Leq(32|64)F x y) => (FLessEqual (FCMPU x y))
+
+(Leq(8|16)U x y) => (LessEqual (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
+(Leq(32|64)U x y) => (LessEqual (CMP(WU|U) x y))
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (FLessThan cc) yes no) => (FLT cc yes no)
+(If (FLessEqual cc) yes no) => (FLE cc yes no)
+(If (FGreaterThan cc) yes no) => (FGT cc yes no)
+(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (NE (CMPWconst [0] (Select0 <typ.UInt32> (ANDCCconst [1] cond))) yes no)
+
+// Absorb boolean tests into block
+(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc)))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
+(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc)))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT) yes no) => (First no yes)
+(EQ (FlagGT) yes no) => (First no yes)
+
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT) yes no) => (First yes no)
+(NE (FlagGT) yes no) => (First yes no)
+
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT) yes no) => (First yes no)
+(LT (FlagGT) yes no) => (First no yes)
+
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT) yes no) => (First yes no)
+(LE (FlagGT) yes no) => (First no yes)
+
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT) yes no) => (First no yes)
+(GT (FlagGT) yes no) => (First yes no)
+
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT) yes no) => (First no yes)
+(GE (FlagGT) yes no) => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// constant comparisons
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
+
+(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<y => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>y => (FlagGT)
+
+(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
+
+(CMPUconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) => (MOVDconst [1])
+(Equal (FlagLT)) => (MOVDconst [0])
+(Equal (FlagGT)) => (MOVDconst [0])
+
+(NotEqual (FlagEQ)) => (MOVDconst [0])
+(NotEqual (FlagLT)) => (MOVDconst [1])
+(NotEqual (FlagGT)) => (MOVDconst [1])
+
+(LessThan (FlagEQ)) => (MOVDconst [0])
+(LessThan (FlagLT)) => (MOVDconst [1])
+(LessThan (FlagGT)) => (MOVDconst [0])
+
+(LessEqual (FlagEQ)) => (MOVDconst [1])
+(LessEqual (FlagLT)) => (MOVDconst [1])
+(LessEqual (FlagGT)) => (MOVDconst [0])
+
+(GreaterThan (FlagEQ)) => (MOVDconst [0])
+(GreaterThan (FlagLT)) => (MOVDconst [0])
+(GreaterThan (FlagGT)) => (MOVDconst [1])
+
+(GreaterEqual (FlagEQ)) => (MOVDconst [1])
+(GreaterEqual (FlagLT)) => (MOVDconst [0])
+(GreaterEqual (FlagGT)) => (MOVDconst [1])
+
+// absorb InvertFlags into boolean values
+((Equal|NotEqual|LessThan|GreaterThan|LessEqual|GreaterEqual) (InvertFlags x)) => ((Equal|NotEqual|GreaterThan|LessThan|GreaterEqual|LessEqual) x)
+
+
+// Elide compares of bit tests
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> z) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+
+(CondSelect x y (SETBC [a] cmp)) => (ISEL [a] x y cmp)
+(CondSelect x y (SETBCR [a] cmp)) => (ISEL [a+4] x y cmp)
+// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+// Fold any CR -> GPR -> CR transfers when applying the above rule.
+(ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
+(ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp)
+(ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp)
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && t.IsSigned() => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && t.IsSigned() => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && t.IsSigned() => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
+(Load <t> ptr mem) && is8BitInt(t) && !t.IsSigned() => (MOVBZload ptr mem)
+
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Using Zero instead of LoweredZero allows the
+// target address to be folded where possible.
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstorezero destptr mem)
+(Zero [2] destptr mem) =>
+ (MOVHstorezero destptr mem)
+(Zero [3] destptr mem) =>
+ (MOVBstorezero [2] destptr
+ (MOVHstorezero destptr mem))
+(Zero [4] destptr mem) =>
+ (MOVWstorezero destptr mem)
+(Zero [5] destptr mem) =>
+ (MOVBstorezero [4] destptr
+ (MOVWstorezero destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVHstorezero [4] destptr
+ (MOVWstorezero destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVBstorezero [6] destptr
+ (MOVHstorezero [4] destptr
+ (MOVWstorezero destptr mem)))
+
+(Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem)
+(Zero [12] {t} destptr mem) =>
+ (MOVWstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [16] {t} destptr mem) =>
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [24] {t} destptr mem) =>
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem)))
+(Zero [32] {t} destptr mem) =>
+ (MOVDstorezero [24] destptr
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))))
+
+// Handle cases not handled above
+// Lowered Short cases do not generate loops, and as a result don't clobber
+// the address registers or flags.
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
+(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVHstore dst (MOVHZload src mem) mem)
+(Move [4] dst src mem) =>
+ (MOVWstore dst (MOVWZload src mem) mem)
+// MOVD for load and store must have offsets that are multiple of 4
+(Move [8] {t} dst src mem) =>
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem)))
+
+// Large move uses a loop. Since the address is computed and the
+// offset is zero, any alignment can be used.
+(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
+ (LoweredMove [s] dst src mem)
+(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
+ (LoweredQuadMoveShort [s] dst src mem)
+(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
+ (LoweredQuadMove [s] dst src mem)
+
+// Calls
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Miscellaneous
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThan (CMPU idx len))
+(IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// Publication barrier as intrinsic
+(PubBarrier ...) => (LoweredPubBarrier ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
+// so ORconst, XORconst easily expand into a pair.
+
+// Include very-large constants in the const-const case.
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
+(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
+(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
+
+// Discover consts
+(AND x (MOVDconst [-1])) => x
+(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x))
+(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
+(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
+
+// Simplify consts
+(ANDCCconst [c] (Select0 (ANDCCconst [d] x))) => (ANDCCconst [c&d] x)
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(Select0 (ANDCCconst [-1] x)) => x
+(Select0 (ANDCCconst [0] _)) => (MOVDconst [0])
+(Select1 (ANDCCconst [0] _)) => (FlagEQ)
+(XORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORconst [0] x) => x
+
+// zero-extend of small and => small and
+(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y
+(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
+
+// sign extend of small-positive and => small-positive-and
+(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y
+(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0
+(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
+
+// small and of zero-extend => either zero-extend or small and
+(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y
+(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x)
+(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y
+(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x)
+
+(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
+(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
+// normal case
+(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x))
+(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x))
+(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
+
+// Eliminate unnecessary sign/zero extend following right shift
+(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
+(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
+(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
+(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
+(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
+(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
+
+(MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 32 => (S(R|RA)Wconst [c] x)
+(MOV(HZ|H)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 16 => (S(R|RA)Wconst [c] x)
+(MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) == 8 => (S(R|RA)Wconst [c] x)
+
+// initial right shift will handle sign/zero extend
+(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
+(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
+
+(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
+(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
+
+(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
+(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
+
+// Various redundant zero/sign extension combinations.
+(MOVBZreg y:(MOVBZreg _)) => y // repeat
+(MOVBreg y:(MOVBreg _)) => y // repeat
+(MOVBreg (MOVBZreg x)) => (MOVBreg x)
+(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
+
+// H - there are more combinations than these
+
+(MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat
+(MOVHZreg y:(MOVHBRload _ _)) => y
+
+(MOVHreg y:(MOV(H|B)reg _)) => y // repeat
+
+(MOV(H|HZ)reg y:(MOV(HZ|H)reg x)) => (MOV(H|HZ)reg x)
+
+// W - there are more combinations than these
+
+(MOV(WZ|WZ|WZ|W|W|W)reg y:(MOV(WZ|HZ|BZ|W|H|B)reg _)) => y // repeat
+(MOVWZreg y:(MOV(H|W)BRload _ _)) => y
+
+(MOV(W|WZ)reg y:(MOV(WZ|W)reg x)) => (MOV(W|WZ)reg x)
+
+// Truncate then logical then truncate: omit first, lesser or equal truncate
+(MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
+(MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
+(MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+
+(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z
+(MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
+(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z
+(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z
+
+// Arithmetic constant ops
+
+(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [c] x)
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
+(ADDconst [0] x) => x
+(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
+
+(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
+(ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable
+
+(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
+
+// Subtract from (with carry, but ignored) constant.
+// Note, these clobber the carry bit.
+(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
+(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
+(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
+(SUBFCconst [0] x) => (NEG x)
+(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
+(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
+(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
+(NEG (SUB x y)) => (SUB y x)
+(NEG (NEG x)) => x
+
+// Use register moves instead of stores and loads to move int<=>float values
+// Common with math Float64bits, Float64frombits
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
+
+(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
+(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
+
+(MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
+(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
+
+(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
+(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
+
+// Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx)
+// or non-indexed (MOV*load or MOV*store) should be used. Indexed instructions
+// require an extra instruction and register to load the index so non-indexed is preferred.
+// Indexed ops generate indexed load or store instructions for all GOPPC64 values.
+// Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits,
+// and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops.
+// On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits.
+// and support for PC relative addressing must be available if relocation is needed.
+// On power10, the assembler will determine when to use DS-form or prefixed
+// instructions for non-indexed ops depending on the value of the offset.
+//
+// Fold offsets for stores.
+(MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
+
+(FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
+
+// Fold address into load/store.
+// If power10 with PCRel is not available, then
+// the assembler needs to generate several instructions and use
+// temp register for accessing global, and each time it will reload
+// the temp register. So don't fold address of global in that case if there is more than
+// one use.
+(MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
+ (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+(FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
+ (FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+(MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
+ (MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
+ (MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
+ (FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// Fold offsets for loads.
+(FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
+
+(MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
+
+// Determine load + addressing that can be done as a register indexed load
+(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
+
+// See comments above concerning selection of indexed vs. non-indexed ops.
+// These cases don't have relocation.
+(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+
+// Store of zero => storezero
+(MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem)
+
+// Fold offsets for storezero
+(MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) =>
+ (MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem)
+
+// Stores with addressing that can be done as indexed stores
+(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
+
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) ptr val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+
+// Fold symbols into storezero
+(MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
+ (MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
+(AtomicLoadAcq(32|64) ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
+
+(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
+(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
+(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
+
+(AtomicAnd(8|32) ...) => (LoweredAtomicAnd(8|32) ...)
+(AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...)
+
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x)
+
+// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
+// This may interact with other patterns in the future. (Compare with arm64)
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
+(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
+(MOV(H|W)reg x:(MOVHload _ _)) => x
+(MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
+(MOV(WZ|W)reg x:(MOV(WZ|W)load _ _)) => x
+(MOV(WZ|W)reg x:(MOV(WZ|W)loadidx _ _ _)) => x
+(MOV(B|W)Zreg x:(Select0 (LoweredAtomicLoad(8|32) _ _))) => x
+
+// don't extend if argument is already extended
+(MOVBreg x:(Arg <t>)) && is8BitInt(t) && t.IsSigned() => x
+(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !t.IsSigned() => x
+(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && t.IsSigned() => x
+(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !t.IsSigned() => x
+(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned() => x
+(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned() => x
+
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Implement clrsldi and clrslwi extended mnemonics as described in
+// ISA 3.0 section C.8. AuxInt field contains values needed for
+// the instructions, packed together since there is only one available.
+(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
+(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
+(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
+
+(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
+(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
+(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+// special case for power9
+(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
+
+// Lose widening ops fed to stores
+(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVHBRstore ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore ptr x mem)
+(MOVWBRstore ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore ptr x mem)
+
+// Lose W-widening ops fed to compare-W
+(CMP(W|WU) x (MOV(W|WZ)reg y)) => (CMP(W|WU) x y)
+(CMP(W|WU) (MOV(W|WZ)reg x) y) => (CMP(W|WU) x y)
+
+(CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
+(CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
+(CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
+(CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
+
+(CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
+(CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
+(CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
+(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+
+// SETBC auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 1 : 0
+// SETBCR auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 0 : 1
+(Equal cmp) => (SETBC [2] cmp)
+(NotEqual cmp) => (SETBCR [2] cmp)
+(LessThan cmp) => (SETBC [0] cmp)
+(FLessThan cmp) => (SETBC [0] cmp)
+(FLessEqual cmp) => (OR (SETBC [2] cmp) (SETBC [0] cmp))
+(GreaterEqual cmp) => (SETBCR [0] cmp)
+(GreaterThan cmp) => (SETBC [1] cmp)
+(FGreaterEqual cmp) => (OR (SETBC [2] cmp) (SETBC [1] cmp))
+(FGreaterThan cmp) => (SETBC [1] cmp)
+(LessEqual cmp) => (SETBCR [1] cmp)
+
+(SETBC [0] (FlagLT)) => (MOVDconst [1])
+(SETBC [0] (Flag(GT|EQ))) => (MOVDconst [0])
+(SETBC [1] (FlagGT)) => (MOVDconst [1])
+(SETBC [1] (Flag(LT|EQ))) => (MOVDconst [0])
+(SETBC [2] (FlagEQ)) => (MOVDconst [1])
+(SETBC [2] (Flag(LT|GT))) => (MOVDconst [0])
+
+(SETBCR [0] (FlagLT)) => (MOVDconst [0])
+(SETBCR [0] (Flag(GT|EQ))) => (MOVDconst [1])
+(SETBCR [1] (FlagGT)) => (MOVDconst [0])
+(SETBCR [1] (Flag(LT|EQ))) => (MOVDconst [1])
+(SETBCR [2] (FlagEQ)) => (MOVDconst [0])
+(SETBCR [2] (Flag(LT|GT))) => (MOVDconst [1])
+
+(SETBC [0] (InvertFlags bool)) => (SETBC [1] bool)
+(SETBC [1] (InvertFlags bool)) => (SETBC [0] bool)
+(SETBC [2] (InvertFlags bool)) => (SETBC [2] bool)
+
+(SETBCR [0] (InvertFlags bool)) => (SETBCR [1] bool)
+(SETBCR [1] (InvertFlags bool)) => (SETBCR [0] bool)
+(SETBCR [2] (InvertFlags bool)) => (SETBCR [2] bool)
+
+// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
+// ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
+
+(ISEL [2] x _ (FlagEQ)) => x
+(ISEL [2] _ y (Flag(LT|GT))) => y
+
+(ISEL [6] _ y (FlagEQ)) => y
+(ISEL [6] x _ (Flag(LT|GT))) => x
+
+(ISEL [0] _ y (Flag(EQ|GT))) => y
+(ISEL [0] x _ (FlagLT)) => x
+
+(ISEL [5] _ x (Flag(EQ|LT))) => x
+(ISEL [5] y _ (FlagGT)) => y
+
+(ISEL [1] _ y (Flag(EQ|LT))) => y
+(ISEL [1] x _ (FlagGT)) => x
+
+(ISEL [4] x _ (Flag(EQ|GT))) => x
+(ISEL [4] _ y (FlagLT)) => y
+
+(ISEL [2] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+(ISEL [6] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+(SETBC [n] (InvertFlags bool)) => (SETBCR [n] bool)
+(SETBCR [n] (InvertFlags bool)) => (SETBC [n] bool)
+
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
+(XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp)
+(XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp)
+
+(SETBC [2] ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
+(SETBCR [2] ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (Select0 <typ.UInt64> (ANDCCconst [1] z ))
+
+(SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (SETBC [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+(SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (SETBCR [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+
+// Only CMPconst for these in case AND|OR|XOR result is > 32 bits
+(SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
+(SETBCR [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (ANDCC y z )))
+
+(SETBC [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ORCC y z )))
+(SETBCR [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (ORCC y z )))
+
+(SETBC [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (XORCC y z )))
+(SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))
+
+// A particular pattern seen in cgo code:
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x))
+
+// floating point negative abs
+(FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
+
+// floating-point fused multiply-add/sub
+(F(ADD|SUB) (FMUL x y) z) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
+(F(ADDS|SUBS) (FMULS x y) z) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)
+
+// Arch-specific inlining for small or disjoint runtime.memmove
+(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(s1, s2, s3, call)
+ => (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
+
+// Prefetch instructions (TH specified using aux field)
+// For DCBT Ra,Rb,TH, A value of TH indicates:
+// 0, hint this cache line will be used soon. (PrefetchCache)
+// 16, hint this cache line will not be used for long. (PrefetchCacheStreamed)
+// See ISA 3.0 Book II 4.3.2 for more detail. https://openpower.foundation/specifications/isa/
+(PrefetchCache ptr mem) => (DCBT ptr mem [0])
+(PrefetchCacheStreamed ptr mem) => (DCBT ptr mem [16])
+
+// Use byte reverse instructions on Power10
+(Bswap(16|32|64) x) && buildcfg.GOPPC64>=10 => (BR(H|W|D) x)
+
+// Fold bit reversal into loads.
+(BR(W|H) x:(MOV(W|H)Zload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
+(BRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+(BRD x:(MOVDloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx ptr idx mem)
+
+// Fold bit reversal into stores.
+(MOV(D|W|H)store [off] {sym} ptr r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+(MOV(D|W|H)storeidx ptr idx r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstoreidx ptr idx val mem)
+
+// GOPPC64<10 rules.
+// These Bswap operations should only be introduced by the memcombine pass in places where they can be folded into loads or stores.
+(Bswap(32|16) x:(MOV(W|H)Zload [off] {sym} ptr mem)) => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx mem)) => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
+(Bswap64 x:(MOVDload [off] {sym} ptr mem)) => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+(Bswap64 x:(MOVDloadidx ptr idx mem)) => @x.Block (MOVDBRloadidx ptr idx mem)
+(MOV(D|W|H)store [off] {sym} ptr (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+(MOV(D|W|H)storeidx ptr idx (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstoreidx ptr idx val mem)
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
new file mode 100644
index 0000000..7aa2e6c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go
@@ -0,0 +1,755 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Less-than-64-bit integer types live in the low portion of registers.
+// The upper portion is junk.
+// - Boolean types are zero or 1; stored in a byte, with upper bytes of the register containing junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R31).
+
+var regNamesPPC64 = []string{
+ "R0", // REGZERO, not used, but simplifies counting in regalloc
+ "SP", // REGSP
+ "SB", // REGSB
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11", // REGCTXT for closures
+ "R12",
+ "R13", // REGTLS
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ "R27",
+ "R28",
+ "R29",
+ "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen".
+ "R31", // REGTMP
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ // "F31", the allocator is limited to 64 entries. We sacrifice this FPR to support XER.
+
+ "XER",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // "CR0",
+ // "CR1",
+ // "CR2",
+ // "CR3",
+ // "CR4",
+ // "CR5",
+ // "CR6",
+ // "CR7",
+
+ // "CR",
+ // "LR",
+ // "CTR",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesPPC64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesPPC64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ var (
+ gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30")
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ gr = buildReg("g")
+ xer = buildReg("XER")
+ // cr = buildReg("CR")
+ // ctr = buildReg("CTR")
+ // lr = buildReg("LR")
+ tmp = buildReg("R31")
+ ctxt = buildReg("R11")
+ callptr = buildReg("R12")
+ // tls = buildReg("R13")
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ xergp = regInfo{inputs: []regMask{xer}, outputs: []regMask{gp}, clobbers: xer}
+ gp11cxer = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}, clobbers: xer}
+ gp11xer = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp, xer}}
+ gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}}
+ gp21cxer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}, clobbers: xer}
+ gp21xer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, xer}, clobbers: xer}
+ gp2xer1xer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, xer}, outputs: []regMask{gp, xer}, clobbers: xer}
+ gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp1cr = regInfo{inputs: []regMask{gp | sp | sb}}
+ gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ crgp = regInfo{inputs: nil, outputs: []regMask{gp}}
+ crgp11 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ crgp21 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
+ prefreg = regInfo{inputs: []regMask{gp | sp | sb}}
+ gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}}
+ gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
+ gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2cr = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
+ fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
+ fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
+ callerSave = regMask(gp | fp | gr | xer)
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ r5 = buildReg("R5")
+ r6 = buildReg("R6")
+ )
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDCC", argLength: 2, reg: gp21, asm: "ADDCC", commutative: true, typ: "(Int,Flags)"}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "ADDCCconst", argLength: 1, reg: gp11cxer, asm: "ADDCCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0 + auxInt sets CC, clobbers XER
+ {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
+ {name: "SUBCC", argLength: 2, reg: gp21, asm: "SUBCC", typ: "(Int,Flags)"}, // arg0-arg1 sets CC
+ {name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (carry is ignored)
+ {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit)
+
+ {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
+
+ {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD"}, // arg0*arg1 + arg2
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2
+ {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2
+
+ {name: "SRAD", argLength: 2, reg: gp21cxer, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!)
+ {name: "SRAW", argLength: 2, reg: gp21cxer, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width
+ {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width
+ {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width
+ {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width
+ {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width
+
+ {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64
+ {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
+ // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA.
+ // The constant shift values are packed into the aux int32.
+ {name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, //
+ {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, //
+
+ // Operations which consume or generate the CA (xer)
+ {name: "ADDC", argLength: 2, reg: gp21xer, asm: "ADDC", commutative: true, typ: "(UInt64, UInt64)"}, // arg0 + arg1 -> out, CA
+ {name: "SUBC", argLength: 2, reg: gp21xer, asm: "SUBC", typ: "(UInt64, UInt64)"}, // arg0 - arg1 -> out, CA
+ {name: "ADDCconst", argLength: 1, reg: gp11xer, asm: "ADDC", typ: "(UInt64, UInt64)", aux: "Int64"}, // arg0 + imm16 -> out, CA
+ {name: "SUBCconst", argLength: 1, reg: gp11xer, asm: "SUBC", typ: "(UInt64, UInt64)", aux: "Int64"}, // imm16 - arg0 -> out, CA
+ {name: "ADDE", argLength: 3, reg: gp2xer1xer, asm: "ADDE", typ: "(UInt64, UInt64)", commutative: true}, // arg0 + arg1 + CA (arg2) -> out, CA
+ {name: "SUBE", argLength: 3, reg: gp2xer1xer, asm: "SUBE", typ: "(UInt64, UInt64)"}, // arg0 - arg1 - CA (arg2) -> out, CA
+ {name: "ADDZEzero", argLength: 1, reg: xergp, asm: "ADDZE", typ: "UInt64"}, // CA (arg0) + $0 -> out
+ {name: "SUBZEzero", argLength: 1, reg: xergp, asm: "SUBZE", typ: "UInt64"}, // $0 - CA (arg0) -> out
+
+ {name: "SRADconst", argLength: 1, reg: gp11cxer, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRAWconst", argLength: 1, reg: gp11cxer, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width
+
+ {name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits
+ {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
+ {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},
+
+ {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
+ {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
+ {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+ {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63.
+ {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0.
+
+ {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"}, // count leading zeros
+ {name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC
+ {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW"}, // count leading zeros (32 bit)
+
+ {name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros
+ {name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit)
+
+ {name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0
+ {name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word
+ {name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte
+
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1
+
+ {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit)
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit)
+ {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit)
+ {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit)
+
+ {name: "MODUD", argLength: 2, reg: gp21, asm: "MODUD", typ: "UInt64"}, // arg0 % arg1 (unsigned 64-bit)
+ {name: "MODSD", argLength: 2, reg: gp21, asm: "MODSD", typ: "Int64"}, // arg0 % arg1 (signed 64-bit)
+ {name: "MODUW", argLength: 2, reg: gp21, asm: "MODUW", typ: "UInt32"}, // arg0 % arg1 (unsigned 32-bit)
+ {name: "MODSW", argLength: 2, reg: gp21, asm: "MODSW", typ: "Int32"}, // arg0 % arg1 (signed 32-bit)
+ // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1
+
+ // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register.
+ {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero
+ {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero
+ {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float
+ {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float
+ {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value
+
+ // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC.
+ // Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the
+ // data instead and use FMOVDload and FMOVDstore instead (this will also dodge endianess issues).
+ // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use
+ // the word-load instructions. (Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr)
+
+ {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register
+ {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
+ {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
+ {name: "ANDNCC", argLength: 2, reg: gp21, asm: "ANDNCC", typ: "(Int64,Flags)"}, // arg0&^arg1 sets CC
+ {name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
+ {name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, typ: "(Int,Flags)"}, // arg0|arg1 sets CC
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1)
+ {name: "NORCC", argLength: 2, reg: gp21, asm: "NORCC", commutative: true, typ: "(Int,Flags)"}, // ^(arg0|arg1) sets CC
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
+ {name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, typ: "(Int,Flags)"}, // arg0^arg1 sets CC
+ {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
+ {name: "NEGCC", argLength: 1, reg: gp11, asm: "NEGCC", typ: "(Int,Flags)"}, // -arg0 (integer) sets CC
+ {name: "BRD", argLength: 1, reg: gp11, asm: "BRD"}, // reversebytes64(arg0)
+ {name: "BRW", argLength: 1, reg: gp11, asm: "BRW"}, // reversebytes32(arg0)
+ {name: "BRH", argLength: 1, reg: gp11, asm: "BRH"}, // reversebytes16(arg0)
+ {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point)
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision)
+ {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64
+ {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64
+ {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64
+ {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64
+ {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64
+ {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64
+ {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64
+
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+ {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64
+
+ // Load bytes in the endian order of the arch from arg0+aux+auxint into a 64 bit register.
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte zero extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes sign extend
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes sign extend
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes
+
+ // Load bytes in reverse endian order of the arch from arg0 into a 64 bit register, all zero extend.
+ // The generated instructions are indexed loads with no offset field in the instruction so the aux fields are not used.
+ // In these cases the index register field is set to 0 and the full address is in the base register.
+ {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", typ: "UInt64", faultOnNilArg0: true}, // load 8 bytes reverse order
+ {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", typ: "UInt32", faultOnNilArg0: true}, // load 4 bytes zero extend reverse order
+ {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", typ: "UInt16", faultOnNilArg0: true}, // load 2 bytes zero extend reverse order
+
+ // In these cases an index register is used in addition to a base register
+ // Loads from memory location arg[0] + arg[1].
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", typ: "Int64"},
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", typ: "Int64"},
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", typ: "Float64"},
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", typ: "Float32"},
+
+ // Prefetch instruction
+ // Do prefetch of address generated with arg0 and arg1 with option aux. arg0=addr,arg1=memory, aux=option.
+ {name: "DCBT", argLength: 2, aux: "Int64", reg: prefreg, asm: "DCBT", hasSideEffects: true},
+
+ // Store bytes in the reverse endian order of the arch into arg0.
+ // These are indexed stores with no offset field in the instruction so the auxint fields are not used.
+ {name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes reverse order
+ {name: "MOVWBRstore", argLength: 3, reg: gpstore, asm: "MOVWBR", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes reverse order
+ {name: "MOVHBRstore", argLength: 3, reg: gpstore, asm: "MOVHBR", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes reverse order
+
+ // Floating point loads from arg0+aux+auxint
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load double float
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load single float
+
+ // Store bytes in the endian order of the arch into arg0+aux+auxint
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes
+
+ // Store floating point value into arg0+aux+auxint
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
+
+ // Stores using index and base registers
+ // Stores to arg[0] + arg[1]
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", typ: "Mem"}, // store bye
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", typ: "Mem"}, // store half word
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", typ: "Mem"}, // store word
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", typ: "Mem"}, // store double word
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", typ: "Mem"}, // store double float
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", typ: "Mem"}, // store single float
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", typ: "Mem"}, // store half word reversed byte using index reg
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", typ: "Mem"}, // store word reversed byte using index reg
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", typ: "Mem"}, // store double word reversed byte using index reg
+
+ // The following ops store 0 into arg0+aux+auxint arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte
+ {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes
+ {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes
+ {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb | gp}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB/GP
+
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true}, //
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, //
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, //
+ {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"},
+
+ {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"},
+ {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"},
+ {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"},
+ {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},
+
+ // ISEL arg2 ? arg0 : arg1
+ // ISELZ arg1 ? arg0 : $0
+ // auxInt values 0=LT 1=GT 2=EQ 3=SO (summary overflow/unordered) 4=GE 5=LE 6=NE 7=NSO (not summary overflow/not unordered)
+ // Note, auxInt^4 inverts the comparison condition. For example, LT^4 becomes GE, and "ISEL [a] x y z" is equivalent to ISEL [a^4] y x z".
+ {name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"},
+ {name: "ISELZ", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32"},
+
+ // SETBC auxInt values 0=LT 1=GT 2=EQ (CRbit=1)? 1 : 0
+ {name: "SETBC", argLength: 1, reg: crgp, asm: "SETBC", aux: "Int32", typ: "Int32"},
+ // SETBCR auxInt values 0=LT 1=GT 2=EQ (CRbit=1)? 0 : 1
+ {name: "SETBCR", argLength: 1, reg: crgp, asm: "SETBCR", aux: "Int32", typ: "Int32"},
+
+ // pseudo-ops
+ {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "FLessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise.
+ {name: "FLessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise; PPC <= === !> which is wrong for NaN
+ {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.
+ {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of the closure pointer.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R3, changed as side effect)
+ // returns mem
+ //
+ // a loop is generated when there is more than one iteration
+ // needed to clear 4 doublewords
+ //
+ // XXLXOR VS32,VS32,VS32
+ // MOVD $len/32,R31
+ // MOVD R31,CTR
+ // MOVD $16,R31
+ // loop:
+ // STXVD2X VS32,(R0)(R3)
+ // STXVD2X VS32,(R31)(R3)
+ // ADD R3,32
+ // BC loop
+
+ // remaining doubleword clears generated as needed
+ // MOVD R0,(R3)
+ // MOVD R0,8(R3)
+ // MOVD R0,16(R3)
+ // MOVD R0,24(R3)
+
+ // one or more of these to clear remainder < 8 bytes
+ // MOVW R0,n1(R3)
+ // MOVH R0,n2(R3)
+ // MOVB R0,n3(R3)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredZeroShort",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp}},
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredQuadZeroShort",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredQuadZero",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+
+ // R31 is temp register
+ // Loop code:
+ // MOVD len/32,R31 set up loop ctr
+ // MOVD R31,CTR
+ // MOVD $16,R31 index register
+ // loop:
+ // LXVD2X (R0)(R4),VS32
+ // LXVD2X (R31)(R4),VS33
+ // ADD R4,$32 increment src
+ // STXVD2X VS32,(R0)(R3)
+ // STXVD2X VS33,(R31)(R3)
+ // ADD R3,$32 increment dst
+ // BC 16,0,loop branch ctr
+ // For this purpose, VS32 and VS33 are treated as
+ // scratch registers. Since regalloc does not
+ // track vector registers, even if it could be marked
+ // as clobbered it would have no effect.
+ // TODO: If vector registers are managed by regalloc
+ // mark these as clobbered.
+ //
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R4),R14
+ // MOVD R14,n(R3)
+ // MOVW n1(R4),R14
+ // MOVW R14,n1(R3)
+ // MOVH n2(R4),R14
+ // MOVH R14,n2(R3)
+ // MOVB n3(R4),R14
+ // MOVB R14,n3(R3)
+
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), buildReg("R21")},
+ clobbers: buildReg("R20 R21"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredMoveShort",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ // The following is similar to the LoweredMove, but uses
+ // LXV instead of LXVD2X, which does not require an index
+ // register and will do 4 in a loop instead of only.
+ {
+ name: "LoweredQuadMove",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), buildReg("R21")},
+ clobbers: buildReg("R20 R21"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ {
+ name: "LoweredQuadMoveShort",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+
+ // atomic add32, 64
+ // LWSYNC
+ // LDAR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STDCCC Rout, (Rarg0)
+ // BNE -3(PC)
+ // return new sum
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange32, 64
+ // LWSYNC
+ // LDAR (Rarg0), Rout
+ // STDCCC Rarg1, (Rarg0)
+ // BNE -2(PC)
+ // ISYNC
+ // return old val
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // LDAR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STDCCC Rarg2, (Rarg0)
+ // BNE -4(PC)
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic 8/32 and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
+ // LBAR/LWAT (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp
+ // BNE Rtmp, -3(PC)
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and R20 and R21,
+ // but may clobber anything else, including R31 (REGTMP).
+ // Returns a pointer to a write barrier buffer in R29.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"},
+
+ {name: "LoweredPubBarrier", argLength: 1, asm: "LWSYNC", hasSideEffects: true}, // Do data barrier. arg0=memory
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
+ // then we do (LessThan (InvertFlags (CMP b a))) instead.
+ // Rewrites will convert this to (GreaterThan (CMP b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Constant flag values. For any comparison, there are 3 possible
+ // outcomes: either the three from the signed total order (<,==,>)
+ // or the three from the unsigned total order, depending on which
+ // comparison operation was used (CMP or CMPU -- PPC is different from
+ // the other architectures, which have a single comparison producing
+ // both signed and unsigned comparison results.)
+
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT"}, // signed < or unsigned <
+ {name: "FlagGT"}, // signed > or unsigned >
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "FLT", controls: 1},
+ {name: "FLE", controls: 1},
+ {name: "FGT", controls: 1},
+ {name: "FGE", controls: 1},
+ }
+
+ archs = append(archs, arch{
+ name: "PPC64",
+ pkg: "cmd/internal/obj/ppc64",
+ genfile: "../../ppc64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesPPC64,
+ ParamIntRegNames: "R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17",
+ ParamFloatRegNames: "F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12",
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: xer,
+ framepointerreg: -1,
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
new file mode 100644
index 0000000..2eecf94
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules used by the laterLower pass.
+
+// Simplify ISEL x $0 z into ISELZ
+(ISEL [a] x (MOVDconst [0]) z) => (ISELZ [a] x z)
+// Simplify ISEL $0 y z into ISELZ by inverting comparison and reversing arguments.
+(ISEL [a] (MOVDconst [0]) y z) => (ISELZ [a^0x4] y z)
+
+// SETBC, SETBCR is supported on ISA 3.1(Power10) and newer, use ISELZ for
+// older targets
+(SETBC [2] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [2] (MOVDconst [1]) cmp)
+(SETBCR [2] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [6] (MOVDconst [1]) cmp)
+(SETBC [0] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [0] (MOVDconst [1]) cmp)
+(SETBCR [0] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [4] (MOVDconst [1]) cmp)
+(SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp)
+(SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp)
+
+// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst
+// always sets it.
+(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+// The upper bits of the smaller than register values is undefined. Take advantage of that.
+(AND <t> x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n))
+
+// Convert simple bit masks to an equivalent rldic[lr] if possible.
+(AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n)
+(AND x:(MOVDconst [m]) n) && m != 0 && isPPC64ValidShiftMask(^m) => (RLDICR [encodePPC64RotateMask(0,m,64)] n)
+
+// If the RLDICL does not rotate its value, a shifted value can be merged.
+(RLDICL [em] x:(SRDconst [s] a)) && (em&0xFF0000) == 0 => (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a)
+
+// Convert rotated 32 bit masks on 32 bit values into rlwinm. In general, this leaves the upper 32 bits in an undefined state.
+(AND <t> x:(MOVDconst [m]) n) && t.Size() == 4 && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(0,m,32)] n)
+
+// When PCRel is supported, paddi can add a 34b signed constant in one instruction.
+(ADD (MOVDconst [m]) x) && supportsPPC64PCRel() && (m<<30)>>30 == m => (ADDconst [m] x)
+
+
+// Where possible and practical, generate CC opcodes. Due to the structure of the rules, there are limits to how
+// a Value can be rewritten which make it impossible to correctly rewrite sibling Value users. To workaround this
+// case, candidates for CC opcodes are converted in two steps:
+// 1. Convert all (x (Op ...) ...) into (x (Select0 (OpCC ...) ...). See convertPPC64OpToOpCC for more
+// detail on how and why this is done there.
+// 2. Rewrite (CMPconst [0] (Select0 (OpCC ...))) into (Select1 (OpCC...))
+// Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if
+// both ops are in the same block.
+(CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+// Note: ADDCCconst only assembles to 1 instruction for int16 constants.
+(CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z))
+// And finally, fixup the flag user.
+(CMPconst <t> [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 <t> z)
+(CMPconst <t> [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 <t> z)
diff --git a/src/cmd/compile/internal/ssa/_gen/README b/src/cmd/compile/internal/ssa/_gen/README
new file mode 100644
index 0000000..74b81c2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/README
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+This command generates Go code (in the parent directory) for all
+the architecture-specific opcodes, blocks, and rewrites. See the
+"Hacking on SSA" section in the parent directory's README.md for
+more information.
+
+To regenerate everything, run "go generate" on the ssa package
+in the parent directory.
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
new file mode 100644
index 0000000..fc206c4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -0,0 +1,821 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(64|32)F ...) => (FADD(D|S) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(64|32)F ...) => (FSUB(D|S) ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+(Mul64uover ...) => (LoweredMuluover ...)
+(Mul32 ...) => (MULW ...)
+(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
+(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
+(Mul(64|32)F ...) => (FMUL(D|S) ...)
+
+(Div(64|32)F ...) => (FDIV(D|S) ...)
+
+(Div64 x y [false]) => (DIV x y)
+(Div64u ...) => (DIVU ...)
+(Div32 x y [false]) => (DIVW x y)
+(Div32u ...) => (DIVUW ...)
+(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (MULHU ...)
+(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+
+(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
+(Select1 (Add64carry x y c)) =>
+ (OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))
+
+(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
+(Select1 (Sub64borrow x y c)) =>
+ (OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
+
+// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
+(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
+
+(Mod64 x y [false]) => (REM x y)
+(Mod64u ...) => (REMU ...)
+(Mod32 x y [false]) => (REMW x y)
+(Mod32u ...) => (REMUW ...)
+(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(64|32)F ...) => (FNEG(D|S) ...)
+
+(Com(64|32|16|8) ...) => (NOT ...)
+
+
+(Sqrt ...) => (FSQRTD ...)
+(Sqrt32 ...) => (FSQRTS ...)
+
+(Copysign ...) => (FSGNJD ...)
+
+(Abs ...) => (FABSD ...)
+
+(FMA ...) => (FMADDD ...)
+
+// Sign and zero extension.
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(Cvt32to32F ...) => (FCVTSW ...)
+(Cvt32to64F ...) => (FCVTDW ...)
+(Cvt64to32F ...) => (FCVTSL ...)
+(Cvt64to64F ...) => (FCVTDL ...)
+
+(Cvt32Fto32 ...) => (FCVTWS ...)
+(Cvt32Fto64 ...) => (FCVTLS ...)
+(Cvt64Fto32 ...) => (FCVTWD ...)
+(Cvt64Fto64 ...) => (FCVTLD ...)
+
+(Cvt32Fto64F ...) => (FCVTDS ...)
+(Cvt64Fto32F ...) => (FCVTSD ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+(Slicemask <t> x) => (SRAI [63] (NEG <t> x))
+
+// Truncations
+// We ignore the unused high parts of registers, so truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Shifts
+
+// SLL only considers the bottom 6 bits of y. If y > 64, the result should
+// always be 0.
+//
+// Breaking down the operation:
+//
+// (SLL x y) generates x << (y & 63).
+//
+// If y < 64, this is the value we want. Otherwise, we want zero.
+//
+// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
+(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
+
+// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
+// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
+// the maximum value. See Lsh above for a detailed description.
+(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
+(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
+
+// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
+// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
+// depending on the instruction), the result of the shift should be either 0
+// or -1 based on the sign bit of x.
+//
+// We implement this by performing the max shift (-1) if y > the maximum value.
+//
+// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
+// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
+// before passing it to SRAW.
+//
+// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
+// more than the 5 or 6 bits SRAW and SRA care about.
+(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
+(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
+
+// Rotates.
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 <t> x (MOVDconst [c])) => (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
+(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+
+(Less64 ...) => (SLT ...)
+(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (SLTU ...)
+(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less(64|32)F ...) => (FLT(D|S) ...)
+
+// Convert x <= y to !(y > x).
+(Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x))
+(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
+(Leq(64|32)F ...) => (FLE(D|S) ...)
+
+(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
+(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
+(Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
+(Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq(64|32)F ...) => (FEQ(D|S) ...)
+
+(NeqPtr x y) => (Not (EqPtr x y))
+(Neq64 x y) => (Not (Eq64 x y))
+(Neq32 x y) => (Not (Eq32 x y))
+(Neq16 x y) => (Not (Eq16 x y))
+(Neq8 x y) => (Not (Eq8 x y))
+(Neq(64|32)F ...) => (FNE(D|S) ...)
+
+// Loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// Stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)
+
+// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
+// knows what variables are being read/written by the ops.
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBUload [off1+int32(off2)] {sym} base mem)
+(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBload [off1+int32(off2)] {sym} base mem)
+(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHUload [off1+int32(off2)] {sym} base mem)
+(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHload [off1+int32(off2)] {sym} base mem)
+(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWUload [off1+int32(off2)] {sym} base mem)
+(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWload [off1+int32(off2)] {sym} base mem)
+(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDload [off1+int32(off2)] {sym} base mem)
+
+(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBstore [off1+int32(off2)] {sym} base val mem)
+(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHstore [off1+int32(off2)] {sym} base val mem)
+(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWstore [off1+int32(off2)] {sym} base val mem)
+(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDstore [off1+int32(off2)] {sym} base val mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
+// with OffPtr -> ADDI.
+(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
+
+// Small zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVDconst [0])
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore ptr (MOVDconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVDconst [0])
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVWstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [24] ptr (MOVDconst [0])
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))))
+
+// Medium 8-aligned zeroing uses a Duff's device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
+
+// Generic zeroing uses a loop
+(Zero [s] {t} ptr mem) =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
+ mem)
+
+// Checks
+(IsNonNil ...) => (SNEZ ...)
+(IsInBounds ...) => (Less64U ...)
+(IsSliceInBounds ...) => (Leq64U ...)
+
+// Trivial lowering
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// Publication barrier as intrinsic
+(PubBarrier ...) => (LoweredPubBarrier ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Small moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [24] dst (MOVDload [24] src mem)
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))))
+
+// Medium 8-aligned move uses a Duff's device
+// 16 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+
+// Generic move uses a loop
+(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
+ mem)
+
+// Boolean ops; 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (SEQZ (SUB <typ.Bool> x y))
+(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
+(Not ...) => (SEQZ ...)
+
+// Lowering pointer arithmetic
+// TODO: Special handling for SP offsets, like ARM
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
+(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+(ConstNil) => (MOVDconst [0])
+(ConstBool [val]) => (MOVDconst [int64(b2i(val))])
+
+(Addr {sym} base) => (MOVaddr {sym} [0] base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVaddr {sym} base)
+
+// Calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Atomic Intrinsics
+(AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...)
+(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
+(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
+(AtomicAnd8 ptr val mem) =>
+ (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
+ (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
+ (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
+
+(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
+(AtomicOr8 ptr val mem) =>
+ (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
+
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
+
+// Conditional branches
+(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
+
+// Optimizations
+
+// Absorb SEQZ/SNEZ into branch.
+(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
+(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
+(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
+(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)
+
+// Remove redundant NEG from BEQZ/BNEZ.
+(BEQZ (NEG x) yes no) => (BEQZ x yes no)
+(BNEZ (NEG x) yes no) => (BNEZ x yes no)
+
+// Negate comparison with FNES/FNED.
+(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
+(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
+(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
+(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)
+
+// Convert BEQZ/BNEZ into more optimal branch conditions.
+(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
+(BNEZ (SUB x y) yes no) => (BNE x y yes no)
+(BEQZ (SLT x y) yes no) => (BGE x y yes no)
+(BNEZ (SLT x y) yes no) => (BLT x y yes no)
+(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
+(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
+(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
+(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
+(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
+(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)
+
+// Convert branch with zero to more optimal branch zero.
+(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
+(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
+(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
+(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
+(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
+(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
+(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
+(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)
+
+// Remove redundant NEG from SEQZ/SNEZ.
+(SEQZ (NEG x)) => (SEQZ x)
+(SNEZ (NEG x)) => (SNEZ x)
+
+// Remove redundant SEQZ/SNEZ.
+(SEQZ (SEQZ x)) => (SNEZ x)
+(SEQZ (SNEZ x)) => (SEQZ x)
+(SNEZ (SEQZ x)) => (SEQZ x)
+(SNEZ (SNEZ x)) => (SNEZ x)
+
+// Store zero.
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+
+// Boolean ops are already extended.
+(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
+(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
+(MOVBUreg x:((SEQZ|SNEZ) _)) => x
+(MOVBUreg x:((SLT|SLTU) _ _)) => x
+
+// Avoid extending when already sufficiently masked.
+(MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
+(MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
+(MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
+(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
+(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
+(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x
+
+// Combine masking and zero extension.
+(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
+(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
+(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)
+
+// Avoid sign/zero extension for consts.
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Avoid sign/zero extension after properly typed load.
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+
+// Avoid zero extension after properly typed atomic operation.
+(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
+(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
+(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)
+
+// Avoid sign extension after word arithmetic.
+(MOVWreg x:(ADDIW _)) => (MOVDreg x)
+(MOVWreg x:(SUBW _ _)) => (MOVDreg x)
+(MOVWreg x:(NEGW _)) => (MOVDreg x)
+(MOVWreg x:(MULW _ _)) => (MOVDreg x)
+(MOVWreg x:(DIVW _ _)) => (MOVDreg x)
+(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
+(MOVWreg x:(REMW _ _)) => (MOVDreg x)
+(MOVWreg x:(REMUW _ _)) => (MOVDreg x)
+
+// Fold double extensions.
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// Do not extend before store.
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// Replace extend after load with alternate load where possible.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+
+// If a register move has only 1 use, just use the same register without emitting instruction
+// MOVnop does not emit an instruction, only for ensuring the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// TODO: we should be able to get rid of MOVDnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+
+// Avoid unnecessary zero and sign extension when right shifting.
+(SRAI <t> [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
+(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)
+
+// Replace right shifts that exceed size of signed type.
+(SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y))
+(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y))
+(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
+
+// Eliminate right shifts that exceed size of unsigned type.
+(SRLI <t> [x] (MOVBUreg y)) && x >= 8 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
+(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])
+
+// Fold constant into immediate instructions where possible.
+(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
+(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
+(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
+(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
+(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
+(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
+(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
+(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
+(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
+(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x)
+(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)
+
+// Convert const subtraction into ADDI with negative immediate, where possible.
+(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
+(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))
+
+// Subtraction of zero.
+(SUB x (MOVDconst [0])) => x
+(SUBW x (MOVDconst [0])) => (ADDIW [0] x)
+
+// Subtraction from zero.
+(SUB (MOVDconst [0]) x) => (NEG x)
+(SUBW (MOVDconst [0]) x) => (NEGW x)
+
+// Fold negation into subtraction.
+(NEG (SUB x y)) => (SUB y x)
+(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))
+
+// Double negation.
+(NEG (NEG x)) => x
+
+// Addition of zero or two constants.
+(ADDI [0] x) => x
+(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])
+
+// ANDI with all zeros, all ones or two constants.
+(ANDI [0] x) => (MOVDconst [0])
+(ANDI [-1] x) => x
+(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])
+
+// ORI with all zeroes, all ones or two constants.
+(ORI [0] x) => x
+(ORI [-1] x) => (MOVDconst [-1])
+(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])
+
+// Combine operations with immediate.
+(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
+(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
+(ORI [x] (ORI [y] z)) => (ORI [x | y] z)
+
+// Negation of a constant.
+(NEG (MOVDconst [x])) => (MOVDconst [-x])
+(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
+
+// Shift of a constant.
+(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
+(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
+(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])
+
+// SLTI/SLTIU with constants.
+(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
+(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])
+
+// SLTI/SLTIU with known outcomes.
+(SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
+(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
+(SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
+(SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])
+
+// SLT/SLTU with known outcomes.
+(SLT x x) => (MOVDconst [0])
+(SLTU x x) => (MOVDconst [0])
+
+// Deadcode for LoweredMuluhilo
+(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
+(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)
+
+(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
+(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
+(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)
+
+// Merge negation into fused multiply-add and multiply-subtract.
+//
+// Key:
+//
+// [+ -](x * y [+ -] z).
+// _ N A S
+// D U
+// D B
+//
+// Note: multiplication commutativity handled by rule generator.
+(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
+(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
+(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
+(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
new file mode 100644
index 0000000..93f20f8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go
@@ -0,0 +1,492 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+)
+
+// Notes:
+// - Boolean types occupy the entire register. 0=false, 1=true.
+
+// Suffixes encode the bit width of various instructions:
+//
+// D (double word) = 64 bit int
+// W (word) = 32 bit int
+// H (half word) = 16 bit int
+// B (byte) = 8 bit int
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+// L = 64 bit int, used when the opcode starts with F
+
+const (
+ riscv64REG_G = 27
+ riscv64REG_CTXT = 26
+ riscv64REG_LR = 1
+ riscv64REG_SP = 2
+ riscv64REG_GP = 3
+ riscv64REG_TP = 4
+ riscv64REG_TMP = 31
+ riscv64REG_ZERO = 0
+)
+
+func riscv64RegName(r int) string {
+ switch {
+ case r == riscv64REG_G:
+ return "g"
+ case r == riscv64REG_SP:
+ return "SP"
+ case 0 <= r && r <= 31:
+ return fmt.Sprintf("X%d", r)
+ case 32 <= r && r <= 63:
+ return fmt.Sprintf("F%d", r-32)
+ default:
+ panic(fmt.Sprintf("unknown register %d", r))
+ }
+}
+
+func init() {
+ var regNamesRISCV64 []string
+ var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
+ regNamed := make(map[string]regMask)
+
+ // Build the list of register names, creating an appropriately indexed
+ // regMask for the gp and fp registers as we go.
+ //
+ // If name is specified, use it rather than the riscv reg number.
+ addreg := func(r int, name string) regMask {
+ mask := regMask(1) << uint(len(regNamesRISCV64))
+ if name == "" {
+ name = riscv64RegName(r)
+ }
+ regNamesRISCV64 = append(regNamesRISCV64, name)
+ regNamed[name] = mask
+ return mask
+ }
+
+ // General purpose registers.
+ for r := 0; r <= 31; r++ {
+ if r == riscv64REG_LR {
+ // LR is not used by regalloc, so we skip it to leave
+ // room for pseudo-register SB.
+ continue
+ }
+
+ mask := addreg(r, "")
+
+ // Add general purpose registers to gpMask.
+ switch r {
+ // ZERO, GP, TP and TMP are not in any gp mask.
+ case riscv64REG_ZERO, riscv64REG_GP, riscv64REG_TP, riscv64REG_TMP:
+ case riscv64REG_G:
+ gpgMask |= mask
+ gpspsbgMask |= mask
+ case riscv64REG_SP:
+ gpspMask |= mask
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+ default:
+ gpMask |= mask
+ gpgMask |= mask
+ gpspMask |= mask
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+ }
+ }
+
+ // Floating pointer registers.
+ for r := 32; r <= 63; r++ {
+ mask := addreg(r, "")
+ fpMask |= mask
+ }
+
+ // Pseudo-register: SB
+ mask := addreg(-1, "SB")
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+
+ if len(regNamesRISCV64) > 64 {
+ // regMask is only 64 bits.
+ panic("Too many RISCV64 registers")
+ }
+
+ regCtxt := regNamed["X26"]
+ callerSave := gpMask | fpMask | regNamed["g"]
+
+ var (
+ gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
+ gpstore0 = regInfo{inputs: []regMask{gpspsbMask}}
+ gp01 = regInfo{outputs: []regMask{gpMask}}
+ gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+ gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+ gp22 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask, gpMask}}
+ gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+ gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}}
+ gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}}
+ gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}}
+
+ fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
+ fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
+ fp31 = regInfo{inputs: []regMask{fpMask, fpMask, fpMask}, outputs: []regMask{fpMask}}
+ gpfp = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}}
+ fpgp = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{gpMask}}
+ fpstore = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}}
+ fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}}
+ fp2gp = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}}
+
+ call = regInfo{clobbers: callerSave}
+ callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave}
+ callInter = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave}
+ )
+
+ RISCV64ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint
+ {name: "ADDIW", argLength: 1, reg: gp11, asm: "ADDIW", aux: "Int64"}, // 32 low bits of arg0 + auxint, sign extended to 64 bits
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW"}, // -arg0 of 32 bits, sign extended to 64 bits
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW"}, // 32 low bits of arg 0 - 32 low bits of arg 1, sign extended to 64 bits
+
+ // M extension. H means high (i.e., it returns the top bits of
+ // the result). U means unsigned. W means word (i.e., 32-bit).
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"},
+ {name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"},
+ {name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"},
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (hi, lo)
+ {name: "LoweredMuluover", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (64 bits of arg0*arg1, overflow)
+
+ {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1
+ {name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"},
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"},
+ {name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"},
+ {name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1
+ {name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"},
+ {name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"},
+ {name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"},
+
+ {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+
+ {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ // Loads: load <size> bits from arg0+auxint+aux and extend to 64 bits; arg1=mem
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, sign extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, sign extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // 64 bits
+ {name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, zero extend
+ {name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend
+ {name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend
+
+ // Stores: store <size> lowest bits in arg1 to arg0+auxint+aux; arg2=mem
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Stores: store <size> of zero in arg0+auxint+aux; arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"}, // move from arg0
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ // Shift ops
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
+ {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >> (aux1 & 31), signed
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned
+ {name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"}, // arg0 >> (aux1 & 31), unsigned
+ {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
+ {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
+ {name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-31
+ {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63
+ {name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-31
+
+ // Bitwise ops
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"}, // arg0 ^ auxint
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"}, // arg0 | auxint
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"}, // arg0 & auxint
+ {name: "NOT", argLength: 1, reg: gp11, asm: "NOT"}, // ^arg0
+
+ // Generate boolean values
+ {name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"}, // arg0 == 0, result is 0 or 1
+ {name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"}, // arg0 != 0, result is 0 or 1
+ {name: "SLT", argLength: 2, reg: gp21, asm: "SLT"}, // arg0 < arg1, result is 0 or 1
+ {name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"}, // arg0 < auxint, result is 0 or 1
+ {name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1
+ {name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1
+
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true},
+
+ // Calls
+ {name: "CALLstatic", argLength: -1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: call, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero (in X25, changed as side effect)
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X25"]},
+ clobbers: regNamed["X1"] | regNamed["X25"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in X25, changed as side effect)
+ // arg1 = address of src memory (in X24, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X25"], regNamed["X24"]},
+ clobbers: regNamed["X1"] | regNamed["X24"] | regNamed["X25"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Generic moves and zeros
+
+ // general unaligned zeroing
+ // arg0 = address of memory to zero (in X5, changed as side effect)
+ // arg1 = address of the last element to zero (inclusive)
+ // arg2 = mem
+ // auxint = element size
+ // returns mem
+ // mov ZERO, (X5)
+ // ADD $sz, X5
+ // BGEU Rarg1, X5, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], gpMask},
+ clobbers: regNamed["X5"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // general unaligned move
+ // arg0 = address of dst memory (in X5, changed as side effect)
+ // arg1 = address of src memory (in X6, changed as side effect)
+ // arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
+ // arg3 = mem
+ // auxint = alignment
+ // clobbers X7 as a tmp register.
+ // returns mem
+ // mov (X6), X7
+ // mov X7, (X5)
+ // ADD $sz, X5
+ // ADD $sz, X6
+ // BGEU Rarg2, X5, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
+ clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // Atomic stores.
+ // store arg1 to *arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+
+ // Atomic exchange.
+ // store arg1 to *arg0. arg2=mem. returns <old content of *arg0, memory>.
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // Atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // Atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // MOV $0, Rout
+ // LR (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 3(PC)
+ // SC Rarg2, (Rarg0), Rtmp
+ // BNE Rtmp, ZERO, -3(PC)
+ // MOV $1, Rout
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // Atomic 32 bit AND/OR.
+ // *arg0 &= (|=) arg1. arg2=mem. returns nil.
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpatomic, asm: "AMOANDW", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpatomic, asm: "AMOORW", faultOnNilArg0: true, hasSideEffects: true},
+
+ // Lowering pass-throughs
+ {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers RA (LR) because it's a call
+ // and T6 (REG_TMP).
+ // Returns a pointer to a write barrier buffer in X24.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"], outputs: []regMask{regNamed["X24"]}}, clobberFlags: true, aux: "Int64"},
+
+ // Do data barrier. arg0=memorys
+ {name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // F extension.
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", commutative: true, typ: "Float32"}, // (arg0 * arg1) + arg2
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", commutative: true, typ: "Float32"}, // (arg0 * arg1) - arg2
+ {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) + arg2
+ {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
+ {name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
+ {name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"}, // int64(arg0)
+ {name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux
+ {name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float32 to arg0+auxint+aux
+ {name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true}, // arg0 == arg1
+ {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1
+ {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1
+ {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1
+
+ // D extension.
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"}, // arg0 - arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"}, // arg0 * arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"}, // arg0 / arg1
+ {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD", commutative: true, typ: "Float64"}, // (arg0 * arg1) + arg2
+ {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD", commutative: true, typ: "Float64"}, // (arg0 * arg1) - arg2
+ {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) + arg2
+ {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) - arg2
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0)
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0)
+ {name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
+ {name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: "Int64"}, // int64(arg0)
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"}, // float32(arg0)
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float6 to arg0+auxint+aux
+ {name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true}, // arg0 == arg1
+ {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1
+ {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1
+ {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1
+ }
+
+ RISCV64blocks := []blockData{
+ {name: "BEQ", controls: 2},
+ {name: "BNE", controls: 2},
+ {name: "BLT", controls: 2},
+ {name: "BGE", controls: 2},
+ {name: "BLTU", controls: 2},
+ {name: "BGEU", controls: 2},
+
+ {name: "BEQZ", controls: 1},
+ {name: "BNEZ", controls: 1},
+ {name: "BLEZ", controls: 1},
+ {name: "BGEZ", controls: 1},
+ {name: "BLTZ", controls: 1},
+ {name: "BGTZ", controls: 1},
+ }
+
+ archs = append(archs, arch{
+ name: "RISCV64",
+ pkg: "cmd/internal/obj/riscv",
+ genfile: "../../riscv64/ssa.go",
+ ops: RISCV64ops,
+ blocks: RISCV64blocks,
+ regnames: regNamesRISCV64,
+ gpregmask: gpMask,
+ fpregmask: fpMask,
+ framepointerreg: -1, // not used
+ // Integer parameters passed in register X10-X17, X8-X9, X18-X23
+ ParamIntRegNames: "X10 X11 X12 X13 X14 X15 X16 X17 X8 X9 X18 X19 X20 X21 X22 X23",
+ // Float parameters passed in register F10-F17, F8-F9, F18-F23
+ ParamFloatRegNames: "F10 F11 F12 F13 F14 F15 F16 F17 F8 F9 F18 F19 F20 F21 F22 F23",
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules
new file mode 100644
index 0000000..cd55331
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fold constant shift with extension.
+(SRAI [c] (MOVBreg x)) && c < 8 => (SRAI [56+c] (SLLI <typ.Int64> [56] x))
+(SRAI [c] (MOVHreg x)) && c < 16 => (SRAI [48+c] (SLLI <typ.Int64> [48] x))
+(SRAI [c] (MOVWreg x)) && c < 32 => (SRAI [32+c] (SLLI <typ.Int64> [32] x))
+(SRLI [c] (MOVBUreg x)) && c < 8 => (SRLI [56+c] (SLLI <typ.UInt64> [56] x))
+(SRLI [c] (MOVHUreg x)) && c < 16 => (SRLI [48+c] (SLLI <typ.UInt64> [48] x))
+(SRLI [c] (MOVWUreg x)) && c < 32 => (SRLI [32+c] (SLLI <typ.UInt64> [32] x))
+(SLLI [c] (MOVBUreg x)) && c <= 56 => (SRLI [56-c] (SLLI <typ.UInt64> [56] x))
+(SLLI [c] (MOVHUreg x)) && c <= 48 => (SRLI [48-c] (SLLI <typ.UInt64> [48] x))
+(SLLI [c] (MOVWUreg x)) && c <= 32 => (SRLI [32-c] (SLLI <typ.UInt64> [32] x))
+
+// Shift by zero.
+(SRAI [0] x) => x
+(SRLI [0] x) => x
+(SLLI [0] x) => x
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
new file mode 100644
index 0000000..2a6d7e7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -0,0 +1,1368 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|Ptr) ...) => (ADD ...)
+(Add(32|16|8) ...) => (ADDW ...)
+(Add32F x y) => (Select0 (FADDS x y))
+(Add64F x y) => (Select0 (FADD x y))
+
+(Sub(64|Ptr) ...) => (SUB ...)
+(Sub(32|16|8) ...) => (SUBW ...)
+(Sub32F x y) => (Select0 (FSUBS x y))
+(Sub64F x y) => (Select0 (FSUB x y))
+
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMUL ...)
+(Mul64uhilo ...) => (MLGR ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIV ...)
+
+(Div64 x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+// DIVW/DIVWU has a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Div32 x y) => (DIVW (MOVWreg x) y)
+(Div32u x y) => (DIVWU (MOVWZreg x) y)
+(Div16 x y) => (DIVW (MOVHreg x) (MOVHreg y))
+(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y))
+(Div8 x y) => (DIVW (MOVBreg x) (MOVBreg y))
+(Div8u x y) => (DIVWU (MOVBZreg x) (MOVBZreg y))
+
+(Hmul(64|64u) ...) => (MULH(D|DU) ...)
+(Hmul32 x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+
+(Mod64 x y) => (MODD x y)
+(Mod64u ...) => (MODDU ...)
+// MODW/MODWU has a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Mod32 x y) => (MODW (MOVWreg x) y)
+(Mod32u x y) => (MODWU (MOVWZreg x) y)
+(Mod16 x y) => (MODW (MOVHreg x) (MOVHreg y))
+(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y))
+(Mod8 x y) => (MODW (MOVBreg x) (MOVBreg y))
+(Mod8u x y) => (MODWU (MOVBZreg x) (MOVBZreg y))
+
+// (x + y) / 2 with x>=y -> (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+
+(And64 ...) => (AND ...)
+(And(32|16|8) ...) => (ANDW ...)
+
+(Or64 ...) => (OR ...)
+(Or(32|16|8) ...) => (ORW ...)
+
+(Xor64 ...) => (XOR ...)
+(Xor(32|16|8) ...) => (XORW ...)
+
+(Neg64 ...) => (NEG ...)
+(Neg(32|16|8) ...) => (NEGW ...)
+(Neg32F ...) => (FNEGS ...)
+(Neg64F ...) => (FNEG ...)
+
+(Com64 ...) => (NOT ...)
+(Com(32|16|8) ...) => (NOTW ...)
+(NOT x) => (XOR (MOVDconst [-1]) x)
+(NOTW x) => (XORWconst [-1] x)
+
+// Lowering boolean ops
+(AndB ...) => (ANDW ...)
+(OrB ...) => (ORW ...)
+(Not x) => (XORWconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+// TODO: optimize these cases?
+(Ctz64NonZero ...) => (Ctz64 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)
+
+// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
+(Ctz64 <t> x) => (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
+(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))
+
+// POPCNT treats the input register as a vector of 8 bytes, producing
+// a population count for each individual byte. For inputs larger than
+// a single byte we therefore need to sum the individual bytes produced
+// by the POPCNT instruction. For example, the following instruction
+// sequence could be used to calculate the population count of a 4-byte
+// value:
+//
+// MOVD $0x12345678, R1 // R1=0x12345678 <-- input
+// POPCNT R1, R2 // R2=0x02030404
+// SRW $16, R2, R3 // R3=0x00000203
+// ADDW R2, R3, R4 // R4=0x02030607
+// SRW $8, R4, R5 // R5=0x00020306
+// ADDW R4, R5, R6 // R6=0x0205090d
+// MOVBZ R6, R7 // R7=0x0000000d <-- result is 13
+//
+(PopCount8 x) => (POPCNT (MOVBZreg x))
+(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
+(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
+(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
+
+// SumBytes{2,4,8} pseudo operations sum the values of the rightmost
+// 2, 4 or 8 bytes respectively. The result is a single byte however
+// other bytes might contain junk so a zero extension is required if
+// the desired output type is larger than 1 byte.
+(SumBytes2 x) => (ADDW (SRWconst <typ.UInt8> x [8]) x)
+(SumBytes4 x) => (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
+(SumBytes8 x) => (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
+
+(Bswap64 ...) => (MOVDBR ...)
+(Bswap32 ...) => (MOVWBR ...)
+
+// add with carry
+(Select0 (Add64carry x y c))
+ => (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
+(Select1 (Add64carry x y c))
+ => (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
+
+// subtract with borrow
+(Select0 (Sub64borrow x y c))
+ => (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
+(Select1 (Sub64borrow x y c))
+ => (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
+
+// math package intrinsics
+(Sqrt ...) => (FSQRT ...)
+(Floor x) => (FIDBR [7] x)
+(Ceil x) => (FIDBR [6] x)
+(Trunc x) => (FIDBR [5] x)
+(RoundToEven x) => (FIDBR [4] x)
+(Round x) => (FIDBR [1] x)
+(FMA x y z) => (FMADD z x y)
+
+(Sqrt32 ...) => (FSQRTS ...)
+
+// Atomic loads and stores.
+// The SYNC instruction (fast-BCR-serialization) prevents store-load
+// reordering. Other sequences of memory operations (load-load,
+// store-store and load-store) are already guaranteed not to be reordered.
+(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem)
+(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem))
+
+// Store-release doesn't require store-load ordering.
+(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem)
+
+// Atomic adds.
+(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem))
+(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem))
+(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDW val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 val tuple)) => (ADD val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)
+
+// Atomic exchanges.
+(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem)
+(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem)
+
+// Atomic compare and swap.
+(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem)
+(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem)
+
+// Atomic and: *(*uint8)(ptr) &= val
+//
+// Round pointer down to nearest word boundary and pad value with ones before
+// applying atomic AND operation to target word.
+//
+// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3))
+//
+(AtomicAnd8 ptr val mem)
+ => (LANfloor
+ ptr
+ (RLL <typ.UInt32>
+ (ORWconst <typ.UInt32> val [-1<<8])
+ (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
+ mem)
+
+// Atomic or: *(*uint8)(ptr) |= val
+//
+// Round pointer down to nearest word boundary and pad value with zeros before
+// applying atomic OR operation to target word.
+//
+// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3))
+//
+(AtomicOr8 ptr val mem)
+ => (LAOfloor
+ ptr
+ (SLW <typ.UInt32>
+ (MOVBZreg <typ.UInt32> val)
+ (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
+ mem)
+
+(AtomicAnd32 ...) => (LAN ...)
+(AtomicOr32 ...) => (LAO ...)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
+
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc(16|32|64)to8 ...) => (Copy ...)
+(Trunc(32|64)to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Lowering float <-> int
+(Cvt32to32F ...) => (CEFBRA ...)
+(Cvt32to64F ...) => (CDFBRA ...)
+(Cvt64to32F ...) => (CEGBRA ...)
+(Cvt64to64F ...) => (CDGBRA ...)
+
+(Cvt32Fto32 ...) => (CFEBRA ...)
+(Cvt32Fto64 ...) => (CGEBRA ...)
+(Cvt64Fto32 ...) => (CFDBRA ...)
+(Cvt64Fto64 ...) => (CGDBRA ...)
+
+// Lowering float <-> uint
+(Cvt32Uto32F ...) => (CELFBR ...)
+(Cvt32Uto64F ...) => (CDLFBR ...)
+(Cvt64Uto32F ...) => (CELGBR ...)
+(Cvt64Uto64F ...) => (CDLGBR ...)
+
+(Cvt32Fto32U ...) => (CLFEBR ...)
+(Cvt32Fto64U ...) => (CLGEBR ...)
+(Cvt64Fto32U ...) => (CLFDBR ...)
+(Cvt64Fto64U ...) => (CLGDBR ...)
+
+// Lowering float32 <-> float64
+(Cvt32Fto64F ...) => (LDEBR ...)
+(Cvt64Fto32F ...) => (LEDBR ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+// Lowering shifts
+
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
+
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = shift >= 64 ? 0 : arg << shift
+(Lsh(64|32|16|8)x64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+(Lsh(64|32|16|8)x32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Lsh(64|32|16|8)x16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Lsh(64|32|16|8)x8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+(Rsh(64|32)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+(Rsh(64|32)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Rsh(64|32)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Rsh(64|32)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+(Rsh(16|8)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+(Rsh(16|8)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Rsh(16|8)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Rsh(16|8)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to 63 (all ones) if the shift value is more than 63.
+// result = arg >> (shift >= 64 ? 63 : shift)
+(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+(Rsh(64|32)x8 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+
+(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+(Rsh(16|8)x8 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+
+// Lowering rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 ...) => (RLL ...)
+(RotateLeft64 ...) => (RLLG ...)
+
+// Lowering comparisons
+(Less64 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Less32 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Less(16|8) x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
+(Less64U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Less32U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
+(Less64F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Less32F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Leq64 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Leq32 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Leq(16|8) x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
+(Leq64U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Leq32U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
+(Leq64F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Leq32F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Eq32 x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
+(Eq64F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Eq32F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Neq(64|Ptr) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Neq32 x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
+(Neq64F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Neq32F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && t.IsSigned() => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && t.IsSigned() => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && t.IsSigned() => (MOVBload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// Lowering stores
+(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+
+// Load and store for small copies.
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+(Move [16] dst src mem) =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] dst src mem) =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVHstore dst (MOVHZload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem)))
+
+// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
+(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s), 0)] dst src mem)
+(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
+
+// Move more than 1024 bytes using a loop.
+(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
+ (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
+(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem)
+(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
+(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVHstoreconst [0] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVHstoreconst [makeValAndOff(0,4)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff(0,3)] destptr
+ (MOVWstoreconst [0] destptr mem))
+
+(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
+ (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)
+
+// Zero more than 1024 bytes using a loop.
+(Zero [s] destptr mem) && s > 1024 =>
+ (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
+
+// Lowering constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [t]) => (MOVDconst [b2i(t)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Miscellaneous
+(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
+(ITab (Load ptr mem)) => (MOVDload ptr mem)
+
+// block rewrites
+(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Note: when removing unnecessary sign/zero extensions.
+//
+// After a value is spilled it is restored using a sign- or zero-extension
+// to register-width as appropriate for its type. For example, a uint8 will
+// be restored using a MOVBZ (llgc) instruction which will zero extend the
+// 8-bit value to 64-bits.
+//
+// This is a hazard when folding sign- and zero-extensions since we need to
+// ensure not only that the value in the argument register is correctly
+// extended but also that it will still be correctly extended if it is
+// spilled and restored.
+//
+// In general this means we need type checks when the RHS of a rule is an
+// OpCopy (i.e. "(... x:(...) ...) -> x").
+
+// Merge double extensions.
+(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+
+// Bypass redundant sign extensions.
+(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
+
+// Bypass redundant zero extensions.
+(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
+
+// Remove zero extensions after zero extending load.
+// Note: take care that if x is spilled it is restored correctly.
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
+(MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x
+
+// Remove sign extensions after sign extending load.
+// Note: take care that if x is spilled it is restored correctly.
+(MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+(MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+(MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+
+// Remove sign extensions after zero extending load.
+// These type checks are probably unnecessary but do them anyway just in case.
+(MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
+(MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
+
+// Fold sign and zero extensions into loads.
+//
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+//
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOV(B|H|W)Zreg <t> x:(MOV(B|H|W)load [o] {s} p mem))
+ && x.Uses == 1
+ && clobber(x)
+ => @x.Block (MOV(B|H|W)Zload <t> [o] {s} p mem)
+(MOV(B|H|W)reg <t> x:(MOV(B|H|W)Zload [o] {s} p mem))
+ && x.Uses == 1
+ && clobber(x)
+ => @x.Block (MOV(B|H|W)load <t> [o] {s} p mem)
+
+// Remove zero extensions after argument load.
+(MOVBZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() == 1 => x
+(MOVHZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 2 => x
+(MOVWZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 4 => x
+
+// Remove sign extensions after argument load.
+(MOVBreg x:(Arg <t>)) && t.IsSigned() && t.Size() == 1 => x
+(MOVHreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 2 => x
+(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x
+
+// Fold zero extensions into constants.
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Fold sign extensions into constants.
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+
+// Remove zero extension of conditional move.
+// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering.
+(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
+ && int64(uint8(c)) == c
+ && int64(uint8(d)) == d
+ && (!x.Type.IsSigned() || x.Type.Size() > 1)
+ => x
+
+// Fold boolean tests into blocks.
+// Note: this must match If statement lowering.
+(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
+ && int32(x) != 0
+ => (BRC {d} cmp yes no)
+
+// Canonicalize BRC condition code mask by removing impossible conditions.
+// Integer comparisons cannot generate the unordered condition.
+(BRC {c} x:((CMP|CMPW|CMPU|CMPWU) _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
+(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
+
+// Compare-and-branch.
+// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
+(BRC {c} (CMP x y) yes no) => (CGRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPW x y) yes no) => (CRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPU x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPWU x y) yes no) => (CLRJ {c&^s390x.Unordered} x y yes no)
+
+// Compare-and-branch (immediate).
+// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
+(BRC {c} (CMPconst x [y]) yes no) && y == int32( int8(y)) => (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+(BRC {c} (CMPWconst x [y]) yes no) && y == int32( int8(y)) => (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+(BRC {c} (CMPUconst x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+
+// Absorb immediate into compare-and-branch.
+(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) => (C(I|GI)J {c} x [ int8(y)] yes no)
+(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no)
+(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) => (C(I|GI)J {c.ReverseComparison()} y [ int8(x)] yes no)
+(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no)
+
+// Prefer comparison with immediate to compare-and-branch.
+(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPconst x [int32(y)]) yes no)
+(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPWconst x [int32(y)]) yes no)
+(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst x [int32(y)]) yes no)
+(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no)
+(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
+(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
+(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
+(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
+
+// Absorb sign/zero extensions into 32-bit compare-and-branch.
+(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ {c} x [y] yes no)
+(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no)
+
+// Bring out-of-range signed immediates into range by varying branch condition.
+(BRC {s390x.Less} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
+(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.LessOrEqual} x [ 127] yes no)
+(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) => (CGIJ {s390x.Less} x [-128] yes no)
+(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) => (CIJ {s390x.Less} x [-128] yes no)
+(BRC {s390x.Greater} (CMPconst x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
+(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) => (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
+(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.Greater} x [ 127] yes no)
+(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.Greater} x [ 127] yes no)
+
+// Bring out-of-range unsigned immediates into range by varying branch condition.
+(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no)
+(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.Greater} x [255] yes no)
+
+// Bring out-of-range immediates into range by switching signedness (only == and !=).
+(BRC {c} (CMPconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no)
+(BRC {c} (CMPWconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ {c} x [uint8(y)] yes no)
+(BRC {c} (CMPUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ {c} x [ int8(y)] yes no)
+(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)
+
+// Fold constants into instructions.
+(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [int32(c)] x)
+(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)
+
+(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
+(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst <v.Type> x [int32(c)]))
+(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)])
+(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst <v.Type> x [int32(c)]))
+
+(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x)
+(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x)
+
+// NILF instructions leave the high 32 bits unchanged which is
+// equivalent to the leftmost 32 bits being set.
+// TODO(mundaym): modify the assembler to accept 64-bit values
+// and use isU32Bit(^c).
+(AND x (MOVDconst [c]))
+ && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
+ => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
+(AND x (MOVDconst [c]))
+ && is32Bit(c)
+ && c < 0
+ => (ANDconst [c] x)
+(AND x (MOVDconst [c]))
+ && is32Bit(c)
+ && c >= 0
+ => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
+
+(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)
+
+((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)
+
+((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x)
+((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)
+
+// Constant shifts.
+(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
+(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
+(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
+(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])
+
+// Shifts only use the rightmost 6 bits of the shift value.
+(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
+ && r.Amount == 0
+ && r.OutMask()&63 == 63
+ => (S(LD|RD|RAD|LW|RW|RAW) x y)
+(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
+ => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
+ => (S(LD|RD|RAD|LW|RW|RAW) x y)
+(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y)
+(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y)
+(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
+(SLW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW x y)
+(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y)
+(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)
+
+// Match rotate by constant.
+(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
+(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])
+
+// Signed 64-bit comparison with immediate.
+(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
+(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))
+
+// Unsigned 64-bit comparison with immediate.
+(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)])
+(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)]))
+
+// Signed and unsigned 32-bit comparison with immediate.
+(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
+(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))
+
+// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
+(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
+
+// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
+(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
+
+// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
+(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
+(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
+(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
+(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
+(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
+(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})
+
+// Absorb shift into 'rotate then insert selected bits [into zero]'.
+//
+// Any unsigned shift can be represented as a rotate and mask operation:
+//
+// x << c => RotateLeft64(x, c) & (^uint64(0) << c)
+// x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
+//
+// Therefore when a shift is used as the input to a rotate then insert
+// selected bits instruction we can merge the two together. We just have
+// to be careful that the resultant mask is representable (non-zero and
+// contiguous). For example, assuming that x is variable and c, y and m
+// are constants, a shift followed by a rotate then insert selected bits
+// could be represented as:
+//
+// RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
+//
+// We can split the rotation by y into two, one rotate for x and one for
+// the mask:
+//
+// RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
+//
+// The rotations of x by c followed by y can then be combined:
+//
+// RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
+// ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// rotate mask
+//
+// To perform this optimization we therefore just need to check that it
+// is valid to merge the shift mask (^(uint64(0)<<c)) into the selected
+// bits mask (i.e. that the resultant mask is non-zero and contiguous).
+//
+(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
+(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into left shift.
+(SLDconst (RISBGZ x {r}) [c])
+ && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
+ => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into right shift.
+(SRDconst (RISBGZ x {r}) [c])
+ && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
+ => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+
+// Merge 'rotate then insert selected bits [into zero]' instructions together.
+(RISBGZ (RISBGZ x {y}) {z})
+ && z.InMerge(y.OutMask()) != nil
+ => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
+
+// Convert RISBGZ into 64-bit shift (helps CSE).
+(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
+(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])
+
+// Optimize single bit isolation when it is known to be equivalent to
+// the most significant bit due to mask produced by arithmetic shift.
+// Simply isolate the most significant bit itself and place it in the
+// correct position.
+//
+// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
+(RISBGZ (SRADconst x [c]) {r})
+ && r.Start == r.End // single bit selected
+ && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
+ => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+
+// Use sign/zero extend instead of RISBGZ.
+(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
+(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
+(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)
+
+// Use sign/zero extend instead of ANDW.
+(ANDWconst [0x00ff] x) => (MOVBZreg x)
+(ANDWconst [0xffff] x) => (MOVHZreg x)
+
+// Strength reduce multiplication to the sum (or difference) of two powers of two.
+//
+// Examples:
+// 5x -> 4x + 1x
+// 10x -> 8x + 2x
+// 120x -> 128x - 8x
+// -120x -> 8x - 128x
+//
+// We know that the rightmost bit of any positive value, once isolated, must either
+// be a power of 2 (because it is a single bit) or 0 (if the original value is 0).
+// In all of these rules we use a rightmost bit calculation to determine one operand
+// for the addition or subtraction. We then just need to calculate if the other
+// operand is a valid power of 2 before we can match the rule.
+//
+// Notes:
+// - the generic rules have already matched single powers of two so we ignore them here
+// - isPowerOfTwo32 asserts that its argument is greater than 0
+// - c&(c-1) = clear rightmost bit
+// - c&^(c-1) = isolate rightmost bit
+
+// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c&(c-1))
+ => ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
+ (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
+
+// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c+(c&^(c-1)))
+ => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
+ (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
+
+// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1)))
+ => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
+ (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+
+// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
+(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
+(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
+(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx)
+
+// fold ADDconst into MOVDaddrx
+(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+
+// reverse ordering of compare instruction
+(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp)
+
+// replace load from same location as preceding store with copy
+(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x)
+(MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x)
+(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x)
+(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x)
+(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x)
+(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x)
+(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x)
+(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x)
+(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+
+// prefer FPR <-> GPR moves over combined load ops
+(MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR <t> y))
+(ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD x (LGDR <t> y))
+(SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB x (LGDR <t> y))
+(ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR x (LGDR <t> y))
+(ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND x (LGDR <t> y))
+(XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR x (LGDR <t> y))
+
+// detect attempts to set/clear the sign bit
+// may need to be reworked when NIHH/OIHH are added
+(RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
+(LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
+(OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
+(LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
+
+// detect attempts to set the sign bit with load
+(LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
+
+// detect copysign
+(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
+ && r == s390x.NewRotateParams(0, 0, 0)
+ => (LGDR (CPSDR <t> y x))
+(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
+ && c >= 0
+ && r == s390x.NewRotateParams(0, 0, 0)
+ => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
+(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
+(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y)
+
+// absorb negations into set/clear sign bit
+(FNEG (LPDFR x)) => (LNDFR x)
+(FNEG (LNDFR x)) => (LPDFR x)
+(FNEGS (LPDFR x)) => (LNDFR x)
+(FNEGS (LNDFR x)) => (LPDFR x)
+
+// no need to convert float32 to float64 to set/clear sign bit
+(LEDBR (LPDFR (LDEBR x))) => (LPDFR x)
+(LEDBR (LNDFR (LDEBR x))) => (LNDFR x)
+
+// remove unnecessary FPR <-> GPR moves
+(LDGR (LGDR x)) => x
+(LGDR (LDGR x)) => x
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// Fold constants into memory operations.
+// Note that this is not always a good idea because if not all the uses of
+// the ADDconst get eliminated, we still have to compute the ADDconst and we now
+// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem)
+
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem)
+
+(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem)
+(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem)
+(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem)
+(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem)
+(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem)
+(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem)
+
+(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem)
+(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem)
+(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem)
+(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem)
+(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem)
+(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem)
+
+// Fold constants into stores.
+(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
+ (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
+ (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+
+// Merge address calculations into loads and stores.
+// Offsets from SB must not be merged into unaligned memory accesses because
+// loads/stores using PC-relative addressing directly must be aligned to the
+// size of the target.
+(MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+
+(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+
+// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
+(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+// MOVDaddr into MOVDaddridx
+(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// Absorb InvertFlags into branches.
+(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)
+
+// Constant comparisons.
+(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<int64(y) => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
+
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
+
+(CMP(W|WU)const (MOVBZreg _) [c]) && 0xff < c => (FlagLT)
+(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT)
+
+(CMPconst (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
+(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
+
+(CMPUconst (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n) => (FlagLT)
+(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n) => (FlagLT)
+
+(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT)
+(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)
+
+(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
+(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)
+
+// Constant compare-and-branch with immediate.
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int64(x) > int64(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int32(x) == int32(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int32(x) < int32(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int32(x) > int32(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint64(x) == uint64(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint64(x) < uint64(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) > uint64(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint32(x) == uint32(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint32(x) < uint32(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) > uint32(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int64(x) == int64(y) => (First no yes)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int64(x) < int64(y) => (First no yes)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int64(x) > int64(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int32(x) == int32(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int32(x) < int32(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int32(x) > int32(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint64(x) == uint64(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint64(x) < uint64(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) > uint64(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint32(x) == uint32(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint32(x) < uint32(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) > uint32(y) => (First no yes)
+
+// Constant compare-and-branch with immediate when unsigned comparison with zero.
+(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no)
+(C(L|LG)IJ {s390x.Less} _ [0] yes no) => (First no yes)
+
+// Constant compare-and-branch when operands match.
+(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no)
+(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes)
+
+// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
+// to unsigned comparisons.
+// Helps simplify constant comparison detection.
+(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c])
+(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
+(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
+(CMPconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c])
+(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 => (CMPWUconst x [c])
+(CMPconst x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst x [n])
+(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n])
+
+// Absorb sign and zero extensions into 32-bit comparisons.
+(CMP(W|W|WU|WU) x (MOV(W|WZ|W|WZ)reg y)) => (CMP(W|W|WU|WU) x y)
+(CMP(W|W|WU|WU) (MOV(W|WZ|W|WZ)reg x) y) => (CMP(W|W|WU|WU) x y)
+(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c])
+
+// Absorb flag constants into branches.
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal != 0 => (First yes no)
+(BRC {c} (FlagLT) yes no) && c&s390x.Less != 0 => (First yes no)
+(BRC {c} (FlagGT) yes no) && c&s390x.Greater != 0 => (First yes no)
+(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no)
+
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal == 0 => (First no yes)
+(BRC {c} (FlagLT) yes no) && c&s390x.Less == 0 => (First no yes)
+(BRC {c} (FlagGT) yes no) && c&s390x.Greater == 0 => (First no yes)
+(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes)
+
+// Absorb flag constants into SETxx ops.
+(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal != 0 => x
+(LOCGR {c} _ x (FlagLT)) && c&s390x.Less != 0 => x
+(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater != 0 => x
+(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x
+
+(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal == 0 => x
+(LOCGR {c} x _ (FlagLT)) && c&s390x.Less == 0 => x
+(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater == 0 => x
+(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x
+
+// Remove redundant *const ops
+(ADDconst [0] x) => x
+(ADDWconst [c] x) && int32(c)==0 => x
+(SUBconst [0] x) => x
+(SUBWconst [c] x) && int32(c) == 0 => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDWconst [c] _) && int32(c)==0 => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ANDWconst [c] x) && int32(c)==-1 => x
+(ORconst [0] x) => x
+(ORWconst [c] x) && int32(c)==0 => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORWconst [c] _) && int32(c)==-1 => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORWconst [c] x) && int32(c)==0 => x
+
+// Shifts by zero (may be inserted during multiplication strength reduction).
+((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x
+
+// Convert constant subtracts to constant adds.
+(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x)
+(SUBWconst [c] x) => (ADDWconst [-int32(c)] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
+(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x)
+(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x)
+(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)])
+(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x)
+(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))])
+(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d])
+(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))])
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d])
+(LoweredRound32F x:(FMOVSconst)) => x
+(LoweredRound64F x:(FMOVDconst)) => x
+
+// generic simplifications
+// TODO: more of this
+(ADD x (NEG y)) => (SUB x y)
+(ADDW x (NEGW y)) => (SUBW x y)
+(SUB x x) => (MOVDconst [0])
+(SUBW x x) => (MOVDconst [0])
+(AND x x) => x
+(ANDW x x) => x
+(OR x x) => x
+(ORW x x) => x
+(XOR x x) => (MOVDconst [0])
+(XORW x x) => (MOVDconst [0])
+(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x)
+(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+
+// carry flag generation
+// (only constant fold carry of zero)
+(Select1 (ADDCconst (MOVDconst [c]) [d]))
+ && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
+ => (FlagEQ)
+(Select1 (ADDCconst (MOVDconst [c]) [d]))
+ && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
+ => (FlagLT)
+
+// borrow flag generation
+// (only constant fold borrow of zero)
+(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ && uint64(d) <= uint64(c) && c-d == 0
+ => (FlagGT)
+(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ && uint64(d) <= uint64(c) && c-d != 0
+ => (FlagOV)
+
+// add with carry
+(ADDE x y (FlagEQ)) => (ADDC x y)
+(ADDE x y (FlagLT)) => (ADDC x y)
+(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)])
+(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)])
+
+// subtract with borrow
+(SUBE x y (FlagGT)) => (SUBC x y)
+(SUBE x y (FlagOV)) => (SUBC x y)
+(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d])
+
+// collapse carry chain
+(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
+ => (ADDE x y c)
+
+// collapse borrow chain
+(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
+ => (SUBE x y c)
+
+// branch on carry
+(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry)
+(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry} carry)
+(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
+(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.NoCarry} carry)
+(C(G|LG)IJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
+
+// branch on borrow
+(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow)
+(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow} borrow)
+(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
+(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow)
+(C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
+
+// fused multiply-add
+(Select0 (F(ADD|SUB) (FMUL y z) x)) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
+(Select0 (F(ADDS|SUBS) (FMULS y z) x)) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)
+
+// Convert floating point comparisons against zero into 'load and test' instructions.
+(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x)
+(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR <v.Type> x))
+
+// FSUB, FSUBS, FADD, FADDS now produce a condition code representing the
+// comparison of the result with 0.0. If a compare with zero instruction
+// (e.g. LTDBR) is following one of those instructions, we can use the
+// generated flag and remove the comparison instruction.
+// Note: when inserting Select1 ops we need to ensure they are in the
+// same block as their argument. We could also use @x.Block for this
+// but moving the flag generating value to a different block seems to
+// increase the likelihood that the flags value will have to be regenerated
+// by flagalloc which is not what we want.
+(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x)
+(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x)
+
+// Fold memory operations into operations.
+// Exclude global data (SB) because these instructions cannot handle relative addresses.
+// TODO(mundaym): indexed versions of these?
+((ADD|SUB|MULLD|AND|OR|XOR) <t> x g:(MOVDload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULLD|AND|OR|XOR)load <t> [off] {sym} x ptr mem)
+((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
+((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWZload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
+
+// Combine stores into store multiples.
+// 32-bit
+(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(int64(i)-4)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STM2 [i-4] {s} p w0 w1 mem)
+(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STM3 [i-8] {s} p w0 w1 w2 mem)
+(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-12)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
+// 64-bit
+(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STMG2 [i-8] {s} p w0 w1 mem)
+(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-16)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STMG3 [i-16] {s} p w0 w1 w2 mem)
+(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-24)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-16)
+ && setPos(v, x.Pos)
+ && clobber(x)
+ => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
+
+// Convert 32-bit store multiples into 64-bit stores.
+(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem)
+
+// Fold bit reversal into loads.
+(MOVWBR x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRload [off] {sym} ptr mem)) // need zero extension?
+(MOVWBR x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRloadidx [off] {sym} ptr idx mem)) // need zero extension?
+(MOVDBR x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload [off] {sym} ptr mem)
+(MOVDBR x:(MOVDloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx [off] {sym} ptr idx mem)
+
+// Fold bit reversal into stores.
+(MOV(D|W)store [off] {sym} ptr r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstore [off] {sym} ptr x mem)
+(MOV(D|W)storeidx [off] {sym} ptr idx r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstoreidx [off] {sym} ptr idx x mem)
+
+// Special bswap16 rules
+(Bswap16 x:(MOVHZload [off] {sym} ptr mem)) => @x.Block (MOVHZreg (MOVHBRload [off] {sym} ptr mem))
+(Bswap16 x:(MOVHZloadidx [off] {sym} ptr idx mem)) => @x.Block (MOVHZreg (MOVHBRloadidx [off] {sym} ptr idx mem))
+(MOVHstore [off] {sym} ptr (Bswap16 val) mem) => (MOVHBRstore [off] {sym} ptr val mem)
+(MOVHstoreidx [off] {sym} ptr idx (Bswap16 val) mem) => (MOVHBRstoreidx [off] {sym} ptr idx val mem)
diff --git a/src/cmd/compile/internal/ssa/_gen/S390XOps.go b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
new file mode 100644
index 0000000..c4766c1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/S390XOps.go
@@ -0,0 +1,819 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+// - The SB 'register' is implemented using instruction-relative addressing. This
+// places some limitations on when and how memory operands that are addressed
+// relative to SB can be used:
+//
+// 1. Pseudo-instructions do not always map to a single machine instruction when
+// using the SB 'register' to address data. This is because many machine
+// instructions do not have relative long (RL suffix) equivalents. For example,
+// ADDload, which is assembled as AG.
+//
+// 2. Loads and stores using relative addressing require the data be aligned
+// according to its size (8-bytes for double words, 4-bytes for words
+// and so on).
+//
+// We can always work around these by inserting LARL instructions (load address
+// relative long) in the assembler, but typically this results in worse code
+// generation because the address can't be re-used. Inserting instructions in the
+// assembler also means clobbering the temp register and it is a long-term goal
+// to prevent the compiler doing this so that it can be allocated as a normal
+// register.
+//
+// For more information about the z/Architecture, the instruction set and the
+// addressing modes it supports take a look at the z/Architecture Principles of
+// Operation: http://publibfp.boulder.ibm.com/epubs/pdf/dz9zr010.pdf
+//
+// Suffixes encode the bit width of pseudo-instructions.
+// D (double word) = 64 bit (frequently omitted)
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// B (byte) = 8 bit
+// S (single prec.) = 32 bit (double precision is omitted)
+
+// copied from ../../s390x/reg.go
+var regNamesS390X = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "g", // R13
+ "R14",
+ "SP", // R15
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ //pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesS390X) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesS390X {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ r0 = buildReg("R0")
+ tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions.
+
+ // R10 is reserved by the assembler.
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14")
+ gpg = gp | buildReg("g")
+ gpsp = gp | sp
+
+ // R0 is considered to contain the value 0 in address calculations.
+ ptr = gp &^ r0
+ ptrsp = ptr | sp
+ ptrspsb = ptrsp | sb
+
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r9 = buildReg("R9")
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: []regMask{}, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21tmp = regInfo{inputs: []regMask{gp &^ tmp, gp &^ tmp}, outputs: []regMask{gp &^ tmp}, clobbers: tmp}
+
+ // R0 evaluates to 0 when used as the number of bits to shift
+ // so we need to exclude it from that operand.
+ sh21 = regInfo{inputs: []regMask{gp, ptr}, outputs: gponly}
+
+ addr = regInfo{inputs: []regMask{sp | sb}, outputs: gponly}
+ addridx = regInfo{inputs: []regMask{sp | sb, ptrsp}, outputs: gponly}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11flags = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+
+ gpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{ptrspsb, ptrsp, 0}, outputs: gponly}
+ gpopload = regInfo{inputs: []regMask{gp, ptrsp, 0}, outputs: gponly}
+ gpstore = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{ptrspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, gpsp, 0}}
+ gpstorebr = regInfo{inputs: []regMask{ptrsp, gpsp, 0}}
+ gpstorelaa = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}, outputs: gponly}
+ gpstorelab = regInfo{inputs: []regMask{r1, gpsp, 0}, clobbers: r1}
+
+ gpmvc = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}}
+
+ fp01 = regInfo{inputs: []regMask{}, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
+ fp21clobber = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fp11clobber = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{ptrspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, fp, 0}}
+
+ sync = regInfo{inputs: []regMask{0}}
+
+ // LoweredAtomicCas may overwrite arg1, so force it to R0 for now.
+ cas = regInfo{inputs: []regMask{ptrsp, r0, gpsp, 0}, outputs: []regMask{gp, 0}, clobbers: r0}
+
+ // LoweredAtomicExchange overwrites the output before executing
+ // CS{,G}, so the output register must not be the same as the
+ // input register. For now we just force the output register to
+ // R0.
+ exchange = regInfo{inputs: []regMask{ptrsp, gpsp &^ r0, 0}, outputs: []regMask{r0, 0}}
+ )
+
+ var S390Xops = []opData{
+ // fp ops
+ {name: "FADDS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FADDS", commutative: true, resultInArg0: true}, // fp32 arg0 + arg1
+ {name: "FADD", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FADD", commutative: true, resultInArg0: true}, // fp64 arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FSUBS", resultInArg0: true}, // fp32 arg0 - arg1
+ {name: "FSUB", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FSUB", resultInArg0: true}, // fp64 arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, resultInArg0: true}, // fp32 arg0 * arg1
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true, resultInArg0: true}, // fp64 arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", resultInArg0: true}, // fp32 arg0 / arg1
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV", resultInArg0: true}, // fp64 arg0 / arg1
+ {name: "FNEGS", argLength: 1, reg: fp11clobber, asm: "FNEGS", clobberFlags: true}, // fp32 -arg0
+ {name: "FNEG", argLength: 1, reg: fp11clobber, asm: "FNEG", clobberFlags: true}, // fp64 -arg0
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", resultInArg0: true}, // fp32 arg1 * arg2 + arg0
+ {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD", resultInArg0: true}, // fp64 arg1 * arg2 + arg0
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", resultInArg0: true}, // fp32 arg1 * arg2 - arg0
+ {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB", resultInArg0: true}, // fp64 arg1 * arg2 - arg0
+ {name: "LPDFR", argLength: 1, reg: fp11, asm: "LPDFR"}, // fp64/fp32 set sign bit
+ {name: "LNDFR", argLength: 1, reg: fp11, asm: "LNDFR"}, // fp64/fp32 clear sign bit
+ {name: "CPSDR", argLength: 2, reg: fp21, asm: "CPSDR"}, // fp64/fp32 copy arg1 sign bit to arg0
+
+ // Round to integer, float64 only.
+ //
+ // aux | rounding mode
+ // ----+-----------------------------------
+ // 1 | round to nearest, ties away from 0
+ // 4 | round to nearest, ties to even
+ // 5 | round toward 0
+ // 6 | round toward +∞
+ // 7 | round toward -∞
+ {name: "FIDBR", argLength: 1, reg: fp11, asm: "FIDBR", aux: "Int8"},
+
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "FMOVSconst", reg: fp01, asm: "FMOVS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "FMOVDconst", reg: fp01, asm: "FMOVD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDload", argLength: 3, reg: gpopload, asm: "ADD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem
+ {name: "ADDWload", argLength: 3, reg: gpopload, asm: "ADDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem
+
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBload", argLength: 3, reg: gpopload, asm: "SUB", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem
+ {name: "SUBWload", argLength: 3, reg: gpopload, asm: "SUBW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
+ {name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
+
+ {name: "MULHD", argLength: 2, reg: gp21tmp, asm: "MULHD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "MULHDU", argLength: 2, reg: gp21tmp, asm: "MULHDU", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "DIVD", argLength: 2, reg: gp21tmp, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp21tmp, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVDU", argLength: 2, reg: gp21tmp, asm: "DIVDU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp21tmp, asm: "DIVWU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODD", argLength: 2, reg: gp21tmp, asm: "MODD", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp21tmp, asm: "MODW", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+
+ {name: "MODDU", argLength: 2, reg: gp21tmp, asm: "MODDU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp21tmp, asm: "MODWU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDW", argLength: 2, reg: gp21, asm: "ANDW", commutative: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDWconst", argLength: 1, reg: gp11, asm: "ANDW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDload", argLength: 3, reg: gpopload, asm: "AND", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem
+ {name: "ANDWload", argLength: 3, reg: gpopload, asm: "ANDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem
+
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORW", argLength: 2, reg: gp21, asm: "ORW", commutative: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORWconst", argLength: 1, reg: gp11, asm: "ORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORload", argLength: 3, reg: gpopload, asm: "OR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem
+ {name: "ORWload", argLength: 3, reg: gpopload, asm: "ORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem
+
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORW", argLength: 2, reg: gp21, asm: "XORW", commutative: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORWconst", argLength: 1, reg: gp11, asm: "XORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORload", argLength: 3, reg: gpopload, asm: "XOR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem
+ {name: "XORWload", argLength: 3, reg: gpopload, asm: "XORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem
+
+ // Arithmetic ops with carry/borrow chain.
+ //
+ // A carry is represented by a condition code of 2 or 3 (GT or OV).
+ // A borrow is represented by a condition code of 0 or 1 (EQ or LT).
+ {name: "ADDC", argLength: 2, reg: gp21flags, asm: "ADDC", typ: "(UInt64,Flags)", commutative: true}, // (arg0 + arg1, carry out)
+ {name: "ADDCconst", argLength: 1, reg: gp11flags, asm: "ADDC", typ: "(UInt64,Flags)", aux: "Int16"}, // (arg0 + auxint, carry out)
+ {name: "ADDE", argLength: 3, reg: gp2flags1flags, asm: "ADDE", typ: "(UInt64,Flags)", commutative: true, resultInArg0: true}, // (arg0 + arg1 + arg2 (carry in), carry out)
+ {name: "SUBC", argLength: 2, reg: gp21flags, asm: "SUBC", typ: "(UInt64,Flags)"}, // (arg0 - arg1, borrow out)
+ {name: "SUBE", argLength: 3, reg: gp2flags1flags, asm: "SUBE", typ: "(UInt64,Flags)", resultInArg0: true}, // (arg0 - arg1 - arg2 (borrow in), borrow out)
+
+ // Comparisons.
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+
+ {name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64
+ {name: "LTDBR", argLength: 1, reg: fp1flags, asm: "LTDBR", typ: "Flags"}, // arg0 compare to 0, f64
+ {name: "LTEBR", argLength: 1, reg: fp1flags, asm: "LTEBR", typ: "Flags"}, // arg0 compare to 0, f32
+
+ {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "UInt8"}, // arg0 << auxint, shift amount 0-63
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "UInt8"}, // arg0 << auxint, shift amount 0-31
+
+ {name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "UInt8"}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "UInt8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
+
+ // Arithmetic shifts clobber flags.
+ {name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "UInt8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "UInt8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
+
+ // Rotate instructions.
+ // Note: no RLLGconst - use RISBGZ instead.
+ {name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
+ {name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
+ {name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "UInt8"}, // arg0 rotate left auxint, rotate amount 0-31
+
+ // Rotate then (and|or|xor|insert) selected bits instructions.
+ //
+ // Aux is an s390x.RotateParams struct containing Start, End and rotation
+ // Amount fields.
+ //
+ // arg1 is rotated left by the rotation amount then the bits from the start
+ // bit to the end bit (inclusive) are combined with arg0 using the logical
+ // operation specified. Bit indices are specified from left to right - the
+ // MSB is 0 and the LSB is 63.
+ //
+ // Examples:
+ // | aux |
+ // | instruction | start | end | amount | arg0 | arg1 | result |
+ // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
+ // | RXSBG (XOR) | 0 | 1 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0x3fff_ffff_ffff_ffff |
+ // | RXSBG (XOR) | 62 | 63 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_fffc |
+ // | RXSBG (XOR) | 0 | 47 | 16 | 0xffff_ffff_ffff_ffff | 0x0000_0000_0000_ffff | 0xffff_ffff_0000_ffff |
+ // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
+ //
+ {name: "RXSBG", argLength: 2, reg: gp21, asm: "RXSBG", resultInArg0: true, aux: "S390XRotateParams", clobberFlags: true}, // rotate then xor selected bits
+ {name: "RISBGZ", argLength: 1, reg: gp11, asm: "RISBGZ", aux: "S390XRotateParams", clobberFlags: true}, // rotate then insert selected bits [into zero]
+
+ // unary ops
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0
+ {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW", clobberFlags: true}, // -arg0
+
+ {name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+ {name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32
+
+ // Conditional register-register moves.
+ // The aux for these values is an s390x.CCMask value representing the condition code mask.
+ {name: "LOCGR", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "LOCGR", aux: "S390XCCMask"}, // load arg1 into arg0 if the condition code in arg2 matches a masked bit in aux.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11sp, asm: "MOVB", typ: "Int64"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11sp, asm: "MOVBZ", typ: "UInt64"}, // zero extend arg0 from int8 to int64
+ {name: "MOVHreg", argLength: 1, reg: gp11sp, asm: "MOVH", typ: "Int64"}, // sign extend arg0 from int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64
+ {name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"}, // sign extend arg0 from int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ {name: "LDGR", argLength: 1, reg: gpfp, asm: "LDGR"}, // move int64 to float64 (no conversion)
+ {name: "LGDR", argLength: 1, reg: fpgp, asm: "LGDR"}, // move float64 to int64 (no conversion)
+
+ {name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA", clobberFlags: true}, // convert float64 to int32
+ {name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA", clobberFlags: true}, // convert float64 to int64
+ {name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA", clobberFlags: true}, // convert float32 to int32
+ {name: "CGEBRA", argLength: 1, reg: fpgp, asm: "CGEBRA", clobberFlags: true}, // convert float32 to int64
+ {name: "CEFBRA", argLength: 1, reg: gpfp, asm: "CEFBRA", clobberFlags: true}, // convert int32 to float32
+ {name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA", clobberFlags: true}, // convert int32 to float64
+ {name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA", clobberFlags: true}, // convert int64 to float32
+ {name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA", clobberFlags: true}, // convert int64 to float64
+ {name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR", clobberFlags: true}, // convert float32 to uint32
+ {name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR", clobberFlags: true}, // convert float64 to uint32
+ {name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR", clobberFlags: true}, // convert float32 to uint64
+ {name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR", clobberFlags: true}, // convert float64 to uint64
+ {name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR", clobberFlags: true}, // convert uint32 to float32
+ {name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR", clobberFlags: true}, // convert uint32 to float64
+ {name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR", clobberFlags: true}, // convert uint64 to float32
+ {name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR", clobberFlags: true}, // convert uint64 to float64
+
+ {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
+ {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
+
+ {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
+
+ {name: "MOVWBR", argLength: 1, reg: gp11, asm: "MOVWBR"}, // arg0 swap bytes
+ {name: "MOVDBR", argLength: 1, reg: gp11, asm: "MOVDBR"}, // arg0 swap bytes
+
+ {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+ {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+ {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVHBRstore", argLength: 3, reg: gpstorebr, asm: "MOVHBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVWBRstore", argLength: 3, reg: gpstorebr, asm: "MOVWBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVDBRstore", argLength: 3, reg: gpstorebr, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+
+ {name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, faultOnNilArg1: true, symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off
+
+ // indexed loads/stores
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVB", aux: "SymOff", typ: "Int8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVH", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVW", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVDBR", aux: "SymOff", typ: "Int64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVH", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVD", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVHBR", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVWBR", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVDBR", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+ {name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of ...
+
+ {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"},
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R12 (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}, zeroWidth: true},
+ // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, aux=# of buffer entries needed
+ // It saves all GP registers if necessary,
+ // but clobbers R14 (LR) because it's a call,
+ // and also clobbers R1 as the PLT stub does.
+ // Returns a pointer to a write barrier buffer in R9.
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Constant condition code values. The condition code can be 0, 1, 2 or 3.
+ {name: "FlagEQ"}, // CC=0 (equal)
+ {name: "FlagLT"}, // CC=1 (less than)
+ {name: "FlagGT"}, // CC=2 (greater than)
+ {name: "FlagOV"}, // CC=3 (overflow)
+
+ // Fast-BCR-serialization to ensure store-load ordering.
+ {name: "SYNC", argLength: 1, reg: sync, asm: "SYNC", typ: "Mem"},
+
+ // Atomic loads. These are just normal loads but return <value,memory> tuples
+ // so they can be properly ordered with other loads.
+ // load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBZatomicload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVWZatomicload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVDatomicload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // Atomic stores. These are just normal stores.
+ // store arg1 to arg0+auxint+aux. arg2=mem.
+ {name: "MOVBatomicstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+ {name: "MOVWatomicstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+ {name: "MOVDatomicstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+
+ // Atomic adds.
+ // *(arg0+auxint+aux) += arg1. arg2=mem.
+ // Returns a tuple of <old contents of *(arg0+auxint+aux), memory>.
+ {name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+
+ // Atomic bitwise operations.
+ // Note: 'floor' operations round the pointer down to the nearest word boundary
+ // which reflects how they are used in the runtime.
+ {name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
+ {name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem.
+ {name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
+ {name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
+
+ // Compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *(arg0+auxint+aux) == arg1 {
+ // *(arg0+auxint+aux) = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // Note that these instructions also return the old value in arg1, but we ignore it.
+ // TODO: have these return flags instead of bool. The current system generates:
+ // CS ...
+ // MOVD $0, ret
+ // BNE 2(PC)
+ // MOVD $1, ret
+ // CMPW ret, $0
+ // BNE ...
+ // instead of just
+ // CS ...
+ // BEQ ...
+ // but we can't do that because memory-using ops can't generate flags yet
+ // (flagalloc wants to move flag-generating instructions around).
+ {name: "LoweredAtomicCas32", argLength: 4, reg: cas, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: cas, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Lowered atomic swaps, emulated using compare-and-swap.
+ // store arg1 to arg0+auxint+aux, arg2=mem.
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: exchange, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: exchange, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // find leftmost one
+ {
+ name: "FLOGR",
+ argLength: 1,
+ reg: regInfo{inputs: gponly, outputs: []regMask{buildReg("R0")}, clobbers: buildReg("R1")},
+ asm: "FLOGR",
+ typ: "UInt64",
+ clobberFlags: true,
+ },
+
+ // population count
+ //
+ // Counts the number of ones in each byte of arg0
+ // and places the result into the corresponding byte
+ // of the result.
+ {
+ name: "POPCNT",
+ argLength: 1,
+ reg: gp11,
+ asm: "POPCNT",
+ typ: "UInt64",
+ clobberFlags: true,
+ },
+
+ // unsigned multiplication (64x64 → 128)
+ //
+ // Multiply the two 64-bit input operands together and place the 128-bit result into
+ // an even-odd register pair. The second register in the target pair also contains
+ // one of the input operands. Since we don't currently have a way to specify an
+ // even-odd register pair we hardcode this register pair as R2:R3.
+ {
+ name: "MLGR",
+ argLength: 2,
+ reg: regInfo{inputs: []regMask{gp, r3}, outputs: []regMask{r2, r3}},
+ asm: "MLGR",
+ },
+
+ // pseudo operations to sum the output of the POPCNT instruction
+ {name: "SumBytes2", argLength: 1, typ: "UInt8"}, // sum the rightmost 2 bytes in arg0 ignoring overflow
+ {name: "SumBytes4", argLength: 1, typ: "UInt8"}, // sum the rightmost 4 bytes in arg0 ignoring overflow
+ {name: "SumBytes8", argLength: 1, typ: "UInt8"}, // sum all the bytes in arg0 ignoring overflow
+
+ // store multiple
+ {
+ name: "STMG2",
+ argLength: 4,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STMG3",
+ argLength: 5,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STMG4",
+ argLength: 6,
+ reg: regInfo{inputs: []regMask{
+ ptrsp,
+ buildReg("R1"),
+ buildReg("R2"),
+ buildReg("R3"),
+ buildReg("R4"),
+ 0,
+ }},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM2",
+ argLength: 4,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM3",
+ argLength: 5,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM4",
+ argLength: 6,
+ reg: regInfo{inputs: []regMask{
+ ptrsp,
+ buildReg("R1"),
+ buildReg("R2"),
+ buildReg("R3"),
+ buildReg("R4"),
+ 0,
+ }},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+
+ // large move
+ // auxint = remaining bytes after loop (rem)
+ // arg0 = address of dst memory (in R1, changed as a side effect)
+ // arg1 = address of src memory (in R2, changed as a side effect)
+ // arg2 = pointer to last address to move in loop + 256
+ // arg3 = mem
+ // returns mem
+ //
+ // mvc: MVC $256, 0(R2), 0(R1)
+ // MOVD $256(R1), R1
+ // MOVD $256(R2), R2
+ // CMP R2, Rarg2
+ // BNE mvc
+ // MVC $rem, 0(R2), 0(R1) // if rem > 0
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R2"), gpsp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large clear
+ // auxint = remaining bytes after loop (rem)
+ // arg0 = address of dst memory (in R1, changed as a side effect)
+ // arg1 = pointer to last address to zero in loop + 256
+ // arg2 = mem
+ // returns mem
+ //
+ // clear: CLEAR $256, 0(R1)
+ // MOVD $256(R1), R1
+ // CMP R1, Rarg2
+ // BNE clear
+ // CLEAR $rem, 0(R1) // if rem > 0
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gpsp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+ }
+
+ // All blocks on s390x have their condition code mask (s390x.CCMask) as the Aux value.
+ // The condition code mask is a 4-bit mask where each bit corresponds to a condition
+ // code value. If the value of the condition code matches a bit set in the condition
+ // code mask then the first successor is executed. Otherwise the second successor is
+ // executed.
+ //
+ // | condition code value | mask bit |
+ // +----------------------+------------+
+ // | 0 (equal) | 0b1000 (8) |
+ // | 1 (less than) | 0b0100 (4) |
+ // | 2 (greater than) | 0b0010 (2) |
+ // | 3 (unordered) | 0b0001 (1) |
+ //
+ // Note: that compare-and-branch instructions must not have bit 3 (0b0001) set.
+ var S390Xblocks = []blockData{
+ // branch on condition
+ {name: "BRC", controls: 1, aux: "S390XCCMask"}, // condition code value (flags) is Controls[0]
+
+ // compare-and-branch (register-register)
+ // - integrates comparison of Controls[0] with Controls[1]
+ // - both control values must be in general purpose registers
+ {name: "CRJ", controls: 2, aux: "S390XCCMask"}, // signed 32-bit integer comparison
+ {name: "CGRJ", controls: 2, aux: "S390XCCMask"}, // signed 64-bit integer comparison
+ {name: "CLRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 32-bit integer comparison
+ {name: "CLGRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 64-bit integer comparison
+
+ // compare-and-branch (register-immediate)
+ // - integrates comparison of Controls[0] with AuxInt
+ // - control value must be in a general purpose register
+ // - the AuxInt value is sign-extended for signed comparisons
+ // and zero-extended for unsigned comparisons
+ {name: "CIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 32-bit integer comparison
+ {name: "CGIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 64-bit integer comparison
+ {name: "CLIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 32-bit integer comparison
+ {name: "CLGIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 64-bit integer comparison
+ }
+
+ archs = append(archs, arch{
+ name: "S390X",
+ pkg: "cmd/internal/obj/s390x",
+ genfile: "../../s390x/ssa.go",
+ ops: S390Xops,
+ blocks: S390Xblocks,
+ regnames: regNamesS390X,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R14"]),
+ imports: []string{
+ "cmd/internal/obj/s390x",
+ },
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
new file mode 100644
index 0000000..91a9fc5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
@@ -0,0 +1,397 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|32|16|8|Ptr) ...) => (I64Add ...)
+(Add(64|32)F ...) => (F(64|32)Add ...)
+
+(Sub(64|32|16|8|Ptr) ...) => (I64Sub ...)
+(Sub(64|32)F ...) => (F(64|32)Sub ...)
+
+(Mul(64|32|16|8) ...) => (I64Mul ...)
+(Mul(64|32)F ...) => (F(64|32)Mul ...)
+
+(Div64 [false] x y) => (I64DivS x y)
+(Div32 [false] x y) => (I64DivS (SignExt32to64 x) (SignExt32to64 y))
+(Div16 [false] x y) => (I64DivS (SignExt16to64 x) (SignExt16to64 y))
+(Div8 x y) => (I64DivS (SignExt8to64 x) (SignExt8to64 y))
+(Div64u ...) => (I64DivU ...)
+(Div32u x y) => (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Div16u x y) => (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Div8u x y) => (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Div(64|32)F ...) => (F(64|32)Div ...)
+
+(Mod64 [false] x y) => (I64RemS x y)
+(Mod32 [false] x y) => (I64RemS (SignExt32to64 x) (SignExt32to64 y))
+(Mod16 [false] x y) => (I64RemS (SignExt16to64 x) (SignExt16to64 y))
+(Mod8 x y) => (I64RemS (SignExt8to64 x) (SignExt8to64 y))
+(Mod64u ...) => (I64RemU ...)
+(Mod32u x y) => (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Mod16u x y) => (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Mod8u x y) => (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
+
+(And(64|32|16|8|B) ...) => (I64And ...)
+
+(Or(64|32|16|8|B) ...) => (I64Or ...)
+
+(Xor(64|32|16|8) ...) => (I64Xor ...)
+
+(Neg(64|32|16|8) x) => (I64Sub (I64Const [0]) x)
+(Neg(64|32)F ...) => (F(64|32)Neg ...)
+
+(Com(64|32|16|8) x) => (I64Xor x (I64Const [-1]))
+
+(Not ...) => (I64Eqz ...)
+
+// Lowering pointer arithmetic
+(OffPtr ...) => (I64AddConst ...)
+
+// Lowering extension
+// It is unnecessary to extend loads
+(SignExt32to64 x:(I64Load32S _ _)) => x
+(SignExt16to(64|32) x:(I64Load16S _ _)) => x
+(SignExt8to(64|32|16) x:(I64Load8S _ _)) => x
+(ZeroExt32to64 x:(I64Load32U _ _)) => x
+(ZeroExt16to(64|32) x:(I64Load16U _ _)) => x
+(ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x
+(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x)
+(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x)
+(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x)
+(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
+(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+(ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff]))
+(ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff]))
+(ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff]))
+
+(Slicemask x) => (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
+
+// Lowering truncation
+// Because we ignore the high parts, truncates are just copies.
+(Trunc64to(32|16|8) ...) => (Copy ...)
+(Trunc32to(16|8) ...) => (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+
+// Lowering float <=> int
+(Cvt32to(64|32)F x) => (F(64|32)ConvertI64S (SignExt32to64 x))
+(Cvt64to(64|32)F ...) => (F(64|32)ConvertI64S ...)
+(Cvt32Uto(64|32)F x) => (F(64|32)ConvertI64U (ZeroExt32to64 x))
+(Cvt64Uto(64|32)F ...) => (F(64|32)ConvertI64U ...)
+
+(Cvt32Fto32 ...) => (I64TruncSatF32S ...)
+(Cvt32Fto64 ...) => (I64TruncSatF32S ...)
+(Cvt64Fto32 ...) => (I64TruncSatF64S ...)
+(Cvt64Fto64 ...) => (I64TruncSatF64S ...)
+(Cvt32Fto32U ...) => (I64TruncSatF32U ...)
+(Cvt32Fto64U ...) => (I64TruncSatF32U ...)
+(Cvt64Fto32U ...) => (I64TruncSatF64U ...)
+(Cvt64Fto64U ...) => (I64TruncSatF64U ...)
+
+(Cvt32Fto64F ...) => (F64PromoteF32 ...)
+(Cvt64Fto32F ...) => (F32DemoteF64 ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+
+(Lsh64x64 x y) && shiftIsBounded(v) => (I64Shl x y)
+(Lsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64Shl x (I64Const [c]))
+(Lsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0])
+(Lsh64x64 x y) => (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+(Lsh64x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh32x64 ...) => (Lsh64x64 ...)
+(Lsh32x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh16x64 ...) => (Lsh64x64 ...)
+(Lsh16x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh8x64 ...) => (Lsh64x64 ...)
+(Lsh8x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh64Ux64 x y) && shiftIsBounded(v) => (I64ShrU x y)
+(Rsh64Ux64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrU x (I64Const [c]))
+(Rsh64Ux64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0])
+(Rsh64Ux64 x y) => (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+(Rsh64Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh32Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) y)
+(Rsh32Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh16Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) y)
+(Rsh16Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh8Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) y)
+(Rsh8Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt(32|16|8)to64 y))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to (width - 1) if the shift value is >= width.
+
+(Rsh64x64 x y) && shiftIsBounded(v) => (I64ShrS x y)
+(Rsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrS x (I64Const [c]))
+(Rsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64ShrS x (I64Const [63]))
+(Rsh64x64 x y) => (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
+(Rsh64x(32|16|8) [c] x y) => (Rsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh32x64 [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) y)
+(Rsh32x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh16x64 [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) y)
+(Rsh16x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh8x64 [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) y)
+(Rsh8x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt(32|16|8)to64 y))
+
+// Lowering rotates
+(RotateLeft8 <t> x (I64Const [c])) => (Or8 (Lsh8x64 <t> x (I64Const [c&7])) (Rsh8Ux64 <t> x (I64Const [-c&7])))
+(RotateLeft16 <t> x (I64Const [c])) => (Or16 (Lsh16x64 <t> x (I64Const [c&15])) (Rsh16Ux64 <t> x (I64Const [-c&15])))
+(RotateLeft32 ...) => (I32Rotl ...)
+(RotateLeft64 ...) => (I64Rotl ...)
+
+// Lowering comparisons
+(Less64 ...) => (I64LtS ...)
+(Less32 x y) => (I64LtS (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (I64LtS (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (I64LtS (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (I64LtU ...)
+(Less32U x y) => (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less(64|32)F ...) => (F(64|32)Lt ...)
+
+(Leq64 ...) => (I64LeS ...)
+(Leq32 x y) => (I64LeS (SignExt32to64 x) (SignExt32to64 y))
+(Leq16 x y) => (I64LeS (SignExt16to64 x) (SignExt16to64 y))
+(Leq8 x y) => (I64LeS (SignExt8to64 x) (SignExt8to64 y))
+(Leq64U ...) => (I64LeU ...)
+(Leq32U x y) => (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Leq16U x y) => (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Leq8U x y) => (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Leq(64|32)F ...) => (F(64|32)Le ...)
+
+(Eq64 ...) => (I64Eq ...)
+(Eq32 x y) => (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Eq16 x y) => (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Eq8 x y) => (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
+(EqB ...) => (I64Eq ...)
+(EqPtr ...) => (I64Eq ...)
+(Eq(64|32)F ...) => (F(64|32)Eq ...)
+
+(Neq64 ...) => (I64Ne ...)
+(Neq32 x y) => (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Neq16 x y) => (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Neq8 x y) => (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
+(NeqB ...) => (I64Ne ...)
+(NeqPtr ...) => (I64Ne ...)
+(Neq(64|32)F ...) => (F(64|32)Ne ...)
+
+// Lowering loads
+(Load <t> ptr mem) && is32BitFloat(t) => (F32Load ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (F64Load ptr mem)
+(Load <t> ptr mem) && t.Size() == 8 => (I64Load ptr mem)
+(Load <t> ptr mem) && t.Size() == 4 && !t.IsSigned() => (I64Load32U ptr mem)
+(Load <t> ptr mem) && t.Size() == 4 && t.IsSigned() => (I64Load32S ptr mem)
+(Load <t> ptr mem) && t.Size() == 2 && !t.IsSigned() => (I64Load16U ptr mem)
+(Load <t> ptr mem) && t.Size() == 2 && t.IsSigned() => (I64Load16S ptr mem)
+(Load <t> ptr mem) && t.Size() == 1 && !t.IsSigned() => (I64Load8U ptr mem)
+(Load <t> ptr mem) && t.Size() == 1 && t.IsSigned() => (I64Load8S ptr mem)
+
+// Lowering stores
+(Store {t} ptr val mem) && is64BitFloat(t) => (F64Store ptr val mem)
+(Store {t} ptr val mem) && is32BitFloat(t) => (F32Store ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 => (I64Store ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (I64Store32 ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (I64Store16 ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (I64Store8 ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (I64Store8 dst (I64Load8U src mem) mem)
+(Move [2] dst src mem) => (I64Store16 dst (I64Load16U src mem) mem)
+(Move [4] dst src mem) => (I64Store32 dst (I64Load32U src mem) mem)
+(Move [8] dst src mem) => (I64Store dst (I64Load src mem) mem)
+(Move [16] dst src mem) =>
+ (I64Store [8] dst (I64Load [8] src mem)
+ (I64Store dst (I64Load src mem) mem))
+(Move [3] dst src mem) =>
+ (I64Store8 [2] dst (I64Load8U [2] src mem)
+ (I64Store16 dst (I64Load16U src mem) mem))
+(Move [5] dst src mem) =>
+ (I64Store8 [4] dst (I64Load8U [4] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [6] dst src mem) =>
+ (I64Store16 [4] dst (I64Load16U [4] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [7] dst src mem) =>
+ (I64Store32 [3] dst (I64Load32U [3] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [s] dst src mem) && s > 8 && s < 16 =>
+ (I64Store [s-8] dst (I64Load [s-8] src mem)
+ (I64Store dst (I64Load src mem) mem))
+
+// Large copying uses helper.
+(Move [s] dst src mem) && logLargeCopy(v, s) =>
+ (LoweredMove [s] dst src mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (I64Store8 destptr (I64Const [0]) mem)
+(Zero [2] destptr mem) => (I64Store16 destptr (I64Const [0]) mem)
+(Zero [4] destptr mem) => (I64Store32 destptr (I64Const [0]) mem)
+(Zero [8] destptr mem) => (I64Store destptr (I64Const [0]) mem)
+
+(Zero [3] destptr mem) =>
+ (I64Store8 [2] destptr (I64Const [0])
+ (I64Store16 destptr (I64Const [0]) mem))
+(Zero [5] destptr mem) =>
+ (I64Store8 [4] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+(Zero [6] destptr mem) =>
+ (I64Store16 [4] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+(Zero [7] destptr mem) =>
+ (I64Store32 [3] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 && s < 32 =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (I64Store destptr (I64Const [0]) mem))
+
+// Zero small numbers of words directly.
+(Zero [16] destptr mem) =>
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem))
+(Zero [24] destptr mem) =>
+ (I64Store [16] destptr (I64Const [0])
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem)))
+(Zero [32] destptr mem) =>
+ (I64Store [24] destptr (I64Const [0])
+ (I64Store [16] destptr (I64Const [0])
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem))))
+
+// Large zeroing uses helper.
+(Zero [s] destptr mem) =>
+ (LoweredZero [s] destptr mem)
+
+// Lowering constants
+(Const64 ...) => (I64Const ...)
+(Const(32|16|8) [c]) => (I64Const [int64(c)])
+(Const(64|32)F ...) => (F(64|32)Const ...)
+(ConstNil) => (I64Const [0])
+(ConstBool [c]) => (I64Const [b2i(c)])
+
+// Lowering calls
+(StaticCall ...) => (LoweredStaticCall ...)
+(ClosureCall ...) => (LoweredClosureCall ...)
+(InterCall ...) => (LoweredInterCall ...)
+(TailCall ...) => (LoweredTailCall ...)
+
+// Miscellaneous
+(Convert ...) => (LoweredConvert ...)
+(IsNonNil p) => (I64Eqz (I64Eqz p))
+(IsInBounds ...) => (I64LtU ...)
+(IsSliceInBounds ...) => (I64LeU ...)
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(Addr {sym} base) => (LoweredAddr {sym} [0] base)
+(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (LoweredAddr {sym} (SPanchored base mem))
+(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (LoweredAddr {sym} base)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// --- Intrinsics ---
+(Sqrt ...) => (F64Sqrt ...)
+(Trunc ...) => (F64Trunc ...)
+(Ceil ...) => (F64Ceil ...)
+(Floor ...) => (F64Floor ...)
+(RoundToEven ...) => (F64Nearest ...)
+(Abs ...) => (F64Abs ...)
+(Copysign ...) => (F64Copysign ...)
+
+(Sqrt32 ...) => (F32Sqrt ...)
+
+(Ctz64 ...) => (I64Ctz ...)
+(Ctz32 x) => (I64Ctz (I64Or x (I64Const [0x100000000])))
+(Ctz16 x) => (I64Ctz (I64Or x (I64Const [0x10000])))
+(Ctz8 x) => (I64Ctz (I64Or x (I64Const [0x100])))
+
+(Ctz(64|32|16|8)NonZero ...) => (I64Ctz ...)
+
+(BitLen64 x) => (I64Sub (I64Const [64]) (I64Clz x))
+
+(PopCount64 ...) => (I64Popcnt ...)
+(PopCount32 x) => (I64Popcnt (ZeroExt32to64 x))
+(PopCount16 x) => (I64Popcnt (ZeroExt16to64 x))
+(PopCount8 x) => (I64Popcnt (ZeroExt8to64 x))
+
+(CondSelect ...) => (Select ...)
+
+// --- Optimizations ---
+(I64Add (I64Const [x]) (I64Const [y])) => (I64Const [x + y])
+(I64Mul (I64Const [x]) (I64Const [y])) => (I64Const [x * y])
+(I64And (I64Const [x]) (I64Const [y])) => (I64Const [x & y])
+(I64Or (I64Const [x]) (I64Const [y])) => (I64Const [x | y])
+(I64Xor (I64Const [x]) (I64Const [y])) => (I64Const [x ^ y])
+(F64Add (F64Const [x]) (F64Const [y])) => (F64Const [x + y])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(x * y) => (F64Const [x * y])
+(I64Eq (I64Const [x]) (I64Const [y])) && x == y => (I64Const [1])
+(I64Eq (I64Const [x]) (I64Const [y])) && x != y => (I64Const [0])
+(I64Ne (I64Const [x]) (I64Const [y])) && x == y => (I64Const [0])
+(I64Ne (I64Const [x]) (I64Const [y])) && x != y => (I64Const [1])
+
+(I64Shl (I64Const [x]) (I64Const [y])) => (I64Const [x << uint64(y)])
+(I64ShrU (I64Const [x]) (I64Const [y])) => (I64Const [int64(uint64(x) >> uint64(y))])
+(I64ShrS (I64Const [x]) (I64Const [y])) => (I64Const [x >> uint64(y)])
+
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64And y (I64Const [x]))
+(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Or y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Mul y (F64Const [x]))
+(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Eq y (I64Const [x]))
+(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Ne y (I64Const [x]))
+
+(I64Eq x (I64Const [0])) => (I64Eqz x)
+(I64LtU (I64Const [0]) x) => (I64Eqz (I64Eqz x))
+(I64LeU x (I64Const [0])) => (I64Eqz x)
+(I64LtU x (I64Const [1])) => (I64Eqz x)
+(I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x))
+(I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x))
+
+(I64Add x (I64Const <t> [y])) && !t.IsPtr() => (I64AddConst [y] x)
+(I64AddConst [0] x) => x
+(I64Eqz (I64Eqz (I64Eqz x))) => (I64Eqz x)
+
+// folding offset into load/store
+((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off] (I64AddConst [off2] ptr) mem)
+ && isU32Bit(off+off2) =>
+ ((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off+off2] ptr mem)
+
+((I64Store|I64Store32|I64Store16|I64Store8) [off] (I64AddConst [off2] ptr) val mem)
+ && isU32Bit(off+off2) =>
+ ((I64Store|I64Store32|I64Store16|I64Store8) [off+off2] ptr val mem)
+
+// folding offset into address
+(I64AddConst [off] (LoweredAddr {sym} [off2] base)) && isU32Bit(off+int64(off2)) =>
+ (LoweredAddr {sym} [int32(off)+off2] base)
+(I64AddConst [off] x:(SP)) && isU32Bit(off) => (LoweredAddr [int32(off)] x) // so it is rematerializeable
+
+// transforming readonly globals into constants
+(I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read8(sym, off+int64(off2)))])
diff --git a/src/cmd/compile/internal/ssa/_gen/WasmOps.go b/src/cmd/compile/internal/ssa/_gen/WasmOps.go
new file mode 100644
index 0000000..45bbed5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/WasmOps.go
@@ -0,0 +1,277 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+var regNamesWasm = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ "SP",
+ "g",
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesWasm) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesWasm {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
+ fp32 = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ fp64 = buildReg("F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ // The "registers", which are actually local variables, can get clobbered
+ // if we're switching goroutines, because it unwinds the WebAssembly stack.
+ callerSave = gp | fp32 | fp64 | buildReg("g")
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpsp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gpsp, gpsp, gpsp}, outputs: []regMask{gp}}
+ fp32_01 = regInfo{inputs: nil, outputs: []regMask{fp32}}
+ fp32_11 = regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp32}}
+ fp32_21 = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{fp32}}
+ fp32_21gp = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{gp}}
+ fp64_01 = regInfo{inputs: nil, outputs: []regMask{fp64}}
+ fp64_11 = regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp64}}
+ fp64_21 = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{fp64}}
+ fp64_21gp = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ fp32load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp32}}
+ fp32store = regInfo{inputs: []regMask{gpspsb, fp32, 0}}
+ fp64load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp64}}
+ fp64store = regInfo{inputs: []regMask{gpspsb, fp64, 0}}
+ )
+
+ var WasmOps = []opData{
+ {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "LoweredTailCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ {name: "LoweredAddr", argLength: 1, reg: gp11, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // returns base+aux+auxint, arg0=base
+ {name: "LoweredMove", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Int64"}, // large move. arg0=dst, arg1=src, arg2=mem, auxint=len, returns mem
+ {name: "LoweredZero", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, aux: "Int64"}, // large zeroing. arg0=start, arg1=mem, auxint=len, returns mem
+
+ {name: "LoweredGetClosurePtr", reg: gp01}, // returns wasm.REG_CTXT, the closure pointer
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, // returns the PC of the caller of the current function
+ {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, // returns the SP of the caller of the current function. arg0=mem.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave, outputs: []regMask{gp}}, aux: "Int64"}, // invokes runtime.gcWriteBarrier{auxint}. arg0=mem, auxint=# of buffer entries needed. Returns a pointer to a write barrier buffer.
+
+ // LoweredConvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GCCallOff
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ //
+ // TODO(neelance): LoweredConvert should not be necessary any more, since OpConvert does not need to be lowered any more (CL 108496).
+ {name: "LoweredConvert", argLength: 2, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}},
+
+ // The following are native WebAssembly instructions, see https://webassembly.github.io/spec/core/syntax/instructions.html
+
+ {name: "Select", asm: "Select", argLength: 3, reg: gp31}, // returns arg0 if arg2 != 0, otherwise returns arg1
+
+ {name: "I64Load8U", asm: "I64Load8U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt8"}, // read unsigned 8-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load8S", asm: "I64Load8S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int8"}, // read signed 8-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load16U", asm: "I64Load16U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt16"}, // read unsigned 16-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load16S", asm: "I64Load16S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int16"}, // read signed 16-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load32U", asm: "I64Load32U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt32"}, // read unsigned 32-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load32S", asm: "I64Load32S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int32"}, // read signed 32-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load", asm: "I64Load", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt64"}, // read 64-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Store8", asm: "I64Store8", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 8-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store16", asm: "I64Store16", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 16-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store32", asm: "I64Store32", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 32-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store", asm: "I64Store", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 64-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+
+ {name: "F32Load", asm: "F32Load", argLength: 2, reg: fp32load, aux: "Int64", typ: "Float32"}, // read 32-bit float from address arg0+aux, arg1=mem
+ {name: "F64Load", asm: "F64Load", argLength: 2, reg: fp64load, aux: "Int64", typ: "Float64"}, // read 64-bit float from address arg0+aux, arg1=mem
+ {name: "F32Store", asm: "F32Store", argLength: 3, reg: fp32store, aux: "Int64", typ: "Mem"}, // store 32-bit float arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "F64Store", asm: "F64Store", argLength: 3, reg: fp64store, aux: "Int64", typ: "Mem"}, // store 64-bit float arg1 at address arg0+aux, arg2=mem, returns mem
+
+ {name: "I64Const", reg: gp01, aux: "Int64", rematerializeable: true, typ: "Int64"}, // returns the constant integer aux
+ {name: "F32Const", reg: fp32_01, aux: "Float32", rematerializeable: true, typ: "Float32"}, // returns the constant float aux
+ {name: "F64Const", reg: fp64_01, aux: "Float64", rematerializeable: true, typ: "Float64"}, // returns the constant float aux
+
+ {name: "I64Eqz", asm: "I64Eqz", argLength: 1, reg: gp11, typ: "Bool"}, // arg0 == 0
+ {name: "I64Eq", asm: "I64Eq", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 == arg1
+ {name: "I64Ne", asm: "I64Ne", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 != arg1
+ {name: "I64LtS", asm: "I64LtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (signed)
+ {name: "I64LtU", asm: "I64LtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (unsigned)
+ {name: "I64GtS", asm: "I64GtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (signed)
+ {name: "I64GtU", asm: "I64GtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (unsigned)
+ {name: "I64LeS", asm: "I64LeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (signed)
+ {name: "I64LeU", asm: "I64LeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (unsigned)
+ {name: "I64GeS", asm: "I64GeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (signed)
+ {name: "I64GeU", asm: "I64GeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (unsigned)
+
+ {name: "F32Eq", asm: "F32Eq", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 == arg1
+ {name: "F32Ne", asm: "F32Ne", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 != arg1
+ {name: "F32Lt", asm: "F32Lt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 < arg1
+ {name: "F32Gt", asm: "F32Gt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 > arg1
+ {name: "F32Le", asm: "F32Le", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 <= arg1
+ {name: "F32Ge", asm: "F32Ge", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 >= arg1
+
+ {name: "F64Eq", asm: "F64Eq", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 == arg1
+ {name: "F64Ne", asm: "F64Ne", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 != arg1
+ {name: "F64Lt", asm: "F64Lt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 < arg1
+ {name: "F64Gt", asm: "F64Gt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 > arg1
+ {name: "F64Le", asm: "F64Le", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 <= arg1
+ {name: "F64Ge", asm: "F64Ge", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 >= arg1
+
+ {name: "I64Add", asm: "I64Add", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 + arg1
+ {name: "I64AddConst", asm: "I64Add", argLength: 1, reg: gp11, aux: "Int64", typ: "Int64"}, // arg0 + aux
+ {name: "I64Sub", asm: "I64Sub", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 - arg1
+ {name: "I64Mul", asm: "I64Mul", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 * arg1
+ {name: "I64DivS", asm: "I64DivS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (signed)
+ {name: "I64DivU", asm: "I64DivU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (unsigned)
+ {name: "I64RemS", asm: "I64RemS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (signed)
+ {name: "I64RemU", asm: "I64RemU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (unsigned)
+ {name: "I64And", asm: "I64And", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 & arg1
+ {name: "I64Or", asm: "I64Or", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 | arg1
+ {name: "I64Xor", asm: "I64Xor", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 ^ arg1
+ {name: "I64Shl", asm: "I64Shl", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 << (arg1 % 64)
+ {name: "I64ShrS", asm: "I64ShrS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (signed)
+ {name: "I64ShrU", asm: "I64ShrU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (unsigned)
+
+ {name: "F32Neg", asm: "F32Neg", argLength: 1, reg: fp32_11, typ: "Float32"}, // -arg0
+ {name: "F32Add", asm: "F32Add", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 + arg1
+ {name: "F32Sub", asm: "F32Sub", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 - arg1
+ {name: "F32Mul", asm: "F32Mul", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 * arg1
+ {name: "F32Div", asm: "F32Div", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 / arg1
+
+ {name: "F64Neg", asm: "F64Neg", argLength: 1, reg: fp64_11, typ: "Float64"}, // -arg0
+ {name: "F64Add", asm: "F64Add", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 + arg1
+ {name: "F64Sub", asm: "F64Sub", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 - arg1
+ {name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 * arg1
+ {name: "F64Div", asm: "F64Div", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 / arg1
+
+ {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating)
+ {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating)
+ {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating)
+ {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating)
+ {name: "F32ConvertI64S", asm: "F32ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the signed integer arg0 to a float
+ {name: "F32ConvertI64U", asm: "F32ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the unsigned integer arg0 to a float
+ {name: "F64ConvertI64S", asm: "F64ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the signed integer arg0 to a float
+ {name: "F64ConvertI64U", asm: "F64ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the unsigned integer arg0 to a float
+ {name: "F32DemoteF64", asm: "F32DemoteF64", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp32}}, typ: "Float32"},
+ {name: "F64PromoteF32", asm: "F64PromoteF32", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp64}}, typ: "Float64"},
+
+ {name: "I64Extend8S", asm: "I64Extend8S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 8 to 64 bit
+ {name: "I64Extend16S", asm: "I64Extend16S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 16 to 64 bit
+ {name: "I64Extend32S", asm: "I64Extend32S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 32 to 64 bit
+
+ {name: "F32Sqrt", asm: "F32Sqrt", argLength: 1, reg: fp32_11, typ: "Float32"}, // sqrt(arg0)
+ {name: "F32Trunc", asm: "F32Trunc", argLength: 1, reg: fp32_11, typ: "Float32"}, // trunc(arg0)
+ {name: "F32Ceil", asm: "F32Ceil", argLength: 1, reg: fp32_11, typ: "Float32"}, // ceil(arg0)
+ {name: "F32Floor", asm: "F32Floor", argLength: 1, reg: fp32_11, typ: "Float32"}, // floor(arg0)
+ {name: "F32Nearest", asm: "F32Nearest", argLength: 1, reg: fp32_11, typ: "Float32"}, // round(arg0)
+ {name: "F32Abs", asm: "F32Abs", argLength: 1, reg: fp32_11, typ: "Float32"}, // abs(arg0)
+ {name: "F32Copysign", asm: "F32Copysign", argLength: 2, reg: fp32_21, typ: "Float32"}, // copysign(arg0, arg1)
+
+ {name: "F64Sqrt", asm: "F64Sqrt", argLength: 1, reg: fp64_11, typ: "Float64"}, // sqrt(arg0)
+ {name: "F64Trunc", asm: "F64Trunc", argLength: 1, reg: fp64_11, typ: "Float64"}, // trunc(arg0)
+ {name: "F64Ceil", asm: "F64Ceil", argLength: 1, reg: fp64_11, typ: "Float64"}, // ceil(arg0)
+ {name: "F64Floor", asm: "F64Floor", argLength: 1, reg: fp64_11, typ: "Float64"}, // floor(arg0)
+ {name: "F64Nearest", asm: "F64Nearest", argLength: 1, reg: fp64_11, typ: "Float64"}, // round(arg0)
+ {name: "F64Abs", asm: "F64Abs", argLength: 1, reg: fp64_11, typ: "Float64"}, // abs(arg0)
+ {name: "F64Copysign", asm: "F64Copysign", argLength: 2, reg: fp64_21, typ: "Float64"}, // copysign(arg0, arg1)
+
+ {name: "I64Ctz", asm: "I64Ctz", argLength: 1, reg: gp11, typ: "Int64"}, // ctz(arg0)
+ {name: "I64Clz", asm: "I64Clz", argLength: 1, reg: gp11, typ: "Int64"}, // clz(arg0)
+ {name: "I32Rotl", asm: "I32Rotl", argLength: 2, reg: gp21, typ: "Int32"}, // rotl(arg0, arg1)
+ {name: "I64Rotl", asm: "I64Rotl", argLength: 2, reg: gp21, typ: "Int64"}, // rotl(arg0, arg1)
+ {name: "I64Popcnt", asm: "I64Popcnt", argLength: 1, reg: gp11, typ: "Int64"}, // popcnt(arg0)
+ }
+
+ archs = append(archs, arch{
+ name: "Wasm",
+ pkg: "cmd/internal/obj/wasm",
+ genfile: "../../wasm/ssa.go",
+ ops: WasmOps,
+ blocks: nil,
+ regnames: regNamesWasm,
+ gpregmask: gp,
+ fpregmask: fp32 | fp64,
+ fp32regmask: fp32,
+ fp64regmask: fp64,
+ framepointerreg: -1, // not used
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go
new file mode 100644
index 0000000..5869a61
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/allocators.go
@@ -0,0 +1,229 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// TODO: should we share backing storage for similarly-shaped types?
+// e.g. []*Value and []*Block, or even []int32 and []bool.
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io"
+ "log"
+ "os"
+)
+
+type allocator struct {
+ name string // name for alloc/free functions
+ typ string // the type they return/accept
+ mak string // code to make a new object (takes power-of-2 size as fmt arg)
+ capacity string // code to calculate the capacity of an object. Should always report a power of 2.
+ resize string // code to shrink to sub-power-of-two size (takes size as fmt arg)
+ clear string // code for clearing object before putting it on the free list
+ minLog int // log_2 of minimum allocation size
+ maxLog int // log_2 of maximum allocation size
+}
+
+type derived struct {
+ name string // name for alloc/free functions
+ typ string // the type they return/accept
+ base string // underlying allocator
+}
+
+func genAllocators() {
+ allocators := []allocator{
+ {
+ name: "ValueSlice",
+ typ: "[]*Value",
+ capacity: "cap(%s)",
+ mak: "make([]*Value, %s)",
+ resize: "%s[:%s]",
+ clear: "for i := range %[1]s {\n%[1]s[i] = nil\n}",
+ minLog: 5,
+ maxLog: 32,
+ },
+ {
+ name: "Int64Slice",
+ typ: "[]int64",
+ capacity: "cap(%s)",
+ mak: "make([]int64, %s)",
+ resize: "%s[:%s]",
+ clear: "for i := range %[1]s {\n%[1]s[i] = 0\n}",
+ minLog: 5,
+ maxLog: 32,
+ },
+ {
+ name: "SparseSet",
+ typ: "*sparseSet",
+ capacity: "%s.cap()",
+ mak: "newSparseSet(%s)",
+ resize: "", // larger-sized sparse sets are ok
+ clear: "%s.clear()",
+ minLog: 5,
+ maxLog: 32,
+ },
+ {
+ name: "SparseMap",
+ typ: "*sparseMap",
+ capacity: "%s.cap()",
+ mak: "newSparseMap(%s)",
+ resize: "", // larger-sized sparse maps are ok
+ clear: "%s.clear()",
+ minLog: 5,
+ maxLog: 32,
+ },
+ {
+ name: "SparseMapPos",
+ typ: "*sparseMapPos",
+ capacity: "%s.cap()",
+ mak: "newSparseMapPos(%s)",
+ resize: "", // larger-sized sparse maps are ok
+ clear: "%s.clear()",
+ minLog: 5,
+ maxLog: 32,
+ },
+ }
+ deriveds := []derived{
+ {
+ name: "BlockSlice",
+ typ: "[]*Block",
+ base: "ValueSlice",
+ },
+ {
+ name: "IntSlice",
+ typ: "[]int",
+ base: "Int64Slice",
+ },
+ {
+ name: "Int32Slice",
+ typ: "[]int32",
+ base: "Int64Slice",
+ },
+ {
+ name: "Int8Slice",
+ typ: "[]int8",
+ base: "Int64Slice",
+ },
+ {
+ name: "BoolSlice",
+ typ: "[]bool",
+ base: "Int64Slice",
+ },
+ {
+ name: "IDSlice",
+ typ: "[]ID",
+ base: "Int64Slice",
+ },
+ }
+
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated from _gen/allocators.go using 'go generate'; DO NOT EDIT.\n")
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "package ssa")
+
+ fmt.Fprintln(w, "import (")
+ fmt.Fprintln(w, "\"internal/unsafeheader\"")
+ fmt.Fprintln(w, "\"math/bits\"")
+ fmt.Fprintln(w, "\"sync\"")
+ fmt.Fprintln(w, "\"unsafe\"")
+ fmt.Fprintln(w, ")")
+ for _, a := range allocators {
+ genAllocator(w, a)
+ }
+ for _, d := range deriveds {
+ for _, base := range allocators {
+ if base.name == d.base {
+ genDerived(w, d, base)
+ break
+ }
+ }
+ }
+ // gofmt result
+ b := w.Bytes()
+ var err error
+ b, err = format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", w.Bytes())
+ panic(err)
+ }
+
+ if err := os.WriteFile("../allocators.go", b, 0666); err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
+func genAllocator(w io.Writer, a allocator) {
+ fmt.Fprintf(w, "var poolFree%s [%d]sync.Pool\n", a.name, a.maxLog-a.minLog)
+ fmt.Fprintf(w, "func (c *Cache) alloc%s(n int) %s {\n", a.name, a.typ)
+ fmt.Fprintf(w, "var s %s\n", a.typ)
+ fmt.Fprintf(w, "n2 := n\n")
+ fmt.Fprintf(w, "if n2 < %d { n2 = %d }\n", 1<<a.minLog, 1<<a.minLog)
+ fmt.Fprintf(w, "b := bits.Len(uint(n2-1))\n")
+ fmt.Fprintf(w, "v := poolFree%s[b-%d].Get()\n", a.name, a.minLog)
+ fmt.Fprintf(w, "if v == nil {\n")
+ fmt.Fprintf(w, " s = %s\n", fmt.Sprintf(a.mak, "1<<b"))
+ fmt.Fprintf(w, "} else {\n")
+ if a.typ[0] == '*' {
+ fmt.Fprintf(w, "s = v.(%s)\n", a.typ)
+ } else {
+ fmt.Fprintf(w, "sp := v.(*%s)\n", a.typ)
+ fmt.Fprintf(w, "s = *sp\n")
+ fmt.Fprintf(w, "*sp = nil\n")
+ fmt.Fprintf(w, "c.hdr%s = append(c.hdr%s, sp)\n", a.name, a.name)
+ }
+ fmt.Fprintf(w, "}\n")
+ if a.resize != "" {
+ fmt.Fprintf(w, "s = %s\n", fmt.Sprintf(a.resize, "s", "n"))
+ }
+ fmt.Fprintf(w, "return s\n")
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "func (c *Cache) free%s(s %s) {\n", a.name, a.typ)
+ fmt.Fprintf(w, "%s\n", fmt.Sprintf(a.clear, "s"))
+ fmt.Fprintf(w, "b := bits.Len(uint(%s) - 1)\n", fmt.Sprintf(a.capacity, "s"))
+ if a.typ[0] == '*' {
+ fmt.Fprintf(w, "poolFree%s[b-%d].Put(s)\n", a.name, a.minLog)
+ } else {
+ fmt.Fprintf(w, "var sp *%s\n", a.typ)
+ fmt.Fprintf(w, "if len(c.hdr%s) == 0 {\n", a.name)
+ fmt.Fprintf(w, " sp = new(%s)\n", a.typ)
+ fmt.Fprintf(w, "} else {\n")
+ fmt.Fprintf(w, " sp = c.hdr%s[len(c.hdr%s)-1]\n", a.name, a.name)
+ fmt.Fprintf(w, " c.hdr%s[len(c.hdr%s)-1] = nil\n", a.name, a.name)
+ fmt.Fprintf(w, " c.hdr%s = c.hdr%s[:len(c.hdr%s)-1]\n", a.name, a.name, a.name)
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "*sp = s\n")
+ fmt.Fprintf(w, "poolFree%s[b-%d].Put(sp)\n", a.name, a.minLog)
+ }
+ fmt.Fprintf(w, "}\n")
+}
+func genDerived(w io.Writer, d derived, base allocator) {
+ fmt.Fprintf(w, "func (c *Cache) alloc%s(n int) %s {\n", d.name, d.typ)
+ if d.typ[:2] != "[]" || base.typ[:2] != "[]" {
+ panic(fmt.Sprintf("bad derived types: %s %s", d.typ, base.typ))
+ }
+ fmt.Fprintf(w, "var base %s\n", base.typ[2:])
+ fmt.Fprintf(w, "var derived %s\n", d.typ[2:])
+ fmt.Fprintf(w, "if unsafe.Sizeof(base)%%unsafe.Sizeof(derived) != 0 { panic(\"bad\") }\n")
+ fmt.Fprintf(w, "scale := unsafe.Sizeof(base)/unsafe.Sizeof(derived)\n")
+ fmt.Fprintf(w, "b := c.alloc%s(int((uintptr(n)+scale-1)/scale))\n", base.name)
+ fmt.Fprintf(w, "s := unsafeheader.Slice {\n")
+ fmt.Fprintf(w, " Data: unsafe.Pointer(&b[0]),\n")
+ fmt.Fprintf(w, " Len: n,\n")
+ fmt.Fprintf(w, " Cap: cap(b)*int(scale),\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "return *(*%s)(unsafe.Pointer(&s))\n", d.typ)
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "func (c *Cache) free%s(s %s) {\n", d.name, d.typ)
+ fmt.Fprintf(w, "var base %s\n", base.typ[2:])
+ fmt.Fprintf(w, "var derived %s\n", d.typ[2:])
+ fmt.Fprintf(w, "scale := unsafe.Sizeof(base)/unsafe.Sizeof(derived)\n")
+ fmt.Fprintf(w, "b := unsafeheader.Slice {\n")
+ fmt.Fprintf(w, " Data: unsafe.Pointer(&s[0]),\n")
+ fmt.Fprintf(w, " Len: int((uintptr(len(s))+scale-1)/scale),\n")
+ fmt.Fprintf(w, " Cap: int((uintptr(cap(s))+scale-1)/scale),\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "c.free%s(*(*%s)(unsafe.Pointer(&b)))\n", base.name, base.typ)
+ fmt.Fprintf(w, "}\n")
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/cover.bash b/src/cmd/compile/internal/ssa/_gen/cover.bash
new file mode 100755
index 0000000..733f9db
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/cover.bash
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# Copyright 2020 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# A quick and dirty way to obtain code coverage from rulegen's main func. For
+# example:
+#
+# ./cover.bash && go tool cover -html=cover.out
+#
+# This script is needed to set up a temporary test file, so that we don't break
+# regular 'go run .' usage to run the generator.
+
+cat >main_test.go <<-EOF
+ //go:build ignore
+
+ package main
+
+ import "testing"
+
+ func TestCoverage(t *testing.T) { main() }
+EOF
+
+go test -run='^TestCoverage$' -coverprofile=cover.out "$@" *.go
+
+rm -f main_test.go
diff --git a/src/cmd/compile/internal/ssa/_gen/dec.rules b/src/cmd/compile/internal/ssa/_gen/dec.rules
new file mode 100644
index 0000000..7944947
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/dec.rules
@@ -0,0 +1,201 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose builtin compound types
+// (complex,string,slice,interface) into their constituent
+// types. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these types.
+
+(Store {t} _ _ mem) && t.Size() == 0 => mem
+
+// complex ops
+(ComplexReal (ComplexMake real _ )) => real
+(ComplexImag (ComplexMake _ imag )) => imag
+
+(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 =>
+ (ComplexMake
+ (Load <typ.Float32> ptr mem)
+ (Load <typ.Float32>
+ (OffPtr <typ.Float32Ptr> [4] ptr)
+ mem)
+ )
+(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 8 =>
+ (Store {typ.Float32}
+ (OffPtr <typ.Float32Ptr> [4] dst)
+ imag
+ (Store {typ.Float32} dst real mem))
+(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 =>
+ (ComplexMake
+ (Load <typ.Float64> ptr mem)
+ (Load <typ.Float64>
+ (OffPtr <typ.Float64Ptr> [8] ptr)
+ mem)
+ )
+(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 16 =>
+ (Store {typ.Float64}
+ (OffPtr <typ.Float64Ptr> [8] dst)
+ imag
+ (Store {typ.Float64} dst real mem))
+
+// string ops
+(StringPtr (StringMake ptr _)) => ptr
+(StringLen (StringMake _ len)) => len
+
+(Load <t> ptr mem) && t.IsString() =>
+ (StringMake
+ (Load <typ.BytePtr> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem))
+(Store dst (StringMake ptr len) mem) =>
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
+ len
+ (Store {typ.BytePtr} dst ptr mem))
+
+// slice ops
+(SlicePtr (SliceMake ptr _ _ )) => ptr
+(SliceLen (SliceMake _ len _)) => len
+(SliceCap (SliceMake _ _ cap)) => cap
+(SlicePtrUnchecked (SliceMake ptr _ _ )) => ptr
+
+(Load <t> ptr mem) && t.IsSlice() =>
+ (SliceMake
+ (Load <t.Elem().PtrTo()> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
+ mem))
+(Store {t} dst (SliceMake ptr len cap) mem) =>
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] dst)
+ cap
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
+ len
+ (Store {t.Elem().PtrTo()} dst ptr mem)))
+
+// interface ops
+(ITab (IMake itab _)) => itab
+(IData (IMake _ data)) => data
+
+(Load <t> ptr mem) && t.IsInterface() =>
+ (IMake
+ (Load <typ.Uintptr> ptr mem)
+ (Load <typ.BytePtr>
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
+ mem))
+(Store dst (IMake itab data) mem) =>
+ (Store {typ.BytePtr}
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst)
+ data
+ (Store {typ.Uintptr} dst itab mem))
+
+// Helpers for expand calls
+// Some of these are copied from generic.rules
+
+(IMake _typ (StructMake1 val)) => (IMake _typ val)
+(StructSelect [0] (IData x)) => (IData x)
+
+(StructSelect (StructMake1 x)) => x
+(StructSelect [0] (StructMake2 x _)) => x
+(StructSelect [1] (StructMake2 _ x)) => x
+(StructSelect [0] (StructMake3 x _ _)) => x
+(StructSelect [1] (StructMake3 _ x _)) => x
+(StructSelect [2] (StructMake3 _ _ x)) => x
+(StructSelect [0] (StructMake4 x _ _ _)) => x
+(StructSelect [1] (StructMake4 _ x _ _)) => x
+(StructSelect [2] (StructMake4 _ _ x _)) => x
+(StructSelect [3] (StructMake4 _ _ _ x)) => x
+
+// Special case coming from immediate interface rewriting
+// Typical case: (StructSelect [0] (IData (IMake typ dat)) rewrites to (StructSelect [0] dat)
+// but because the interface is immediate, the type of "IData" is a one-element struct containing
+// a pointer that is not the pointer type of dat (can be a *uint8).
+// More annoying case: (ArraySelect[0] (StructSelect[0] isAPtr))
+// There, result of the StructSelect is an Array (not a pointer) and
+// the pre-rewrite input to the ArraySelect is a struct, not a pointer.
+(StructSelect [0] x) && x.Type.IsPtrShaped() => x
+(ArraySelect [0] x) && x.Type.IsPtrShaped() => x
+
+// These, too. Bits is bits.
+(ArrayMake1 x) && x.Type.IsPtrShaped() => x
+(StructMake1 x) && x.Type.IsPtrShaped() => x
+
+(Store dst (StructMake1 <t> f0) mem) =>
+ (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+(Store dst (StructMake2 <t> f0 f1) mem) =>
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))
+(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem)))
+(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
+ (Store {t.FieldType(3)}
+ (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
+ f3
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))))
+
+(ArraySelect (ArrayMake1 x)) => x
+(ArraySelect [0] (IData x)) => (IData x)
+
+(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
+
+// NOTE removed must-not-be-SSA condition.
+(ArraySelect [i] x:(Load <t> ptr mem)) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.Elem().Size()*i] ptr) mem)
+
+(StringPtr x:(Load <t> ptr mem)) && t.IsString() => @x.Block (Load <typ.BytePtr> ptr mem)
+(StringLen x:(Load <t> ptr mem)) && t.IsString() => @x.Block (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+
+// NOTE removed must-not-be-SSA condition.
+(StructSelect [i] x:(Load <t> ptr mem)) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+
+(ITab x:(Load <t> ptr mem)) && t.IsInterface() => @x.Block (Load <typ.Uintptr> ptr mem)
+
+(IData x:(Load <t> ptr mem)) && t.IsInterface() => @x.Block (Load <typ.BytePtr>
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
+ mem)
+
+(SlicePtr x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <t.Elem().PtrTo()> ptr mem)
+(SliceLen x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+(SliceCap x:(Load <t> ptr mem)) && t.IsSlice() => @x.Block (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
+ mem)
+
+(ComplexReal x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load <typ.Float32> ptr mem)
+(ComplexImag x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load <typ.Float32>
+ (OffPtr <typ.Float32Ptr> [4] ptr)
+ mem)
+
+(ComplexReal x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load <typ.Float64> ptr mem)
+(ComplexImag x:(Load <t> ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load <typ.Float64>
+ (OffPtr <typ.Float64Ptr> [8] ptr)
+ mem)
diff --git a/src/cmd/compile/internal/ssa/_gen/dec64.rules b/src/cmd/compile/internal/ssa/_gen/dec64.rules
new file mode 100644
index 0000000..ba776af
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/dec64.rules
@@ -0,0 +1,401 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose [u]int64 types on 32-bit
+// architectures. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these typ.
+
+(Int64Hi (Int64Make hi _)) => hi
+(Int64Lo (Int64Make _ lo)) => lo
+
+(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() =>
+ (Int64Make
+ (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() =>
+ (Int64Make
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() =>
+ (Int64Make
+ (Load <typ.Int32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() =>
+ (Int64Make
+ (Load <typ.UInt32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+
+(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && !config.BigEndian =>
+ (Store {hi.Type}
+ (OffPtr <hi.Type.PtrTo()> [4] dst)
+ hi
+ (Store {lo.Type} dst lo mem))
+
+(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && config.BigEndian =>
+ (Store {lo.Type}
+ (OffPtr <lo.Type.PtrTo()> [4] dst)
+ lo
+ (Store {hi.Type} dst hi mem))
+
+// These are not enabled during decomposeBuiltin if late call expansion, but they are always enabled for softFloat
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.Int32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.UInt32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
+
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.Int32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.UInt32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
+
+(Add64 x y) =>
+ (Int64Make
+ (Add32withcarry <typ.Int32>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+
+(Sub64 x y) =>
+ (Int64Make
+ (Sub32withcarry <typ.Int32>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+
+(Mul64 x y) =>
+ (Int64Make
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y))
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y))
+ (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+ (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+
+(And64 x y) =>
+ (Int64Make
+ (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Or64 x y) =>
+ (Int64Make
+ (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Xor64 x y) =>
+ (Int64Make
+ (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Neg64 <t> x) => (Sub64 (Const64 <t> [0]) x)
+
+(Com64 x) =>
+ (Int64Make
+ (Com32 <typ.UInt32> (Int64Hi x))
+ (Com32 <typ.UInt32> (Int64Lo x)))
+
+// Sadly, just because we know that x is non-zero,
+// we don't know whether either component is,
+// so just treat Ctz64NonZero the same as Ctz64.
+(Ctz64NonZero ...) => (Ctz64 ...)
+
+(Ctz64 x) =>
+ (Add32 <typ.UInt32>
+ (Ctz32 <typ.UInt32> (Int64Lo x))
+ (And32 <typ.UInt32>
+ (Com32 <typ.UInt32> (Zeromask (Int64Lo x)))
+ (Ctz32 <typ.UInt32> (Int64Hi x))))
+
+(BitLen64 x) =>
+ (Add32 <typ.Int>
+ (BitLen32 <typ.Int> (Int64Hi x))
+ (BitLen32 <typ.Int>
+ (Or32 <typ.UInt32>
+ (Int64Lo x)
+ (Zeromask (Int64Hi x)))))
+
+(Bswap64 x) =>
+ (Int64Make
+ (Bswap32 <typ.UInt32> (Int64Lo x))
+ (Bswap32 <typ.UInt32> (Int64Hi x)))
+
+(SignExt32to64 x) => (Int64Make (Signmask x) x)
+(SignExt16to64 x) => (SignExt32to64 (SignExt16to32 x))
+(SignExt8to64 x) => (SignExt32to64 (SignExt8to32 x))
+
+(ZeroExt32to64 x) => (Int64Make (Const32 <typ.UInt32> [0]) x)
+(ZeroExt16to64 x) => (ZeroExt32to64 (ZeroExt16to32 x))
+(ZeroExt8to64 x) => (ZeroExt32to64 (ZeroExt8to32 x))
+
+(Trunc64to32 (Int64Make _ lo)) => lo
+(Trunc64to16 (Int64Make _ lo)) => (Trunc32to16 lo)
+(Trunc64to8 (Int64Make _ lo)) => (Trunc32to8 lo)
+// Most general
+(Trunc64to32 x) => (Int64Lo x)
+(Trunc64to16 x) => (Trunc32to16 (Int64Lo x))
+(Trunc64to8 x) => (Trunc32to8 (Int64Lo x))
+
+(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask x)
+(Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Lsh16x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh16x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt16to32 x))
+(Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Lsh8x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh8x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt8to32 x))
+(Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+
+(Lsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh32x32 [c] x lo)
+(Rsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32x32 [c] x lo)
+(Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32Ux32 [c] x lo)
+(Lsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh16x32 [c] x lo)
+(Rsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16x32 [c] x lo)
+(Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16Ux32 [c] x lo)
+(Lsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh8x32 [c] x lo)
+(Rsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8x32 [c] x lo)
+(Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8Ux32 [c] x lo)
+
+(Lsh64x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0])
+(Rsh64x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+(Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0])
+
+(Lsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh64x32 [c] x lo)
+(Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64x32 [c] x lo)
+(Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64Ux32 [c] x lo)
+
+// turn x64 non-constant shifts to x32 shifts
+// if high 32-bit of the shift is nonzero, make a huge shift
+(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+
+// Most general
+(Lsh64x64 x y) => (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64x64 x y) => (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64Ux64 x y) => (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh32x64 x y) => (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32x64 x y) => (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32Ux64 x y) => (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh16x64 x y) => (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16x64 x y) => (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16Ux64 x y) => (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh8x64 x y) => (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8x64 x y) => (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+
+(RotateLeft64 x (Int64Make hi lo)) => (RotateLeft64 x lo)
+(RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo)
+(RotateLeft16 x (Int64Make hi lo)) => (RotateLeft16 x lo)
+(RotateLeft8 x (Int64Make hi lo)) => (RotateLeft8 x lo)
+
+// Clean up constants a little
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c == 0 => y
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c != 0 => (Const32 <typ.UInt32> [-1])
+
+// 64x left shift
+// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result 0
+// result.lo = lo<<s
+(Lsh64x32 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x32 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux32 <typ.UInt32>
+ (Int64Lo x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Lsh32x32 <typ.UInt32>
+ (Int64Lo x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))))
+ (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x16 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x16 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux16 <typ.UInt32>
+ (Int64Lo x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Lsh32x16 <typ.UInt32>
+ (Int64Lo x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))))
+ (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x8 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x8 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux8 <typ.UInt32>
+ (Int64Lo x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Lsh32x8 <typ.UInt32>
+ (Int64Lo x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))))
+ (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
+
+// 64x unsigned right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result 0
+(Rsh64Ux32 x s) =>
+ (Int64Make
+ (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Rsh32Ux32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+(Rsh64Ux16 x s) =>
+ (Int64Make
+ (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Rsh32Ux16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+(Rsh64Ux8 x s) =>
+ (Int64Make
+ (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Rsh32Ux8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+
+// 64x signed right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result 0/-1
+(Rsh64x32 x s) =>
+ (Int64Make
+ (Rsh32x32 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))
+ (Zeromask
+ (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+(Rsh64x16 x s) =>
+ (Int64Make
+ (Rsh32x16 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))
+ (Zeromask
+ (ZeroExt16to32
+ (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+(Rsh64x8 x s) =>
+ (Int64Make
+ (Rsh32x8 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))
+ (Zeromask
+ (ZeroExt8to32
+ (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
+
+(Const64 <t> [c]) && t.IsSigned() =>
+ (Int64Make (Const32 <typ.Int32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+(Const64 <t> [c]) && !t.IsSigned() =>
+ (Int64Make (Const32 <typ.UInt32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+
+(Eq64 x y) =>
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Eq32 (Int64Lo x) (Int64Lo y)))
+
+(Neq64 x y) =>
+ (OrB
+ (Neq32 (Int64Hi x) (Int64Hi y))
+ (Neq32 (Int64Lo x) (Int64Lo y)))
+
+(Less64U x y) =>
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64U x y) =>
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
+
+(Less64 x y) =>
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64 x y) =>
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
diff --git a/src/cmd/compile/internal/ssa/_gen/dec64Ops.go b/src/cmd/compile/internal/ssa/_gen/dec64Ops.go
new file mode 100644
index 0000000..bba218e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/dec64Ops.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+var dec64Ops = []opData{}
+
+var dec64Blocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec64",
+ ops: dec64Ops,
+ blocks: dec64Blocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/decOps.go b/src/cmd/compile/internal/ssa/_gen/decOps.go
new file mode 100644
index 0000000..0cc11cb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/decOps.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+var decOps = []opData{}
+
+var decBlocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec",
+ ops: decOps,
+ blocks: decBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
new file mode 100644
index 0000000..aeda625
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -0,0 +1,2756 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Simplifications that apply to all backend architectures. As an example, this
+// Go source code
+//
+// y := 0 * x
+//
+// can be translated into y := 0 without losing any information, which saves a
+// pointless multiplication instruction. Other .rules files in this directory
+// (for example AMD64.rules) contain rules specific to the architecture in the
+// filename. The rules here apply to every architecture.
+//
+// The code for parsing this file lives in rulegen.go; this file generates
+// ssa/rewritegeneric.go.
+
+// values are specified using the following format:
+// (op <type> [auxint] {aux} arg0 arg1 ...)
+// the type, aux, and auxint fields are optional
+// on the matching side
+// - the type, aux, and auxint fields must match if they are specified.
+// - the first occurrence of a variable defines that variable. Subsequent
+// uses must match (be == to) the first use.
+// - v is defined to be the value matched.
+// - an additional conditional can be provided after the match pattern with "&&".
+// on the generated side
+// - the type of the top-level expression is the same as the one on the left-hand side.
+// - the type of any subexpressions must be specified explicitly (or
+// be specified in the op's type field).
+// - auxint will be 0 if not specified.
+// - aux will be nil if not specified.
+
+// blocks are specified using the following format:
+// (kind controlvalue succ0 succ1 ...)
+// controlvalue must be "nil" or a value expression
+// succ* fields must be variables
+// For now, the generated successors must be a permutation of the matched successors.
+
+// constant folding
+(Trunc16to8 (Const16 [c])) => (Const8 [int8(c)])
+(Trunc32to8 (Const32 [c])) => (Const8 [int8(c)])
+(Trunc32to16 (Const32 [c])) => (Const16 [int16(c)])
+(Trunc64to8 (Const64 [c])) => (Const8 [int8(c)])
+(Trunc64to16 (Const64 [c])) => (Const16 [int16(c)])
+(Trunc64to32 (Const64 [c])) => (Const32 [int32(c)])
+(Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)])
+(Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)])
+(Cvt32to32F (Const32 [c])) => (Const32F [float32(c)])
+(Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
+(Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
+(Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
+(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
+(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)])
+(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)])
+(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)])
+(Round32F x:(Const32F)) => x
+(Round64F x:(Const64F)) => x
+(CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
+(CvtBoolToUint8 (ConstBool [true])) => (Const8 [1])
+
+(Trunc16to8 (ZeroExt8to16 x)) => x
+(Trunc32to8 (ZeroExt8to32 x)) => x
+(Trunc32to16 (ZeroExt8to32 x)) => (ZeroExt8to16 x)
+(Trunc32to16 (ZeroExt16to32 x)) => x
+(Trunc64to8 (ZeroExt8to64 x)) => x
+(Trunc64to16 (ZeroExt8to64 x)) => (ZeroExt8to16 x)
+(Trunc64to16 (ZeroExt16to64 x)) => x
+(Trunc64to32 (ZeroExt8to64 x)) => (ZeroExt8to32 x)
+(Trunc64to32 (ZeroExt16to64 x)) => (ZeroExt16to32 x)
+(Trunc64to32 (ZeroExt32to64 x)) => x
+(Trunc16to8 (SignExt8to16 x)) => x
+(Trunc32to8 (SignExt8to32 x)) => x
+(Trunc32to16 (SignExt8to32 x)) => (SignExt8to16 x)
+(Trunc32to16 (SignExt16to32 x)) => x
+(Trunc64to8 (SignExt8to64 x)) => x
+(Trunc64to16 (SignExt8to64 x)) => (SignExt8to16 x)
+(Trunc64to16 (SignExt16to64 x)) => x
+(Trunc64to32 (SignExt8to64 x)) => (SignExt8to32 x)
+(Trunc64to32 (SignExt16to64 x)) => (SignExt16to32 x)
+(Trunc64to32 (SignExt32to64 x)) => x
+
+(ZeroExt8to16 (Const8 [c])) => (Const16 [int16( uint8(c))])
+(ZeroExt8to32 (Const8 [c])) => (Const32 [int32( uint8(c))])
+(ZeroExt8to64 (Const8 [c])) => (Const64 [int64( uint8(c))])
+(ZeroExt16to32 (Const16 [c])) => (Const32 [int32(uint16(c))])
+(ZeroExt16to64 (Const16 [c])) => (Const64 [int64(uint16(c))])
+(ZeroExt32to64 (Const32 [c])) => (Const64 [int64(uint32(c))])
+(SignExt8to16 (Const8 [c])) => (Const16 [int16(c)])
+(SignExt8to32 (Const8 [c])) => (Const32 [int32(c)])
+(SignExt8to64 (Const8 [c])) => (Const64 [int64(c)])
+(SignExt16to32 (Const16 [c])) => (Const32 [int32(c)])
+(SignExt16to64 (Const16 [c])) => (Const64 [int64(c)])
+(SignExt32to64 (Const32 [c])) => (Const64 [int64(c)])
+
+(Neg8 (Const8 [c])) => (Const8 [-c])
+(Neg16 (Const16 [c])) => (Const16 [-c])
+(Neg32 (Const32 [c])) => (Const32 [-c])
+(Neg64 (Const64 [c])) => (Const64 [-c])
+(Neg32F (Const32F [c])) && c != 0 => (Const32F [-c])
+(Neg64F (Const64F [c])) && c != 0 => (Const64F [-c])
+
+(Add8 (Const8 [c]) (Const8 [d])) => (Const8 [c+d])
+(Add16 (Const16 [c]) (Const16 [d])) => (Const16 [c+d])
+(Add32 (Const32 [c]) (Const32 [d])) => (Const32 [c+d])
+(Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d])
+(Add32F (Const32F [c]) (Const32F [d])) && c+d == c+d => (Const32F [c+d])
+(Add64F (Const64F [c]) (Const64F [d])) && c+d == c+d => (Const64F [c+d])
+(AddPtr <t> x (Const64 [c])) => (OffPtr <t> x [c])
+(AddPtr <t> x (Const32 [c])) => (OffPtr <t> x [int64(c)])
+
+(Sub8 (Const8 [c]) (Const8 [d])) => (Const8 [c-d])
+(Sub16 (Const16 [c]) (Const16 [d])) => (Const16 [c-d])
+(Sub32 (Const32 [c]) (Const32 [d])) => (Const32 [c-d])
+(Sub64 (Const64 [c]) (Const64 [d])) => (Const64 [c-d])
+(Sub32F (Const32F [c]) (Const32F [d])) && c-d == c-d => (Const32F [c-d])
+(Sub64F (Const64F [c]) (Const64F [d])) && c-d == c-d => (Const64F [c-d])
+
+(Mul8 (Const8 [c]) (Const8 [d])) => (Const8 [c*d])
+(Mul16 (Const16 [c]) (Const16 [d])) => (Const16 [c*d])
+(Mul32 (Const32 [c]) (Const32 [d])) => (Const32 [c*d])
+(Mul64 (Const64 [c]) (Const64 [d])) => (Const64 [c*d])
+(Mul32F (Const32F [c]) (Const32F [d])) && c*d == c*d => (Const32F [c*d])
+(Mul64F (Const64F [c]) (Const64F [d])) && c*d == c*d => (Const64F [c*d])
+
+(And8 (Const8 [c]) (Const8 [d])) => (Const8 [c&d])
+(And16 (Const16 [c]) (Const16 [d])) => (Const16 [c&d])
+(And32 (Const32 [c]) (Const32 [d])) => (Const32 [c&d])
+(And64 (Const64 [c]) (Const64 [d])) => (Const64 [c&d])
+
+(Or8 (Const8 [c]) (Const8 [d])) => (Const8 [c|d])
+(Or16 (Const16 [c]) (Const16 [d])) => (Const16 [c|d])
+(Or32 (Const32 [c]) (Const32 [d])) => (Const32 [c|d])
+(Or64 (Const64 [c]) (Const64 [d])) => (Const64 [c|d])
+
+(Xor8 (Const8 [c]) (Const8 [d])) => (Const8 [c^d])
+(Xor16 (Const16 [c]) (Const16 [d])) => (Const16 [c^d])
+(Xor32 (Const32 [c]) (Const32 [d])) => (Const32 [c^d])
+(Xor64 (Const64 [c]) (Const64 [d])) => (Const64 [c^d])
+
+(Ctz64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz64(c))])
+(Ctz32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz32(c))])
+(Ctz16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz16(c))])
+(Ctz8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz8(c))])
+
+(Ctz64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz64(c))])
+(Ctz32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz32(c))])
+(Ctz16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz16(c))])
+(Ctz8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz8(c))])
+
+(Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d])
+(Div16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c/d])
+(Div32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c/d])
+(Div64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c/d])
+(Div8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c)/uint8(d))])
+(Div16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c)/uint16(d))])
+(Div32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c)/uint32(d))])
+(Div64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c)/uint64(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && c/d == c/d => (Const32F [c/d])
+(Div64F (Const64F [c]) (Const64F [d])) && c/d == c/d => (Const64F [c/d])
+(Select0 (Div128u (Const64 [0]) lo y)) => (Div64u lo y)
+(Select1 (Div128u (Const64 [0]) lo y)) => (Mod64u lo y)
+
+(Not (ConstBool [c])) => (ConstBool [!c])
+
+(Floor (Const64F [c])) => (Const64F [math.Floor(c)])
+(Ceil (Const64F [c])) => (Const64F [math.Ceil(c)])
+(Trunc (Const64F [c])) => (Const64F [math.Trunc(c)])
+(RoundToEven (Const64F [c])) => (Const64F [math.RoundToEven(c)])
+
+// Convert x * 1 to x.
+(Mul(8|16|32|64) (Const(8|16|32|64) [1]) x) => x
+(Select0 (Mul(32|64)uover (Const(32|64) [1]) x)) => x
+(Select1 (Mul(32|64)uover (Const(32|64) [1]) x)) => (ConstBool [false])
+
+// Convert x * -1 to -x.
+(Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x)
+
+// DeMorgan's Laws
+(And(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (Or(8|16|32|64) <t> x y))
+(Or(8|16|32|64) <t> (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (And(8|16|32|64) <t> x y))
+
+// Convert multiplication by a power of two to a shift.
+(Mul8 <t> n (Const8 [c])) && isPowerOfTwo8(c) => (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))
+(Mul16 <t> n (Const16 [c])) && isPowerOfTwo16(c) => (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)]))
+(Mul32 <t> n (Const32 [c])) && isPowerOfTwo32(c) => (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)]))
+(Mul64 <t> n (Const64 [c])) && isPowerOfTwo64(c) => (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)]))
+(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo8(-c) => (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)])))
+(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo16(-c) => (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)])))
+(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo32(-c) => (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)])))
+(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo64(-c) => (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)])))
+
+(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d])
+(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d])
+(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d])
+(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c % d])
+
+(Mod8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c) % uint8(d))])
+(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c) % uint16(d))])
+(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c) % uint32(d))])
+(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c) % uint64(d))])
+
+(Lsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c << uint64(d)])
+(Rsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c >> uint64(d)])
+(Rsh64Ux64 (Const64 [c]) (Const64 [d])) => (Const64 [int64(uint64(c) >> uint64(d))])
+(Lsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c << uint64(d)])
+(Rsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c >> uint64(d)])
+(Rsh32Ux64 (Const32 [c]) (Const64 [d])) => (Const32 [int32(uint32(c) >> uint64(d))])
+(Lsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c << uint64(d)])
+(Rsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c >> uint64(d)])
+(Rsh16Ux64 (Const16 [c]) (Const64 [d])) => (Const16 [int16(uint16(c) >> uint64(d))])
+(Lsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c << uint64(d)])
+(Rsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c >> uint64(d)])
+(Rsh8Ux64 (Const8 [c]) (Const64 [d])) => (Const8 [int8(uint8(c) >> uint64(d))])
+
+// Fold IsInBounds when the range of the index cannot exceed the limit.
+(IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c => (ConstBool [true])
+(IsInBounds x x) => (ConstBool [false])
+(IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) && 0 <= c && int16(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c < d])
+(IsInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c < d])
+// (Mod64u x y) is always between 0 (inclusive) and y (exclusive).
+(IsInBounds (Mod32u _ y) y) => (ConstBool [true])
+(IsInBounds (Mod64u _ y) y) => (ConstBool [true])
+// Right shifting an unsigned number limits its value.
+(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1<<uint(64-c)-1 < d => (ConstBool [true])
+
+(IsSliceInBounds x x) => (ConstBool [true])
+(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d => (ConstBool [true])
+(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c <= d => (ConstBool [true])
+(IsSliceInBounds (Const32 [0]) _) => (ConstBool [true])
+(IsSliceInBounds (Const64 [0]) _) => (ConstBool [true])
+(IsSliceInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c <= d])
+(IsSliceInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c <= d])
+(IsSliceInBounds (SliceLen x) (SliceCap x)) => (ConstBool [true])
+
+(Eq(64|32|16|8) x x) => (ConstBool [true])
+(EqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c == d])
+(EqB (ConstBool [false]) x) => (Not x)
+(EqB (ConstBool [true]) x) => x
+
+(Neq(64|32|16|8) x x) => (ConstBool [false])
+(NeqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c != d])
+(NeqB (ConstBool [false]) x) => x
+(NeqB (ConstBool [true]) x) => (Not x)
+(NeqB (Not x) (Not y)) => (NeqB x y)
+
+(Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Eq64 (Const64 <t> [c-d]) x)
+(Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Eq32 (Const32 <t> [c-d]) x)
+(Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Eq16 (Const16 <t> [c-d]) x)
+(Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Eq8 (Const8 <t> [c-d]) x)
+
+(Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Neq64 (Const64 <t> [c-d]) x)
+(Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Neq32 (Const32 <t> [c-d]) x)
+(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Neq16 (Const16 <t> [c-d]) x)
+(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Neq8 (Const8 <t> [c-d]) x)
+
+// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
+(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+
+// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
+(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+
+// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
+(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+
+// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
+(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+
+// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+
+// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+
+// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+
+// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+
+// Canonicalize x-const to x+(-const)
+(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 => (Add64 (Const64 <t> [-c]) x)
+(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 => (Add32 (Const32 <t> [-c]) x)
+(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 => (Add16 (Const16 <t> [-c]) x)
+(Sub8 x (Const8 <t> [c])) && x.Op != OpConst8 => (Add8 (Const8 <t> [-c]) x)
+
+// fold negation into comparison operators
+(Not (Eq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Neq(64|32|16|8|B|Ptr|64F|32F) x y)
+(Not (Neq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Eq(64|32|16|8|B|Ptr|64F|32F) x y)
+
+(Not (Less(64|32|16|8) x y)) => (Leq(64|32|16|8) y x)
+(Not (Less(64|32|16|8)U x y)) => (Leq(64|32|16|8)U y x)
+(Not (Leq(64|32|16|8) x y)) => (Less(64|32|16|8) y x)
+(Not (Leq(64|32|16|8)U x y)) => (Less(64|32|16|8)U y x)
+
+// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
+// a[i].b = ...; a[i+1].b = ...
+(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) =>
+ (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
+(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) =>
+ (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
+
+// Rewrite x*y ± x*z to x*(y±z)
+(Add(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
+ => (Mul(64|32|16|8) x (Add(64|32|16|8) <t> y z))
+(Sub(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
+ => (Mul(64|32|16|8) x (Sub(64|32|16|8) <t> y z))
+
+// rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce
+// the number of the other rewrite rules for const shifts
+(Lsh64x32 <t> x (Const32 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh64x16 <t> x (Const16 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh64x8 <t> x (Const8 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh64x32 <t> x (Const32 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh64x16 <t> x (Const16 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh64x8 <t> x (Const8 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh64Ux32 <t> x (Const32 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh64Ux16 <t> x (Const16 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh64Ux8 <t> x (Const8 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh32x32 <t> x (Const32 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh32x16 <t> x (Const16 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh32x8 <t> x (Const8 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh32x32 <t> x (Const32 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh32x16 <t> x (Const16 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh32x8 <t> x (Const8 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh32Ux32 <t> x (Const32 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh32Ux16 <t> x (Const16 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh32Ux8 <t> x (Const8 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh16x32 <t> x (Const32 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh16x16 <t> x (Const16 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh16x8 <t> x (Const8 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh16x32 <t> x (Const32 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh16x16 <t> x (Const16 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh16x8 <t> x (Const8 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh16Ux32 <t> x (Const32 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh16Ux16 <t> x (Const16 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh16Ux8 <t> x (Const8 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh8x32 <t> x (Const32 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh8x16 <t> x (Const16 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh8x8 <t> x (Const8 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh8x32 <t> x (Const32 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh8x16 <t> x (Const16 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh8x8 <t> x (Const8 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh8Ux32 <t> x (Const32 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh8Ux16 <t> x (Const16 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh8Ux8 <t> x (Const8 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+// shifts by zero
+(Lsh(64|32|16|8)x64 x (Const64 [0])) => x
+(Rsh(64|32|16|8)x64 x (Const64 [0])) => x
+(Rsh(64|32|16|8)Ux64 x (Const64 [0])) => x
+
+// rotates by multiples of register width
+(RotateLeft64 x (Const64 [c])) && c%64 == 0 => x
+(RotateLeft32 x (Const32 [c])) && c%32 == 0 => x
+(RotateLeft16 x (Const16 [c])) && c%16 == 0 => x
+(RotateLeft8 x (Const8 [c])) && c%8 == 0 => x
+
+// zero shifted
+(Lsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Rsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Rsh64Ux(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Lsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Rsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Rsh32Ux(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Lsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Rsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Rsh16Ux(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Lsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+(Rsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+(Rsh8Ux(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+
+// large left shifts of all values, and right shifts of unsigned values
+((Lsh64|Rsh64U)x64 _ (Const64 [c])) && uint64(c) >= 64 => (Const64 [0])
+((Lsh32|Rsh32U)x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+((Lsh16|Rsh16U)x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+((Lsh8|Rsh8U)x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// combine const shifts
+(Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh64x64 x (Const64 <t> [c+d]))
+(Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh32x64 x (Const64 <t> [c+d]))
+(Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh16x64 x (Const64 <t> [c+d]))
+(Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh8x64 x (Const64 <t> [c+d]))
+
+(Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64x64 x (Const64 <t> [c+d]))
+(Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32x64 x (Const64 <t> [c+d]))
+(Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16x64 x (Const64 <t> [c+d]))
+(Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8x64 x (Const64 <t> [c+d]))
+
+(Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64Ux64 x (Const64 <t> [c+d]))
+(Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32Ux64 x (Const64 <t> [c+d]))
+(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16Ux64 x (Const64 <t> [c+d]))
+(Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8Ux64 x (Const64 <t> [c+d]))
+
+// Remove signed right shift before an unsigned right shift that extracts the sign bit.
+(Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] )) => (Rsh8Ux64 x (Const64 <t> [7] ))
+(Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15])) => (Rsh16Ux64 x (Const64 <t> [15]))
+(Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31])) => (Rsh32Ux64 x (Const64 <t> [31]))
+(Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63])) => (Rsh64Ux64 x (Const64 <t> [63]))
+
+// Convert x>>c<<c to x&^(1<<c-1)
+(Lsh64x64 i:(Rsh(64|64U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 <v.Type> [int64(-1) << c]))
+(Lsh32x64 i:(Rsh(32|32U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 <v.Type> [int32(-1) << c]))
+(Lsh16x64 i:(Rsh(16|16U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 <v.Type> [int16(-1) << c]))
+(Lsh8x64 i:(Rsh(8|8U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 <v.Type> [int8(-1) << c]))
+// similarly for x<<c>>c
+(Rsh64Ux64 i:(Lsh64x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 <v.Type> [int64(^uint64(0)>>c)]))
+(Rsh32Ux64 i:(Lsh32x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 <v.Type> [int32(^uint32(0)>>c)]))
+(Rsh16Ux64 i:(Lsh16x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 <v.Type> [int16(^uint16(0)>>c)]))
+(Rsh8Ux64 i:(Lsh8x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 <v.Type> [int8 (^uint8 (0)>>c)]))
+
+// ((x >> c1) << c2) >> c3
+(Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ => (Rsh(64|32|16|8)Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+
+// ((x << c1) >> c2) << c3
+(Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ => (Lsh(64|32|16|8)x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+
+// (x >> c) & uppermask = 0
+(And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) && c >= int64(64-ntz64(m)) => (Const64 [0])
+(And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) && c >= int64(32-ntz32(m)) => (Const32 [0])
+(And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) && c >= int64(16-ntz16(m)) => (Const16 [0])
+(And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) && c >= int64(8-ntz8(m)) => (Const8 [0])
+
+// (x << c) & lowermask = 0
+(And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) && c >= int64(64-nlz64(m)) => (Const64 [0])
+(And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) && c >= int64(32-nlz32(m)) => (Const32 [0])
+(And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) && c >= int64(16-nlz16(m)) => (Const16 [0])
+(And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) && c >= int64(8-nlz8(m)) => (Const8 [0])
+
+// replace shifts with zero extensions
+(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
+
+// replace shifts with sign extensions
+(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (SignExt32to64 (Trunc64to32 <typ.Int32> x))
+
+// constant comparisons
+(Eq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c == d])
+(Neq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c != d])
+(Less(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c < d])
+(Leq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c <= d])
+
+(Less64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) < uint64(d)])
+(Less32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) < uint32(d)])
+(Less16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) < uint16(d)])
+(Less8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) < uint8(d)])
+
+(Leq64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) <= uint64(d)])
+(Leq32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) <= uint32(d)])
+(Leq16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) <= uint16(d)])
+(Leq8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) <= uint8(d)])
+
+(Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) && c >= 0 => (ConstBool [true])
+(Leq16 (Const16 [0]) (And16 _ (Const16 [c]))) && c >= 0 => (ConstBool [true])
+(Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) && c >= 0 => (ConstBool [true])
+(Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) && c >= 0 => (ConstBool [true])
+
+(Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+
+// prefer equalities with zero
+(Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x) && isNonNegative(x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) && isNonNegative(x) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+(Less(64|32|16|8)U x (Const(64|32|16|8) <t> [1])) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+(Leq(64|32|16|8)U (Const(64|32|16|8) <t> [1]) x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+
+// prefer comparisons with zero
+(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) => (Leq(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
+(Leq(64|32|16|8) x (Const(64|32|16|8) <t> [-1])) => (Less(64|32|16|8) x (Const(64|32|16|8) <t> [0]))
+(Leq(64|32|16|8) (Const(64|32|16|8) <t> [1]) x) => (Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+(Less(64|32|16|8) (Const(64|32|16|8) <t> [-1]) x) => (Leq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+
+// constant floating point comparisons
+(Eq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c == d])
+(Eq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c == d])
+(Neq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c != d])
+(Neq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c != d])
+(Less32F (Const32F [c]) (Const32F [d])) => (ConstBool [c < d])
+(Less64F (Const64F [c]) (Const64F [d])) => (ConstBool [c < d])
+(Leq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c <= d])
+(Leq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c <= d])
+
+// simplifications
+(Or(64|32|16|8) x x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1])
+(Or(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
+
+(And(64|32|16|8) x x) => x
+(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x
+(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+(And(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [0])
+
+(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0])
+(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Xor(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
+
+(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0])
+(Mul(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+(Select0 (Mul(64|32)uover (Const(64|32) [0]) x)) => (Const(64|32) [0])
+(Select1 (Mul(64|32)uover (Const(64|32) [0]) x)) => (ConstBool [false])
+
+(Com(64|32|16|8) (Com(64|32|16|8) x)) => x
+(Com(64|32|16|8) (Const(64|32|16|8) [c])) => (Const(64|32|16|8) [^c])
+
+(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x)
+(Add(64|32|16|8) x (Neg(64|32|16|8) y)) => (Sub(64|32|16|8) x y)
+
+(Xor(64|32|16|8) (Const(64|32|16|8) [-1]) x) => (Com(64|32|16|8) x)
+
+(Sub(64|32|16|8) (Neg(64|32|16|8) x) (Com(64|32|16|8) x)) => (Const(64|32|16|8) [1])
+(Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1])
+(Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1])
+
+// Simplification when involving common integer
+// (t + x) - (t + y) == x - y
+// (t + x) - (y + t) == x - y
+// (x + t) - (y + t) == x - y
+// (x + t) - (t + y) == x - y
+// (x - t) + (t + y) == x + y
+// (x - t) + (y + t) == x + y
+(Sub(64|32|16|8) (Add(64|32|16|8) t x) (Add(64|32|16|8) t y)) => (Sub(64|32|16|8) x y)
+(Add(64|32|16|8) (Sub(64|32|16|8) x t) (Add(64|32|16|8) t y)) => (Add(64|32|16|8) x y)
+
+// ^(x-1) == ^x+1 == -x
+(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
+(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x)
+
+// -(-x) == x
+(Neg(64|32|16|8) (Neg(64|32|16|8) x)) => x
+
+// -^x == x+1
+(Neg(64|32|16|8) <t> (Com(64|32|16|8) x)) => (Add(64|32|16|8) (Const(64|32|16|8) <t> [1]) x)
+
+(And(64|32|16|8) x (And(64|32|16|8) x y)) => (And(64|32|16|8) x y)
+(Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y)
+(Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y
+
+// Unsigned comparisons to zero.
+(Less(64U|32U|16U|8U) _ (Const(64|32|16|8) [0])) => (ConstBool [false])
+(Leq(64U|32U|16U|8U) (Const(64|32|16|8) [0]) _) => (ConstBool [true])
+
+// Ands clear bits. Ors set bits.
+// If a subsequent Or will set all the bits
+// that an And cleared, we can skip the And.
+// This happens in bitmasking code like:
+// x &^= 3 << shift // clear two old bits
+// x |= v << shift // set two new bits
+// when shift is a small constant and v ends up a constant 3.
+(Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1])) && ^(c1 | c2) == 0 => (Or8 (Const8 <t> [c1]) x)
+(Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1])) && ^(c1 | c2) == 0 => (Or16 (Const16 <t> [c1]) x)
+(Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1])) && ^(c1 | c2) == 0 => (Or32 (Const32 <t> [c1]) x)
+(Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1])) && ^(c1 | c2) == 0 => (Or64 (Const64 <t> [c1]) x)
+
+(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF => (Trunc64to8 x)
+(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc64to16 x)
+(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF => (Trunc64to32 x)
+(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF => (Trunc32to8 x)
+(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc32to16 x)
+(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF => (Trunc16to8 x)
+
+(ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 56 => x
+(ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 48 => x
+(ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 32 => x
+(ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 24 => x
+(ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 16 => x
+(ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) && s >= 8 => x
+
+(SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) && s >= 56 => x
+(SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) && s >= 48 => x
+(SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) && s >= 32 => x
+(SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) && s >= 24 => x
+(SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) && s >= 16 => x
+(SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) && s >= 8 => x
+
+(Slicemask (Const32 [x])) && x > 0 => (Const32 [-1])
+(Slicemask (Const32 [0])) => (Const32 [0])
+(Slicemask (Const64 [x])) && x > 0 => (Const64 [-1])
+(Slicemask (Const64 [0])) => (Const64 [0])
+
+// simplifications often used for lengths. e.g. len(s[i:i+5])==5
+(Sub(64|32|16|8) (Add(64|32|16|8) x y) x) => y
+(Sub(64|32|16|8) (Add(64|32|16|8) x y) y) => x
+(Sub(64|32|16|8) (Sub(64|32|16|8) x y) x) => (Neg(64|32|16|8) y)
+(Sub(64|32|16|8) x (Add(64|32|16|8) x y)) => (Neg(64|32|16|8) y)
+(Add(64|32|16|8) x (Sub(64|32|16|8) y x)) => y
+(Add(64|32|16|8) x (Add(64|32|16|8) y (Sub(64|32|16|8) z x))) => (Add(64|32|16|8) y z)
+
+// basic phi simplifications
+(Phi (Const8 [c]) (Const8 [c])) => (Const8 [c])
+(Phi (Const16 [c]) (Const16 [c])) => (Const16 [c])
+(Phi (Const32 [c]) (Const32 [c])) => (Const32 [c])
+(Phi (Const64 [c]) (Const64 [c])) => (Const64 [c])
+
+// slice and interface comparisons
+// The frontend ensures that we can only compare against nil,
+// so we need only compare the first word (interface type or slice ptr).
+(EqInter x y) => (EqPtr (ITab x) (ITab y))
+(NeqInter x y) => (NeqPtr (ITab x) (ITab y))
+(EqSlice x y) => (EqPtr (SlicePtr x) (SlicePtr y))
+(NeqSlice x y) => (NeqPtr (SlicePtr x) (SlicePtr y))
+
+// Load of store of same address, with compatibly typed value and same size
+(Load <t1> p1 (Store {t2} p2 x _))
+ && isSamePtr(p1, p2)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
+ && isSamePtr(p1, p3)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p3, t3.Size(), p2, t2.Size())
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
+ && isSamePtr(p1, p4)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p4, t4.Size(), p2, t2.Size())
+ && disjoint(p4, t4.Size(), p3, t3.Size())
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
+ && isSamePtr(p1, p5)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p5, t5.Size(), p2, t2.Size())
+ && disjoint(p5, t5.Size(), p3, t3.Size())
+ && disjoint(p5, t5.Size(), p4, t4.Size())
+ => x
+
+// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
+ (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
+ (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
+(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])
+
+// Float Loads up to Zeros so they can be constant folded.
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ mem:(Zero [n] p3 _)))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
+ && CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ mem:(Zero [n] p4 _))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
+ && CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ (Store {t4} p4 _
+ mem:(Zero [n] p5 _)))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
+ && CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ (Store {t4} p4 _
+ (Store {t5} p5 _
+ mem:(Zero [n] p6 _))))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
+ && CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ && disjoint(op, t1.Size(), p5, t5.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
+
+// Zero to Load forwarding.
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && t1.IsBoolean()
+ && isSamePtr(p1, p2)
+ && n >= o + 1
+ => (ConstBool [false])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is8BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 1
+ => (Const8 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is16BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 2
+ => (Const16 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is32BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 4
+ => (Const32 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is64BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 8
+ => (Const64 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is32BitFloat(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 4
+ => (Const32F [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is64BitFloat(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 8
+ => (Const64F [0])
+
+// Eliminate stores of values that have just been loaded from the same location.
+// We also handle the common case where there are some intermediate stores.
+(Store {t1} p1 (Load <t2> p2 mem) mem)
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ && disjoint(p1, t1.Size(), p4, t4.Size())
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ && disjoint(p1, t1.Size(), p4, t4.Size())
+ && disjoint(p1, t1.Size(), p5, t5.Size())
+ => mem
+
+// Don't Store zeros to cleared variables.
+(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
+ && isConstZero(x)
+ && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ => mem
+
+// Collapse OffPtr
+(OffPtr (OffPtr p [y]) [x]) => (OffPtr p [x+y])
+(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p
+
+// indexing operations
+// Note: bounds check has already been done
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 && is32Bit(t.Elem().Size()) => (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 => (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+
+// struct operations
+(StructSelect (StructMake1 x)) => x
+(StructSelect [0] (StructMake2 x _)) => x
+(StructSelect [1] (StructMake2 _ x)) => x
+(StructSelect [0] (StructMake3 x _ _)) => x
+(StructSelect [1] (StructMake3 _ x _)) => x
+(StructSelect [2] (StructMake3 _ _ x)) => x
+(StructSelect [0] (StructMake4 x _ _ _)) => x
+(StructSelect [1] (StructMake4 _ x _ _)) => x
+(StructSelect [2] (StructMake4 _ _ x _)) => x
+(StructSelect [3] (StructMake4 _ _ _ x)) => x
+
+(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && CanSSA(t) =>
+ (StructMake0)
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && CanSSA(t) =>
+ (StructMake1
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && CanSSA(t) =>
+ (StructMake2
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && CanSSA(t) =>
+ (StructMake3
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
+ (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && CanSSA(t) =>
+ (StructMake4
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
+ (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
+ (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
+
+(StructSelect [i] x:(Load <t> ptr mem)) && !CanSSA(t) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+
+(Store _ (StructMake0) mem) => mem
+(Store dst (StructMake1 <t> f0) mem) =>
+ (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+(Store dst (StructMake2 <t> f0 f1) mem) =>
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))
+(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem)))
+(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
+ (Store {t.FieldType(3)}
+ (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
+ f3
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))))
+
+// Putting struct{*byte} and similar into direct interfaces.
+(IMake _typ (StructMake1 val)) => (IMake _typ val)
+(StructSelect [0] (IData x)) => (IData x)
+
+// un-SSAable values use mem->mem copies
+(Store {t} dst (Load src mem) mem) && !CanSSA(t) =>
+ (Move {t} [t.Size()] dst src mem)
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !CanSSA(t) =>
+ (Move {t} [t.Size()] dst src (VarDef {x} mem))
+
+// array ops
+(ArraySelect (ArrayMake1 x)) => x
+
+(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 =>
+ (ArrayMake0)
+
+(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && CanSSA(t) =>
+ (ArrayMake1 (Load <t.Elem()> ptr mem))
+
+(Store _ (ArrayMake0) mem) => mem
+(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
+
+// Putting [1]*byte and similar into direct interfaces.
+(IMake _typ (ArrayMake1 val)) => (IMake _typ val)
+(ArraySelect [0] (IData x)) => (IData x)
+
+// string ops
+// Decomposing StringMake and lowering of StringPtr and StringLen
+// happens in a later pass, dec, so that these operations are available
+// to other passes for optimizations.
+(StringPtr (StringMake (Addr <t> {s} base) _)) => (Addr <t> {s} base)
+(StringLen (StringMake _ (Const64 <t> [c]))) => (Const64 <t> [c])
+(ConstString {str}) && config.PtrSize == 4 && str == "" =>
+ (StringMake (ConstNil) (Const32 <typ.Int> [0]))
+(ConstString {str}) && config.PtrSize == 8 && str == "" =>
+ (StringMake (ConstNil) (Const64 <typ.Int> [0]))
+(ConstString {str}) && config.PtrSize == 4 && str != "" =>
+ (StringMake
+ (Addr <typ.BytePtr> {fe.StringData(str)}
+ (SB))
+ (Const32 <typ.Int> [int32(len(str))]))
+(ConstString {str}) && config.PtrSize == 8 && str != "" =>
+ (StringMake
+ (Addr <typ.BytePtr> {fe.StringData(str)}
+ (SB))
+ (Const64 <typ.Int> [int64(len(str))]))
+
+// slice ops
+// Only a few slice rules are provided here. See dec.rules for
+// a more comprehensive set.
+(SliceLen (SliceMake _ (Const64 <t> [c]) _)) => (Const64 <t> [c])
+(SliceCap (SliceMake _ _ (Const64 <t> [c]))) => (Const64 <t> [c])
+(SliceLen (SliceMake _ (Const32 <t> [c]) _)) => (Const32 <t> [c])
+(SliceCap (SliceMake _ _ (Const32 <t> [c]))) => (Const32 <t> [c])
+(SlicePtr (SliceMake (SlicePtr x) _ _)) => (SlicePtr x)
+(SliceLen (SliceMake _ (SliceLen x) _)) => (SliceLen x)
+(SliceCap (SliceMake _ _ (SliceCap x))) => (SliceCap x)
+(SliceCap (SliceMake _ _ (SliceLen x))) => (SliceLen x)
+(ConstSlice) && config.PtrSize == 4 =>
+ (SliceMake
+ (ConstNil <v.Type.Elem().PtrTo()>)
+ (Const32 <typ.Int> [0])
+ (Const32 <typ.Int> [0]))
+(ConstSlice) && config.PtrSize == 8 =>
+ (SliceMake
+ (ConstNil <v.Type.Elem().PtrTo()>)
+ (Const64 <typ.Int> [0])
+ (Const64 <typ.Int> [0]))
+
+// interface ops
+(ConstInterface) =>
+ (IMake
+ (ConstNil <typ.Uintptr>)
+ (ConstNil <typ.BytePtr>))
+
+(NilCheck ptr:(GetG mem) mem) => ptr
+
+(If (Not cond) yes no) => (If cond no yes)
+(If (ConstBool [c]) yes no) && c => (First yes no)
+(If (ConstBool [c]) yes no) && !c => (First no yes)
+
+(Phi <t> nx:(Not x) ny:(Not y)) && nx.Uses == 1 && ny.Uses == 1 => (Not (Phi <t> x y))
+
+// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
+(Convert (Add(64|32) (Convert ptr mem) off) mem) => (AddPtr ptr off)
+(Convert (Convert ptr mem) mem) => ptr
+
+// strength reduction of divide by a constant.
+// See ../magic.go for a detailed description of these algorithms.
+
+// Unsigned divide by power of 2. Strength reduce to a shift.
+(Div8u n (Const8 [c])) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+(Div16u n (Const16 [c])) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+(Div32u n (Const32 [c])) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+(Div64u n (Const64 [c])) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+(Div64u n (Const64 [-1<<63])) => (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
+
+// Signed non-negative divide by power of 2.
+(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0])
+
+// Unsigned divide, not a power of 2. Strength reduce to a multiply.
+// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
+(Div8u x (Const8 [c])) && umagicOK8(c) =>
+ (Trunc32to8
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)])
+ (ZeroExt8to32 x))
+ (Const64 <typ.UInt64> [8+umagic8(c).s])))
+
+// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 =>
+ (Trunc64to16
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)])
+ (ZeroExt16to64 x))
+ (Const64 <typ.UInt64> [16+umagic16(c).s])))
+
+// For 16-bit divides on 32-bit machines
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)])
+ (ZeroExt16to32 x))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Avg32u
+ (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16]))
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(umagic16(c).m)])
+ (ZeroExt16to32 x)))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+
+// For 32-bit divides on 32-bit machines
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic32(c).s-2]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Avg32u
+ x
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(umagic32(c).m)])
+ x))
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+
+// For 32-bit divides on 64-bit machines
+// We'll use a regular (non-hi) multiply for this case.
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)])
+ (ZeroExt32to64 x))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Avg64u
+ (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32]))
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt32> [int64(umagic32(c).m)])
+ (ZeroExt32to64 x)))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+
+// For unsigned 64-bit divides on 32-bit machines,
+// if the constant fits in 16 bits (so that the last term
+// fits in 32 bits), convert to three 32-bit divides by a constant.
+//
+// If 1<<32 = Q * c + R
+// and x = hi << 32 + lo
+//
+// Then x = (hi/c*c + hi%c) << 32 + lo
+// = hi/c*c<<32 + hi%c<<32 + lo
+// = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c
+// = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c)
+// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c
+(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul =>
+ (Add64
+ (Add64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Lsh64x64 <typ.UInt64>
+ (ZeroExt32to64
+ (Div32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)])))
+ (Const64 <typ.UInt64> [32]))
+ (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))))
+ (Mul64 <typ.UInt64>
+ (ZeroExt32to64 <typ.UInt64>
+ (Mod32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)])))
+ (Const64 <typ.UInt64> [int64((1<<32)/c)])))
+ (ZeroExt32to64
+ (Div32u <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))
+ (Mul32 <typ.UInt32>
+ (Mod32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)]))
+ (Const32 <typ.UInt32> [int32((1<<32)%c)])))
+ (Const32 <typ.UInt32> [int32(c)]))))
+
+// For 64-bit divides on 64-bit machines
+// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic64(c).s-2]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Avg64u
+ x
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(umagic64(c).m)])
+ x))
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+
+// Signed divide by a negative constant. Rewrite to divide by a positive constant.
+(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 <t> n (Const8 <t> [-c])))
+(Div16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Neg16 (Div16 <t> n (Const16 <t> [-c])))
+(Div32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Neg32 (Div32 <t> n (Const32 <t> [-c])))
+(Div64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Neg64 (Div64 <t> n (Const64 <t> [-c])))
+
+// Dividing by the most-negative number. Result is always 0 except
+// if the input is also the most-negative number.
+// We can detect that using the sign bit of x & -x.
+(Div8 <t> x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
+(Div16 <t> x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+(Div32 <t> x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+(Div64 <t> x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
+
+// Signed divide by power of 2.
+// n / c = n >> log(c) if n >= 0
+// = (n+c-1) >> log(c) if n < 0
+// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
+(Div8 <t> n (Const8 [c])) && isPowerOfTwo8(c) =>
+ (Rsh8x64
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [int64( 8-log8(c))])))
+ (Const64 <typ.UInt64> [int64(log8(c))]))
+(Div16 <t> n (Const16 [c])) && isPowerOfTwo16(c) =>
+ (Rsh16x64
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))])))
+ (Const64 <typ.UInt64> [int64(log16(c))]))
+(Div32 <t> n (Const32 [c])) && isPowerOfTwo32(c) =>
+ (Rsh32x64
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))])))
+ (Const64 <typ.UInt64> [int64(log32(c))]))
+(Div64 <t> n (Const64 [c])) && isPowerOfTwo64(c) =>
+ (Rsh64x64
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))])))
+ (Const64 <typ.UInt64> [int64(log64(c))]))
+
+// Signed divide, not a power of 2. Strength reduce to a multiply.
+(Div8 <t> x (Const8 [c])) && smagicOK8(c) =>
+ (Sub8 <t>
+ (Rsh32x64 <t>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(smagic8(c).m)])
+ (SignExt8to32 x))
+ (Const64 <typ.UInt64> [8+smagic8(c).s]))
+ (Rsh32x64 <t>
+ (SignExt8to32 x)
+ (Const64 <typ.UInt64> [31])))
+(Div16 <t> x (Const16 [c])) && smagicOK16(c) =>
+ (Sub16 <t>
+ (Rsh32x64 <t>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(smagic16(c).m)])
+ (SignExt16to32 x))
+ (Const64 <typ.UInt64> [16+smagic16(c).s]))
+ (Rsh32x64 <t>
+ (SignExt16to32 x)
+ (Const64 <typ.UInt64> [31])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 =>
+ (Sub32 <t>
+ (Rsh64x64 <t>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(smagic32(c).m)])
+ (SignExt32to64 x))
+ (Const64 <typ.UInt64> [32+smagic32(c).s]))
+ (Rsh64x64 <t>
+ (SignExt32to64 x)
+ (Const64 <typ.UInt64> [63])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul =>
+ (Sub32 <t>
+ (Rsh32x64 <t>
+ (Hmul32 <t>
+ (Const32 <typ.UInt32> [int32(smagic32(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [smagic32(c).s-1]))
+ (Rsh32x64 <t>
+ x
+ (Const64 <typ.UInt64> [31])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul =>
+ (Sub32 <t>
+ (Rsh32x64 <t>
+ (Add32 <t>
+ (Hmul32 <t>
+ (Const32 <typ.UInt32> [int32(smagic32(c).m)])
+ x)
+ x)
+ (Const64 <typ.UInt64> [smagic32(c).s]))
+ (Rsh32x64 <t>
+ x
+ (Const64 <typ.UInt64> [31])))
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul =>
+ (Sub64 <t>
+ (Rsh64x64 <t>
+ (Hmul64 <t>
+ (Const64 <typ.UInt64> [int64(smagic64(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [smagic64(c).s-1]))
+ (Rsh64x64 <t>
+ x
+ (Const64 <typ.UInt64> [63])))
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul =>
+ (Sub64 <t>
+ (Rsh64x64 <t>
+ (Add64 <t>
+ (Hmul64 <t>
+ (Const64 <typ.UInt64> [int64(smagic64(c).m)])
+ x)
+ x)
+ (Const64 <typ.UInt64> [smagic64(c).s]))
+ (Rsh64x64 <t>
+ x
+ (Const64 <typ.UInt64> [63])))
+
+// Unsigned mod by power of 2 constant.
+(Mod8u <t> n (Const8 [c])) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
+(Mod16u <t> n (Const16 [c])) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
+(Mod32u <t> n (Const32 [c])) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
+(Mod64u <t> n (Const64 [c])) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
+(Mod64u <t> n (Const64 [-1<<63])) => (And64 n (Const64 <t> [1<<63-1]))
+
+// Signed non-negative mod by power of 2 constant.
+(Mod8 <t> n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
+(Mod16 <t> n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
+(Mod32 <t> n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
+(Mod64 <t> n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
+(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) => n
+
+// Signed mod by negative constant.
+(Mod8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Mod8 <t> n (Const8 <t> [-c]))
+(Mod16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Mod16 <t> n (Const16 <t> [-c]))
+(Mod32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Mod32 <t> n (Const32 <t> [-c]))
+(Mod64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Mod64 <t> n (Const64 <t> [-c]))
+
+// All other mods by constants, do A%B = A-(A/B*B).
+// This implements % with two * and a bunch of ancillary ops.
+// One of the * is free if the user's code also computes A/B.
+(Mod8 <t> x (Const8 [c])) && x.Op != OpConst8 && (c > 0 || c == -1<<7)
+ => (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+(Mod16 <t> x (Const16 [c])) && x.Op != OpConst16 && (c > 0 || c == -1<<15)
+ => (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+(Mod32 <t> x (Const32 [c])) && x.Op != OpConst32 && (c > 0 || c == -1<<31)
+ => (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63)
+ => (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+(Mod8u <t> x (Const8 [c])) && x.Op != OpConst8 && c > 0 && umagicOK8( c)
+ => (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+(Mod16u <t> x (Const16 [c])) && x.Op != OpConst16 && c > 0 && umagicOK16(c)
+ => (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+(Mod32u <t> x (Const32 [c])) && x.Op != OpConst32 && c > 0 && umagicOK32(c)
+ => (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && c > 0 && umagicOK64(c)
+ => (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+
+// For architectures without rotates on less than 32-bits, promote these checks to 32-bit.
+(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+
+// Divisibility checks x%c == 0 convert to multiply and rotate.
+// Note, x%c == 0 is rewritten as x == c*(x/c) during the opt pass
+// where (x/c) is performed using multiplication with magic constants.
+// To rewrite x%c == 0 requires pattern matching the rewritten expression
+// and checking that the division by the same constant wasn't already calculated.
+// This check is made by counting uses of the magic constant multiplication.
+// Note that if there were an intermediate opt pass, this rule could be applied
+// directly on the Div op and magic division rewrites could be delayed to late opt.
+
+// Unsigned divisibility checks convert to multiply and rotate.
+(Eq8 x (Mul8 (Const8 [c])
+ (Trunc32to8
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt8to32 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s
+ && x.Op != OpConst8 && udivisibleOK8(c)
+ => (Leq8U
+ (RotateLeft8 <typ.UInt8>
+ (Mul8 <typ.UInt8>
+ (Const8 <typ.UInt8> [int8(udivisible8(c).m)])
+ x)
+ (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)])
+ )
+ (Const8 <typ.UInt8> [int8(udivisible8(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc64to16
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt16to64 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt16to32 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ (Avg32u
+ (Lsh32x64 (ZeroExt16to32 x) (Const64 [16]))
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt16to32 x)))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ mul:(Hmul32u
+ (Const32 [m])
+ x)
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ mul:(Hmul32u
+ (Const32 <typ.UInt32> [m])
+ (Rsh32Ux64 x (Const64 [1])))
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ (Avg32u
+ x
+ mul:(Hmul32u
+ (Const32 [m])
+ x))
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(umagic32(c).m) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt32to64 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ (Avg64u
+ (Lsh64x64 (ZeroExt32to64 x) (Const64 [32]))
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt32to64 x)))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ mul:(Hmul64u
+ (Const64 [m])
+ x)
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ mul:(Hmul64u
+ (Const64 [m])
+ (Rsh64Ux64 x (Const64 [1])))
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ (Avg64u
+ x
+ mul:(Hmul64u
+ (Const64 [m])
+ x))
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(umagic64(c).m) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+
+// Signed divisibility checks convert to multiply, add and rotate.
+(Eq8 x (Mul8 (Const8 [c])
+ (Sub8
+ (Rsh32x64
+ mul:(Mul32
+ (Const32 [m])
+ (SignExt8to32 x))
+ (Const64 [s]))
+ (Rsh32x64
+ (SignExt8to32 x)
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic8(c).m) && s == 8+smagic8(c).s
+ && x.Op != OpConst8 && sdivisibleOK8(c)
+ => (Leq8U
+ (RotateLeft8 <typ.UInt8>
+ (Add8 <typ.UInt8>
+ (Mul8 <typ.UInt8>
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).m)])
+ x)
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).a)])
+ )
+ (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)])
+ )
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Sub16
+ (Rsh32x64
+ mul:(Mul32
+ (Const32 [m])
+ (SignExt16to32 x))
+ (Const64 [s]))
+ (Rsh32x64
+ (SignExt16to32 x)
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic16(c).m) && s == 16+smagic16(c).s
+ && x.Op != OpConst16 && sdivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Add16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).a)])
+ )
+ (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh64x64
+ mul:(Mul64
+ (Const64 [m])
+ (SignExt32to64 x))
+ (Const64 [s]))
+ (Rsh64x64
+ (SignExt32to64 x)
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic32(c).m) && s == 32+smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh32x64
+ mul:(Hmul32
+ (Const32 [m])
+ x)
+ (Const64 [s]))
+ (Rsh32x64
+ x
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh32x64
+ (Add32
+ mul:(Hmul32
+ (Const32 [m])
+ x)
+ x)
+ (Const64 [s]))
+ (Rsh32x64
+ x
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic32(c).m) && s == smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Sub64
+ (Rsh64x64
+ mul:(Hmul64
+ (Const64 [m])
+ x)
+ (Const64 [s]))
+ (Rsh64x64
+ x
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
+ )
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Sub64
+ (Rsh64x64
+ (Add64
+ mul:(Hmul64
+ (Const64 [m])
+ x)
+ x)
+ (Const64 [s]))
+ (Rsh64x64
+ x
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic64(c).m) && s == smagic64(c).s
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
+ )
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
+ )
+
+// Divisibility check for signed integers for power of two constant are simple mask.
+// However, we must match against the rewritten n%c == 0 -> n - c*(n/c) == 0 -> n == c*(n/c)
+// where n/c contains fixup code to handle signed n.
+((Eq8|Neq8) n (Lsh8x64
+ (Rsh8x64
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 7 && kbar == 8 - k
+ => ((Eq8|Neq8) (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+
+((Eq16|Neq16) n (Lsh16x64
+ (Rsh16x64
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 15 && kbar == 16 - k
+ => ((Eq16|Neq16) (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+
+((Eq32|Neq32) n (Lsh32x64
+ (Rsh32x64
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 31 && kbar == 32 - k
+ => ((Eq32|Neq32) (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+
+((Eq64|Neq64) n (Lsh64x64
+ (Rsh64x64
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 63 && kbar == 64 - k
+ => ((Eq64|Neq64) (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+
+(Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y)
+(Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y)
+
+// Optimize bitsets
+(Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y)
+ => (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+(Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y)
+ => (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+(Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y)
+ => (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+(Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y)
+ => (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+(Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y)
+ => (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+(Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y)
+ => (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+(Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y)
+ => (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+(Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y)
+ => (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+
+// Reassociate expressions involving
+// constants such that constants come first,
+// exposing obvious constant-folding opportunities.
+// Reassociate (op (op y C) x) to (op C (op x y)) or similar, where C
+// is constant, which pushes constants to the outside
+// of the expression. At that point, any constant-folding
+// opportunities should be obvious.
+// Note: don't include AddPtr here! In order to maintain the
+// invariant that pointers must stay within the pointed-to object,
+// we can't pull part of a pointer computation above the AddPtr.
+// See issue 37881.
+// Note: we don't need to handle any (x-C) cases because we already rewrite
+// (x-C) to (x+(-C)).
+
+// x + (C + z) -> C + (x + z)
+(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x))
+(Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 <t> z x))
+(Add16 (Add16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 <t> z x))
+(Add8 (Add8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Add8 <t> z x))
+
+// x + (C - z) -> C + (x - z)
+(Add64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> x z))
+(Add32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> x z))
+(Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z))
+(Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z))
+
+// x - (C - z) -> x + (z - C) -> (x + z) - C
+(Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i)
+(Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i)
+(Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i)
+(Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i)
+
+// x - (z + C) -> x + (-z - C) -> (x - z) - C
+(Sub64 x (Add64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 <t> x z) i)
+(Sub32 x (Add32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 <t> x z) i)
+(Sub16 x (Add16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 <t> x z) i)
+(Sub8 x (Add8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 <t> x z) i)
+
+// (C - z) - x -> C - (z + x)
+(Sub64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 <t> z x))
+(Sub32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 <t> z x))
+(Sub16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 <t> z x))
+(Sub8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 <t> z x))
+
+// (z + C) -x -> C + (z - x)
+(Sub64 (Add64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> z x))
+(Sub32 (Add32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> z x))
+(Sub16 (Add16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> z x))
+(Sub8 (Add8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> z x))
+
+// x & (C & z) -> C & (x & z)
+(And64 (And64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (And64 i (And64 <t> z x))
+(And32 (And32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (And32 i (And32 <t> z x))
+(And16 (And16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (And16 i (And16 <t> z x))
+(And8 (And8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (And8 i (And8 <t> z x))
+
+// x | (C | z) -> C | (x | z)
+(Or64 (Or64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Or64 i (Or64 <t> z x))
+(Or32 (Or32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Or32 i (Or32 <t> z x))
+(Or16 (Or16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Or16 i (Or16 <t> z x))
+(Or8 (Or8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Or8 i (Or8 <t> z x))
+
+// x ^ (C ^ z) -> C ^ (x ^ z)
+(Xor64 (Xor64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Xor64 i (Xor64 <t> z x))
+(Xor32 (Xor32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Xor32 i (Xor32 <t> z x))
+(Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x))
+(Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x))
+
+// x * (D * z) = D * (x * z)
+(Mul64 (Mul64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 <t> x z))
+(Mul32 (Mul32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 <t> x z))
+(Mul16 (Mul16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 <t> x z))
+(Mul8 (Mul8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 <t> x z))
+
+// C + (D + x) -> (C + D) + x
+(Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c+d]) x)
+(Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c+d]) x)
+(Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c+d]) x)
+(Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c+d]) x)
+
+// C + (D - x) -> (C + D) - x
+(Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c+d]) x)
+(Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c+d]) x)
+(Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x)
+(Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x)
+
+// C - (D - x) -> (C - D) + x
+(Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x)
+(Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x)
+(Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x)
+(Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x)
+
+// C - (D + x) -> (C - D) - x
+(Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c-d]) x)
+(Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c-d]) x)
+(Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c-d]) x)
+(Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c-d]) x)
+
+// C & (D & x) -> (C & D) & x
+(And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x)
+(And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x)
+(And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x)) => (And16 (Const16 <t> [c&d]) x)
+(And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x)) => (And8 (Const8 <t> [c&d]) x)
+
+// C | (D | x) -> (C | D) | x
+(Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x)) => (Or64 (Const64 <t> [c|d]) x)
+(Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x)) => (Or32 (Const32 <t> [c|d]) x)
+(Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x)) => (Or16 (Const16 <t> [c|d]) x)
+(Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x)) => (Or8 (Const8 <t> [c|d]) x)
+
+// C ^ (D ^ x) -> (C ^ D) ^ x
+(Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x)) => (Xor64 (Const64 <t> [c^d]) x)
+(Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x)) => (Xor32 (Const32 <t> [c^d]) x)
+(Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x)) => (Xor16 (Const16 <t> [c^d]) x)
+(Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x)) => (Xor8 (Const8 <t> [c^d]) x)
+
+// C * (D * x) = (C * D) * x
+(Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x)) => (Mul64 (Const64 <t> [c*d]) x)
+(Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x)) => (Mul32 (Const32 <t> [c*d]) x)
+(Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x)) => (Mul16 (Const16 <t> [c*d]) x)
+(Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) => (Mul8 (Const8 <t> [c*d]) x)
+
+// floating point optimizations
+(Mul(32|64)F x (Const(32|64)F [1])) => x
+(Mul32F x (Const32F [-1])) => (Neg32F x)
+(Mul64F x (Const64F [-1])) => (Neg64F x)
+(Mul32F x (Const32F [2])) => (Add32F x x)
+(Mul64F x (Const64F [2])) => (Add64F x x)
+
+(Div32F x (Const32F <t> [c])) && reciprocalExact32(c) => (Mul32F x (Const32F <t> [1/c]))
+(Div64F x (Const64F <t> [c])) && reciprocalExact64(c) => (Mul64F x (Const64F <t> [1/c]))
+
+// rewrite single-precision sqrt expression "float32(math.Sqrt(float64(x)))"
+(Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) && sqrt0.Uses==1 => (Sqrt32 x)
+
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)])
+
+// for rewriting results of some late-expanded rewrites (below)
+(SelectN [0] (MakeResult x ___)) => x
+(SelectN [1] (MakeResult x y ___)) => y
+(SelectN [2] (MakeResult x y z ___)) => z
+
+// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
+(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => ptr
+
+(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => ptr
+
+// Addresses of globals are always non-nil.
+(NilCheck ptr:(Addr {_} (SB)) _) => ptr
+(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr
+
+// for late-expanded calls, recognize memequal applied to a single constant byte
+// Support is limited by 1, 2, 4, 8 byte sizes
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+
+(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [1]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config)
+ => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [2]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config)
+ => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config)
+ => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [4]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config)
+ => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config) && config.PtrSize == 8
+ => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [8]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config) && config.PtrSize == 8
+ => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} _ _ (Const64 [0]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ => (MakeResult (ConstBool <typ.Bool> [true]) mem)
+
+(Static(Call|LECall) {callAux} p q _ mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && isSamePtr(p, q)
+ => (MakeResult (ConstBool <typ.Bool> [true]) mem)
+
+// Turn known-size calls to memclrNoHeapPointers into a Zero.
+// Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details.
+(SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem))
+ && isInlinableMemclr(config, int64(c))
+ && isSameCall(sym, "runtime.memclrNoHeapPointers")
+ && call.Uses == 1
+ && clobber(call)
+ => (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem)
+
+// Recognise make([]T, 0) and replace it with a pointer to the zerobase
+(StaticLECall {callAux} _ (Const(64|32) [0]) (Const(64|32) [0]) mem)
+ && isSameCall(callAux, "runtime.makeslice")
+ => (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem)
+
+// Evaluate constant address comparisons.
+(EqPtr x x) => (ConstBool [true])
+(NeqPtr x x) => (ConstBool [false])
+(EqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x == y])
+(EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x == y && o == 0])
+(EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x == y && o1 == o2])
+(NeqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x != y])
+(NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x != y || o != 0])
+(NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x != y || o1 != o2])
+(EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x == y])
+(EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x == y && o == 0])
+(EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x == y && o1 == o2])
+(NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x != y])
+(NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x != y || o != 0])
+(NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x != y || o1 != o2])
+(EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0])
+(NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0])
+(EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2])
+(NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 != o2])
+(EqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c == d])
+(NeqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c != d])
+(EqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x==y])
+(NeqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x!=y])
+
+(EqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [false])
+(EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [false])
+(EqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [false])
+(EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [false])
+(NeqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [true])
+(NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [true])
+(NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [true])
+(NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [true])
+
+// Simplify address comparisons.
+(EqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (Not (IsNonNil o1))
+(NeqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (IsNonNil o1)
+(EqPtr (Const(32|64) [0]) p) => (Not (IsNonNil p))
+(NeqPtr (Const(32|64) [0]) p) => (IsNonNil p)
+(EqPtr (ConstNil) p) => (Not (IsNonNil p))
+(NeqPtr (ConstNil) p) => (IsNonNil p)
+
+// Evaluate constant user nil checks.
+(IsNonNil (ConstNil)) => (ConstBool [false])
+(IsNonNil (Const(32|64) [c])) => (ConstBool [c != 0])
+(IsNonNil (Addr _) ) => (ConstBool [true])
+(IsNonNil (Convert (Addr _) _)) => (ConstBool [true])
+(IsNonNil (LocalAddr _ _)) => (ConstBool [true])
+
+// Inline small or disjoint runtime.memmove calls with constant length.
+// See the comment in op Move in genericOps.go for discussion of the type.
+//
+// Note that we've lost any knowledge of the type and alignment requirements
+// of the source and destination. We only know the size, and that the type
+// contains no pointers.
+// The type of the move is not necessarily v.Args[0].Type().Elem()!
+// See issue 55122 for details.
+//
+// Because expand calls runs after prove, constants useful to this pattern may not appear.
+// Both versions need to exist; the memory and register variants.
+//
+// Match post-expansion calls, memory version.
+(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(s1, s2, s3, call)
+ => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+
+// Match post-expansion calls, register version.
+(SelectN [0] call:(StaticCall {sym} dst src (Const(64|32) [sz]) mem))
+ && sz >= 0
+ && call.Uses == 1 // this will exclude all calls with results
+ && isSameCall(sym, "runtime.memmove")
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(call)
+ => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+
+// Match pre-expansion calls.
+(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
+ && sz >= 0
+ && call.Uses == 1 // this will exclude all calls with results
+ && isSameCall(sym, "runtime.memmove")
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(call)
+ => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+
+// De-virtualize late-expanded interface calls into late-expanded static calls.
+(InterLECall [argsize] {auxCall} (Addr {fn} (SB)) ___) => devirtLECall(v, fn.(*obj.LSym))
+
+// Move and Zero optimizations.
+// Move source and destination may overlap.
+
+// Convert Moves into Zeros when the source is known to be zeros.
+(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst (Addr {sym} (SB)) mem) && symIsROZero(sym) => (Zero {t} [n] dst mem)
+
+// Don't Store to variables that are about to be overwritten by Move/Zero.
+(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
+ && isSamePtr(p1, p2) && store.Uses == 1
+ && n >= o2 + t2.Size()
+ && clobber(store)
+ => (Zero {t1} [n] p1 mem)
+(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
+ && isSamePtr(dst1, dst2) && store.Uses == 1
+ && n >= o2 + t2.Size()
+ && disjoint(src1, n, op, t2.Size())
+ && clobber(store)
+ => (Move {t1} [n] dst1 src1 mem)
+
+// Don't Move to variables that are immediately completely overwritten.
+(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
+ && move.Uses == 1
+ && isSamePtr(dst1, dst2)
+ && clobber(move)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
+ && move.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(move)
+ => (Move {t} [n] dst1 src1 mem)
+(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ && move.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2)
+ && clobber(move, vardef)
+ => (Zero {t} [n] dst1 (VarDef {x} mem))
+(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ && move.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(move, vardef)
+ => (Move {t} [n] dst1 src1 (VarDef {x} mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [0] p2) d2
+ m3:(Move [n] p3 _ mem)))
+ && m2.Uses == 1 && m3.Uses == 1
+ && o1 == t2.Size()
+ && n == t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && clobber(m2, m3)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [0] p3) d3
+ m4:(Move [n] p4 _ mem))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
+ && o2 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && clobber(m2, m3, m4)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [o3] p3) d3
+ m4:(Store {t4} op4:(OffPtr [0] p4) d4
+ m5:(Move [n] p5 _ mem)))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && clobber(m2, m3, m4, m5)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+
+// Don't Zero variables that are immediately completely overwritten
+// before being accessed.
+(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
+ && zero.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(zero)
+ => (Move {t} [n] dst1 src1 mem)
+(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
+ && zero.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(zero, vardef)
+ => (Move {t} [n] dst1 src1 (VarDef {x} mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [0] p2) d2
+ m3:(Zero [n] p3 mem)))
+ && m2.Uses == 1 && m3.Uses == 1
+ && o1 == t2.Size()
+ && n == t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && clobber(m2, m3)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [0] p3) d3
+ m4:(Zero [n] p4 mem))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
+ && o2 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && clobber(m2, m3, m4)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [o3] p3) d3
+ m4:(Store {t4} op4:(OffPtr [0] p4) d4
+ m5:(Zero [n] p5 mem)))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && clobber(m2, m3, m4, m5)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+
+// Don't Move from memory if the values are likely to already be
+// in registers.
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && o2 == t3.Size()
+ && n == t2.Size() + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && o4 == t5.Size()
+ && o3-o4 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+
+// Same thing but with VarDef in the middle.
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && o2 == t3.Size()
+ && n == t2.Size() + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && o4 == t5.Size()
+ && o3-o4 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+
+// Prefer to Zero and Store than to Move.
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Zero {t3} [n] p3 _)))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && n >= o2 + t2.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Zero {t1} [n] dst mem))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Zero {t4} [n] p4 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Zero {t1} [n] dst mem)))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Zero {t5} [n] p5 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Zero {t1} [n] dst mem))))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Store {t5} (OffPtr <tt5> [o5] p5) d4
+ (Zero {t6} [n] p6 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && t6.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ && n >= o5 + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [o5] dst) d4
+ (Zero {t1} [n] dst mem)))))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Zero {t3} [n] p3 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && n >= o2 + t2.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Zero {t1} [n] dst mem))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Zero {t4} [n] p4 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Zero {t1} [n] dst mem)))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Zero {t5} [n] p5 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Zero {t1} [n] dst mem))))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Store {t5} (OffPtr <tt5> [o5] p5) d4
+ (Zero {t6} [n] p6 _)))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && t6.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ && n >= o5 + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [o5] dst) d4
+ (Zero {t1} [n] dst mem)))))
+
+(SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x
+(SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x
+
+// When rewriting append to growslice, we use as the new length the result of
+// growslice so that we don't have to spill/restore the new length around the growslice call.
+// The exception here is that if the new length is a constant, avoiding spilling it
+// is pointless and its constantness is sometimes useful for subsequent optimizations.
+// See issue 56440.
+// Note there are 2 rules here, one for the pre-decomposed []T result and one for
+// the post-decomposed (*T,int,int) result. (The latter is generated after call expansion.)
+(SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const(64|32)) _ _ _ _))) && isSameCall(sym, "runtime.growslice") => newLen
+(SelectN [1] (StaticCall {sym} _ newLen:(Const(64|32)) _ _ _ _)) && v.Type.IsInteger() && isSameCall(sym, "runtime.growslice") => newLen
+
+// Collapse moving A -> B -> C into just A -> C.
+// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
+// This happens most commonly when B is an autotmp inserted earlier
+// during compilation to ensure correctness.
+// Take care that overlapping moves are preserved.
+// Restrict this optimization to the stack, to avoid duplicating loads from the heap;
+// see CL 145208 for discussion.
+(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
+ && t1.Compare(t2) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ && isStackPtr(src) && !isVolatile(src)
+ && disjoint(src, s, tmp2, s)
+ && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ => (Move {t1} [s] dst src midmem)
+
+// Same, but for large types that require VarDefs.
+(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
+ && t1.Compare(t2) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ && isStackPtr(src) && !isVolatile(src)
+ && disjoint(src, s, tmp2, s)
+ && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ => (Move {t1} [s] dst src midmem)
+
+// Don't zero the same bits twice.
+(Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) && isSamePtr(dst1, dst2) => zero
+(Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) && isSamePtr(dst1, dst2) => vardef
+
+// Elide self-moves. This only happens rarely (e.g test/fixedbugs/bug277.go).
+// However, this rule is needed to prevent the previous rule from looping forever in such cases.
+(Move dst src mem) && isSamePtr(dst, src) => mem
+
+// Constant rotate detection.
+((Add64|Or64|Xor64) (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d]))) && c < 64 && d == 64-c && canRotate(config, 64) => (RotateLeft64 x z)
+((Add32|Or32|Xor32) (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d]))) && c < 32 && d == 32-c && canRotate(config, 32) => (RotateLeft32 x z)
+((Add16|Or16|Xor16) (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d]))) && c < 16 && d == 16-c && canRotate(config, 16) => (RotateLeft16 x z)
+((Add8|Or8|Xor8) (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d]))) && c < 8 && d == 8-c && canRotate(config, 8) => (RotateLeft8 x z)
+
+// Non-constant rotate detection.
+// We use shiftIsBounded to make sure that neither of the shifts are >64.
+// Note: these rules are subtle when the shift amounts are 0/64, as Go shifts
+// are different from most native shifts. But it works out.
+((Add64|Or64|Xor64) left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
+((Add64|Or64|Xor64) left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
+((Add64|Or64|Xor64) left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
+((Add64|Or64|Xor64) left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y)
+
+((Add64|Or64|Xor64) right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
+((Add64|Or64|Xor64) right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
+((Add64|Or64|Xor64) right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
+((Add64|Or64|Xor64) right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z)
+
+((Add32|Or32|Xor32) left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
+((Add32|Or32|Xor32) left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
+((Add32|Or32|Xor32) left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
+((Add32|Or32|Xor32) left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y)
+
+((Add32|Or32|Xor32) right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
+((Add32|Or32|Xor32) right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
+((Add32|Or32|Xor32) right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
+((Add32|Or32|Xor32) right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z)
+
+((Add16|Or16|Xor16) left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
+((Add16|Or16|Xor16) left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
+((Add16|Or16|Xor16) left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
+((Add16|Or16|Xor16) left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y)
+
+((Add16|Or16|Xor16) right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
+((Add16|Or16|Xor16) right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
+((Add16|Or16|Xor16) right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
+((Add16|Or16|Xor16) right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z)
+
+((Add8|Or8|Xor8) left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
+((Add8|Or8|Xor8) left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
+((Add8|Or8|Xor8) left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
+((Add8|Or8|Xor8) left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y)
+
+((Add8|Or8|Xor8) right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
+((Add8|Or8|Xor8) right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
+((Add8|Or8|Xor8) right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
+((Add8|Or8|Xor8) right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z)
+
+// Rotating by y&c, with c a mask that doesn't change the bottom bits, is the same as rotating by y.
+(RotateLeft64 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 63 => (RotateLeft64 x y)
+(RotateLeft32 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 31 => (RotateLeft32 x y)
+(RotateLeft16 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 15 => (RotateLeft16 x y)
+(RotateLeft8 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 7 => (RotateLeft8 x y)
+
+// Rotating by -(y&c), with c a mask that doesn't change the bottom bits, is the same as rotating by -y.
+(RotateLeft64 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&63 == 63 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y))
+(RotateLeft32 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&31 == 31 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y))
+(RotateLeft16 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&15 == 15 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y))
+(RotateLeft8 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&7 == 7 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y))
+
+// Rotating by y+c, with c a multiple of the value width, is the same as rotating by y.
+(RotateLeft64 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 0 => (RotateLeft64 x y)
+(RotateLeft32 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 0 => (RotateLeft32 x y)
+(RotateLeft16 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 0 => (RotateLeft16 x y)
+(RotateLeft8 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 0 => (RotateLeft8 x y)
+
+// Rotating by c-y, with c a multiple of the value width, is the same as rotating by -y.
+(RotateLeft64 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&63 == 0 => (RotateLeft64 x (Neg(64|32|16|8) <y.Type> y))
+(RotateLeft32 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&31 == 0 => (RotateLeft32 x (Neg(64|32|16|8) <y.Type> y))
+(RotateLeft16 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&15 == 0 => (RotateLeft16 x (Neg(64|32|16|8) <y.Type> y))
+(RotateLeft8 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&7 == 0 => (RotateLeft8 x (Neg(64|32|16|8) <y.Type> y))
+
+// Ensure we don't do Const64 rotates in a 32-bit system.
+(RotateLeft64 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft64 x (Const32 <t> [int32(c)]))
+(RotateLeft32 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft32 x (Const32 <t> [int32(c)]))
+(RotateLeft16 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft16 x (Const32 <t> [int32(c)]))
+(RotateLeft8 x (Const64 <t> [c])) && config.PtrSize == 4 => (RotateLeft8 x (Const32 <t> [int32(c)]))
+
+// Rotating by c, then by d, is the same as rotating by c+d.
+// We're trading a rotate for an add, which seems generally a good choice. It is especially good when c and d are constants.
+// This rule is a bit tricky as c and d might be different widths. We handle only cases where they are the same width.
+(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 8 && d.Type.Size() == 8 => (RotateLeft(64|32|16|8) x (Add64 <c.Type> c d))
+(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 4 && d.Type.Size() == 4 => (RotateLeft(64|32|16|8) x (Add32 <c.Type> c d))
+(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 2 && d.Type.Size() == 2 => (RotateLeft(64|32|16|8) x (Add16 <c.Type> c d))
+(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 1 && d.Type.Size() == 1 => (RotateLeft(64|32|16|8) x (Add8 <c.Type> c d))
+
+// Loading constant values from dictionaries and itabs.
+(Load <t> (OffPtr [off] (Addr {s} sb) ) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
+(Load <t> (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
+(Load <t> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
+(Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb)
+
+// Loading constant values from runtime._type.hash.
+(Load <t> (OffPtr [off] (Addr {sym} _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
+(Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
+(Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
+(Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)])
diff --git a/src/cmd/compile/internal/ssa/_gen/genericOps.go b/src/cmd/compile/internal/ssa/_gen/genericOps.go
new file mode 100644
index 0000000..69eb48c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/genericOps.go
@@ -0,0 +1,675 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Generic opcodes typically specify a width. The inputs and outputs
+// of that op are the given number of bits wide. There is no notion of
+// "sign", so Add32 can be used both for signed and unsigned 32-bit
+// addition.
+
+// Signed/unsigned is explicit with the extension ops
+// (SignExt*/ZeroExt*) and implicit as the arg to some opcodes
+// (e.g. the second argument to shifts is unsigned). If not mentioned,
+// all args take signed inputs, or don't care whether their inputs
+// are signed or unsigned.
+
+var genericOps = []opData{
+ // 2-input arithmetic
+ // Types must be consistent with Go typing. Add, for example, must take two values
+ // of the same type and produces that same type.
+ {name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
+ {name: "Add16", argLength: 2, commutative: true},
+ {name: "Add32", argLength: 2, commutative: true},
+ {name: "Add64", argLength: 2, commutative: true},
+ {name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
+ {name: "Add32F", argLength: 2, commutative: true},
+ {name: "Add64F", argLength: 2, commutative: true},
+
+ {name: "Sub8", argLength: 2}, // arg0 - arg1
+ {name: "Sub16", argLength: 2},
+ {name: "Sub32", argLength: 2},
+ {name: "Sub64", argLength: 2},
+ {name: "SubPtr", argLength: 2},
+ {name: "Sub32F", argLength: 2},
+ {name: "Sub64F", argLength: 2},
+
+ {name: "Mul8", argLength: 2, commutative: true}, // arg0 * arg1
+ {name: "Mul16", argLength: 2, commutative: true},
+ {name: "Mul32", argLength: 2, commutative: true},
+ {name: "Mul64", argLength: 2, commutative: true},
+ {name: "Mul32F", argLength: 2, commutative: true},
+ {name: "Mul64F", argLength: 2, commutative: true},
+
+ {name: "Div32F", argLength: 2}, // arg0 / arg1
+ {name: "Div64F", argLength: 2},
+
+ {name: "Hmul32", argLength: 2, commutative: true},
+ {name: "Hmul32u", argLength: 2, commutative: true},
+ {name: "Hmul64", argLength: 2, commutative: true},
+ {name: "Hmul64u", argLength: 2, commutative: true},
+
+ {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+
+ {name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32-> 64 unsigned multiply), returns (uint32(x), (uint32(x) != x))
+ {name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x))
+
+ // Weird special instructions for use in the strength reduction of divides.
+ // These ops compute unsigned (arg0 + arg1) / 2, correct to all
+ // 32/64 bits, even when the intermediate result of the add has 33/65 bits.
+ // These ops can assume arg0 >= arg1.
+ // Note: these ops aren't commutative!
+ {name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
+ {name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
+
+ // For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1
+ // or that the dividend is not the most negative value.
+ {name: "Div8", argLength: 2}, // arg0 / arg1, signed
+ {name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
+ {name: "Div16", argLength: 2, aux: "Bool"},
+ {name: "Div16u", argLength: 2},
+ {name: "Div32", argLength: 2, aux: "Bool"},
+ {name: "Div32u", argLength: 2},
+ {name: "Div64", argLength: 2, aux: "Bool"},
+ {name: "Div64u", argLength: 2},
+ {name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+
+ // For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "Mod8", argLength: 2}, // arg0 % arg1, signed
+ {name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
+ {name: "Mod16", argLength: 2, aux: "Bool"},
+ {name: "Mod16u", argLength: 2},
+ {name: "Mod32", argLength: 2, aux: "Bool"},
+ {name: "Mod32u", argLength: 2},
+ {name: "Mod64", argLength: 2, aux: "Bool"},
+ {name: "Mod64u", argLength: 2},
+
+ {name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
+ {name: "And16", argLength: 2, commutative: true},
+ {name: "And32", argLength: 2, commutative: true},
+ {name: "And64", argLength: 2, commutative: true},
+
+ {name: "Or8", argLength: 2, commutative: true}, // arg0 | arg1
+ {name: "Or16", argLength: 2, commutative: true},
+ {name: "Or32", argLength: 2, commutative: true},
+ {name: "Or64", argLength: 2, commutative: true},
+
+ {name: "Xor8", argLength: 2, commutative: true}, // arg0 ^ arg1
+ {name: "Xor16", argLength: 2, commutative: true},
+ {name: "Xor32", argLength: 2, commutative: true},
+ {name: "Xor64", argLength: 2, commutative: true},
+
+ // For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
+ // Shift amounts are considered unsigned.
+ // If arg1 is known to be nonnegative and less than the number of bits in arg0,
+ // then auxInt may be set to 1.
+ // This enables better code generation on some platforms.
+ {name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1
+ {name: "Lsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8x8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, signed
+ {name: "Rsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8Ux8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, unsigned
+ {name: "Rsh8Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux64", argLength: 2, aux: "Bool"},
+
+ // 2-input comparisons
+ {name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "Eq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"},
+
+ {name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"},
+
+ {name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
+ {name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
+ {name: "Less16", argLength: 2, typ: "Bool"},
+ {name: "Less16U", argLength: 2, typ: "Bool"},
+ {name: "Less32", argLength: 2, typ: "Bool"},
+ {name: "Less32U", argLength: 2, typ: "Bool"},
+ {name: "Less64", argLength: 2, typ: "Bool"},
+ {name: "Less64U", argLength: 2, typ: "Bool"},
+ {name: "Less32F", argLength: 2, typ: "Bool"},
+ {name: "Less64F", argLength: 2, typ: "Bool"},
+
+ {name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
+ {name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
+ {name: "Leq16", argLength: 2, typ: "Bool"},
+ {name: "Leq16U", argLength: 2, typ: "Bool"},
+ {name: "Leq32", argLength: 2, typ: "Bool"},
+ {name: "Leq32U", argLength: 2, typ: "Bool"},
+ {name: "Leq64", argLength: 2, typ: "Bool"},
+ {name: "Leq64U", argLength: 2, typ: "Bool"},
+ {name: "Leq32F", argLength: 2, typ: "Bool"},
+ {name: "Leq64F", argLength: 2, typ: "Bool"},
+
+ // the type of a CondSelect is the same as the type of its first
+ // two arguments, which should be register-width scalars; the third
+ // argument should be a boolean
+ {name: "CondSelect", argLength: 3}, // arg2 ? arg0 : arg1
+
+ // boolean ops
+ {name: "AndB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 && arg1 (not shortcircuited)
+ {name: "OrB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 || arg1 (not shortcircuited)
+ {name: "EqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "NeqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean
+
+ // 1-input ops
+ {name: "Neg8", argLength: 1}, // -arg0
+ {name: "Neg16", argLength: 1},
+ {name: "Neg32", argLength: 1},
+ {name: "Neg64", argLength: 1},
+ {name: "Neg32F", argLength: 1},
+ {name: "Neg64F", argLength: 1},
+
+ {name: "Com8", argLength: 1}, // ^arg0
+ {name: "Com16", argLength: 1},
+ {name: "Com32", argLength: 1},
+ {name: "Com64", argLength: 1},
+
+ {name: "Ctz8", argLength: 1}, // Count trailing (low order) zeroes (returns 0-8)
+ {name: "Ctz16", argLength: 1}, // Count trailing (low order) zeroes (returns 0-16)
+ {name: "Ctz32", argLength: 1}, // Count trailing (low order) zeroes (returns 0-32)
+ {name: "Ctz64", argLength: 1}, // Count trailing (low order) zeroes (returns 0-64)
+ {name: "Ctz8NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-7
+ {name: "Ctz16NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-15
+ {name: "Ctz32NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-31
+ {name: "Ctz64NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-63
+ {name: "BitLen8", argLength: 1}, // Number of bits in arg[0] (returns 0-8)
+ {name: "BitLen16", argLength: 1}, // Number of bits in arg[0] (returns 0-16)
+ {name: "BitLen32", argLength: 1}, // Number of bits in arg[0] (returns 0-32)
+ {name: "BitLen64", argLength: 1}, // Number of bits in arg[0] (returns 0-64)
+
+ {name: "Bswap16", argLength: 1}, // Swap bytes
+ {name: "Bswap32", argLength: 1}, // Swap bytes
+ {name: "Bswap64", argLength: 1}, // Swap bytes
+
+ {name: "BitRev8", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev16", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0]
+
+ {name: "PopCount8", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount16", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount32", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount64", argLength: 1}, // Count bits in arg[0]
+
+ // RotateLeftX instructions rotate the X bits of arg[0] to the left
+ // by the low lg_2(X) bits of arg[1], interpreted as an unsigned value.
+ // Note that this works out regardless of the bit width or signedness of
+ // arg[1]. In particular, RotateLeft by x is the same as RotateRight by -x.
+ {name: "RotateLeft64", argLength: 2},
+ {name: "RotateLeft32", argLength: 2},
+ {name: "RotateLeft16", argLength: 2},
+ {name: "RotateLeft8", argLength: 2},
+
+ // Square root.
+ // Special cases:
+ // +∞ → +∞
+ // ±0 → ±0 (sign preserved)
+ // x<0 → NaN
+ // NaN → NaN
+ {name: "Sqrt", argLength: 1}, // √arg0 (floating point, double precision)
+ {name: "Sqrt32", argLength: 1}, // √arg0 (floating point, single precision)
+
+ // Round to integer, float64 only.
+ // Special cases:
+ // ±∞ → ±∞ (sign preserved)
+ // ±0 → ±0 (sign preserved)
+ // NaN → NaN
+ {name: "Floor", argLength: 1}, // round arg0 toward -∞
+ {name: "Ceil", argLength: 1}, // round arg0 toward +∞
+ {name: "Trunc", argLength: 1}, // round arg0 toward 0
+ {name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0
+ {name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even
+
+ // Modify the sign bit
+ {name: "Abs", argLength: 1}, // absolute value arg0
+ {name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1
+
+ // Float min/max implementation, if hardware is available.
+ {name: "Min64F", argLength: 2}, // min(arg0,arg1)
+ {name: "Min32F", argLength: 2}, // min(arg0,arg1)
+ {name: "Max64F", argLength: 2}, // max(arg0,arg1)
+ {name: "Max32F", argLength: 2}, // max(arg0,arg1)
+
+ // 3-input opcode.
+ // Fused-multiply-add, float64 only.
+ // When a*b+c is exactly zero (before rounding), then the result is +0 or -0.
+ // The 0's sign is determined according to the standard rules for the
+ // addition (-0 if both a*b and c are -0, +0 otherwise).
+ //
+ // Otherwise, when a*b+c rounds to zero, then the resulting 0's sign is
+ // determined by the sign of the exact result a*b+c.
+ // See section 6.3 in ieee754.
+ //
+ // When the multiply is an infinity times a zero, the result is NaN.
+ // See section 7.2 in ieee754.
+ {name: "FMA", argLength: 3}, // compute (a*b)+c without intermediate rounding
+
+ // Data movement. Max argument length for Phi is indefinite.
+ {name: "Phi", argLength: -1, zeroWidth: true}, // select an argument based on which predecessor block we came from
+ {name: "Copy", argLength: 1}, // output = arg0
+ // Convert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // It gets compiled to nothing, so its result must in the same
+ // register as its argument. regalloc knows it can use any
+ // allocatable integer register for OpConvert.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "Convert", argLength: 2, zeroWidth: true, resultInArg0: true},
+
+ // constants. Constant values are stored in the aux or
+ // auxint fields.
+ {name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
+ {name: "ConstString", aux: "String"}, // value is aux.(string)
+ {name: "ConstNil", typ: "BytePtr"}, // nil pointer
+ {name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits
+ {name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits
+ {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
+ // Note: ConstX are sign-extended even when the type of the value is unsigned.
+ // For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
+ {name: "Const64", aux: "Int64"}, // value is auxint
+ // Note: for both Const32F and Const64F, we disallow encoding NaNs.
+ // Signaling NaNs are tricky because if you do anything with them, they become quiet.
+ // Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+ // See issue 36399 and 36400.
+ // Encodings of +inf, -inf, and -0 are fine.
+ {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
+ {name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
+ {name: "ConstInterface"}, // nil interface
+ {name: "ConstSlice"}, // nil slice
+
+ // Constant-like things
+ {name: "InitMem", zeroWidth: true}, // memory input to the function.
+ {name: "Arg", aux: "SymOff", symEffect: "Read", zeroWidth: true}, // argument to the function. aux=GCNode of arg, off = offset in that arg.
+
+ // Like Arg, these are generic ops that survive lowering. AuxInt is a register index, and the actual output register for each index is defined by the architecture.
+ // AuxInt = integer argument index (not a register number). ABI-specified spill loc obtained from function
+ {name: "ArgIntReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in an int reg.
+ {name: "ArgFloatReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in a float reg.
+
+ // The address of a variable. arg0 is the base pointer.
+ // If the variable is a global, the base pointer will be SB and
+ // the Aux field will be a *obj.LSym.
+ // If the variable is a local, the base pointer will be SP and
+ // the Aux field will be a *gc.Node.
+ {name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SB. Aux identifies the variable.
+ {name: "LocalAddr", argLength: 2, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP. Arg1=mem. Aux identifies the variable.
+
+ {name: "SP", zeroWidth: true}, // stack pointer
+ {name: "SB", typ: "Uintptr", zeroWidth: true}, // static base pointer (a.k.a. globals pointer)
+ {name: "Invalid"}, // unused value
+ {name: "SPanchored", typ: "Uintptr", argLength: 2, zeroWidth: true}, // arg0 = SP, arg1 = mem. Result is identical to arg0, but cannot be scheduled before memory state arg1.
+
+ // Memory operations
+ {name: "Load", argLength: 2}, // Load from arg0. arg1=memory
+ {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value".
+ {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+ // Normally we require that the source and destination of Move do not overlap.
+ // There is an exception when we know all the loads will happen before all
+ // the stores. In that case, overlap is ok. See
+ // memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go)
+ // returns true, we must do all loads before all stores, when lowering Move.
+ // The type of Move is used for the write barrier pass to insert write barriers
+ // and for alignment on some architectures.
+ // For pointerless types, it is possible for the type to be inaccurate.
+ // For type alignment and pointer information, use the type in Aux;
+ // for type size, use the size in AuxInt.
+ // The "inline runtime.memmove" rewrite rule generates Moves with inaccurate types,
+ // such as type byte instead of the more accurate type [8]byte.
+ {name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
+ {name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
+
+ // Memory operations with write barriers.
+ // Expand to runtime calls. Write barrier will be removed if write on stack.
+ {name: "StoreWB", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+ {name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
+ {name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
+ {name: "WBend", argLength: 1, typ: "Mem"}, // Write barrier code is done, interrupting is now allowed.
+
+ // WB invokes runtime.gcWriteBarrier. This is not a normal
+ // call: it takes arguments in registers, doesn't clobber
+ // general-purpose registers (the exact clobber set is
+ // arch-dependent), and is not a safe-point.
+ {name: "WB", argLength: 1, typ: "(BytePtr,Mem)", aux: "Int64"}, // arg0=mem, auxint=# of buffer entries needed. Returns buffer pointer and memory.
+
+ {name: "HasCPUFeature", argLength: 0, typ: "bool", aux: "Sym", symEffect: "None"}, // aux=place that this feature flag can be loaded from
+
+ // PanicBounds and PanicExtend generate a runtime panic.
+ // Their arguments provide index values to use in panic messages.
+ // Both PanicBounds and PanicExtend have an AuxInt value from the BoundsKind type (in ../op.go).
+ // PanicBounds' index is int sized.
+ // PanicExtend's index is int64 sized. (PanicExtend is only used on 32-bit archs.)
+ {name: "PanicBounds", argLength: 3, aux: "Int64", typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory.
+ {name: "PanicExtend", argLength: 4, aux: "Int64", typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory.
+
+ // Function calls. Arguments to the call have already been written to the stack.
+ // Return values appear on the stack. The method receiver, if any, is treated
+ // as a phantom first argument.
+ // TODO(josharian): ClosureCall and InterCall should have Int32 aux
+ // to match StaticCall's 32 bit arg size limit.
+ // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff?
+
+ // Before lowering, LECalls receive their fixed inputs (first), memory (last),
+ // and a variable number of input values in the middle.
+ // They produce a variable number of result values.
+ // These values are not necessarily "SSA-able"; they can be too large,
+ // but in that case inputs are loaded immediately before with OpDereference,
+ // and outputs are stored immediately with OpStore.
+ //
+ // After call expansion, Calls have the same fixed-middle-memory arrangement of inputs,
+ // with the difference that the "middle" is only the register-resident inputs,
+ // and the non-register inputs are instead stored at ABI-defined offsets from SP
+ // (and the stores thread through the memory that is ultimately an input to the call).
+ // Outputs follow a similar pattern; register-resident outputs are the leading elements
+ // of a Result-typed output, with memory last, and any memory-resident outputs have been
+ // stored to ABI-defined locations. Each non-memory input or output fits in a register.
+ //
+ // Subsequent architecture-specific lowering only changes the opcode.
+
+ {name: "ClosureCall", argLength: -1, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
+ {name: "StaticCall", argLength: -1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
+ {name: "InterCall", argLength: -1, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1..argN-1 are register inputs, argN=memory, auxint=arg size. Returns Result of register results, plus memory.
+ {name: "TailCall", argLength: -1, aux: "CallOff", call: true}, // tail call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
+
+ {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "TailLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static tail call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+
+ // Conversions: signed extensions, zero (unsigned) extensions, truncations
+ {name: "SignExt8to16", argLength: 1, typ: "Int16"},
+ {name: "SignExt8to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt8to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt16to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt16to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt32to64", argLength: 1, typ: "Int64"},
+ {name: "ZeroExt8to16", argLength: 1, typ: "UInt16"},
+ {name: "ZeroExt8to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt8to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt16to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt16to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt32to64", argLength: 1, typ: "UInt64"},
+ {name: "Trunc16to8", argLength: 1},
+ {name: "Trunc32to8", argLength: 1},
+ {name: "Trunc32to16", argLength: 1},
+ {name: "Trunc64to8", argLength: 1},
+ {name: "Trunc64to16", argLength: 1},
+ {name: "Trunc64to32", argLength: 1},
+
+ {name: "Cvt32to32F", argLength: 1},
+ {name: "Cvt32to64F", argLength: 1},
+ {name: "Cvt64to32F", argLength: 1},
+ {name: "Cvt64to64F", argLength: 1},
+ {name: "Cvt32Fto32", argLength: 1},
+ {name: "Cvt32Fto64", argLength: 1},
+ {name: "Cvt64Fto32", argLength: 1},
+ {name: "Cvt64Fto64", argLength: 1},
+ {name: "Cvt32Fto64F", argLength: 1},
+ {name: "Cvt64Fto32F", argLength: 1},
+ {name: "CvtBoolToUint8", argLength: 1},
+
+ // Force rounding to precision of type.
+ {name: "Round32F", argLength: 1},
+ {name: "Round64F", argLength: 1},
+
+ // Automatically inserted safety checks
+ {name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
+ {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
+ {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
+ {name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified.
+
+ // Pseudo-ops
+ {name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
+ {name: "GetClosurePtr"}, // get closure pointer from dedicated register
+ {name: "GetCallerPC"}, // for getcallerpc intrinsic
+ {name: "GetCallerSP", argLength: 1}, // for getcallersp intrinsic. arg0=mem.
+
+ // Indexing operations
+ {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
+ {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
+
+ // Slices
+ {name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
+ {name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
+ {name: "SliceLen", argLength: 1}, // len(arg0)
+ {name: "SliceCap", argLength: 1}, // cap(arg0)
+ // SlicePtrUnchecked, like SlicePtr, extracts the pointer from a slice.
+ // SlicePtr values are assumed non-nil, because they are guarded by bounds checks.
+ // SlicePtrUnchecked values can be nil.
+ {name: "SlicePtrUnchecked", argLength: 1},
+
+ // Complex (part/whole)
+ {name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag
+ {name: "ComplexReal", argLength: 1}, // real(arg0)
+ {name: "ComplexImag", argLength: 1}, // imag(arg0)
+
+ // Strings
+ {name: "StringMake", argLength: 2}, // arg0=ptr, arg1=len
+ {name: "StringPtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
+ {name: "StringLen", argLength: 1, typ: "Int"}, // len(arg0)
+
+ // Interfaces
+ {name: "IMake", argLength: 2}, // arg0=itab, arg1=data
+ {name: "ITab", argLength: 1, typ: "Uintptr"}, // arg0=interface, returns itable field
+ {name: "IData", argLength: 1}, // arg0=interface, returns data field
+
+ // Structs
+ {name: "StructMake0"}, // Returns struct with 0 fields.
+ {name: "StructMake1", argLength: 1}, // arg0=field0. Returns struct.
+ {name: "StructMake2", argLength: 2}, // arg0,arg1=field0,field1. Returns struct.
+ {name: "StructMake3", argLength: 3}, // arg0..2=field0..2. Returns struct.
+ {name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
+ {name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.
+
+ // Arrays
+ {name: "ArrayMake0"}, // Returns array with 0 elements
+ {name: "ArrayMake1", argLength: 1}, // Returns array with 1 element
+ {name: "ArraySelect", argLength: 1, aux: "Int64"}, // arg0=array, auxint=index. Returns a[i].
+
+ // Spill&restore ops for the register allocator. These are
+ // semantically identical to OpCopy; they do not take/return
+ // stores like regular memory ops do. We can get away without memory
+ // args because we know there is no aliasing of spill slots on the stack.
+ {name: "StoreReg", argLength: 1},
+ {name: "LoadReg", argLength: 1},
+
+ // Used during ssa construction. Like Copy, but the arg has not been specified yet.
+ {name: "FwdRef", aux: "Sym", symEffect: "None"},
+
+ // Unknown value. Used for Values whose values don't matter because they are dead code.
+ {name: "Unknown"},
+
+ {name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
+ // TODO: what's the difference between VarLive and KeepAlive?
+ {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
+ {name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
+
+ // InlMark marks the start of an inlined function body. Its AuxInt field
+ // distinguishes which entry in the local inline tree it is marking.
+ {name: "InlMark", argLength: 1, aux: "Int32", typ: "Void"}, // arg[0]=mem, returns void.
+
+ // Ops for breaking 64-bit operations on 32-bit architectures
+ {name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
+ {name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0
+ {name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0
+
+ {name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry)
+ {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1)
+
+ {name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry)
+ {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1)
+
+ {name: "Add64carry", argLength: 3, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 + arg1 + arg2, arg2 must be 0 or 1. returns (value, value>>64)
+ {name: "Sub64borrow", argLength: 3, typ: "(UInt64,UInt64)"}, // arg0 - (arg1 + arg2), arg2 must be 0 or 1. returns (value, value>>64&1)
+
+ {name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0
+ {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
+ {name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size.
+
+ {name: "SpectreIndex", argLength: 2}, // arg0 if 0 <= arg0 < arg1, 0 otherwise. Type is native int size.
+ {name: "SpectreSliceIndex", argLength: 2}, // arg0 if 0 <= arg0 <= arg1, 0 otherwise. Type is native int size.
+
+ {name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
+ {name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
+ {name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
+ {name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch
+ {name: "Cvt64Uto32F", argLength: 1}, // uint64 -> float32, only used on archs that has the instruction
+ {name: "Cvt64Uto64F", argLength: 1}, // uint64 -> float64, only used on archs that has the instruction
+ {name: "Cvt32Fto64U", argLength: 1}, // float32 -> uint64, only used on archs that has the instruction
+ {name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that has the instruction
+
+ // pseudo-ops for breaking Tuple
+ {name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple
+ {name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple
+ {name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the auxint'th member.
+ {name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types.
+ {name: "MakeResult", argLength: -1}, // arg0 .. are components of a "Result" (like the result from a Call). The last arg should be memory (like the result from a call).
+
+ // Atomic operations used for semantically inlining sync/atomic and
+ // runtime/internal/atomic. Atomic loads return a new memory so that
+ // the loads are properly ordered with respect to other loads and
+ // stores.
+ {name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicLoadAcq64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicStore8", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicStoreRel64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
+ {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+
+ // Atomic operation variants
+ // These variants have the same semantics as above atomic operations.
+ // But they are used for generating more efficient code on certain modern machines, with run-time CPU feature detection.
+ // Currently, they are used on ARM64 only.
+ {name: "AtomicAdd32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicExchange32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicCompareAndSwap32Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicAnd8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+
+ // Publication barrier
+ {name: "PubBarrier", argLength: 1, hasSideEffects: true}, // Do data barrier. arg0=memory.
+
+ // Clobber experiment op
+ {name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable
+ {name: "ClobberReg", argLength: 0, typ: "Void"}, // clobber a register
+
+ // Prefetch instruction
+ {name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory.
+ {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory.
+}
+
+// kind controls successors implicit exit
+// ----------------------------------------------------------
+// Exit [return mem] [] yes
+// Ret [return mem] [] yes
+// RetJmp [return mem] [] yes
+// Plain [] [next]
+// If [boolean Value] [then, else]
+// First [] [always, never]
+// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
+//JumpTable [integer Value] [succ1,succ2,..]
+
+var genericBlocks = []blockData{
+ {name: "Plain"}, // a single successor
+ {name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
+ {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+ {name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
+ {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is a tail call
+ {name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
+ {name: "JumpTable", controls: 1}, // multiple successors, the integer Controls[0] selects which one
+
+ // transient block state used for dead code removal
+ {name: "First"}, // 2 successors, always takes the first one (second is dead)
+}
+
+func init() {
+ archs = append(archs, arch{
+ name: "generic",
+ ops: genericOps,
+ blocks: genericBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/_gen/main.go b/src/cmd/compile/internal/ssa/_gen/main.go
new file mode 100644
index 0000000..086418c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/main.go
@@ -0,0 +1,571 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The gen command generates Go code (in the parent directory) for all
+// the architecture-specific opcodes, blocks, and rewrites.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "log"
+ "math/bits"
+ "os"
+ "path"
+ "regexp"
+ "runtime"
+ "runtime/pprof"
+ "runtime/trace"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// TODO: capitalize these types, so that we can more easily tell variable names
+// apart from type names, and avoid awkward func parameters like "arch arch".
+
+type arch struct {
+ name string
+ pkg string // obj package to import for this arch.
+ genfile string // source file containing opcode code generation.
+ ops []opData
+ blocks []blockData
+ regnames []string
+ ParamIntRegNames string
+ ParamFloatRegNames string
+ gpregmask regMask
+ fpregmask regMask
+ fp32regmask regMask
+ fp64regmask regMask
+ specialregmask regMask
+ framepointerreg int8
+ linkreg int8
+ generic bool
+ imports []string
+}
+
+type opData struct {
+ name string
+ reg regInfo
+ asm string
+ typ string // default result type
+ aux string
+ rematerializeable bool
+ argLength int32 // number of arguments, if -1, then this operation has a variable number of arguments
+ commutative bool // this operation is commutative on its first 2 arguments (e.g. addition)
+ resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
+ resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
+ clobberFlags bool // this op clobbers flags register
+ needIntTemp bool // need a temporary free integer register
+ call bool // is a function call
+ tailCall bool // is a tail call
+ nilCheck bool // this op is a nil check on arg0
+ faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
+ faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
+ hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182.
+ zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
+ unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption
+ symEffect string // effect this op has on symbol in aux
+ scale uint8 // amd64/386 indexed load scale
+}
+
+type blockData struct {
+ name string // the suffix for this block ("EQ", "LT", etc.)
+ controls int // the number of control values this type of block requires
+ aux string // the type of the Aux/AuxInt value, if any
+}
+
+type regInfo struct {
+ // inputs[i] encodes the set of registers allowed for the i'th input.
+ // Inputs that don't use registers (flags, memory, etc.) should be 0.
+ inputs []regMask
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
+ clobbers regMask
+ // outputs[i] encodes the set of registers allowed for the i'th output.
+ outputs []regMask
+}
+
+type regMask uint64
+
+func (a arch) regMaskComment(r regMask) string {
+ var buf strings.Builder
+ for i := uint64(0); r != 0; i++ {
+ if r&1 != 0 {
+ if buf.Len() == 0 {
+ buf.WriteString(" //")
+ }
+ buf.WriteString(" ")
+ buf.WriteString(a.regnames[i])
+ }
+ r >>= 1
+ }
+ return buf.String()
+}
+
+var archs []arch
+
+var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
+var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
+var tracefile = flag.String("trace", "", "write trace to `file`")
+
+func main() {
+ flag.Parse()
+ if *cpuprofile != "" {
+ f, err := os.Create(*cpuprofile)
+ if err != nil {
+ log.Fatal("could not create CPU profile: ", err)
+ }
+ defer f.Close()
+ if err := pprof.StartCPUProfile(f); err != nil {
+ log.Fatal("could not start CPU profile: ", err)
+ }
+ defer pprof.StopCPUProfile()
+ }
+ if *tracefile != "" {
+ f, err := os.Create(*tracefile)
+ if err != nil {
+ log.Fatalf("failed to create trace output file: %v", err)
+ }
+ defer func() {
+ if err := f.Close(); err != nil {
+ log.Fatalf("failed to close trace file: %v", err)
+ }
+ }()
+
+ if err := trace.Start(f); err != nil {
+ log.Fatalf("failed to start trace: %v", err)
+ }
+ defer trace.Stop()
+ }
+
+ sort.Sort(ArchsByName(archs))
+
+ // The generate tasks are run concurrently, since they are CPU-intensive
+ // that can easily make use of many cores on a machine.
+ //
+ // Note that there is no limit on the concurrency at the moment. On a
+ // four-core laptop at the time of writing, peak RSS usually reaches
+ // ~200MiB, which seems doable by practically any machine nowadays. If
+ // that stops being the case, we can cap this func to a fixed number of
+ // architectures being generated at once.
+
+ tasks := []func(){
+ genOp,
+ genAllocators,
+ }
+ for _, a := range archs {
+ a := a // the funcs are ran concurrently at a later time
+ tasks = append(tasks, func() {
+ genRules(a)
+ genSplitLoadRules(a)
+ genLateLowerRules(a)
+ })
+ }
+ var wg sync.WaitGroup
+ for _, task := range tasks {
+ task := task
+ wg.Add(1)
+ go func() {
+ task()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ if *memprofile != "" {
+ f, err := os.Create(*memprofile)
+ if err != nil {
+ log.Fatal("could not create memory profile: ", err)
+ }
+ defer f.Close()
+ runtime.GC() // get up-to-date statistics
+ if err := pprof.WriteHeapProfile(f); err != nil {
+ log.Fatal("could not write memory profile: ", err)
+ }
+ }
+}
+
+func genOp() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated from _gen/*Ops.go using 'go generate'; DO NOT EDIT.\n")
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "package ssa")
+
+ fmt.Fprintln(w, "import (")
+ fmt.Fprintln(w, "\"cmd/internal/obj\"")
+ for _, a := range archs {
+ if a.pkg != "" {
+ fmt.Fprintf(w, "%q\n", a.pkg)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate Block* declarations
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "BlockInvalid BlockKind = iota")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, d := range a.blocks {
+ fmt.Fprintf(w, "Block%s%s\n", a.Name(), d.name)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate block kind string method
+ fmt.Fprintln(w, "var blockString = [...]string{")
+ fmt.Fprintln(w, "BlockInvalid:\"BlockInvalid\",")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, b := range a.blocks {
+ fmt.Fprintf(w, "Block%s%s:\"%s\",\n", a.Name(), b.name, b.name)
+ }
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintln(w, "func (k BlockKind) String() string {return blockString[k]}")
+
+ // generate block kind auxint method
+ fmt.Fprintln(w, "func (k BlockKind) AuxIntType() string {")
+ fmt.Fprintln(w, "switch k {")
+ for _, a := range archs {
+ for _, b := range a.blocks {
+ if b.auxIntType() == "invalid" {
+ continue
+ }
+ fmt.Fprintf(w, "case Block%s%s: return \"%s\"\n", a.Name(), b.name, b.auxIntType())
+ }
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintln(w, "return \"\"")
+ fmt.Fprintln(w, "}")
+
+ // generate Op* declarations
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "OpInvalid Op = iota") // make sure OpInvalid is 0.
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, v := range a.ops {
+ if v.name == "Invalid" {
+ continue
+ }
+ fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate OpInfo table
+ fmt.Fprintln(w, "var opcodeTable = [...]opInfo{")
+ fmt.Fprintln(w, " { name: \"OpInvalid\" },")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+
+ pkg := path.Base(a.pkg)
+ for _, v := range a.ops {
+ if v.name == "Invalid" {
+ continue
+ }
+ fmt.Fprintln(w, "{")
+ fmt.Fprintf(w, "name:\"%s\",\n", v.name)
+
+ // flags
+ if v.aux != "" {
+ fmt.Fprintf(w, "auxType: aux%s,\n", v.aux)
+ }
+ fmt.Fprintf(w, "argLen: %d,\n", v.argLength)
+
+ if v.rematerializeable {
+ if v.reg.clobbers != 0 {
+ log.Fatalf("%s is rematerializeable and clobbers registers", v.name)
+ }
+ if v.clobberFlags {
+ log.Fatalf("%s is rematerializeable and clobbers flags", v.name)
+ }
+ fmt.Fprintln(w, "rematerializeable: true,")
+ }
+ if v.commutative {
+ fmt.Fprintln(w, "commutative: true,")
+ }
+ if v.resultInArg0 {
+ fmt.Fprintln(w, "resultInArg0: true,")
+ // OpConvert's register mask is selected dynamically,
+ // so don't try to check it in the static table.
+ if v.name != "Convert" && v.reg.inputs[0] != v.reg.outputs[0] {
+ log.Fatalf("%s: input[0] and output[0] must use the same registers for %s", a.name, v.name)
+ }
+ if v.name != "Convert" && v.commutative && v.reg.inputs[1] != v.reg.outputs[0] {
+ log.Fatalf("%s: input[1] and output[0] must use the same registers for %s", a.name, v.name)
+ }
+ }
+ if v.resultNotInArgs {
+ fmt.Fprintln(w, "resultNotInArgs: true,")
+ }
+ if v.clobberFlags {
+ fmt.Fprintln(w, "clobberFlags: true,")
+ }
+ if v.needIntTemp {
+ fmt.Fprintln(w, "needIntTemp: true,")
+ }
+ if v.call {
+ fmt.Fprintln(w, "call: true,")
+ }
+ if v.tailCall {
+ fmt.Fprintln(w, "tailCall: true,")
+ }
+ if v.nilCheck {
+ fmt.Fprintln(w, "nilCheck: true,")
+ }
+ if v.faultOnNilArg0 {
+ fmt.Fprintln(w, "faultOnNilArg0: true,")
+ if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
+ log.Fatalf("faultOnNilArg0 with aux %s not allowed", v.aux)
+ }
+ }
+ if v.faultOnNilArg1 {
+ fmt.Fprintln(w, "faultOnNilArg1: true,")
+ if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
+ log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux)
+ }
+ }
+ if v.hasSideEffects {
+ fmt.Fprintln(w, "hasSideEffects: true,")
+ }
+ if v.zeroWidth {
+ fmt.Fprintln(w, "zeroWidth: true,")
+ }
+ if v.unsafePoint {
+ fmt.Fprintln(w, "unsafePoint: true,")
+ }
+ needEffect := strings.HasPrefix(v.aux, "Sym")
+ if v.symEffect != "" {
+ if !needEffect {
+ log.Fatalf("symEffect with aux %s not allowed", v.aux)
+ }
+ fmt.Fprintf(w, "symEffect: Sym%s,\n", strings.Replace(v.symEffect, ",", "|Sym", -1))
+ } else if needEffect {
+ log.Fatalf("symEffect needed for aux %s", v.aux)
+ }
+ if a.name == "generic" {
+ fmt.Fprintln(w, "generic:true,")
+ fmt.Fprintln(w, "},") // close op
+ // generic ops have no reg info or asm
+ continue
+ }
+ if v.asm != "" {
+ fmt.Fprintf(w, "asm: %s.A%s,\n", pkg, v.asm)
+ }
+ if v.scale != 0 {
+ fmt.Fprintf(w, "scale: %d,\n", v.scale)
+ }
+ fmt.Fprintln(w, "reg:regInfo{")
+
+ // Compute input allocation order. We allocate from the
+ // most to the least constrained input. This order guarantees
+ // that we will always be able to find a register.
+ var s []intPair
+ for i, r := range v.reg.inputs {
+ if r != 0 {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "inputs: []inputInfo{")
+ for _, p := range s {
+ r := v.reg.inputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
+ }
+ fmt.Fprintln(w, "},")
+ }
+
+ if v.reg.clobbers > 0 {
+ fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
+ }
+
+ // reg outputs
+ s = s[:0]
+ for i, r := range v.reg.outputs {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "outputs: []outputInfo{")
+ for _, p := range s {
+ r := v.reg.outputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
+ }
+ fmt.Fprintln(w, "},")
+ }
+ fmt.Fprintln(w, "},") // close reg info
+ fmt.Fprintln(w, "},") // close op
+ }
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
+ fmt.Fprintln(w, "func (o Op) Scale() int16 {return int16(opcodeTable[o].scale)}")
+
+ // generate op string method
+ fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
+
+ fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }")
+ fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }")
+ fmt.Fprintln(w, "func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall }")
+ fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }")
+ fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }")
+ fmt.Fprintln(w, "func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }")
+
+ // generate registers
+ for _, a := range archs {
+ if a.generic {
+ continue
+ }
+ fmt.Fprintf(w, "var registers%s = [...]Register {\n", a.name)
+ var gcRegN int
+ num := map[string]int8{}
+ for i, r := range a.regnames {
+ num[r] = int8(i)
+ pkg := a.pkg[len("cmd/internal/obj/"):]
+ var objname string // name in cmd/internal/obj/$ARCH
+ switch r {
+ case "SB":
+ // SB isn't a real register. cmd/internal/obj expects 0 in this case.
+ objname = "0"
+ case "SP":
+ objname = pkg + ".REGSP"
+ case "g":
+ objname = pkg + ".REGG"
+ default:
+ objname = pkg + ".REG_" + r
+ }
+ // Assign a GC register map index to registers
+ // that may contain pointers.
+ gcRegIdx := -1
+ if a.gpregmask&(1<<uint(i)) != 0 {
+ gcRegIdx = gcRegN
+ gcRegN++
+ }
+ fmt.Fprintf(w, " {%d, %s, %d, \"%s\"},\n", i, objname, gcRegIdx, r)
+ }
+ parameterRegisterList := func(paramNamesString string) []int8 {
+ paramNamesString = strings.TrimSpace(paramNamesString)
+ if paramNamesString == "" {
+ return nil
+ }
+ paramNames := strings.Split(paramNamesString, " ")
+ var paramRegs []int8
+ for _, regName := range paramNames {
+ if regName == "" {
+ // forgive extra spaces
+ continue
+ }
+ if regNum, ok := num[regName]; ok {
+ paramRegs = append(paramRegs, regNum)
+ delete(num, regName)
+ } else {
+ log.Fatalf("parameter register %s for architecture %s not a register name (or repeated in parameter list)", regName, a.name)
+ }
+ }
+ return paramRegs
+ }
+
+ paramIntRegs := parameterRegisterList(a.ParamIntRegNames)
+ paramFloatRegs := parameterRegisterList(a.ParamFloatRegNames)
+
+ if gcRegN > 32 {
+ // Won't fit in a uint32 mask.
+ log.Fatalf("too many GC registers (%d > 32) on %s", gcRegN, a.name)
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintf(w, "var paramIntReg%s = %#v\n", a.name, paramIntRegs)
+ fmt.Fprintf(w, "var paramFloatReg%s = %#v\n", a.name, paramFloatRegs)
+ fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask)
+ fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask)
+ if a.fp32regmask != 0 {
+ fmt.Fprintf(w, "var fp32RegMask%s = regMask(%d)\n", a.name, a.fp32regmask)
+ }
+ if a.fp64regmask != 0 {
+ fmt.Fprintf(w, "var fp64RegMask%s = regMask(%d)\n", a.name, a.fp64regmask)
+ }
+ fmt.Fprintf(w, "var specialRegMask%s = regMask(%d)\n", a.name, a.specialregmask)
+ fmt.Fprintf(w, "var framepointerReg%s = int8(%d)\n", a.name, a.framepointerreg)
+ fmt.Fprintf(w, "var linkReg%s = int8(%d)\n", a.name, a.linkreg)
+ }
+
+ // gofmt result
+ b := w.Bytes()
+ var err error
+ b, err = format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", w.Bytes())
+ panic(err)
+ }
+
+ if err := os.WriteFile("../opGen.go", b, 0666); err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+
+ // Check that the arch genfile handles all the arch-specific opcodes.
+ // This is very much a hack, but it is better than nothing.
+ //
+ // Do a single regexp pass to record all ops being handled in a map, and
+ // then compare that with the ops list. This is much faster than one
+ // regexp pass per opcode.
+ for _, a := range archs {
+ if a.genfile == "" {
+ continue
+ }
+
+ pattern := fmt.Sprintf(`\Wssa\.Op%s([a-zA-Z0-9_]+)\W`, a.name)
+ rxOp, err := regexp.Compile(pattern)
+ if err != nil {
+ log.Fatalf("bad opcode regexp %s: %v", pattern, err)
+ }
+
+ src, err := os.ReadFile(a.genfile)
+ if err != nil {
+ log.Fatalf("can't read %s: %v", a.genfile, err)
+ }
+ seen := make(map[string]bool, len(a.ops))
+ for _, m := range rxOp.FindAllSubmatch(src, -1) {
+ seen[string(m[1])] = true
+ }
+ for _, op := range a.ops {
+ if !seen[op.name] {
+ log.Fatalf("Op%s%s has no code generation in %s", a.name, op.name, a.genfile)
+ }
+ }
+ }
+}
+
+// Name returns the name of the architecture for use in Op* and Block* enumerations.
+func (a arch) Name() string {
+ s := a.name
+ if s == "generic" {
+ s = ""
+ }
+ return s
+}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ return bits.OnesCount64(uint64(r))
+}
+
+// for sorting a pair of integers by key
+type intPair struct {
+ key, val int
+}
+type byKey []intPair
+
+func (a byKey) Len() int { return len(a) }
+func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+type ArchsByName []arch
+
+func (x ArchsByName) Len() int { return len(x) }
+func (x ArchsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ArchsByName) Less(i, j int) bool { return x[i].name < x[j].name }
diff --git a/src/cmd/compile/internal/ssa/_gen/rulegen.go b/src/cmd/compile/internal/ssa/_gen/rulegen.go
new file mode 100644
index 0000000..072df29
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/_gen/rulegen.go
@@ -0,0 +1,1885 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates Go code that applies rewrite rules to a Value.
+// The generated code implements a function of type func (v *Value) bool
+// which reports whether if did something.
+// Ideas stolen from Swift: http://www.hpl.hp.com/techreports/Compaq-DEC/WRL-2000-2.html
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// rule syntax:
+// sexpr [&& extra conditions] => [@block] sexpr
+//
+// sexpr are s-expressions (lisp-like parenthesized groupings)
+// sexpr ::= [variable:](opcode sexpr*)
+// | variable
+// | <type>
+// | [auxint]
+// | {aux}
+//
+// aux ::= variable | {code}
+// type ::= variable | {code}
+// variable ::= some token
+// opcode ::= one of the opcodes from the *Ops.go files
+
+// special rules: trailing ellipsis "..." (in the outermost sexpr?) must match on both sides of a rule.
+// trailing three underscore "___" in the outermost match sexpr indicate the presence of
+// extra ignored args that need not appear in the replacement
+
+// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
+// variables declared in the matching tsexpr. The variable "v" is predefined to be
+// the value matched by the entire rule.
+
+// If multiple rules match, the first one in file order is selected.
+
+var (
+ genLog = flag.Bool("log", false, "generate code that logs; for debugging only")
+ addLine = flag.Bool("line", false, "add line number comment to generated rules; for debugging only")
+)
+
+type Rule struct {
+ Rule string
+ Loc string // file name & line number
+}
+
+func (r Rule) String() string {
+ return fmt.Sprintf("rule %q at %s", r.Rule, r.Loc)
+}
+
+func normalizeSpaces(s string) string {
+ return strings.Join(strings.Fields(strings.TrimSpace(s)), " ")
+}
+
+// parse returns the matching part of the rule, additional conditions, and the result.
+func (r Rule) parse() (match, cond, result string) {
+ s := strings.Split(r.Rule, "=>")
+ match = normalizeSpaces(s[0])
+ result = normalizeSpaces(s[1])
+ cond = ""
+ if i := strings.Index(match, "&&"); i >= 0 {
+ cond = normalizeSpaces(match[i+2:])
+ match = normalizeSpaces(match[:i])
+ }
+ return match, cond, result
+}
+
+func genRules(arch arch) { genRulesSuffix(arch, "") }
+func genSplitLoadRules(arch arch) { genRulesSuffix(arch, "splitload") }
+func genLateLowerRules(arch arch) { genRulesSuffix(arch, "latelower") }
+
+func genRulesSuffix(arch arch, suff string) {
+ // Open input file.
+ text, err := os.Open(arch.name + suff + ".rules")
+ if err != nil {
+ if suff == "" {
+ // All architectures must have a plain rules file.
+ log.Fatalf("can't read rule file: %v", err)
+ }
+ // Some architectures have bonus rules files that others don't share. That's fine.
+ return
+ }
+
+ // oprules contains a list of rules for each block and opcode
+ blockrules := map[string][]Rule{}
+ oprules := map[string][]Rule{}
+
+ // read rule file
+ scanner := bufio.NewScanner(text)
+ rule := ""
+ var lineno int
+ var ruleLineno int // line number of "=>"
+ for scanner.Scan() {
+ lineno++
+ line := scanner.Text()
+ if i := strings.Index(line, "//"); i >= 0 {
+ // Remove comments. Note that this isn't string safe, so
+ // it will truncate lines with // inside strings. Oh well.
+ line = line[:i]
+ }
+ rule += " " + line
+ rule = strings.TrimSpace(rule)
+ if rule == "" {
+ continue
+ }
+ if !strings.Contains(rule, "=>") {
+ continue
+ }
+ if ruleLineno == 0 {
+ ruleLineno = lineno
+ }
+ if strings.HasSuffix(rule, "=>") {
+ continue // continue on the next line
+ }
+ if n := balance(rule); n > 0 {
+ continue // open parentheses remain, continue on the next line
+ } else if n < 0 {
+ break // continuing the line can't help, and it will only make errors worse
+ }
+
+ loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno)
+ for _, rule2 := range expandOr(rule) {
+ r := Rule{Rule: rule2, Loc: loc}
+ if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) {
+ blockrules[rawop] = append(blockrules[rawop], r)
+ continue
+ }
+ // Do fancier value op matching.
+ match, _, _ := r.parse()
+ op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+ opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+ oprules[opname] = append(oprules[opname], r)
+ }
+ rule = ""
+ ruleLineno = 0
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("scanner failed: %v\n", err)
+ }
+ if balance(rule) != 0 {
+ log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule)
+ }
+
+ // Order all the ops.
+ var ops []string
+ for op := range oprules {
+ ops = append(ops, op)
+ }
+ sort.Strings(ops)
+
+ genFile := &File{Arch: arch, Suffix: suff}
+ // Main rewrite routine is a switch on v.Op.
+ fn := &Func{Kind: "Value", ArgLen: -1}
+
+ sw := &Switch{Expr: exprf("v.Op")}
+ for _, op := range ops {
+ eop, ok := parseEllipsisRules(oprules[op], arch)
+ if ok {
+ if strings.Contains(oprules[op][0].Rule, "=>") && opByName(arch, op).aux != opByName(arch, eop).aux {
+ panic(fmt.Sprintf("can't use ... for ops that have different aux types: %s and %s", op, eop))
+ }
+ swc := &Case{Expr: exprf("%s", op)}
+ swc.add(stmtf("v.Op = %s", eop))
+ swc.add(stmtf("return true"))
+ sw.add(swc)
+ continue
+ }
+
+ swc := &Case{Expr: exprf("%s", op)}
+ swc.add(stmtf("return rewriteValue%s%s_%s(v)", arch.name, suff, op))
+ sw.add(swc)
+ }
+ if len(sw.List) > 0 { // skip if empty
+ fn.add(sw)
+ }
+ fn.add(stmtf("return false"))
+ genFile.add(fn)
+
+ // Generate a routine per op. Note that we don't make one giant routine
+ // because it is too big for some compilers.
+ for _, op := range ops {
+ rules := oprules[op]
+ _, ok := parseEllipsisRules(oprules[op], arch)
+ if ok {
+ continue
+ }
+
+ // rr is kept between iterations, so that each rule can check
+ // that the previous rule wasn't unconditional.
+ var rr *RuleRewrite
+ fn := &Func{
+ Kind: "Value",
+ Suffix: fmt.Sprintf("_%s", op),
+ ArgLen: opByName(arch, op).argLength,
+ }
+ fn.add(declReserved("b", "v.Block"))
+ fn.add(declReserved("config", "b.Func.Config"))
+ fn.add(declReserved("fe", "b.Func.fe"))
+ fn.add(declReserved("typ", "&b.Func.Config.Types"))
+ for _, rule := range rules {
+ if rr != nil && !rr.CanFail {
+ log.Fatalf("unconditional rule %s is followed by other rules", rr.Match)
+ }
+ rr = &RuleRewrite{Loc: rule.Loc}
+ rr.Match, rr.Cond, rr.Result = rule.parse()
+ pos, _ := genMatch(rr, arch, rr.Match, fn.ArgLen >= 0)
+ if pos == "" {
+ pos = "v.Pos"
+ }
+ if rr.Cond != "" {
+ rr.add(breakf("!(%s)", rr.Cond))
+ }
+ genResult(rr, arch, rr.Result, pos)
+ if *genLog {
+ rr.add(stmtf("logRule(%q)", rule.Loc))
+ }
+ fn.add(rr)
+ }
+ if rr.CanFail {
+ fn.add(stmtf("return false"))
+ }
+ genFile.add(fn)
+ }
+
+ // Generate block rewrite function. There are only a few block types
+ // so we can make this one function with a switch.
+ fn = &Func{Kind: "Block"}
+ fn.add(declReserved("config", "b.Func.Config"))
+ fn.add(declReserved("typ", "&b.Func.Config.Types"))
+
+ sw = &Switch{Expr: exprf("b.Kind")}
+ ops = ops[:0]
+ for op := range blockrules {
+ ops = append(ops, op)
+ }
+ sort.Strings(ops)
+ for _, op := range ops {
+ name, data := getBlockInfo(op, arch)
+ swc := &Case{Expr: exprf("%s", name)}
+ for _, rule := range blockrules[op] {
+ swc.add(genBlockRewrite(rule, arch, data))
+ }
+ sw.add(swc)
+ }
+ if len(sw.List) > 0 { // skip if empty
+ fn.add(sw)
+ }
+ fn.add(stmtf("return false"))
+ genFile.add(fn)
+
+ // Remove unused imports and variables.
+ buf := new(bytes.Buffer)
+ fprint(buf, genFile)
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", buf, parser.ParseComments)
+ if err != nil {
+ filename := fmt.Sprintf("%s_broken.go", arch.name)
+ if err := os.WriteFile(filename, buf.Bytes(), 0644); err != nil {
+ log.Printf("failed to dump broken code to %s: %v", filename, err)
+ } else {
+ log.Printf("dumped broken code to %s", filename)
+ }
+ log.Fatalf("failed to parse generated code for arch %s: %v", arch.name, err)
+ }
+ tfile := fset.File(file.Pos())
+
+ // First, use unusedInspector to find the unused declarations by their
+ // start position.
+ u := unusedInspector{unused: make(map[token.Pos]bool)}
+ u.node(file)
+
+ // Then, delete said nodes via astutil.Apply.
+ pre := func(c *astutil.Cursor) bool {
+ node := c.Node()
+ if node == nil {
+ return true
+ }
+ if u.unused[node.Pos()] {
+ c.Delete()
+ // Unused imports and declarations use exactly
+ // one line. Prevent leaving an empty line.
+ tfile.MergeLine(tfile.Position(node.Pos()).Line)
+ return false
+ }
+ return true
+ }
+ post := func(c *astutil.Cursor) bool {
+ switch node := c.Node().(type) {
+ case *ast.GenDecl:
+ if len(node.Specs) == 0 {
+ // Don't leave a broken or empty GenDecl behind,
+ // such as "import ()".
+ c.Delete()
+ }
+ }
+ return true
+ }
+ file = astutil.Apply(file, pre, post).(*ast.File)
+
+ // Write the well-formatted source to file
+ f, err := os.Create("../rewrite" + arch.name + suff + ".go")
+ if err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+ defer f.Close()
+ // gofmt result; use a buffered writer, as otherwise go/format spends
+ // far too much time in syscalls.
+ bw := bufio.NewWriter(f)
+ if err := format.Node(bw, fset, file); err != nil {
+ log.Fatalf("can't format output: %v", err)
+ }
+ if err := bw.Flush(); err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+}
+
+// unusedInspector can be used to detect unused variables and imports in an
+// ast.Node via its node method. The result is available in the "unused" map.
+//
+// note that unusedInspector is lazy and best-effort; it only supports the node
+// types and patterns used by the rulegen program.
+type unusedInspector struct {
+ // scope is the current scope, which can never be nil when a declaration
+ // is encountered. That is, the unusedInspector.node entrypoint should
+ // generally be an entire file or block.
+ scope *scope
+
+ // unused is the resulting set of unused declared names, indexed by the
+ // starting position of the node that declared the name.
+ unused map[token.Pos]bool
+
+ // defining is the object currently being defined; this is useful so
+ // that if "foo := bar" is unused and removed, we can then detect if
+ // "bar" becomes unused as well.
+ defining *object
+}
+
+// scoped opens a new scope when called, and returns a function which closes
+// that same scope. When a scope is closed, unused variables are recorded.
+func (u *unusedInspector) scoped() func() {
+ outer := u.scope
+ u.scope = &scope{outer: outer, objects: map[string]*object{}}
+ return func() {
+ for anyUnused := true; anyUnused; {
+ anyUnused = false
+ for _, obj := range u.scope.objects {
+ if obj.numUses > 0 {
+ continue
+ }
+ u.unused[obj.pos] = true
+ for _, used := range obj.used {
+ if used.numUses--; used.numUses == 0 {
+ anyUnused = true
+ }
+ }
+ // We've decremented numUses for each of the
+ // objects in used. Zero this slice too, to keep
+ // everything consistent.
+ obj.used = nil
+ }
+ }
+ u.scope = outer
+ }
+}
+
+func (u *unusedInspector) exprs(list []ast.Expr) {
+ for _, x := range list {
+ u.node(x)
+ }
+}
+
+func (u *unusedInspector) node(node ast.Node) {
+ switch node := node.(type) {
+ case *ast.File:
+ defer u.scoped()()
+ for _, decl := range node.Decls {
+ u.node(decl)
+ }
+ case *ast.GenDecl:
+ for _, spec := range node.Specs {
+ u.node(spec)
+ }
+ case *ast.ImportSpec:
+ impPath, _ := strconv.Unquote(node.Path.Value)
+ name := path.Base(impPath)
+ u.scope.objects[name] = &object{
+ name: name,
+ pos: node.Pos(),
+ }
+ case *ast.FuncDecl:
+ u.node(node.Type)
+ if node.Body != nil {
+ u.node(node.Body)
+ }
+ case *ast.FuncType:
+ if node.Params != nil {
+ u.node(node.Params)
+ }
+ if node.Results != nil {
+ u.node(node.Results)
+ }
+ case *ast.FieldList:
+ for _, field := range node.List {
+ u.node(field)
+ }
+ case *ast.Field:
+ u.node(node.Type)
+
+ // statements
+
+ case *ast.BlockStmt:
+ defer u.scoped()()
+ for _, stmt := range node.List {
+ u.node(stmt)
+ }
+ case *ast.DeclStmt:
+ u.node(node.Decl)
+ case *ast.IfStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ u.node(node.Cond)
+ u.node(node.Body)
+ if node.Else != nil {
+ u.node(node.Else)
+ }
+ case *ast.ForStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ if node.Cond != nil {
+ u.node(node.Cond)
+ }
+ if node.Post != nil {
+ u.node(node.Post)
+ }
+ u.node(node.Body)
+ case *ast.SwitchStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ if node.Tag != nil {
+ u.node(node.Tag)
+ }
+ u.node(node.Body)
+ case *ast.CaseClause:
+ u.exprs(node.List)
+ defer u.scoped()()
+ for _, stmt := range node.Body {
+ u.node(stmt)
+ }
+ case *ast.BranchStmt:
+ case *ast.ExprStmt:
+ u.node(node.X)
+ case *ast.AssignStmt:
+ if node.Tok != token.DEFINE {
+ u.exprs(node.Rhs)
+ u.exprs(node.Lhs)
+ break
+ }
+ lhs := node.Lhs
+ if len(lhs) == 2 && lhs[1].(*ast.Ident).Name == "_" {
+ lhs = lhs[:1]
+ }
+ if len(lhs) != 1 {
+ panic("no support for := with multiple names")
+ }
+
+ name := lhs[0].(*ast.Ident)
+ obj := &object{
+ name: name.Name,
+ pos: name.NamePos,
+ }
+
+ old := u.defining
+ u.defining = obj
+ u.exprs(node.Rhs)
+ u.defining = old
+
+ u.scope.objects[name.Name] = obj
+ case *ast.ReturnStmt:
+ u.exprs(node.Results)
+ case *ast.IncDecStmt:
+ u.node(node.X)
+
+ // expressions
+
+ case *ast.CallExpr:
+ u.node(node.Fun)
+ u.exprs(node.Args)
+ case *ast.SelectorExpr:
+ u.node(node.X)
+ case *ast.UnaryExpr:
+ u.node(node.X)
+ case *ast.BinaryExpr:
+ u.node(node.X)
+ u.node(node.Y)
+ case *ast.StarExpr:
+ u.node(node.X)
+ case *ast.ParenExpr:
+ u.node(node.X)
+ case *ast.IndexExpr:
+ u.node(node.X)
+ u.node(node.Index)
+ case *ast.TypeAssertExpr:
+ u.node(node.X)
+ u.node(node.Type)
+ case *ast.Ident:
+ if obj := u.scope.Lookup(node.Name); obj != nil {
+ obj.numUses++
+ if u.defining != nil {
+ u.defining.used = append(u.defining.used, obj)
+ }
+ }
+ case *ast.BasicLit:
+ case *ast.ValueSpec:
+ u.exprs(node.Values)
+ default:
+ panic(fmt.Sprintf("unhandled node: %T", node))
+ }
+}
+
+// scope keeps track of a certain scope and its declared names, as well as the
+// outer (parent) scope.
+type scope struct {
+ outer *scope // can be nil, if this is the top-level scope
+ objects map[string]*object // indexed by each declared name
+}
+
+func (s *scope) Lookup(name string) *object {
+ if obj := s.objects[name]; obj != nil {
+ return obj
+ }
+ if s.outer == nil {
+ return nil
+ }
+ return s.outer.Lookup(name)
+}
+
+// object keeps track of a declared name, such as a variable or import.
+type object struct {
+ name string
+ pos token.Pos // start position of the node declaring the object
+
+ numUses int // number of times this object is used
+ used []*object // objects that its declaration makes use of
+}
+
+func fprint(w io.Writer, n Node) {
+ switch n := n.(type) {
+ case *File:
+ file := n
+ seenRewrite := make(map[[3]string]string)
+ fmt.Fprintf(w, "// Code generated from _gen/%s%s.rules using 'go generate'; DO NOT EDIT.\n", n.Arch.name, n.Suffix)
+ fmt.Fprintf(w, "\npackage ssa\n")
+ for _, path := range append([]string{
+ "fmt",
+ "internal/buildcfg",
+ "math",
+ "cmd/internal/obj",
+ "cmd/compile/internal/base",
+ "cmd/compile/internal/types",
+ "cmd/compile/internal/ir",
+ }, n.Arch.imports...) {
+ fmt.Fprintf(w, "import %q\n", path)
+ }
+ for _, f := range n.List {
+ f := f.(*Func)
+ fmt.Fprintf(w, "func rewrite%s%s%s%s(", f.Kind, n.Arch.name, n.Suffix, f.Suffix)
+ fmt.Fprintf(w, "%c *%s) bool {\n", strings.ToLower(f.Kind)[0], f.Kind)
+ if f.Kind == "Value" && f.ArgLen > 0 {
+ for i := f.ArgLen - 1; i >= 0; i-- {
+ fmt.Fprintf(w, "v_%d := v.Args[%d]\n", i, i)
+ }
+ }
+ for _, n := range f.List {
+ fprint(w, n)
+
+ if rr, ok := n.(*RuleRewrite); ok {
+ k := [3]string{
+ normalizeMatch(rr.Match, file.Arch),
+ normalizeWhitespace(rr.Cond),
+ normalizeWhitespace(rr.Result),
+ }
+ if prev, ok := seenRewrite[k]; ok {
+ log.Fatalf("duplicate rule %s, previously seen at %s\n", rr.Loc, prev)
+ }
+ seenRewrite[k] = rr.Loc
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ case *Switch:
+ fmt.Fprintf(w, "switch ")
+ fprint(w, n.Expr)
+ fmt.Fprintf(w, " {\n")
+ for _, n := range n.List {
+ fprint(w, n)
+ }
+ fmt.Fprintf(w, "}\n")
+ case *Case:
+ fmt.Fprintf(w, "case ")
+ fprint(w, n.Expr)
+ fmt.Fprintf(w, ":\n")
+ for _, n := range n.List {
+ fprint(w, n)
+ }
+ case *RuleRewrite:
+ if *addLine {
+ fmt.Fprintf(w, "// %s\n", n.Loc)
+ }
+ fmt.Fprintf(w, "// match: %s\n", n.Match)
+ if n.Cond != "" {
+ fmt.Fprintf(w, "// cond: %s\n", n.Cond)
+ }
+ fmt.Fprintf(w, "// result: %s\n", n.Result)
+ fmt.Fprintf(w, "for %s {\n", n.Check)
+ nCommutative := 0
+ for _, n := range n.List {
+ if b, ok := n.(*CondBreak); ok {
+ b.InsideCommuteLoop = nCommutative > 0
+ }
+ fprint(w, n)
+ if loop, ok := n.(StartCommuteLoop); ok {
+ if nCommutative != loop.Depth {
+ panic("mismatch commute loop depth")
+ }
+ nCommutative++
+ }
+ }
+ fmt.Fprintf(w, "return true\n")
+ for i := 0; i < nCommutative; i++ {
+ fmt.Fprintln(w, "}")
+ }
+ if n.CommuteDepth > 0 && n.CanFail {
+ fmt.Fprint(w, "break\n")
+ }
+ fmt.Fprintf(w, "}\n")
+ case *Declare:
+ fmt.Fprintf(w, "%s := ", n.Name)
+ fprint(w, n.Value)
+ fmt.Fprintln(w)
+ case *CondBreak:
+ fmt.Fprintf(w, "if ")
+ fprint(w, n.Cond)
+ fmt.Fprintf(w, " {\n")
+ if n.InsideCommuteLoop {
+ fmt.Fprintf(w, "continue")
+ } else {
+ fmt.Fprintf(w, "break")
+ }
+ fmt.Fprintf(w, "\n}\n")
+ case ast.Node:
+ printConfig.Fprint(w, emptyFset, n)
+ if _, ok := n.(ast.Stmt); ok {
+ fmt.Fprintln(w)
+ }
+ case StartCommuteLoop:
+ fmt.Fprintf(w, "for _i%[1]d := 0; _i%[1]d <= 1; _i%[1]d, %[2]s_0, %[2]s_1 = _i%[1]d + 1, %[2]s_1, %[2]s_0 {\n", n.Depth, n.V)
+ default:
+ log.Fatalf("cannot print %T", n)
+ }
+}
+
+var printConfig = printer.Config{
+ Mode: printer.RawFormat, // we use go/format later, so skip work here
+}
+
+var emptyFset = token.NewFileSet()
+
+// Node can be a Statement or an ast.Expr.
+type Node interface{}
+
+// Statement can be one of our high-level statement struct types, or an
+// ast.Stmt under some limited circumstances.
+type Statement interface{}
+
+// BodyBase is shared by all of our statement pseudo-node types which can
+// contain other statements.
+type BodyBase struct {
+ List []Statement
+ CanFail bool
+}
+
+func (w *BodyBase) add(node Statement) {
+ var last Statement
+ if len(w.List) > 0 {
+ last = w.List[len(w.List)-1]
+ }
+ if node, ok := node.(*CondBreak); ok {
+ w.CanFail = true
+ if last, ok := last.(*CondBreak); ok {
+ // Add to the previous "if <cond> { break }" via a
+ // logical OR, which will save verbosity.
+ last.Cond = &ast.BinaryExpr{
+ Op: token.LOR,
+ X: last.Cond,
+ Y: node.Cond,
+ }
+ return
+ }
+ }
+
+ w.List = append(w.List, node)
+}
+
+// predeclared contains globally known tokens that should not be redefined.
+var predeclared = map[string]bool{
+ "nil": true,
+ "false": true,
+ "true": true,
+}
+
+// declared reports if the body contains a Declare with the given name.
+func (w *BodyBase) declared(name string) bool {
+ if predeclared[name] {
+ // Treat predeclared names as having already been declared.
+ // This lets us use nil to match an aux field or
+ // true and false to match an auxint field.
+ return true
+ }
+ for _, s := range w.List {
+ if decl, ok := s.(*Declare); ok && decl.Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// These types define some high-level statement struct types, which can be used
+// as a Statement. This allows us to keep some node structs simpler, and have
+// higher-level nodes such as an entire rule rewrite.
+//
+// Note that ast.Expr is always used as-is; we don't declare our own expression
+// nodes.
+type (
+ File struct {
+ BodyBase // []*Func
+ Arch arch
+ Suffix string
+ }
+ Func struct {
+ BodyBase
+ Kind string // "Value" or "Block"
+ Suffix string
+ ArgLen int32 // if kind == "Value", number of args for this op
+ }
+ Switch struct {
+ BodyBase // []*Case
+ Expr ast.Expr
+ }
+ Case struct {
+ BodyBase
+ Expr ast.Expr
+ }
+ RuleRewrite struct {
+ BodyBase
+ Match, Cond, Result string // top comments
+ Check string // top-level boolean expression
+
+ Alloc int // for unique var names
+ Loc string // file name & line number of the original rule
+ CommuteDepth int // used to track depth of commute loops
+ }
+ Declare struct {
+ Name string
+ Value ast.Expr
+ }
+ CondBreak struct {
+ Cond ast.Expr
+ InsideCommuteLoop bool
+ }
+ StartCommuteLoop struct {
+ Depth int
+ V string
+ }
+)
+
+// exprf parses a Go expression generated from fmt.Sprintf, panicking if an
+// error occurs.
+func exprf(format string, a ...interface{}) ast.Expr {
+ src := fmt.Sprintf(format, a...)
+ expr, err := parser.ParseExpr(src)
+ if err != nil {
+ log.Fatalf("expr parse error on %q: %v", src, err)
+ }
+ return expr
+}
+
+// stmtf parses a Go statement generated from fmt.Sprintf. This function is only
+// meant for simple statements that don't have a custom Statement node declared
+// in this package, such as ast.ReturnStmt or ast.ExprStmt.
+func stmtf(format string, a ...interface{}) Statement {
+ src := fmt.Sprintf(format, a...)
+ fsrc := "package p\nfunc _() {\n" + src + "\n}\n"
+ file, err := parser.ParseFile(token.NewFileSet(), "", fsrc, 0)
+ if err != nil {
+ log.Fatalf("stmt parse error on %q: %v", src, err)
+ }
+ return file.Decls[0].(*ast.FuncDecl).Body.List[0]
+}
+
+var reservedNames = map[string]bool{
+ "v": true, // Values[i], etc
+ "b": true, // v.Block
+ "config": true, // b.Func.Config
+ "fe": true, // b.Func.fe
+ "typ": true, // &b.Func.Config.Types
+}
+
+// declf constructs a simple "name := value" declaration,
+// using exprf for its value.
+//
+// name must not be one of reservedNames.
+// This helps prevent unintended shadowing and name clashes.
+// To declare a reserved name, use declReserved.
+func declf(loc, name, format string, a ...interface{}) *Declare {
+ if reservedNames[name] {
+ log.Fatalf("rule %s uses the reserved name %s", loc, name)
+ }
+ return &Declare{name, exprf(format, a...)}
+}
+
+// declReserved is like declf, but the name must be one of reservedNames.
+// Calls to declReserved should generally be static and top-level.
+func declReserved(name, value string) *Declare {
+ if !reservedNames[name] {
+ panic(fmt.Sprintf("declReserved call does not use a reserved name: %q", name))
+ }
+ return &Declare{name, exprf(value)}
+}
+
+// breakf constructs a simple "if cond { break }" statement, using exprf for its
+// condition.
+func breakf(format string, a ...interface{}) *CondBreak {
+ return &CondBreak{Cond: exprf(format, a...)}
+}
+
+func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
+ rr := &RuleRewrite{Loc: rule.Loc}
+ rr.Match, rr.Cond, rr.Result = rule.parse()
+ _, _, auxint, aux, s := extract(rr.Match) // remove parens, then split
+
+ // check match of control values
+ if len(s) < data.controls {
+ log.Fatalf("incorrect number of arguments in %s, got %v wanted at least %v", rule, len(s), data.controls)
+ }
+ controls := s[:data.controls]
+ pos := make([]string, data.controls)
+ for i, arg := range controls {
+ cname := fmt.Sprintf("b.Controls[%v]", i)
+ if strings.Contains(arg, "(") {
+ vname, expr := splitNameExpr(arg)
+ if vname == "" {
+ vname = fmt.Sprintf("v_%v", i)
+ }
+ rr.add(declf(rr.Loc, vname, cname))
+ p, op := genMatch0(rr, arch, expr, vname, nil, false) // TODO: pass non-nil cnt?
+ if op != "" {
+ check := fmt.Sprintf("%s.Op == %s", cname, op)
+ if rr.Check == "" {
+ rr.Check = check
+ } else {
+ rr.Check += " && " + check
+ }
+ }
+ if p == "" {
+ p = vname + ".Pos"
+ }
+ pos[i] = p
+ } else {
+ rr.add(declf(rr.Loc, arg, cname))
+ pos[i] = arg + ".Pos"
+ }
+ }
+ for _, e := range []struct {
+ name, field, dclType string
+ }{
+ {auxint, "AuxInt", data.auxIntType()},
+ {aux, "Aux", data.auxType()},
+ } {
+ if e.name == "" {
+ continue
+ }
+
+ if e.dclType == "" {
+ log.Fatalf("op %s has no declared type for %s", data.name, e.field)
+ }
+ if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+ rr.add(breakf("%sTo%s(b.%s) != %s", unTitle(e.field), title(e.dclType), e.field, e.name))
+ } else {
+ rr.add(declf(rr.Loc, e.name, "%sTo%s(b.%s)", unTitle(e.field), title(e.dclType), e.field))
+ }
+ }
+ if rr.Cond != "" {
+ rr.add(breakf("!(%s)", rr.Cond))
+ }
+
+ // Rule matches. Generate result.
+ outop, _, auxint, aux, t := extract(rr.Result) // remove parens, then split
+ blockName, outdata := getBlockInfo(outop, arch)
+ if len(t) < outdata.controls {
+ log.Fatalf("incorrect number of output arguments in %s, got %v wanted at least %v", rule, len(s), outdata.controls)
+ }
+
+ // Check if newsuccs is the same set as succs.
+ succs := s[data.controls:]
+ newsuccs := t[outdata.controls:]
+ m := map[string]bool{}
+ for _, succ := range succs {
+ if m[succ] {
+ log.Fatalf("can't have a repeat successor name %s in %s", succ, rule)
+ }
+ m[succ] = true
+ }
+ for _, succ := range newsuccs {
+ if !m[succ] {
+ log.Fatalf("unknown successor %s in %s", succ, rule)
+ }
+ delete(m, succ)
+ }
+ if len(m) != 0 {
+ log.Fatalf("unmatched successors %v in %s", m, rule)
+ }
+
+ var genControls [2]string
+ for i, control := range t[:outdata.controls] {
+ // Select a source position for any new control values.
+ // TODO: does it always make sense to use the source position
+ // of the original control values or should we be using the
+ // block's source position in some cases?
+ newpos := "b.Pos" // default to block's source position
+ if i < len(pos) && pos[i] != "" {
+ // Use the previous control value's source position.
+ newpos = pos[i]
+ }
+
+ // Generate a new control value (or copy an existing value).
+ genControls[i] = genResult0(rr, arch, control, false, false, newpos, nil)
+ }
+ switch outdata.controls {
+ case 0:
+ rr.add(stmtf("b.Reset(%s)", blockName))
+ case 1:
+ rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0]))
+ case 2:
+ rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1]))
+ default:
+ log.Fatalf("too many controls: %d", outdata.controls)
+ }
+
+ if auxint != "" {
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint))
+ }
+ if aux != "" {
+ // Make sure aux value has the right type.
+ rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux))
+ }
+
+ succChanged := false
+ for i := 0; i < len(succs); i++ {
+ if succs[i] != newsuccs[i] {
+ succChanged = true
+ }
+ }
+ if succChanged {
+ if len(succs) != 2 {
+ log.Fatalf("changed successors, len!=2 in %s", rule)
+ }
+ if succs[0] != newsuccs[1] || succs[1] != newsuccs[0] {
+ log.Fatalf("can only handle swapped successors in %s", rule)
+ }
+ rr.add(stmtf("b.swapSuccessors()"))
+ }
+
+ if *genLog {
+ rr.add(stmtf("logRule(%q)", rule.Loc))
+ }
+ return rr
+}
+
+// genMatch returns the variable whose source position should be used for the
+// result (or "" if no opinion), and a boolean that reports whether the match can fail.
+func genMatch(rr *RuleRewrite, arch arch, match string, pregenTop bool) (pos, checkOp string) {
+ cnt := varCount(rr)
+ return genMatch0(rr, arch, match, "v", cnt, pregenTop)
+}
+
+func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, pregenTop bool) (pos, checkOp string) {
+ if match[0] != '(' || match[len(match)-1] != ')' {
+ log.Fatalf("%s: non-compound expr in genMatch0: %q", rr.Loc, match)
+ }
+ op, oparch, typ, auxint, aux, args := parseValue(match, arch, rr.Loc)
+
+ checkOp = fmt.Sprintf("Op%s%s", oparch, op.name)
+
+ if op.faultOnNilArg0 || op.faultOnNilArg1 {
+ // Prefer the position of an instruction which could fault.
+ pos = v + ".Pos"
+ }
+
+ // If the last argument is ___, it means "don't care about trailing arguments, really"
+ // The likely/intended use is for rewrites that are too tricky to express in the existing pattern language
+ // Do a length check early because long patterns fed short (ultimately not-matching) inputs will
+ // do an indexing error in pattern-matching.
+ if op.argLength == -1 {
+ l := len(args)
+ if l == 0 || args[l-1] != "___" {
+ rr.add(breakf("len(%s.Args) != %d", v, l))
+ } else if l > 1 && args[l-1] == "___" {
+ rr.add(breakf("len(%s.Args) < %d", v, l-1))
+ }
+ }
+
+ for _, e := range []struct {
+ name, field, dclType string
+ }{
+ {typ, "Type", "*types.Type"},
+ {auxint, "AuxInt", op.auxIntType()},
+ {aux, "Aux", op.auxType()},
+ } {
+ if e.name == "" {
+ continue
+ }
+
+ if e.dclType == "" {
+ log.Fatalf("op %s has no declared type for %s", op.name, e.field)
+ }
+ if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+ switch e.field {
+ case "Aux":
+ rr.add(breakf("auxTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name))
+ case "AuxInt":
+ rr.add(breakf("auxIntTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name))
+ case "Type":
+ rr.add(breakf("%s.%s != %s", v, e.field, e.name))
+ }
+ } else {
+ switch e.field {
+ case "Aux":
+ rr.add(declf(rr.Loc, e.name, "auxTo%s(%s.%s)", title(e.dclType), v, e.field))
+ case "AuxInt":
+ rr.add(declf(rr.Loc, e.name, "auxIntTo%s(%s.%s)", title(e.dclType), v, e.field))
+ case "Type":
+ rr.add(declf(rr.Loc, e.name, "%s.%s", v, e.field))
+ }
+ }
+ }
+
+ commutative := op.commutative
+ if commutative {
+ if args[0] == args[1] {
+ // When we have (Add x x), for any x,
+ // even if there are other uses of x besides these two,
+ // and even if x is not a variable,
+ // we can skip the commutative match.
+ commutative = false
+ }
+ if cnt[args[0]] == 1 && cnt[args[1]] == 1 {
+ // When we have (Add x y) with no other uses
+ // of x and y in the matching rule and condition,
+ // then we can skip the commutative match (Add y x).
+ commutative = false
+ }
+ }
+
+ if !pregenTop {
+ // Access last argument first to minimize bounds checks.
+ for n := len(args) - 1; n > 0; n-- {
+ a := args[n]
+ if a == "_" {
+ continue
+ }
+ if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
+ rr.add(declf(rr.Loc, a, "%s.Args[%d]", v, n))
+ // delete the last argument so it is not reprocessed
+ args = args[:n]
+ } else {
+ rr.add(stmtf("_ = %s.Args[%d]", v, n))
+ }
+ break
+ }
+ }
+ if commutative && !pregenTop {
+ for i := 0; i <= 1; i++ {
+ vname := fmt.Sprintf("%s_%d", v, i)
+ rr.add(declf(rr.Loc, vname, "%s.Args[%d]", v, i))
+ }
+ }
+ if commutative {
+ rr.add(StartCommuteLoop{rr.CommuteDepth, v})
+ rr.CommuteDepth++
+ }
+ for i, arg := range args {
+ if arg == "_" {
+ continue
+ }
+ var rhs string
+ if (commutative && i < 2) || pregenTop {
+ rhs = fmt.Sprintf("%s_%d", v, i)
+ } else {
+ rhs = fmt.Sprintf("%s.Args[%d]", v, i)
+ }
+ if !strings.Contains(arg, "(") {
+ // leaf variable
+ if rr.declared(arg) {
+ // variable already has a definition. Check whether
+ // the old definition and the new definition match.
+ // For example, (add x x). Equality is just pointer equality
+ // on Values (so cse is important to do before lowering).
+ rr.add(breakf("%s != %s", arg, rhs))
+ } else {
+ if arg != rhs {
+ rr.add(declf(rr.Loc, arg, "%s", rhs))
+ }
+ }
+ continue
+ }
+ // compound sexpr
+ argname, expr := splitNameExpr(arg)
+ if argname == "" {
+ argname = fmt.Sprintf("%s_%d", v, i)
+ }
+ if argname == "b" {
+ log.Fatalf("don't name args 'b', it is ambiguous with blocks")
+ }
+
+ if argname != rhs {
+ rr.add(declf(rr.Loc, argname, "%s", rhs))
+ }
+ bexpr := exprf("%s.Op != addLater", argname)
+ rr.add(&CondBreak{Cond: bexpr})
+ argPos, argCheckOp := genMatch0(rr, arch, expr, argname, cnt, false)
+ bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp
+
+ if argPos != "" {
+ // Keep the argument in preference to the parent, as the
+ // argument is normally earlier in program flow.
+ // Keep the argument in preference to an earlier argument,
+ // as that prefers the memory argument which is also earlier
+ // in the program flow.
+ pos = argPos
+ }
+ }
+
+ return pos, checkOp
+}
+
+func genResult(rr *RuleRewrite, arch arch, result, pos string) {
+ move := result[0] == '@'
+ if move {
+ // parse @block directive
+ s := strings.SplitN(result[1:], " ", 2)
+ rr.add(stmtf("b = %s", s[0]))
+ result = s[1]
+ }
+ cse := make(map[string]string)
+ genResult0(rr, arch, result, true, move, pos, cse)
+}
+
+func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos string, cse map[string]string) string {
+ resname, expr := splitNameExpr(result)
+ result = expr
+ // TODO: when generating a constant result, use f.constVal to avoid
+ // introducing copies just to clean them up again.
+ if result[0] != '(' {
+ // variable
+ if top {
+ // It in not safe in general to move a variable between blocks
+ // (and particularly not a phi node).
+ // Introduce a copy.
+ rr.add(stmtf("v.copyOf(%s)", result))
+ }
+ return result
+ }
+
+ w := normalizeWhitespace(result)
+ if prev := cse[w]; prev != "" {
+ return prev
+ }
+
+ op, oparch, typ, auxint, aux, args := parseValue(result, arch, rr.Loc)
+
+ // Find the type of the variable.
+ typeOverride := typ != ""
+ if typ == "" && op.typ != "" {
+ typ = typeName(op.typ)
+ }
+
+ v := "v"
+ if top && !move {
+ rr.add(stmtf("v.reset(Op%s%s)", oparch, op.name))
+ if typeOverride {
+ rr.add(stmtf("v.Type = %s", typ))
+ }
+ } else {
+ if typ == "" {
+ log.Fatalf("sub-expression %s (op=Op%s%s) at %s must have a type", result, oparch, op.name, rr.Loc)
+ }
+ if resname == "" {
+ v = fmt.Sprintf("v%d", rr.Alloc)
+ } else {
+ v = resname
+ }
+ rr.Alloc++
+ rr.add(declf(rr.Loc, v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ))
+ if move && top {
+ // Rewrite original into a copy
+ rr.add(stmtf("v.copyOf(%s)", v))
+ }
+ }
+
+ if auxint != "" {
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
+ }
+ if aux != "" {
+ // Make sure aux value has the right type.
+ rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
+ }
+ all := new(strings.Builder)
+ for i, arg := range args {
+ x := genResult0(rr, arch, arg, false, move, pos, cse)
+ if i > 0 {
+ all.WriteString(", ")
+ }
+ all.WriteString(x)
+ }
+ switch len(args) {
+ case 0:
+ case 1:
+ rr.add(stmtf("%s.AddArg(%s)", v, all.String()))
+ default:
+ rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String()))
+ }
+
+ if cse != nil {
+ cse[w] = v
+ }
+ return v
+}
+
+func split(s string) []string {
+ var r []string
+
+outer:
+ for s != "" {
+ d := 0 // depth of ({[<
+ var open, close byte // opening and closing markers ({[< or )}]>
+ nonsp := false // found a non-space char so far
+ for i := 0; i < len(s); i++ {
+ switch {
+ case d == 0 && s[i] == '(':
+ open, close = '(', ')'
+ d++
+ case d == 0 && s[i] == '<':
+ open, close = '<', '>'
+ d++
+ case d == 0 && s[i] == '[':
+ open, close = '[', ']'
+ d++
+ case d == 0 && s[i] == '{':
+ open, close = '{', '}'
+ d++
+ case d == 0 && (s[i] == ' ' || s[i] == '\t'):
+ if nonsp {
+ r = append(r, strings.TrimSpace(s[:i]))
+ s = s[i:]
+ continue outer
+ }
+ case d > 0 && s[i] == open:
+ d++
+ case d > 0 && s[i] == close:
+ d--
+ default:
+ nonsp = true
+ }
+ }
+ if d != 0 {
+ log.Fatalf("imbalanced expression: %q", s)
+ }
+ if nonsp {
+ r = append(r, strings.TrimSpace(s))
+ }
+ break
+ }
+ return r
+}
+
+// isBlock reports whether this op is a block opcode.
+func isBlock(name string, arch arch) bool {
+ for _, b := range genericBlocks {
+ if b.name == name {
+ return true
+ }
+ }
+ for _, b := range arch.blocks {
+ if b.name == name {
+ return true
+ }
+ }
+ return false
+}
+
+func extract(val string) (op, typ, auxint, aux string, args []string) {
+ val = val[1 : len(val)-1] // remove ()
+
+ // Split val up into regions.
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+ s := split(val)
+
+ // Extract restrictions and args.
+ op = s[0]
+ for _, a := range s[1:] {
+ switch a[0] {
+ case '<':
+ typ = a[1 : len(a)-1] // remove <>
+ case '[':
+ auxint = a[1 : len(a)-1] // remove []
+ case '{':
+ aux = a[1 : len(a)-1] // remove {}
+ default:
+ args = append(args, a)
+ }
+ }
+ return
+}
+
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxint, aux string, args []string) {
+ // Resolve the op.
+ var s string
+ s, typ, auxint, aux, args = extract(val)
+
+ // match reports whether x is a good op to select.
+ // If strict is true, rule generation might succeed.
+ // If strict is false, rule generation has failed,
+ // but we're trying to generate a useful error.
+ // Doing strict=true then strict=false allows
+ // precise op matching while retaining good error messages.
+ match := func(x opData, strict bool, archname string) bool {
+ if x.name != s {
+ return false
+ }
+ if x.argLength != -1 && int(x.argLength) != len(args) && (len(args) != 1 || args[0] != "...") {
+ if strict {
+ return false
+ }
+ log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s, archname, x.argLength, len(args))
+ }
+ return true
+ }
+
+ for _, x := range genericOps {
+ if match(x, true, "generic") {
+ op = x
+ break
+ }
+ }
+ for _, x := range arch.ops {
+ if arch.name != "generic" && match(x, true, arch.name) {
+ if op.name != "" {
+ log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+ }
+ op = x
+ oparch = arch.name
+ break
+ }
+ }
+
+ if op.name == "" {
+ // Failed to find the op.
+ // Run through everything again with strict=false
+ // to generate useful diagnostic messages before failing.
+ for _, x := range genericOps {
+ match(x, false, "generic")
+ }
+ for _, x := range arch.ops {
+ match(x, false, arch.name)
+ }
+ log.Fatalf("%s: unknown op %s", loc, s)
+ }
+
+ // Sanity check aux, auxint.
+ if auxint != "" && !opHasAuxInt(op) {
+ log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+ }
+ if aux != "" && !opHasAux(op) {
+ log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ }
+ return
+}
+
+func opHasAuxInt(op opData) bool {
+ switch op.aux {
+ case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
+ "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop":
+ return true
+ }
+ return false
+}
+
+func opHasAux(op opData) bool {
+ switch op.aux {
+ case "String", "Sym", "SymOff", "Call", "CallOff", "SymValAndOff", "Typ", "TypSize",
+ "S390XCCMask", "S390XRotateParams":
+ return true
+ }
+ return false
+}
+
+// splitNameExpr splits s-expr arg, possibly prefixed by "name:",
+// into name and the unprefixed expression.
+// For example, "x:(Foo)" yields "x", "(Foo)",
+// and "(Foo)" yields "", "(Foo)".
+func splitNameExpr(arg string) (name, expr string) {
+ colon := strings.Index(arg, ":")
+ if colon < 0 {
+ return "", arg
+ }
+ openparen := strings.Index(arg, "(")
+ if openparen < 0 {
+ log.Fatalf("splitNameExpr(%q): colon but no open parens", arg)
+ }
+ if colon > openparen {
+ // colon is inside the parens, such as in "(Foo x:(Bar))".
+ return "", arg
+ }
+ return arg[:colon], arg[colon+1:]
+}
+
+func getBlockInfo(op string, arch arch) (name string, data blockData) {
+ for _, b := range genericBlocks {
+ if b.name == op {
+ return "Block" + op, b
+ }
+ }
+ for _, b := range arch.blocks {
+ if b.name == op {
+ return "Block" + arch.name + op, b
+ }
+ }
+ log.Fatalf("could not find block data for %s", op)
+ panic("unreachable")
+}
+
+// typeName returns the string to use to generate a type.
+func typeName(typ string) string {
+ if typ[0] == '(' {
+ ts := strings.Split(typ[1:len(typ)-1], ",")
+ if len(ts) != 2 {
+ log.Fatalf("Tuple expect 2 arguments")
+ }
+ return "types.NewTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
+ }
+ switch typ {
+ case "Flags", "Mem", "Void", "Int128":
+ return "types.Type" + typ
+ default:
+ return "typ." + typ
+ }
+}
+
+// balance returns the number of unclosed '(' characters in s.
+// If a ')' appears without a corresponding '(', balance returns -1.
+func balance(s string) int {
+ balance := 0
+ for _, c := range s {
+ switch c {
+ case '(':
+ balance++
+ case ')':
+ balance--
+ if balance < 0 {
+ // don't allow ")(" to return 0
+ return -1
+ }
+ }
+ }
+ return balance
+}
+
+// findAllOpcode is a function to find the opcode portion of s-expressions.
+var findAllOpcode = regexp.MustCompile(`[(](\w+[|])+\w+[)]`).FindAllStringIndex
+
+// excludeFromExpansion reports whether the substring s[idx[0]:idx[1]] in a rule
+// should be disregarded as a candidate for | expansion.
+// It uses simple syntactic checks to see whether the substring
+// is inside an AuxInt expression or inside the && conditions.
+func excludeFromExpansion(s string, idx []int) bool {
+ left := s[:idx[0]]
+ if strings.LastIndexByte(left, '[') > strings.LastIndexByte(left, ']') {
+ // Inside an AuxInt expression.
+ return true
+ }
+ right := s[idx[1]:]
+ if strings.Contains(left, "&&") && strings.Contains(right, "=>") {
+ // Inside && conditions.
+ return true
+ }
+ return false
+}
+
+// expandOr converts a rule into multiple rules by expanding | ops.
+func expandOr(r string) []string {
+ // Find every occurrence of |-separated things.
+ // They look like MOV(B|W|L|Q|SS|SD)load or MOV(Q|L)loadidx(1|8).
+ // Generate rules selecting one case from each |-form.
+
+ // Count width of |-forms. They must match.
+ n := 1
+ for _, idx := range findAllOpcode(r, -1) {
+ if excludeFromExpansion(r, idx) {
+ continue
+ }
+ s := r[idx[0]:idx[1]]
+ c := strings.Count(s, "|") + 1
+ if c == 1 {
+ continue
+ }
+ if n > 1 && n != c {
+ log.Fatalf("'|' count doesn't match in %s: both %d and %d\n", r, n, c)
+ }
+ n = c
+ }
+ if n == 1 {
+ // No |-form in this rule.
+ return []string{r}
+ }
+ // Build each new rule.
+ res := make([]string, n)
+ for i := 0; i < n; i++ {
+ buf := new(strings.Builder)
+ x := 0
+ for _, idx := range findAllOpcode(r, -1) {
+ if excludeFromExpansion(r, idx) {
+ continue
+ }
+ buf.WriteString(r[x:idx[0]]) // write bytes we've skipped over so far
+ s := r[idx[0]+1 : idx[1]-1] // remove leading "(" and trailing ")"
+ buf.WriteString(strings.Split(s, "|")[i]) // write the op component for this rule
+ x = idx[1] // note that we've written more bytes
+ }
+ buf.WriteString(r[x:])
+ res[i] = buf.String()
+ }
+ return res
+}
+
+// varCount returns a map which counts the number of occurrences of
+// Value variables in the s-expression rr.Match and the Go expression rr.Cond.
+func varCount(rr *RuleRewrite) map[string]int {
+ cnt := map[string]int{}
+ varCount1(rr.Loc, rr.Match, cnt)
+ if rr.Cond != "" {
+ expr, err := parser.ParseExpr(rr.Cond)
+ if err != nil {
+ log.Fatalf("%s: failed to parse cond %q: %v", rr.Loc, rr.Cond, err)
+ }
+ ast.Inspect(expr, func(n ast.Node) bool {
+ if id, ok := n.(*ast.Ident); ok {
+ cnt[id.Name]++
+ }
+ return true
+ })
+ }
+ return cnt
+}
+
+func varCount1(loc, m string, cnt map[string]int) {
+ if m[0] == '<' || m[0] == '[' || m[0] == '{' {
+ return
+ }
+ if token.IsIdentifier(m) {
+ cnt[m]++
+ return
+ }
+ // Split up input.
+ name, expr := splitNameExpr(m)
+ if name != "" {
+ cnt[name]++
+ }
+ if expr[0] != '(' || expr[len(expr)-1] != ')' {
+ log.Fatalf("%s: non-compound expr in varCount1: %q", loc, expr)
+ }
+ s := split(expr[1 : len(expr)-1])
+ for _, arg := range s[1:] {
+ varCount1(loc, arg, cnt)
+ }
+}
+
+// normalizeWhitespace replaces 2+ whitespace sequences with a single space.
+func normalizeWhitespace(x string) string {
+ x = strings.Join(strings.Fields(x), " ")
+ x = strings.Replace(x, "( ", "(", -1)
+ x = strings.Replace(x, " )", ")", -1)
+ x = strings.Replace(x, "[ ", "[", -1)
+ x = strings.Replace(x, " ]", "]", -1)
+ x = strings.Replace(x, ")=>", ") =>", -1)
+ return x
+}
+
+// opIsCommutative reports whether op s is commutative.
+func opIsCommutative(op string, arch arch) bool {
+ for _, x := range genericOps {
+ if op == x.name {
+ if x.commutative {
+ return true
+ }
+ break
+ }
+ }
+ if arch.name != "generic" {
+ for _, x := range arch.ops {
+ if op == x.name {
+ if x.commutative {
+ return true
+ }
+ break
+ }
+ }
+ }
+ return false
+}
+
+func normalizeMatch(m string, arch arch) string {
+ if token.IsIdentifier(m) {
+ return m
+ }
+ op, typ, auxint, aux, args := extract(m)
+ if opIsCommutative(op, arch) {
+ if args[1] < args[0] {
+ args[0], args[1] = args[1], args[0]
+ }
+ }
+ s := new(strings.Builder)
+ fmt.Fprintf(s, "%s <%s> [%s] {%s}", op, typ, auxint, aux)
+ for _, arg := range args {
+ prefix, expr := splitNameExpr(arg)
+ fmt.Fprint(s, " ", prefix, normalizeMatch(expr, arch))
+ }
+ return s.String()
+}
+
+func parseEllipsisRules(rules []Rule, arch arch) (newop string, ok bool) {
+ if len(rules) != 1 {
+ for _, r := range rules {
+ if strings.Contains(r.Rule, "...") {
+ log.Fatalf("%s: found ellipsis in rule, but there are other rules with the same op", r.Loc)
+ }
+ }
+ return "", false
+ }
+ rule := rules[0]
+ match, cond, result := rule.parse()
+ if cond != "" || !isEllipsisValue(match) || !isEllipsisValue(result) {
+ if strings.Contains(rule.Rule, "...") {
+ log.Fatalf("%s: found ellipsis in non-ellipsis rule", rule.Loc)
+ }
+ checkEllipsisRuleCandidate(rule, arch)
+ return "", false
+ }
+ op, oparch, _, _, _, _ := parseValue(result, arch, rule.Loc)
+ return fmt.Sprintf("Op%s%s", oparch, op.name), true
+}
+
+// isEllipsisValue reports whether s is of the form (OpX ...).
+func isEllipsisValue(s string) bool {
+ if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' {
+ return false
+ }
+ c := split(s[1 : len(s)-1])
+ if len(c) != 2 || c[1] != "..." {
+ return false
+ }
+ return true
+}
+
+func checkEllipsisRuleCandidate(rule Rule, arch arch) {
+ match, cond, result := rule.parse()
+ if cond != "" {
+ return
+ }
+ op, _, _, auxint, aux, args := parseValue(match, arch, rule.Loc)
+ var auxint2, aux2 string
+ var args2 []string
+ var usingCopy string
+ var eop opData
+ if result[0] != '(' {
+ // Check for (Foo x) => x, which can be converted to (Foo ...) => (Copy ...).
+ args2 = []string{result}
+ usingCopy = " using Copy"
+ } else {
+ eop, _, _, auxint2, aux2, args2 = parseValue(result, arch, rule.Loc)
+ }
+ // Check that all restrictions in match are reproduced exactly in result.
+ if aux != aux2 || auxint != auxint2 || len(args) != len(args2) {
+ return
+ }
+ if strings.Contains(rule.Rule, "=>") && op.aux != eop.aux {
+ return
+ }
+ for i := range args {
+ if args[i] != args2[i] {
+ return
+ }
+ }
+ switch {
+ case opHasAux(op) && aux == "" && aux2 == "":
+ fmt.Printf("%s: rule silently zeros aux, either copy aux or explicitly zero\n", rule.Loc)
+ case opHasAuxInt(op) && auxint == "" && auxint2 == "":
+ fmt.Printf("%s: rule silently zeros auxint, either copy auxint or explicitly zero\n", rule.Loc)
+ default:
+ fmt.Printf("%s: possible ellipsis rule candidate%s: %q\n", rule.Loc, usingCopy, rule.Rule)
+ }
+}
+
+func opByName(arch arch, name string) opData {
+ name = name[2:]
+ for _, x := range genericOps {
+ if name == x.name {
+ return x
+ }
+ }
+ if arch.name != "generic" {
+ name = name[len(arch.name):]
+ for _, x := range arch.ops {
+ if name == x.name {
+ return x
+ }
+ }
+ }
+ log.Fatalf("failed to find op named %s in arch %s", name, arch.name)
+ panic("unreachable")
+}
+
+// auxType returns the Go type that this operation should store in its aux field.
+func (op opData) auxType() string {
+ switch op.aux {
+ case "String":
+ return "string"
+ case "Sym":
+ // Note: a Sym can be an *obj.LSym, a *gc.Node, or nil.
+ return "Sym"
+ case "SymOff":
+ return "Sym"
+ case "Call":
+ return "Call"
+ case "CallOff":
+ return "Call"
+ case "SymValAndOff":
+ return "Sym"
+ case "Typ":
+ return "*types.Type"
+ case "TypSize":
+ return "*types.Type"
+ case "S390XCCMask":
+ return "s390x.CCMask"
+ case "S390XRotateParams":
+ return "s390x.RotateParams"
+ default:
+ return "invalid"
+ }
+}
+
+// auxIntType returns the Go type that this operation should store in its auxInt field.
+func (op opData) auxIntType() string {
+ switch op.aux {
+ case "Bool":
+ return "bool"
+ case "Int8":
+ return "int8"
+ case "Int16":
+ return "int16"
+ case "Int32":
+ return "int32"
+ case "Int64":
+ return "int64"
+ case "Int128":
+ return "int128"
+ case "UInt8":
+ return "uint8"
+ case "Float32":
+ return "float32"
+ case "Float64":
+ return "float64"
+ case "CallOff":
+ return "int32"
+ case "SymOff":
+ return "int32"
+ case "SymValAndOff":
+ return "ValAndOff"
+ case "TypSize":
+ return "int64"
+ case "CCop":
+ return "Op"
+ case "FlagConstant":
+ return "flagConstant"
+ case "ARM64BitField":
+ return "arm64BitField"
+ default:
+ return "invalid"
+ }
+}
+
+// auxType returns the Go type that this block should store in its aux field.
+func (b blockData) auxType() string {
+ switch b.aux {
+ case "Sym":
+ return "Sym"
+ case "S390XCCMask", "S390XCCMaskInt8", "S390XCCMaskUint8":
+ return "s390x.CCMask"
+ case "S390XRotateParams":
+ return "s390x.RotateParams"
+ default:
+ return "invalid"
+ }
+}
+
+// auxIntType returns the Go type that this block should store in its auxInt field.
+func (b blockData) auxIntType() string {
+ switch b.aux {
+ case "S390XCCMaskInt8":
+ return "int8"
+ case "S390XCCMaskUint8":
+ return "uint8"
+ case "Int64":
+ return "int64"
+ default:
+ return "invalid"
+ }
+}
+
+func title(s string) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ switch strings.ToLower(s[:i]) {
+ case "s390x": // keep arch prefix for clarity
+ s = s[:i] + s[i+1:]
+ default:
+ s = s[i+1:]
+ }
+ }
+ return strings.Title(s)
+}
+
+func unTitle(s string) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ switch strings.ToLower(s[:i]) {
+ case "s390x": // keep arch prefix for clarity
+ s = s[:i] + s[i+1:]
+ default:
+ s = s[i+1:]
+ }
+ }
+ return strings.ToLower(s[:1]) + s[1:]
+}
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
new file mode 100644
index 0000000..4e3209e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -0,0 +1,518 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// addressingModes combines address calculations into memory operations
+// that can perform complicated addressing modes.
+func addressingModes(f *Func) {
+ isInImmediateRange := is32Bit
+ switch f.Config.arch {
+ default:
+ // Most architectures can't do this.
+ return
+ case "amd64", "386":
+ case "s390x":
+ isInImmediateRange = is20Bit
+ }
+
+ var tmp []*Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !combineFirst[v.Op] {
+ continue
+ }
+ // All matched operations have the pointer in arg[0].
+ // All results have the pointer in arg[0] and the index in arg[1].
+ // *Except* for operations which update a register,
+ // which are marked with resultInArg0. Those have
+ // the pointer in arg[1], and the corresponding result op
+ // has the pointer in arg[1] and the index in arg[2].
+ ptrIndex := 0
+ if opcodeTable[v.Op].resultInArg0 {
+ ptrIndex = 1
+ }
+ p := v.Args[ptrIndex]
+ c, ok := combine[[2]Op{v.Op, p.Op}]
+ if !ok {
+ continue
+ }
+ // See if we can combine the Aux/AuxInt values.
+ switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} {
+ case [2]auxType{auxSymOff, auxInt32}:
+ // TODO: introduce auxSymOff32
+ if !isInImmediateRange(v.AuxInt + p.AuxInt) {
+ continue
+ }
+ v.AuxInt += p.AuxInt
+ case [2]auxType{auxSymOff, auxSymOff}:
+ if v.Aux != nil && p.Aux != nil {
+ continue
+ }
+ if !isInImmediateRange(v.AuxInt + p.AuxInt) {
+ continue
+ }
+ if p.Aux != nil {
+ v.Aux = p.Aux
+ }
+ v.AuxInt += p.AuxInt
+ case [2]auxType{auxSymValAndOff, auxInt32}:
+ vo := ValAndOff(v.AuxInt)
+ if !vo.canAdd64(p.AuxInt) {
+ continue
+ }
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
+ case [2]auxType{auxSymValAndOff, auxSymOff}:
+ vo := ValAndOff(v.AuxInt)
+ if v.Aux != nil && p.Aux != nil {
+ continue
+ }
+ if !vo.canAdd64(p.AuxInt) {
+ continue
+ }
+ if p.Aux != nil {
+ v.Aux = p.Aux
+ }
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
+ case [2]auxType{auxSymOff, auxNone}:
+ // nothing to do
+ case [2]auxType{auxSymValAndOff, auxNone}:
+ // nothing to do
+ default:
+ f.Fatalf("unknown aux combining for %s and %s\n", v.Op, p.Op)
+ }
+ // Combine the operations.
+ tmp = append(tmp[:0], v.Args[:ptrIndex]...)
+ tmp = append(tmp, p.Args...)
+ tmp = append(tmp, v.Args[ptrIndex+1:]...)
+ v.resetArgs()
+ v.Op = c
+ v.AddArgs(tmp...)
+ if needSplit[c] {
+ // It turns out that some of the combined instructions have faster two-instruction equivalents,
+ // but not the two instructions that led to them being combined here. For example
+ // (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
+ // The final pair of instructions turns out to be notably faster, at least in some benchmarks.
+ f.Config.splitLoad(v)
+ }
+ }
+ }
+}
+
+// combineFirst contains ops which appear in combine as the
+// first part of the key.
+var combineFirst = map[Op]bool{}
+
+func init() {
+ for k := range combine {
+ combineFirst[k[0]] = true
+ }
+}
+
+// needSplit contains instructions that should be postprocessed by splitLoad
+// into a more-efficient two-instruction form.
+var needSplit = map[Op]bool{
+ OpAMD64CMPBloadidx1: true,
+ OpAMD64CMPWloadidx1: true,
+ OpAMD64CMPLloadidx1: true,
+ OpAMD64CMPQloadidx1: true,
+ OpAMD64CMPWloadidx2: true,
+ OpAMD64CMPLloadidx4: true,
+ OpAMD64CMPQloadidx8: true,
+
+ OpAMD64CMPBconstloadidx1: true,
+ OpAMD64CMPWconstloadidx1: true,
+ OpAMD64CMPLconstloadidx1: true,
+ OpAMD64CMPQconstloadidx1: true,
+ OpAMD64CMPWconstloadidx2: true,
+ OpAMD64CMPLconstloadidx4: true,
+ OpAMD64CMPQconstloadidx8: true,
+}
+
+// For each entry k, v in this map, if we have a value x with:
+//
+// x.Op == k[0]
+// x.Args[0].Op == k[1]
+//
+// then we can set x.Op to v and set x.Args like this:
+//
+// x.Args[0].Args + x.Args[1:]
+//
+// Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
+var combine = map[[2]Op]Op{
+ // amd64
+ [2]Op{OpAMD64MOVBload, OpAMD64ADDQ}: OpAMD64MOVBloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64ADDQ}: OpAMD64MOVWloadidx1,
+ [2]Op{OpAMD64MOVLload, OpAMD64ADDQ}: OpAMD64MOVLloadidx1,
+ [2]Op{OpAMD64MOVQload, OpAMD64ADDQ}: OpAMD64MOVQloadidx1,
+ [2]Op{OpAMD64MOVSSload, OpAMD64ADDQ}: OpAMD64MOVSSloadidx1,
+ [2]Op{OpAMD64MOVSDload, OpAMD64ADDQ}: OpAMD64MOVSDloadidx1,
+
+ [2]Op{OpAMD64MOVBstore, OpAMD64ADDQ}: OpAMD64MOVBstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64ADDQ}: OpAMD64MOVWstoreidx1,
+ [2]Op{OpAMD64MOVLstore, OpAMD64ADDQ}: OpAMD64MOVLstoreidx1,
+ [2]Op{OpAMD64MOVQstore, OpAMD64ADDQ}: OpAMD64MOVQstoreidx1,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64ADDQ}: OpAMD64MOVSSstoreidx1,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64ADDQ}: OpAMD64MOVSDstoreidx1,
+
+ [2]Op{OpAMD64MOVBstoreconst, OpAMD64ADDQ}: OpAMD64MOVBstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64ADDQ}: OpAMD64MOVWstoreconstidx1,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64ADDQ}: OpAMD64MOVLstoreconstidx1,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64ADDQ}: OpAMD64MOVQstoreconstidx1,
+
+ [2]Op{OpAMD64MOVBload, OpAMD64LEAQ1}: OpAMD64MOVBloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64LEAQ1}: OpAMD64MOVWloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64LEAQ2}: OpAMD64MOVWloadidx2,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ1}: OpAMD64MOVLloadidx1,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ4}: OpAMD64MOVLloadidx4,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ8}: OpAMD64MOVLloadidx8,
+ [2]Op{OpAMD64MOVQload, OpAMD64LEAQ1}: OpAMD64MOVQloadidx1,
+ [2]Op{OpAMD64MOVQload, OpAMD64LEAQ8}: OpAMD64MOVQloadidx8,
+ [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ1}: OpAMD64MOVSSloadidx1,
+ [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ4}: OpAMD64MOVSSloadidx4,
+ [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ1}: OpAMD64MOVSDloadidx1,
+ [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ8}: OpAMD64MOVSDloadidx8,
+
+ [2]Op{OpAMD64MOVBstore, OpAMD64LEAQ1}: OpAMD64MOVBstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ1}: OpAMD64MOVWstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ2}: OpAMD64MOVWstoreidx2,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ1}: OpAMD64MOVLstoreidx1,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ4}: OpAMD64MOVLstoreidx4,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ8}: OpAMD64MOVLstoreidx8,
+ [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ1}: OpAMD64MOVQstoreidx1,
+ [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ8}: OpAMD64MOVQstoreidx8,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ1}: OpAMD64MOVSSstoreidx1,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ4}: OpAMD64MOVSSstoreidx4,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ1}: OpAMD64MOVSDstoreidx1,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ8}: OpAMD64MOVSDstoreidx8,
+
+ [2]Op{OpAMD64MOVBstoreconst, OpAMD64LEAQ1}: OpAMD64MOVBstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ1}: OpAMD64MOVWstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ2}: OpAMD64MOVWstoreconstidx2,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ1}: OpAMD64MOVLstoreconstidx1,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ4}: OpAMD64MOVLstoreconstidx4,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: OpAMD64MOVQstoreconstidx1,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8,
+
+ [2]Op{OpAMD64SETEQstore, OpAMD64LEAQ1}: OpAMD64SETEQstoreidx1,
+ [2]Op{OpAMD64SETNEstore, OpAMD64LEAQ1}: OpAMD64SETNEstoreidx1,
+ [2]Op{OpAMD64SETLstore, OpAMD64LEAQ1}: OpAMD64SETLstoreidx1,
+ [2]Op{OpAMD64SETLEstore, OpAMD64LEAQ1}: OpAMD64SETLEstoreidx1,
+ [2]Op{OpAMD64SETGstore, OpAMD64LEAQ1}: OpAMD64SETGstoreidx1,
+ [2]Op{OpAMD64SETGEstore, OpAMD64LEAQ1}: OpAMD64SETGEstoreidx1,
+ [2]Op{OpAMD64SETBstore, OpAMD64LEAQ1}: OpAMD64SETBstoreidx1,
+ [2]Op{OpAMD64SETBEstore, OpAMD64LEAQ1}: OpAMD64SETBEstoreidx1,
+ [2]Op{OpAMD64SETAstore, OpAMD64LEAQ1}: OpAMD64SETAstoreidx1,
+ [2]Op{OpAMD64SETAEstore, OpAMD64LEAQ1}: OpAMD64SETAEstoreidx1,
+
+ // These instructions are re-split differently for performance, see needSplit above.
+ // TODO if 386 versions are created, also update needSplit and _gen/386splitload.rules
+ [2]Op{OpAMD64CMPBload, OpAMD64ADDQ}: OpAMD64CMPBloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64ADDQ}: OpAMD64CMPWloadidx1,
+ [2]Op{OpAMD64CMPLload, OpAMD64ADDQ}: OpAMD64CMPLloadidx1,
+ [2]Op{OpAMD64CMPQload, OpAMD64ADDQ}: OpAMD64CMPQloadidx1,
+
+ [2]Op{OpAMD64CMPBload, OpAMD64LEAQ1}: OpAMD64CMPBloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64LEAQ1}: OpAMD64CMPWloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64LEAQ2}: OpAMD64CMPWloadidx2,
+ [2]Op{OpAMD64CMPLload, OpAMD64LEAQ1}: OpAMD64CMPLloadidx1,
+ [2]Op{OpAMD64CMPLload, OpAMD64LEAQ4}: OpAMD64CMPLloadidx4,
+ [2]Op{OpAMD64CMPQload, OpAMD64LEAQ1}: OpAMD64CMPQloadidx1,
+ [2]Op{OpAMD64CMPQload, OpAMD64LEAQ8}: OpAMD64CMPQloadidx8,
+
+ [2]Op{OpAMD64CMPBconstload, OpAMD64ADDQ}: OpAMD64CMPBconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64ADDQ}: OpAMD64CMPWconstloadidx1,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64ADDQ}: OpAMD64CMPLconstloadidx1,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64ADDQ}: OpAMD64CMPQconstloadidx1,
+
+ [2]Op{OpAMD64CMPBconstload, OpAMD64LEAQ1}: OpAMD64CMPBconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ1}: OpAMD64CMPWconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ2}: OpAMD64CMPWconstloadidx2,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ1}: OpAMD64CMPLconstloadidx1,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ4}: OpAMD64CMPLconstloadidx4,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ1}: OpAMD64CMPQconstloadidx1,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ8}: OpAMD64CMPQconstloadidx8,
+
+ [2]Op{OpAMD64ADDLload, OpAMD64ADDQ}: OpAMD64ADDLloadidx1,
+ [2]Op{OpAMD64ADDQload, OpAMD64ADDQ}: OpAMD64ADDQloadidx1,
+ [2]Op{OpAMD64SUBLload, OpAMD64ADDQ}: OpAMD64SUBLloadidx1,
+ [2]Op{OpAMD64SUBQload, OpAMD64ADDQ}: OpAMD64SUBQloadidx1,
+ [2]Op{OpAMD64ANDLload, OpAMD64ADDQ}: OpAMD64ANDLloadidx1,
+ [2]Op{OpAMD64ANDQload, OpAMD64ADDQ}: OpAMD64ANDQloadidx1,
+ [2]Op{OpAMD64ORLload, OpAMD64ADDQ}: OpAMD64ORLloadidx1,
+ [2]Op{OpAMD64ORQload, OpAMD64ADDQ}: OpAMD64ORQloadidx1,
+ [2]Op{OpAMD64XORLload, OpAMD64ADDQ}: OpAMD64XORLloadidx1,
+ [2]Op{OpAMD64XORQload, OpAMD64ADDQ}: OpAMD64XORQloadidx1,
+
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ1}: OpAMD64ADDLloadidx1,
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ4}: OpAMD64ADDLloadidx4,
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ8}: OpAMD64ADDLloadidx8,
+ [2]Op{OpAMD64ADDQload, OpAMD64LEAQ1}: OpAMD64ADDQloadidx1,
+ [2]Op{OpAMD64ADDQload, OpAMD64LEAQ8}: OpAMD64ADDQloadidx8,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ1}: OpAMD64SUBLloadidx1,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ4}: OpAMD64SUBLloadidx4,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ8}: OpAMD64SUBLloadidx8,
+ [2]Op{OpAMD64SUBQload, OpAMD64LEAQ1}: OpAMD64SUBQloadidx1,
+ [2]Op{OpAMD64SUBQload, OpAMD64LEAQ8}: OpAMD64SUBQloadidx8,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ1}: OpAMD64ANDLloadidx1,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ4}: OpAMD64ANDLloadidx4,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ8}: OpAMD64ANDLloadidx8,
+ [2]Op{OpAMD64ANDQload, OpAMD64LEAQ1}: OpAMD64ANDQloadidx1,
+ [2]Op{OpAMD64ANDQload, OpAMD64LEAQ8}: OpAMD64ANDQloadidx8,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ1}: OpAMD64ORLloadidx1,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ4}: OpAMD64ORLloadidx4,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ8}: OpAMD64ORLloadidx8,
+ [2]Op{OpAMD64ORQload, OpAMD64LEAQ1}: OpAMD64ORQloadidx1,
+ [2]Op{OpAMD64ORQload, OpAMD64LEAQ8}: OpAMD64ORQloadidx8,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ1}: OpAMD64XORLloadidx1,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ4}: OpAMD64XORLloadidx4,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ8}: OpAMD64XORLloadidx8,
+ [2]Op{OpAMD64XORQload, OpAMD64LEAQ1}: OpAMD64XORQloadidx1,
+ [2]Op{OpAMD64XORQload, OpAMD64LEAQ8}: OpAMD64XORQloadidx8,
+
+ [2]Op{OpAMD64ADDLmodify, OpAMD64ADDQ}: OpAMD64ADDLmodifyidx1,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64ADDQ}: OpAMD64ADDQmodifyidx1,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64ADDQ}: OpAMD64SUBLmodifyidx1,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64ADDQ}: OpAMD64SUBQmodifyidx1,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64ADDQ}: OpAMD64ANDLmodifyidx1,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64ADDQ}: OpAMD64ANDQmodifyidx1,
+ [2]Op{OpAMD64ORLmodify, OpAMD64ADDQ}: OpAMD64ORLmodifyidx1,
+ [2]Op{OpAMD64ORQmodify, OpAMD64ADDQ}: OpAMD64ORQmodifyidx1,
+ [2]Op{OpAMD64XORLmodify, OpAMD64ADDQ}: OpAMD64XORLmodifyidx1,
+ [2]Op{OpAMD64XORQmodify, OpAMD64ADDQ}: OpAMD64XORQmodifyidx1,
+
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ1}: OpAMD64ADDLmodifyidx1,
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ4}: OpAMD64ADDLmodifyidx4,
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ8}: OpAMD64ADDLmodifyidx8,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ1}: OpAMD64ADDQmodifyidx1,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ8}: OpAMD64ADDQmodifyidx8,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ1}: OpAMD64SUBLmodifyidx1,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ4}: OpAMD64SUBLmodifyidx4,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ8}: OpAMD64SUBLmodifyidx8,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ1}: OpAMD64SUBQmodifyidx1,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ8}: OpAMD64SUBQmodifyidx8,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ1}: OpAMD64ANDLmodifyidx1,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ4}: OpAMD64ANDLmodifyidx4,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ8}: OpAMD64ANDLmodifyidx8,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ1}: OpAMD64ANDQmodifyidx1,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ8}: OpAMD64ANDQmodifyidx8,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ1}: OpAMD64ORLmodifyidx1,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ4}: OpAMD64ORLmodifyidx4,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ8}: OpAMD64ORLmodifyidx8,
+ [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ1}: OpAMD64ORQmodifyidx1,
+ [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ8}: OpAMD64ORQmodifyidx8,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ1}: OpAMD64XORLmodifyidx1,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ4}: OpAMD64XORLmodifyidx4,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ8}: OpAMD64XORLmodifyidx8,
+ [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ1}: OpAMD64XORQmodifyidx1,
+ [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ8}: OpAMD64XORQmodifyidx8,
+
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64ADDQ}: OpAMD64ADDLconstmodifyidx1,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64ADDQ}: OpAMD64ADDQconstmodifyidx1,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64ADDQ}: OpAMD64ANDLconstmodifyidx1,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64ADDQ}: OpAMD64ANDQconstmodifyidx1,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64ADDQ}: OpAMD64ORLconstmodifyidx1,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64ADDQ}: OpAMD64ORQconstmodifyidx1,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64ADDQ}: OpAMD64XORLconstmodifyidx1,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64ADDQ}: OpAMD64XORQconstmodifyidx1,
+
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ1}: OpAMD64ADDLconstmodifyidx1,
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ4}: OpAMD64ADDLconstmodifyidx4,
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ8}: OpAMD64ADDLconstmodifyidx8,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ1}: OpAMD64ADDQconstmodifyidx1,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ8}: OpAMD64ADDQconstmodifyidx8,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ1}: OpAMD64ANDLconstmodifyidx1,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ4}: OpAMD64ANDLconstmodifyidx4,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ8}: OpAMD64ANDLconstmodifyidx8,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ1}: OpAMD64ANDQconstmodifyidx1,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ8}: OpAMD64ANDQconstmodifyidx8,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ1}: OpAMD64ORLconstmodifyidx1,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ4}: OpAMD64ORLconstmodifyidx4,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ8}: OpAMD64ORLconstmodifyidx8,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ1}: OpAMD64ORQconstmodifyidx1,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ8}: OpAMD64ORQconstmodifyidx8,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ1}: OpAMD64XORLconstmodifyidx1,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ4}: OpAMD64XORLconstmodifyidx4,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ8}: OpAMD64XORLconstmodifyidx8,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ1}: OpAMD64XORQconstmodifyidx1,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ8}: OpAMD64XORQconstmodifyidx8,
+
+ [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ1}: OpAMD64ADDSSloadidx1,
+ [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ4}: OpAMD64ADDSSloadidx4,
+ [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ1}: OpAMD64ADDSDloadidx1,
+ [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ8}: OpAMD64ADDSDloadidx8,
+ [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ1}: OpAMD64SUBSSloadidx1,
+ [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ4}: OpAMD64SUBSSloadidx4,
+ [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ1}: OpAMD64SUBSDloadidx1,
+ [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ8}: OpAMD64SUBSDloadidx8,
+ [2]Op{OpAMD64MULSSload, OpAMD64LEAQ1}: OpAMD64MULSSloadidx1,
+ [2]Op{OpAMD64MULSSload, OpAMD64LEAQ4}: OpAMD64MULSSloadidx4,
+ [2]Op{OpAMD64MULSDload, OpAMD64LEAQ1}: OpAMD64MULSDloadidx1,
+ [2]Op{OpAMD64MULSDload, OpAMD64LEAQ8}: OpAMD64MULSDloadidx8,
+ [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ1}: OpAMD64DIVSSloadidx1,
+ [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ4}: OpAMD64DIVSSloadidx4,
+ [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ1}: OpAMD64DIVSDloadidx1,
+ [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ8}: OpAMD64DIVSDloadidx8,
+
+ [2]Op{OpAMD64SARXLload, OpAMD64ADDQ}: OpAMD64SARXLloadidx1,
+ [2]Op{OpAMD64SARXQload, OpAMD64ADDQ}: OpAMD64SARXQloadidx1,
+ [2]Op{OpAMD64SHLXLload, OpAMD64ADDQ}: OpAMD64SHLXLloadidx1,
+ [2]Op{OpAMD64SHLXQload, OpAMD64ADDQ}: OpAMD64SHLXQloadidx1,
+ [2]Op{OpAMD64SHRXLload, OpAMD64ADDQ}: OpAMD64SHRXLloadidx1,
+ [2]Op{OpAMD64SHRXQload, OpAMD64ADDQ}: OpAMD64SHRXQloadidx1,
+
+ [2]Op{OpAMD64SARXLload, OpAMD64LEAQ1}: OpAMD64SARXLloadidx1,
+ [2]Op{OpAMD64SARXLload, OpAMD64LEAQ4}: OpAMD64SARXLloadidx4,
+ [2]Op{OpAMD64SARXLload, OpAMD64LEAQ8}: OpAMD64SARXLloadidx8,
+ [2]Op{OpAMD64SARXQload, OpAMD64LEAQ1}: OpAMD64SARXQloadidx1,
+ [2]Op{OpAMD64SARXQload, OpAMD64LEAQ8}: OpAMD64SARXQloadidx8,
+ [2]Op{OpAMD64SHLXLload, OpAMD64LEAQ1}: OpAMD64SHLXLloadidx1,
+ [2]Op{OpAMD64SHLXLload, OpAMD64LEAQ4}: OpAMD64SHLXLloadidx4,
+ [2]Op{OpAMD64SHLXLload, OpAMD64LEAQ8}: OpAMD64SHLXLloadidx8,
+ [2]Op{OpAMD64SHLXQload, OpAMD64LEAQ1}: OpAMD64SHLXQloadidx1,
+ [2]Op{OpAMD64SHLXQload, OpAMD64LEAQ8}: OpAMD64SHLXQloadidx8,
+ [2]Op{OpAMD64SHRXLload, OpAMD64LEAQ1}: OpAMD64SHRXLloadidx1,
+ [2]Op{OpAMD64SHRXLload, OpAMD64LEAQ4}: OpAMD64SHRXLloadidx4,
+ [2]Op{OpAMD64SHRXLload, OpAMD64LEAQ8}: OpAMD64SHRXLloadidx8,
+ [2]Op{OpAMD64SHRXQload, OpAMD64LEAQ1}: OpAMD64SHRXQloadidx1,
+ [2]Op{OpAMD64SHRXQload, OpAMD64LEAQ8}: OpAMD64SHRXQloadidx8,
+
+ // amd64/v3
+ [2]Op{OpAMD64MOVBELload, OpAMD64ADDQ}: OpAMD64MOVBELloadidx1,
+ [2]Op{OpAMD64MOVBEQload, OpAMD64ADDQ}: OpAMD64MOVBEQloadidx1,
+ [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ1}: OpAMD64MOVBELloadidx1,
+ [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ4}: OpAMD64MOVBELloadidx4,
+ [2]Op{OpAMD64MOVBELload, OpAMD64LEAQ8}: OpAMD64MOVBELloadidx8,
+ [2]Op{OpAMD64MOVBEQload, OpAMD64LEAQ1}: OpAMD64MOVBEQloadidx1,
+ [2]Op{OpAMD64MOVBEQload, OpAMD64LEAQ8}: OpAMD64MOVBEQloadidx8,
+
+ [2]Op{OpAMD64MOVBEWstore, OpAMD64ADDQ}: OpAMD64MOVBEWstoreidx1,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64ADDQ}: OpAMD64MOVBELstoreidx1,
+ [2]Op{OpAMD64MOVBEQstore, OpAMD64ADDQ}: OpAMD64MOVBEQstoreidx1,
+ [2]Op{OpAMD64MOVBEWstore, OpAMD64LEAQ1}: OpAMD64MOVBEWstoreidx1,
+ [2]Op{OpAMD64MOVBEWstore, OpAMD64LEAQ2}: OpAMD64MOVBEWstoreidx2,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ1}: OpAMD64MOVBELstoreidx1,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ4}: OpAMD64MOVBELstoreidx4,
+ [2]Op{OpAMD64MOVBELstore, OpAMD64LEAQ8}: OpAMD64MOVBELstoreidx8,
+ [2]Op{OpAMD64MOVBEQstore, OpAMD64LEAQ1}: OpAMD64MOVBEQstoreidx1,
+ [2]Op{OpAMD64MOVBEQstore, OpAMD64LEAQ8}: OpAMD64MOVBEQstoreidx8,
+
+ // 386
+ [2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVLload, Op386ADDL}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVSSload, Op386ADDL}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSDload, Op386ADDL}: Op386MOVSDloadidx1,
+
+ [2]Op{Op386MOVBstore, Op386ADDL}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386ADDL}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVLstore, Op386ADDL}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386ADDL}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386ADDL}: Op386MOVSDstoreidx1,
+
+ [2]Op{Op386MOVBstoreconst, Op386ADDL}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386ADDL}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386ADDL}: Op386MOVLstoreconstidx1,
+
+ [2]Op{Op386MOVBload, Op386LEAL1}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL1}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL2}: Op386MOVWloadidx2,
+ [2]Op{Op386MOVLload, Op386LEAL1}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVLload, Op386LEAL4}: Op386MOVLloadidx4,
+ [2]Op{Op386MOVSSload, Op386LEAL1}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSSload, Op386LEAL4}: Op386MOVSSloadidx4,
+ [2]Op{Op386MOVSDload, Op386LEAL1}: Op386MOVSDloadidx1,
+ [2]Op{Op386MOVSDload, Op386LEAL8}: Op386MOVSDloadidx8,
+
+ [2]Op{Op386MOVBstore, Op386LEAL1}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL1}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL2}: Op386MOVWstoreidx2,
+ [2]Op{Op386MOVLstore, Op386LEAL1}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVLstore, Op386LEAL4}: Op386MOVLstoreidx4,
+ [2]Op{Op386MOVSSstore, Op386LEAL1}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386LEAL4}: Op386MOVSSstoreidx4,
+ [2]Op{Op386MOVSDstore, Op386LEAL1}: Op386MOVSDstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386LEAL8}: Op386MOVSDstoreidx8,
+
+ [2]Op{Op386MOVBstoreconst, Op386LEAL1}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL1}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL2}: Op386MOVWstoreconstidx2,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL1}: Op386MOVLstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL4}: Op386MOVLstoreconstidx4,
+
+ [2]Op{Op386ADDLload, Op386LEAL4}: Op386ADDLloadidx4,
+ [2]Op{Op386SUBLload, Op386LEAL4}: Op386SUBLloadidx4,
+ [2]Op{Op386MULLload, Op386LEAL4}: Op386MULLloadidx4,
+ [2]Op{Op386ANDLload, Op386LEAL4}: Op386ANDLloadidx4,
+ [2]Op{Op386ORLload, Op386LEAL4}: Op386ORLloadidx4,
+ [2]Op{Op386XORLload, Op386LEAL4}: Op386XORLloadidx4,
+
+ [2]Op{Op386ADDLmodify, Op386LEAL4}: Op386ADDLmodifyidx4,
+ [2]Op{Op386SUBLmodify, Op386LEAL4}: Op386SUBLmodifyidx4,
+ [2]Op{Op386ANDLmodify, Op386LEAL4}: Op386ANDLmodifyidx4,
+ [2]Op{Op386ORLmodify, Op386LEAL4}: Op386ORLmodifyidx4,
+ [2]Op{Op386XORLmodify, Op386LEAL4}: Op386XORLmodifyidx4,
+
+ [2]Op{Op386ADDLconstmodify, Op386LEAL4}: Op386ADDLconstmodifyidx4,
+ [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4,
+ [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4,
+ [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4,
+
+ // s390x
+ [2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx,
+ [2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx,
+ [2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx,
+ [2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx,
+
+ [2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx,
+ [2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx,
+ [2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx,
+
+ [2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx,
+ [2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx,
+ [2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx,
+
+ [2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx,
+ [2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx,
+
+ [2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx,
+ [2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx,
+ [2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx,
+ [2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx,
+
+ [2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx,
+ [2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx,
+ [2]Op{OpS390XMOVHBRstore, OpS390XADD}: OpS390XMOVHBRstoreidx,
+
+ [2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx,
+ [2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx,
+
+ [2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx,
+ [2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx,
+ [2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx,
+ [2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx,
+
+ [2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx,
+ [2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx,
+ [2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx,
+
+ [2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx,
+ [2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx,
+ [2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx,
+
+ [2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx,
+ [2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx,
+
+ [2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx,
+ [2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx,
+ [2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx,
+ [2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx,
+
+ [2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx,
+ [2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx,
+ [2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx,
+
+ [2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx,
+ [2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx,
+}
diff --git a/src/cmd/compile/internal/ssa/allocators.go b/src/cmd/compile/internal/ssa/allocators.go
new file mode 100644
index 0000000..ff70795
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/allocators.go
@@ -0,0 +1,311 @@
+// Code generated from _gen/allocators.go using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import (
+ "internal/unsafeheader"
+ "math/bits"
+ "sync"
+ "unsafe"
+)
+
+var poolFreeValueSlice [27]sync.Pool
+
+func (c *Cache) allocValueSlice(n int) []*Value {
+ var s []*Value
+ n2 := n
+ if n2 < 32 {
+ n2 = 32
+ }
+ b := bits.Len(uint(n2 - 1))
+ v := poolFreeValueSlice[b-5].Get()
+ if v == nil {
+ s = make([]*Value, 1<<b)
+ } else {
+ sp := v.(*[]*Value)
+ s = *sp
+ *sp = nil
+ c.hdrValueSlice = append(c.hdrValueSlice, sp)
+ }
+ s = s[:n]
+ return s
+}
+func (c *Cache) freeValueSlice(s []*Value) {
+ for i := range s {
+ s[i] = nil
+ }
+ b := bits.Len(uint(cap(s)) - 1)
+ var sp *[]*Value
+ if len(c.hdrValueSlice) == 0 {
+ sp = new([]*Value)
+ } else {
+ sp = c.hdrValueSlice[len(c.hdrValueSlice)-1]
+ c.hdrValueSlice[len(c.hdrValueSlice)-1] = nil
+ c.hdrValueSlice = c.hdrValueSlice[:len(c.hdrValueSlice)-1]
+ }
+ *sp = s
+ poolFreeValueSlice[b-5].Put(sp)
+}
+
+var poolFreeInt64Slice [27]sync.Pool
+
+func (c *Cache) allocInt64Slice(n int) []int64 {
+ var s []int64
+ n2 := n
+ if n2 < 32 {
+ n2 = 32
+ }
+ b := bits.Len(uint(n2 - 1))
+ v := poolFreeInt64Slice[b-5].Get()
+ if v == nil {
+ s = make([]int64, 1<<b)
+ } else {
+ sp := v.(*[]int64)
+ s = *sp
+ *sp = nil
+ c.hdrInt64Slice = append(c.hdrInt64Slice, sp)
+ }
+ s = s[:n]
+ return s
+}
+func (c *Cache) freeInt64Slice(s []int64) {
+ for i := range s {
+ s[i] = 0
+ }
+ b := bits.Len(uint(cap(s)) - 1)
+ var sp *[]int64
+ if len(c.hdrInt64Slice) == 0 {
+ sp = new([]int64)
+ } else {
+ sp = c.hdrInt64Slice[len(c.hdrInt64Slice)-1]
+ c.hdrInt64Slice[len(c.hdrInt64Slice)-1] = nil
+ c.hdrInt64Slice = c.hdrInt64Slice[:len(c.hdrInt64Slice)-1]
+ }
+ *sp = s
+ poolFreeInt64Slice[b-5].Put(sp)
+}
+
+var poolFreeSparseSet [27]sync.Pool
+
+func (c *Cache) allocSparseSet(n int) *sparseSet {
+ var s *sparseSet
+ n2 := n
+ if n2 < 32 {
+ n2 = 32
+ }
+ b := bits.Len(uint(n2 - 1))
+ v := poolFreeSparseSet[b-5].Get()
+ if v == nil {
+ s = newSparseSet(1 << b)
+ } else {
+ s = v.(*sparseSet)
+ }
+ return s
+}
+func (c *Cache) freeSparseSet(s *sparseSet) {
+ s.clear()
+ b := bits.Len(uint(s.cap()) - 1)
+ poolFreeSparseSet[b-5].Put(s)
+}
+
+var poolFreeSparseMap [27]sync.Pool
+
+func (c *Cache) allocSparseMap(n int) *sparseMap {
+ var s *sparseMap
+ n2 := n
+ if n2 < 32 {
+ n2 = 32
+ }
+ b := bits.Len(uint(n2 - 1))
+ v := poolFreeSparseMap[b-5].Get()
+ if v == nil {
+ s = newSparseMap(1 << b)
+ } else {
+ s = v.(*sparseMap)
+ }
+ return s
+}
+func (c *Cache) freeSparseMap(s *sparseMap) {
+ s.clear()
+ b := bits.Len(uint(s.cap()) - 1)
+ poolFreeSparseMap[b-5].Put(s)
+}
+
+var poolFreeSparseMapPos [27]sync.Pool
+
+func (c *Cache) allocSparseMapPos(n int) *sparseMapPos {
+ var s *sparseMapPos
+ n2 := n
+ if n2 < 32 {
+ n2 = 32
+ }
+ b := bits.Len(uint(n2 - 1))
+ v := poolFreeSparseMapPos[b-5].Get()
+ if v == nil {
+ s = newSparseMapPos(1 << b)
+ } else {
+ s = v.(*sparseMapPos)
+ }
+ return s
+}
+func (c *Cache) freeSparseMapPos(s *sparseMapPos) {
+ s.clear()
+ b := bits.Len(uint(s.cap()) - 1)
+ poolFreeSparseMapPos[b-5].Put(s)
+}
+func (c *Cache) allocBlockSlice(n int) []*Block {
+ var base *Value
+ var derived *Block
+ if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+ panic("bad")
+ }
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := c.allocValueSlice(int((uintptr(n) + scale - 1) / scale))
+ s := unsafeheader.Slice{
+ Data: unsafe.Pointer(&b[0]),
+ Len: n,
+ Cap: cap(b) * int(scale),
+ }
+ return *(*[]*Block)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeBlockSlice(s []*Block) {
+ var base *Value
+ var derived *Block
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := unsafeheader.Slice{
+ Data: unsafe.Pointer(&s[0]),
+ Len: int((uintptr(len(s)) + scale - 1) / scale),
+ Cap: int((uintptr(cap(s)) + scale - 1) / scale),
+ }
+ c.freeValueSlice(*(*[]*Value)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocIntSlice(n int) []int {
+ var base int64
+ var derived int
+ if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+ panic("bad")
+ }
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+ s := unsafeheader.Slice{
+ Data: unsafe.Pointer(&b[0]),
+ Len: n,
+ Cap: cap(b) * int(scale),
+ }
+ return *(*[]int)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeIntSlice(s []int) {
+ var base int64
+ var derived int
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := unsafeheader.Slice{
+ Data: unsafe.Pointer(&s[0]),
+ Len: int((uintptr(len(s)) + scale - 1) / scale),
+ Cap: int((uintptr(cap(s)) + scale - 1) / scale),
+ }
+ c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocInt32Slice(n int) []int32 {
+ var base int64
+ var derived int32
+ if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+ panic("bad")
+ }
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+ s := unsafeheader.Slice{
+ Data: unsafe.Pointer(&b[0]),
+ Len: n,
+ Cap: cap(b) * int(scale),
+ }
+ return *(*[]int32)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeInt32Slice(s []int32) {
+ var base int64
+ var derived int32
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := unsafeheader.Slice{
+ Data: unsafe.Pointer(&s[0]),
+ Len: int((uintptr(len(s)) + scale - 1) / scale),
+ Cap: int((uintptr(cap(s)) + scale - 1) / scale),
+ }
+ c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocInt8Slice(n int) []int8 {
+ var base int64
+ var derived int8
+ if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+ panic("bad")
+ }
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+ s := unsafeheader.Slice{
+ Data: unsafe.Pointer(&b[0]),
+ Len: n,
+ Cap: cap(b) * int(scale),
+ }
+ return *(*[]int8)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeInt8Slice(s []int8) {
+ var base int64
+ var derived int8
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := unsafeheader.Slice{
+ Data: unsafe.Pointer(&s[0]),
+ Len: int((uintptr(len(s)) + scale - 1) / scale),
+ Cap: int((uintptr(cap(s)) + scale - 1) / scale),
+ }
+ c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocBoolSlice(n int) []bool {
+ var base int64
+ var derived bool
+ if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+ panic("bad")
+ }
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+ s := unsafeheader.Slice{
+ Data: unsafe.Pointer(&b[0]),
+ Len: n,
+ Cap: cap(b) * int(scale),
+ }
+ return *(*[]bool)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeBoolSlice(s []bool) {
+ var base int64
+ var derived bool
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := unsafeheader.Slice{
+ Data: unsafe.Pointer(&s[0]),
+ Len: int((uintptr(len(s)) + scale - 1) / scale),
+ Cap: int((uintptr(cap(s)) + scale - 1) / scale),
+ }
+ c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
+func (c *Cache) allocIDSlice(n int) []ID {
+ var base int64
+ var derived ID
+ if unsafe.Sizeof(base)%unsafe.Sizeof(derived) != 0 {
+ panic("bad")
+ }
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := c.allocInt64Slice(int((uintptr(n) + scale - 1) / scale))
+ s := unsafeheader.Slice{
+ Data: unsafe.Pointer(&b[0]),
+ Len: n,
+ Cap: cap(b) * int(scale),
+ }
+ return *(*[]ID)(unsafe.Pointer(&s))
+}
+func (c *Cache) freeIDSlice(s []ID) {
+ var base int64
+ var derived ID
+ scale := unsafe.Sizeof(base) / unsafe.Sizeof(derived)
+ b := unsafeheader.Slice{
+ Data: unsafe.Pointer(&s[0]),
+ Len: int((uintptr(len(s)) + scale - 1) / scale),
+ Cap: int((uintptr(cap(s)) + scale - 1) / scale),
+ }
+ c.freeInt64Slice(*(*[]int64)(unsafe.Pointer(&b)))
+}
diff --git a/src/cmd/compile/internal/ssa/bench_test.go b/src/cmd/compile/internal/ssa/bench_test.go
new file mode 100644
index 0000000..1dc733b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/bench_test.go
@@ -0,0 +1,50 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package ssa
+
+import (
+ "math/rand"
+ "testing"
+)
+
+var d int
+
+//go:noinline
+func fn(a, b int) bool {
+ c := false
+ if a > 0 {
+ if b < 0 {
+ d = d + 1
+ }
+ c = true
+ }
+ return c
+}
+
+func BenchmarkPhioptPass(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ a := rand.Perm(i/10 + 10)
+ for i := 1; i < len(a)/2; i++ {
+ fn(a[i]-a[i-1], a[i+len(a)/2-2]-a[i+len(a)/2-1])
+ }
+ }
+}
+
+type Point struct {
+ X, Y int
+}
+
+//go:noinline
+func sign(p1, p2, p3 Point) bool {
+ return (p1.X-p3.X)*(p2.Y-p3.Y)-(p2.X-p3.X)*(p1.Y-p3.Y) < 0
+}
+
+func BenchmarkInvertLessThanNoov(b *testing.B) {
+ p1 := Point{1, 2}
+ p2 := Point{2, 3}
+ p3 := Point{3, 4}
+ for i := 0; i < b.N; i++ {
+ sign(p1, p2, p3)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
new file mode 100644
index 0000000..948aef9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -0,0 +1,111 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math"
+)
+
+// A biasedSparseMap is a sparseMap for integers between J and K inclusive,
+// where J might be somewhat larger than zero (and K-J is probably much smaller than J).
+// (The motivating use case is the line numbers of statements for a single function.)
+// Not all features of a SparseMap are exported, and it is also easy to treat a
+// biasedSparseMap like a SparseSet.
+type biasedSparseMap struct {
+ s *sparseMap
+ first int
+}
+
+// newBiasedSparseMap returns a new biasedSparseMap for values between first and last, inclusive.
+func newBiasedSparseMap(first, last int) *biasedSparseMap {
+ if first > last {
+ return &biasedSparseMap{first: math.MaxInt32, s: nil}
+ }
+ return &biasedSparseMap{first: first, s: newSparseMap(1 + last - first)}
+}
+
+// cap returns one more than the largest key valid for s
+func (s *biasedSparseMap) cap() int {
+ if s == nil || s.s == nil {
+ return 0
+ }
+ return s.s.cap() + int(s.first)
+}
+
+// size returns the number of entries stored in s
+func (s *biasedSparseMap) size() int {
+ if s == nil || s.s == nil {
+ return 0
+ }
+ return s.s.size()
+}
+
+// contains reports whether x is a key in s
+func (s *biasedSparseMap) contains(x uint) bool {
+ if s == nil || s.s == nil {
+ return false
+ }
+ if int(x) < s.first {
+ return false
+ }
+ if int(x) >= s.cap() {
+ return false
+ }
+ return s.s.contains(ID(int(x) - s.first))
+}
+
+// get returns the value s maps for key x, or -1 if
+// x is not mapped or is out of range for s.
+func (s *biasedSparseMap) get(x uint) int32 {
+ if s == nil || s.s == nil {
+ return -1
+ }
+ if int(x) < s.first {
+ return -1
+ }
+ if int(x) >= s.cap() {
+ return -1
+ }
+ return s.s.get(ID(int(x) - s.first))
+}
+
+// getEntry returns the i'th key and value stored in s,
+// where 0 <= i < s.size()
+func (s *biasedSparseMap) getEntry(i int) (x uint, v int32) {
+ e := s.s.contents()[i]
+ x = uint(int(e.key) + s.first)
+ v = e.val
+ return
+}
+
+// add inserts x->0 into s, provided that x is in the range of keys stored in s.
+func (s *biasedSparseMap) add(x uint) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.set(ID(int(x)-s.first), 0)
+}
+
+// add inserts x->v into s, provided that x is in the range of keys stored in s.
+func (s *biasedSparseMap) set(x uint, v int32) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.set(ID(int(x)-s.first), v)
+}
+
+// remove removes key x from s.
+func (s *biasedSparseMap) remove(x uint) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.remove(ID(int(x) - s.first))
+}
+
+func (s *biasedSparseMap) clear() {
+ if s.s != nil {
+ s.s.clear()
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
new file mode 100644
index 0000000..26af10b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -0,0 +1,428 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Block represents a basic block in the control flow graph of a function.
+type Block struct {
+ // A unique identifier for the block. The system will attempt to allocate
+ // these IDs densely, but no guarantees.
+ ID ID
+
+ // Source position for block's control operation
+ Pos src.XPos
+
+ // The kind of block this is.
+ Kind BlockKind
+
+ // Likely direction for branches.
+ // If BranchLikely, Succs[0] is the most likely branch taken.
+ // If BranchUnlikely, Succs[1] is the most likely branch taken.
+ // Ignored if len(Succs) < 2.
+ // Fatal if not BranchUnknown and len(Succs) > 2.
+ Likely BranchPrediction
+
+ // After flagalloc, records whether flags are live at the end of the block.
+ FlagsLiveAtEnd bool
+
+ // Subsequent blocks, if any. The number and order depend on the block kind.
+ Succs []Edge
+
+ // Inverse of successors.
+ // The order is significant to Phi nodes in the block.
+ // TODO: predecessors is a pain to maintain. Can we somehow order phi
+ // arguments by block id and have this field computed explicitly when needed?
+ Preds []Edge
+
+ // A list of values that determine how the block is exited. The number
+ // and type of control values depends on the Kind of the block. For
+ // instance, a BlockIf has a single boolean control value and BlockExit
+ // has a single memory control value.
+ //
+ // The ControlValues() method may be used to get a slice with the non-nil
+ // control values that can be ranged over.
+ //
+ // Controls[1] must be nil if Controls[0] is nil.
+ Controls [2]*Value
+
+ // Auxiliary info for the block. Its value depends on the Kind.
+ Aux Aux
+ AuxInt int64
+
+ // The unordered set of Values that define the operation of this block.
+ // After the scheduling pass, this list is ordered.
+ Values []*Value
+
+ // The containing function
+ Func *Func
+
+ // Storage for Succs, Preds and Values.
+ succstorage [2]Edge
+ predstorage [4]Edge
+ valstorage [9]*Value
+}
+
+// Edge represents a CFG edge.
+// Example edges for b branching to either c or d.
+// (c and d have other predecessors.)
+//
+// b.Succs = [{c,3}, {d,1}]
+// c.Preds = [?, ?, ?, {b,0}]
+// d.Preds = [?, {b,1}, ?]
+//
+// These indexes allow us to edit the CFG in constant time.
+// In addition, it informs phi ops in degenerate cases like:
+//
+// b:
+// if k then c else c
+// c:
+// v = Phi(x, y)
+//
+// Then the indexes tell you whether x is chosen from
+// the if or else branch from b.
+//
+// b.Succs = [{c,0},{c,1}]
+// c.Preds = [{b,0},{b,1}]
+//
+// means x is chosen if k is true.
+type Edge struct {
+ // block edge goes to (in a Succs list) or from (in a Preds list)
+ b *Block
+ // index of reverse edge. Invariant:
+ // e := x.Succs[idx]
+ // e.b.Preds[e.i] = Edge{x,idx}
+ // and similarly for predecessors.
+ i int
+}
+
+func (e Edge) Block() *Block {
+ return e.b
+}
+func (e Edge) Index() int {
+ return e.i
+}
+func (e Edge) String() string {
+ return fmt.Sprintf("{%v,%d}", e.b, e.i)
+}
+
+// BlockKind is the kind of SSA block.
+type BlockKind int16
+
+// short form print
+func (b *Block) String() string {
+ return fmt.Sprintf("b%d", b.ID)
+}
+
+// long form print
+func (b *Block) LongString() string {
+ s := b.Kind.String()
+ if b.Aux != nil {
+ s += fmt.Sprintf(" {%s}", b.Aux)
+ }
+ if t := b.AuxIntString(); t != "" {
+ s += fmt.Sprintf(" [%s]", t)
+ }
+ for _, c := range b.ControlValues() {
+ s += fmt.Sprintf(" %s", c)
+ }
+ if len(b.Succs) > 0 {
+ s += " ->"
+ for _, c := range b.Succs {
+ s += " " + c.b.String()
+ }
+ }
+ switch b.Likely {
+ case BranchUnlikely:
+ s += " (unlikely)"
+ case BranchLikely:
+ s += " (likely)"
+ }
+ return s
+}
+
+// NumControls returns the number of non-nil control values the
+// block has.
+func (b *Block) NumControls() int {
+ if b.Controls[0] == nil {
+ return 0
+ }
+ if b.Controls[1] == nil {
+ return 1
+ }
+ return 2
+}
+
+// ControlValues returns a slice containing the non-nil control
+// values of the block. The index of each control value will be
+// the same as it is in the Controls property and can be used
+// in ReplaceControl calls.
+func (b *Block) ControlValues() []*Value {
+ if b.Controls[0] == nil {
+ return b.Controls[:0]
+ }
+ if b.Controls[1] == nil {
+ return b.Controls[:1]
+ }
+ return b.Controls[:2]
+}
+
+// SetControl removes all existing control values and then adds
+// the control value provided. The number of control values after
+// a call to SetControl will always be 1.
+func (b *Block) SetControl(v *Value) {
+ b.ResetControls()
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// ResetControls sets the number of controls for the block to 0.
+func (b *Block) ResetControls() {
+ if b.Controls[0] != nil {
+ b.Controls[0].Uses--
+ }
+ if b.Controls[1] != nil {
+ b.Controls[1].Uses--
+ }
+ b.Controls = [2]*Value{} // reset both controls to nil
+}
+
+// AddControl appends a control value to the existing list of control values.
+func (b *Block) AddControl(v *Value) {
+ i := b.NumControls()
+ b.Controls[i] = v // panics if array is full
+ v.Uses++
+}
+
+// ReplaceControl exchanges the existing control value at the index provided
+// for the new value. The index must refer to a valid control value.
+func (b *Block) ReplaceControl(i int, v *Value) {
+ b.Controls[i].Uses--
+ b.Controls[i] = v
+ v.Uses++
+}
+
+// CopyControls replaces the controls for this block with those from the
+// provided block. The provided block is not modified.
+func (b *Block) CopyControls(from *Block) {
+ if b == from {
+ return
+ }
+ b.ResetControls()
+ for _, c := range from.ControlValues() {
+ b.AddControl(c)
+ }
+}
+
+// Reset sets the block to the provided kind and clears all the blocks control
+// and auxiliary values. Other properties of the block, such as its successors,
+// predecessors and values are left unmodified.
+func (b *Block) Reset(kind BlockKind) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+}
+
+// resetWithControl resets b and adds control v.
+// It is equivalent to b.Reset(kind); b.AddControl(v),
+// except that it is one call instead of two and avoids a bounds check.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl(kind BlockKind, v *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// resetWithControl2 resets b and adds controls v and w.
+// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w),
+// except that it is one call instead of three and avoids two bounds checks.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ b.Controls[1] = w
+ v.Uses++
+ w.Uses++
+}
+
+// truncateValues truncates b.Values at the ith element, zeroing subsequent elements.
+// The values in b.Values after i must already have had their args reset,
+// to maintain correct value uses counts.
+func (b *Block) truncateValues(i int) {
+ tail := b.Values[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ b.Values = b.Values[:i]
+}
+
+// AddEdgeTo adds an edge from block b to block c.
+func (b *Block) AddEdgeTo(c *Block) {
+ i := len(b.Succs)
+ j := len(c.Preds)
+ b.Succs = append(b.Succs, Edge{c, j})
+ c.Preds = append(c.Preds, Edge{b, i})
+ b.Func.invalidateCFG()
+}
+
+// removePred removes the ith input edge from b.
+// It is the responsibility of the caller to remove
+// the corresponding successor edge, and adjust any
+// phi values by calling b.removePhiArg(v, i).
+func (b *Block) removePred(i int) {
+ n := len(b.Preds) - 1
+ if i != n {
+ e := b.Preds[n]
+ b.Preds[i] = e
+ // Update the other end of the edge we moved.
+ e.b.Succs[e.i].i = i
+ }
+ b.Preds[n] = Edge{}
+ b.Preds = b.Preds[:n]
+ b.Func.invalidateCFG()
+}
+
+// removeSucc removes the ith output edge from b.
+// It is the responsibility of the caller to remove
+// the corresponding predecessor edge.
+// Note that this potentially reorders successors of b, so it
+// must be used very carefully.
+func (b *Block) removeSucc(i int) {
+ n := len(b.Succs) - 1
+ if i != n {
+ e := b.Succs[n]
+ b.Succs[i] = e
+ // Update the other end of the edge we moved.
+ e.b.Preds[e.i].i = i
+ }
+ b.Succs[n] = Edge{}
+ b.Succs = b.Succs[:n]
+ b.Func.invalidateCFG()
+}
+
+func (b *Block) swapSuccessors() {
+ if len(b.Succs) != 2 {
+ b.Fatalf("swapSuccessors with len(Succs)=%d", len(b.Succs))
+ }
+ e0 := b.Succs[0]
+ e1 := b.Succs[1]
+ b.Succs[0] = e1
+ b.Succs[1] = e0
+ e0.b.Preds[e0.i].i = 1
+ e1.b.Preds[e1.i].i = 0
+ b.Likely *= -1
+}
+
+// Swaps b.Succs[x] and b.Succs[y].
+func (b *Block) swapSuccessorsByIdx(x, y int) {
+ if x == y {
+ return
+ }
+ ex := b.Succs[x]
+ ey := b.Succs[y]
+ b.Succs[x] = ey
+ b.Succs[y] = ex
+ ex.b.Preds[ex.i].i = y
+ ey.b.Preds[ey.i].i = x
+}
+
+// removePhiArg removes the ith arg from phi.
+// It must be called after calling b.removePred(i) to
+// adjust the corresponding phi value of the block:
+//
+// b.removePred(i)
+// for _, v := range b.Values {
+//
+// if v.Op != OpPhi {
+// continue
+// }
+// b.removePhiArg(v, i)
+//
+// }
+func (b *Block) removePhiArg(phi *Value, i int) {
+ n := len(b.Preds)
+ if numPhiArgs := len(phi.Args); numPhiArgs-1 != n {
+ b.Fatalf("inconsistent state for %v, num predecessors: %d, num phi args: %d", phi, n, numPhiArgs)
+ }
+ phi.Args[i].Uses--
+ phi.Args[i] = phi.Args[n]
+ phi.Args[n] = nil
+ phi.Args = phi.Args[:n]
+ phielimValue(phi)
+}
+
+// LackingPos indicates whether b is a block whose position should be inherited
+// from its successors. This is true if all the values within it have unreliable positions
+// and if it is "plain", meaning that there is no control flow that is also very likely
+// to correspond to a well-understood source position.
+func (b *Block) LackingPos() bool {
+ // Non-plain predecessors are If or Defer, which both (1) have two successors,
+ // which might have different line numbers and (2) correspond to statements
+ // in the source code that have positions, so this case ought not occur anyway.
+ if b.Kind != BlockPlain {
+ return false
+ }
+ if b.Pos != src.NoXPos {
+ return false
+ }
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+func (b *Block) AuxIntString() string {
+ switch b.Kind.AuxIntType() {
+ case "int8":
+ return fmt.Sprintf("%v", int8(b.AuxInt))
+ case "uint8":
+ return fmt.Sprintf("%v", uint8(b.AuxInt))
+ case "": // no aux int type
+ return ""
+ default: // type specified but not implemented - print as int64
+ return fmt.Sprintf("%v", b.AuxInt)
+ }
+}
+
+// likelyBranch reports whether block b is the likely branch of all of its predecessors.
+func (b *Block) likelyBranch() bool {
+ if len(b.Preds) == 0 {
+ return false
+ }
+ for _, e := range b.Preds {
+ p := e.b
+ if len(p.Succs) == 1 || len(p.Succs) == 2 && (p.Likely == BranchLikely && p.Succs[0].b == b ||
+ p.Likely == BranchUnlikely && p.Succs[1].b == b) {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) }
+func (b *Block) Log() bool { return b.Func.Log() }
+func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) }
+
+type BranchPrediction int8
+
+const (
+ BranchUnlikely = BranchPrediction(-1)
+ BranchUnknown = BranchPrediction(0)
+ BranchLikely = BranchPrediction(+1)
+)
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
new file mode 100644
index 0000000..f16959d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -0,0 +1,470 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// branchelim tries to eliminate branches by
+// generating CondSelect instructions.
+//
+// Search for basic blocks that look like
+//
+// bb0 bb0
+// | \ / \
+// | bb1 or bb1 bb2 <- trivial if/else blocks
+// | / \ /
+// bb2 bb3
+//
+// where the intermediate blocks are mostly empty (with no side-effects);
+// rewrite Phis in the postdominator as CondSelects.
+func branchelim(f *Func) {
+ // FIXME: add support for lowering CondSelects on more architectures
+ switch f.Config.arch {
+ case "arm64", "ppc64le", "ppc64", "amd64", "wasm", "loong64":
+ // implemented
+ default:
+ return
+ }
+
+ // Find all the values used in computing the address of any load.
+ // Typically these values have operations like AddPtr, Lsh64x64, etc.
+ loadAddr := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(loadAddr)
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64:
+ loadAddr.add(v.Args[0].ID)
+ case OpMove:
+ loadAddr.add(v.Args[1].ID)
+ }
+ }
+ }
+ po := f.postorder()
+ for {
+ n := loadAddr.size()
+ for _, b := range po {
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if !loadAddr.contains(v.ID) {
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Type.IsInteger() || a.Type.IsPtr() || a.Type.IsUnsafePtr() {
+ loadAddr.add(a.ID)
+ }
+ }
+ }
+ }
+ if loadAddr.size() == n {
+ break
+ }
+ }
+
+ change := true
+ for change {
+ change = false
+ for _, b := range f.Blocks {
+ change = elimIf(f, loadAddr, b) || elimIfElse(f, loadAddr, b) || change
+ }
+ }
+}
+
+func canCondSelect(v *Value, arch string, loadAddr *sparseSet) bool {
+ if loadAddr.contains(v.ID) {
+ // The result of the soon-to-be conditional move is used to compute a load address.
+ // We want to avoid generating a conditional move in this case
+ // because the load address would now be data-dependent on the condition.
+ // Previously it would only be control-dependent on the condition, which is faster
+ // if the branch predicts well (or possibly even if it doesn't, if the load will
+ // be an expensive cache miss).
+ // See issue #26306.
+ return false
+ }
+ if arch == "loong64" {
+ // We should not generate conditional moves if neither of the arguments is constant zero,
+ // because it requires three instructions (OR, MASKEQZ, MASKNEZ) and will increase the
+ // register pressure.
+ if !(v.Args[0].isGenericIntConst() && v.Args[0].AuxInt == 0) &&
+ !(v.Args[1].isGenericIntConst() && v.Args[1].AuxInt == 0) {
+ return false
+ }
+ }
+ // For now, stick to simple scalars that fit in registers
+ switch {
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ return false
+ case v.Type.IsPtrShaped():
+ return true
+ case v.Type.IsInteger():
+ if arch == "amd64" && v.Type.Size() < 2 {
+ // amd64 doesn't support CMOV with byte registers
+ return false
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+// elimIf converts the one-way branch starting at dom in f to a conditional move if possible.
+// loadAddr is a set of values which are used to compute the address of a load.
+// Those values are exempt from CMOV generation.
+func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
+ // See if dom is an If with one arm that
+ // is trivial and succeeded by the other
+ // successor of dom.
+ if dom.Kind != BlockIf || dom.Likely != BranchUnknown {
+ return false
+ }
+ var simple, post *Block
+ for i := range dom.Succs {
+ bb, other := dom.Succs[i].Block(), dom.Succs[i^1].Block()
+ if isLeafPlain(bb) && bb.Succs[0].Block() == other {
+ simple = bb
+ post = other
+ break
+ }
+ }
+ if simple == nil || len(post.Preds) != 2 || post == dom {
+ return false
+ }
+
+ // We've found our diamond CFG of blocks.
+ // Now decide if fusing 'simple' into dom+post
+ // looks profitable.
+
+ // Check that there are Phis, and that all of them
+ // can be safely rewritten to CondSelect.
+ hasphis := false
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ hasphis = true
+ if !canCondSelect(v, f.Config.arch, loadAddr) {
+ return false
+ }
+ }
+ }
+ if !hasphis {
+ return false
+ }
+
+ // Pick some upper bound for the number of instructions
+ // we'd be willing to execute just to generate a dead
+ // argument to CondSelect. In the worst case, this is
+ // the number of useless instructions executed.
+ const maxfuseinsts = 2
+
+ if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
+ return false
+ }
+
+ // Replace Phi instructions in b with CondSelect instructions
+ swap := (post.Preds[0].Block() == dom) != (dom.Succs[0].Block() == post)
+ for _, v := range post.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Op = OpCondSelect
+ if swap {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ v.AddArg(dom.Controls[0])
+ }
+
+ // Put all of the instructions into 'dom'
+ // and update the CFG appropriately.
+ dom.Kind = post.Kind
+ dom.CopyControls(post)
+ dom.Aux = post.Aux
+ dom.Succs = append(dom.Succs[:0], post.Succs...)
+ for i := range dom.Succs {
+ e := dom.Succs[i]
+ e.b.Preds[e.i].b = dom
+ }
+
+ // Try really hard to preserve statement marks attached to blocks.
+ simplePos := simple.Pos
+ postPos := post.Pos
+ simpleStmt := simplePos.IsStmt() == src.PosIsStmt
+ postStmt := postPos.IsStmt() == src.PosIsStmt
+
+ for _, v := range simple.Values {
+ v.Block = dom
+ }
+ for _, v := range post.Values {
+ v.Block = dom
+ }
+
+ // findBlockPos determines if b contains a stmt-marked value
+ // that has the same line number as the Pos for b itself.
+ // (i.e. is the position on b actually redundant?)
+ findBlockPos := func(b *Block) bool {
+ pos := b.Pos
+ for _, v := range b.Values {
+ // See if there is a stmt-marked value already that matches simple.Pos (and perhaps post.Pos)
+ if pos.SameFileAndLine(v.Pos) && v.Pos.IsStmt() == src.PosIsStmt {
+ return true
+ }
+ }
+ return false
+ }
+ if simpleStmt {
+ simpleStmt = !findBlockPos(simple)
+ if !simpleStmt && simplePos.SameFileAndLine(postPos) {
+ postStmt = false
+ }
+
+ }
+ if postStmt {
+ postStmt = !findBlockPos(post)
+ }
+
+ // If simpleStmt and/or postStmt are still true, then try harder
+ // to find the corresponding statement marks new homes.
+
+ // setBlockPos determines if b contains a can-be-statement value
+ // that has the same line number as the Pos for b itself, and
+ // puts a statement mark on it, and returns whether it succeeded
+ // in this operation.
+ setBlockPos := func(b *Block) bool {
+ pos := b.Pos
+ for _, v := range b.Values {
+ if pos.SameFileAndLine(v.Pos) && !isPoorStatementOp(v.Op) {
+ v.Pos = v.Pos.WithIsStmt()
+ return true
+ }
+ }
+ return false
+ }
+ // If necessary and possible, add a mark to a value in simple
+ if simpleStmt {
+ if setBlockPos(simple) && simplePos.SameFileAndLine(postPos) {
+ postStmt = false
+ }
+ }
+ // If necessary and possible, add a mark to a value in post
+ if postStmt {
+ postStmt = !setBlockPos(post)
+ }
+
+ // Before giving up (this was added because it helps), try the end of "dom", and if that is not available,
+ // try the values in the successor block if it is uncomplicated.
+ if postStmt {
+ if dom.Pos.IsStmt() != src.PosIsStmt {
+ dom.Pos = postPos
+ } else {
+ // Try the successor block
+ if len(dom.Succs) == 1 && len(dom.Succs[0].Block().Preds) == 1 {
+ succ := dom.Succs[0].Block()
+ for _, v := range succ.Values {
+ if isPoorStatementOp(v.Op) {
+ continue
+ }
+ if postPos.SameFileAndLine(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ postStmt = false
+ break
+ }
+ // If postStmt still true, tag the block itself if possible
+ if postStmt && succ.Pos.IsStmt() != src.PosIsStmt {
+ succ.Pos = postPos
+ }
+ }
+ }
+ }
+
+ dom.Values = append(dom.Values, simple.Values...)
+ dom.Values = append(dom.Values, post.Values...)
+
+ // Trash 'post' and 'simple'
+ clobberBlock(post)
+ clobberBlock(simple)
+
+ f.invalidateCFG()
+ return true
+}
+
+// is this a BlockPlain with one predecessor?
+func isLeafPlain(b *Block) bool {
+ return b.Kind == BlockPlain && len(b.Preds) == 1
+}
+
+func clobberBlock(b *Block) {
+ b.Values = nil
+ b.Preds = nil
+ b.Succs = nil
+ b.Aux = nil
+ b.ResetControls()
+ b.Likely = BranchUnknown
+ b.Kind = BlockInvalid
+}
+
+// elimIfElse converts the two-way branch starting at dom in f to a conditional move if possible.
+// loadAddr is a set of values which are used to compute the address of a load.
+// Those values are exempt from CMOV generation.
+func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
+ // See if 'b' ends in an if/else: it should
+ // have two successors, both of which are BlockPlain
+ // and succeeded by the same block.
+ if b.Kind != BlockIf || b.Likely != BranchUnknown {
+ return false
+ }
+ yes, no := b.Succs[0].Block(), b.Succs[1].Block()
+ if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
+ return false
+ }
+ if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
+ return false
+ }
+ if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
+ return false
+ }
+ // block that postdominates the if/else
+ post := b.Succs[0].Block().Succs[0].Block()
+ if len(post.Preds) != 2 || post == b {
+ return false
+ }
+ hasphis := false
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ hasphis = true
+ if !canCondSelect(v, f.Config.arch, loadAddr) {
+ return false
+ }
+ }
+ }
+ if !hasphis {
+ return false
+ }
+
+ // Don't generate CondSelects if branch is cheaper.
+ if !shouldElimIfElse(no, yes, post, f.Config.arch) {
+ return false
+ }
+
+ // now we're committed: rewrite each Phi as a CondSelect
+ swap := post.Preds[0].Block() != b.Succs[0].Block()
+ for _, v := range post.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Op = OpCondSelect
+ if swap {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ v.AddArg(b.Controls[0])
+ }
+
+ // Move the contents of all of these
+ // blocks into 'b' and update CFG edges accordingly
+ b.Kind = post.Kind
+ b.CopyControls(post)
+ b.Aux = post.Aux
+ b.Succs = append(b.Succs[:0], post.Succs...)
+ for i := range b.Succs {
+ e := b.Succs[i]
+ e.b.Preds[e.i].b = b
+ }
+ for i := range post.Values {
+ post.Values[i].Block = b
+ }
+ for i := range yes.Values {
+ yes.Values[i].Block = b
+ }
+ for i := range no.Values {
+ no.Values[i].Block = b
+ }
+ b.Values = append(b.Values, yes.Values...)
+ b.Values = append(b.Values, no.Values...)
+ b.Values = append(b.Values, post.Values...)
+
+ // trash post, yes, and no
+ clobberBlock(yes)
+ clobberBlock(no)
+ clobberBlock(post)
+
+ f.invalidateCFG()
+ return true
+}
+
+// shouldElimIfElse reports whether estimated cost of eliminating branch
+// is lower than threshold.
+func shouldElimIfElse(no, yes, post *Block, arch string) bool {
+ switch arch {
+ default:
+ return true
+ case "amd64":
+ const maxcost = 2
+ phi := 0
+ other := 0
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ // Each phi results in CondSelect, which lowers into CMOV,
+ // CMOV has latency >1 on most CPUs.
+ phi++
+ }
+ for _, x := range v.Args {
+ if x.Block == no || x.Block == yes {
+ other++
+ }
+ }
+ }
+ cost := phi * 1
+ if phi > 1 {
+ // If we have more than 1 phi and some values in post have args
+ // in yes or no blocks, we may have to recalculate condition, because
+ // those args may clobber flags. For now assume that all operations clobber flags.
+ cost += other * 1
+ }
+ return cost < maxcost
+ }
+}
+
+// canSpeculativelyExecute reports whether every value in the block can
+// be evaluated without causing any observable side effects (memory
+// accesses, panics and so on) except for execution time changes. It
+// also ensures that the block does not contain any phis which we can't
+// speculatively execute.
+// Warning: this function cannot currently detect values that represent
+// instructions the execution of which need to be guarded with CPU
+// hardware feature checks. See issue #34950.
+func canSpeculativelyExecute(b *Block) bool {
+ // don't fuse memory ops, Phi ops, divides (can panic),
+ // or anything else with side-effects
+ for _, v := range b.Values {
+ if v.Op == OpPhi || isDivMod(v.Op) || isPtrArithmetic(v.Op) || v.Type.IsMemory() ||
+ v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects {
+ return false
+ }
+ }
+ return true
+}
+
+func isDivMod(op Op) bool {
+ switch op {
+ case OpDiv8, OpDiv8u, OpDiv16, OpDiv16u,
+ OpDiv32, OpDiv32u, OpDiv64, OpDiv64u, OpDiv128u,
+ OpDiv32F, OpDiv64F,
+ OpMod8, OpMod8u, OpMod16, OpMod16u,
+ OpMod32, OpMod32u, OpMod64, OpMod64u:
+ return true
+ default:
+ return false
+ }
+}
+
+func isPtrArithmetic(op Op) bool {
+ // Pointer arithmetic can't be speculatively executed because the result
+ // may be an invalid pointer (if, for example, the condition is that the
+ // base pointer is not nil). See issue 56990.
+ switch op {
+ case OpOffPtr, OpAddPtr, OpSubPtr:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/branchelim_test.go b/src/cmd/compile/internal/ssa/branchelim_test.go
new file mode 100644
index 0000000..20fa84d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/branchelim_test.go
@@ -0,0 +1,172 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+// Test that a trivial 'if' is eliminated
+func TestBranchElimIf(t *testing.T) {
+ var testData = []struct {
+ arch string
+ intType string
+ ok bool
+ }{
+ {"arm64", "int32", true},
+ {"amd64", "int32", true},
+ {"amd64", "int8", false},
+ }
+
+ for _, data := range testData {
+ t.Run(data.arch+"/"+data.intType, func(t *testing.T) {
+ c := testConfigArch(t, data.arch)
+ boolType := c.config.Types.Bool
+ var intType *types.Type
+ switch data.intType {
+ case "int32":
+ intType = c.config.Types.Int32
+ case "int8":
+ intType = c.config.Types.Int8
+ default:
+ t.Fatal("invalid integer type:", data.intType)
+ }
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if data.ok {
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ } else {
+ if len(fun.f.Blocks) != 3 {
+ t.Fatalf("expected 3 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ }
+ })
+ }
+}
+
+// Test that a trivial if/else is eliminated
+func TestBranchElimIfElse(t *testing.T) {
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b3",
+ Goto("b4")),
+ Bloc("b4",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ })
+ }
+}
+
+// Test that an if/else CFG that loops back
+// into itself does *not* get eliminated.
+func TestNoBranchElimLoop(t *testing.T) {
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+
+ // The control flow here is totally bogus,
+ // but a dead cycle seems like the only plausible
+ // way to arrive at a diamond CFG that is also a loop.
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("const3", OpConst32, intType, 3, nil),
+ Goto("b5")),
+ Bloc("b2",
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ Valu("phi", OpPhi, intType, 0, nil, "const2", "const3"),
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ Goto("b2")),
+ Bloc("b4",
+ Goto("b2")),
+ Bloc("b5",
+ Exit("start")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+
+ if len(fun.f.Blocks) != 5 {
+ t.Errorf("expected 5 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpPhi {
+ t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/cache.go b/src/cmd/compile/internal/ssa/cache.go
new file mode 100644
index 0000000..ba36edd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cache.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "sort"
+)
+
+// A Cache holds reusable compiler state.
+// It is intended to be re-used for multiple Func compilations.
+type Cache struct {
+ // Storage for low-numbered values and blocks.
+ values [2000]Value
+ blocks [200]Block
+ locs [2000]Location
+
+ // Reusable stackAllocState.
+ // See stackalloc.go's {new,put}StackAllocState.
+ stackAllocState *stackAllocState
+
+ scrPoset []*poset // scratch poset to be reused
+
+ // Reusable regalloc state.
+ regallocValues []valState
+
+ ValueToProgAfter []*obj.Prog
+ debugState debugState
+
+ Liveness interface{} // *gc.livenessFuncCache
+
+ // Free "headers" for use by the allocators in allocators.go.
+ // Used to put slices in sync.Pools without allocation.
+ hdrValueSlice []*[]*Value
+ hdrInt64Slice []*[]int64
+}
+
+func (c *Cache) Reset() {
+ nv := sort.Search(len(c.values), func(i int) bool { return c.values[i].ID == 0 })
+ xv := c.values[:nv]
+ for i := range xv {
+ xv[i] = Value{}
+ }
+ nb := sort.Search(len(c.blocks), func(i int) bool { return c.blocks[i].ID == 0 })
+ xb := c.blocks[:nb]
+ for i := range xb {
+ xb[i] = Block{}
+ }
+ nl := sort.Search(len(c.locs), func(i int) bool { return c.locs[i] == nil })
+ xl := c.locs[:nl]
+ for i := range xl {
+ xl[i] = nil
+ }
+
+ // regalloc sets the length of c.regallocValues to whatever it may use,
+ // so clear according to length.
+ for i := range c.regallocValues {
+ c.regallocValues[i] = valState{}
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
new file mode 100644
index 0000000..bbfdace
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -0,0 +1,630 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/internal/obj/s390x"
+ "math"
+ "math/bits"
+)
+
+// checkFunc checks invariants of f.
+func checkFunc(f *Func) {
+ blockMark := make([]bool, f.NumBlocks())
+ valueMark := make([]bool, f.NumValues())
+
+ for _, b := range f.Blocks {
+ if blockMark[b.ID] {
+ f.Fatalf("block %s appears twice in %s!", b, f.Name)
+ }
+ blockMark[b.ID] = true
+ if b.Func != f {
+ f.Fatalf("%s.Func=%s, want %s", b, b.Func.Name, f.Name)
+ }
+
+ for i, e := range b.Preds {
+ if se := e.b.Succs[e.i]; se.b != b || se.i != i {
+ f.Fatalf("block pred/succ not crosslinked correctly %d:%s %d:%s", i, b, se.i, se.b)
+ }
+ }
+ for i, e := range b.Succs {
+ if pe := e.b.Preds[e.i]; pe.b != b || pe.i != i {
+ f.Fatalf("block succ/pred not crosslinked correctly %d:%s %d:%s", i, b, pe.i, pe.b)
+ }
+ }
+
+ switch b.Kind {
+ case BlockExit:
+ if len(b.Succs) != 0 {
+ f.Fatalf("exit block %s has successors", b)
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("exit block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("exit block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockRet:
+ if len(b.Succs) != 0 {
+ f.Fatalf("ret block %s has successors", b)
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("ret block %s has nil control", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("ret block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockRetJmp:
+ if len(b.Succs) != 0 {
+ f.Fatalf("retjmp block %s len(Succs)==%d, want 0", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("retjmp block %s has nil control", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("retjmp block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockPlain:
+ if len(b.Succs) != 1 {
+ f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs))
+ }
+ if b.NumControls() != 0 {
+ f.Fatalf("plain block %s has non-nil control %s", b, b.Controls[0].LongString())
+ }
+ case BlockIf:
+ if len(b.Succs) != 2 {
+ f.Fatalf("if block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("if block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsBoolean() {
+ f.Fatalf("if block %s has non-bool control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockDefer:
+ if len(b.Succs) != 2 {
+ f.Fatalf("defer block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("defer block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("defer block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockFirst:
+ if len(b.Succs) != 2 {
+ f.Fatalf("plain/dead block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 0 {
+ f.Fatalf("plain/dead block %s has a control value", b)
+ }
+ case BlockJumpTable:
+ if b.NumControls() != 1 {
+ f.Fatalf("jumpTable block %s has no control value", b)
+ }
+ }
+ if len(b.Succs) != 2 && b.Likely != BranchUnknown {
+ f.Fatalf("likeliness prediction %d for block %s with %d successors", b.Likely, b, len(b.Succs))
+ }
+
+ for _, v := range b.Values {
+ // Check to make sure argument count makes sense (argLen of -1 indicates
+ // variable length args)
+ nArgs := opcodeTable[v.Op].argLen
+ if nArgs != -1 && int32(len(v.Args)) != nArgs {
+ f.Fatalf("value %s has %d args, expected %d", v.LongString(),
+ len(v.Args), nArgs)
+ }
+
+ // Check to make sure aux values make sense.
+ canHaveAux := false
+ canHaveAuxInt := false
+ // TODO: enforce types of Aux in this switch (like auxString does below)
+ switch opcodeTable[v.Op].auxType {
+ case auxNone:
+ case auxBool:
+ if v.AuxInt < 0 || v.AuxInt > 1 {
+ f.Fatalf("bad bool AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt8:
+ if v.AuxInt != int64(int8(v.AuxInt)) {
+ f.Fatalf("bad int8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt16:
+ if v.AuxInt != int64(int16(v.AuxInt)) {
+ f.Fatalf("bad int16 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt32:
+ if v.AuxInt != int64(int32(v.AuxInt)) {
+ f.Fatalf("bad int32 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt64, auxARM64BitField:
+ canHaveAuxInt = true
+ case auxInt128:
+ // AuxInt must be zero, so leave canHaveAuxInt set to false.
+ case auxUInt8:
+ if v.AuxInt != int64(uint8(v.AuxInt)) {
+ f.Fatalf("bad uint8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxFloat32:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
+ if !isExactFloat32(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
+ }
+ case auxFloat64:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
+ case auxString:
+ if _, ok := v.Aux.(stringAux); !ok {
+ f.Fatalf("value %v has Aux type %T, want string", v, v.Aux)
+ }
+ canHaveAux = true
+ case auxCallOff:
+ canHaveAuxInt = true
+ fallthrough
+ case auxCall:
+ if ac, ok := v.Aux.(*AuxCall); ok {
+ if v.Op == OpStaticCall && ac.Fn == nil {
+ f.Fatalf("value %v has *AuxCall with nil Fn", v)
+ }
+ } else {
+ f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux)
+ }
+ canHaveAux = true
+ case auxNameOffsetInt8:
+ if _, ok := v.Aux.(*AuxNameOffset); !ok {
+ f.Fatalf("value %v has Aux type %T, want *AuxNameOffset", v, v.Aux)
+ }
+ canHaveAux = true
+ canHaveAuxInt = true
+ case auxSym, auxTyp:
+ canHaveAux = true
+ case auxSymOff, auxSymValAndOff, auxTypSize:
+ canHaveAuxInt = true
+ canHaveAux = true
+ case auxCCop:
+ if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" {
+ f.Fatalf("value %v has an AuxInt value that is a valid opcode", v)
+ }
+ canHaveAuxInt = true
+ case auxS390XCCMask:
+ if _, ok := v.Aux.(s390x.CCMask); !ok {
+ f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v)
+ }
+ canHaveAux = true
+ case auxS390XRotateParams:
+ if _, ok := v.Aux.(s390x.RotateParams); !ok {
+ f.Fatalf("bad type %T for S390XRotateParams in %v", v.Aux, v)
+ }
+ canHaveAux = true
+ case auxFlagConstant:
+ if v.AuxInt < 0 || v.AuxInt > 15 {
+ f.Fatalf("bad FlagConstant AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ default:
+ f.Fatalf("unknown aux type for %s", v.Op)
+ }
+ if !canHaveAux && v.Aux != nil {
+ f.Fatalf("value %s has an Aux value %v but shouldn't", v.LongString(), v.Aux)
+ }
+ if !canHaveAuxInt && v.AuxInt != 0 {
+ f.Fatalf("value %s has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt)
+ }
+
+ for i, arg := range v.Args {
+ if arg == nil {
+ f.Fatalf("value %s has nil arg", v.LongString())
+ }
+ if v.Op != OpPhi {
+ // For non-Phi ops, memory args must be last, if present
+ if arg.Type.IsMemory() && i != len(v.Args)-1 {
+ f.Fatalf("value %s has non-final memory arg (%d < %d)", v.LongString(), i, len(v.Args)-1)
+ }
+ }
+ }
+
+ if valueMark[v.ID] {
+ f.Fatalf("value %s appears twice!", v.LongString())
+ }
+ valueMark[v.ID] = true
+
+ if v.Block != b {
+ f.Fatalf("%s.block != %s", v, b)
+ }
+ if v.Op == OpPhi && len(v.Args) != len(b.Preds) {
+ f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b)
+ }
+
+ if v.Op == OpAddr {
+ if len(v.Args) == 0 {
+ f.Fatalf("no args for OpAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSB {
+ f.Fatalf("bad arg to OpAddr %v", v)
+ }
+ }
+
+ if v.Op == OpLocalAddr {
+ if len(v.Args) != 2 {
+ f.Fatalf("wrong # of args for OpLocalAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSP {
+ f.Fatalf("bad arg 0 to OpLocalAddr %v", v)
+ }
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 to OpLocalAddr %v", v)
+ }
+ }
+
+ if f.RegAlloc != nil && f.Config.SoftFloat && v.Type.IsFloat() {
+ f.Fatalf("unexpected floating-point type %v", v.LongString())
+ }
+
+ // Check types.
+ // TODO: more type checks?
+ switch c := f.Config; v.Op {
+ case OpSP, OpSB:
+ if v.Type != c.Types.Uintptr {
+ f.Fatalf("bad %s type: want uintptr, have %s",
+ v.Op, v.Type.String())
+ }
+ case OpStringLen:
+ if v.Type != c.Types.Int {
+ f.Fatalf("bad %s type: want int, have %s",
+ v.Op, v.Type.String())
+ }
+ case OpLoad:
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 type to %s: want mem, have %s",
+ v.Op, v.Args[1].Type.String())
+ }
+ case OpStore:
+ if !v.Type.IsMemory() {
+ f.Fatalf("bad %s type: want mem, have %s",
+ v.Op, v.Type.String())
+ }
+ if !v.Args[2].Type.IsMemory() {
+ f.Fatalf("bad arg 2 type to %s: want mem, have %s",
+ v.Op, v.Args[2].Type.String())
+ }
+ case OpCondSelect:
+ if !v.Args[2].Type.IsBoolean() {
+ f.Fatalf("bad arg 2 type to %s: want boolean, have %s",
+ v.Op, v.Args[2].Type.String())
+ }
+ case OpAddPtr:
+ if !v.Args[0].Type.IsPtrShaped() && v.Args[0].Type != c.Types.Uintptr {
+ f.Fatalf("bad arg 0 type to %s: want ptr, have %s", v.Op, v.Args[0].LongString())
+ }
+ if !v.Args[1].Type.IsInteger() {
+ f.Fatalf("bad arg 1 type to %s: want integer, have %s", v.Op, v.Args[1].LongString())
+ }
+ case OpVarDef:
+ if !v.Aux.(*ir.Name).Type().HasPointers() {
+ f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String())
+ }
+ case OpNilCheck:
+ // nil checks have pointer type before scheduling, and
+ // void type after scheduling.
+ if f.scheduled {
+ if v.Uses != 0 {
+ f.Fatalf("nilcheck must have 0 uses %s", v.Uses)
+ }
+ if !v.Type.IsVoid() {
+ f.Fatalf("nilcheck must have void type %s", v.Type.String())
+ }
+ } else {
+ if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() {
+ f.Fatalf("nilcheck must have pointer type %s", v.Type.String())
+ }
+ }
+ if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() {
+ f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String())
+ }
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 type to %s: want mem, have %s",
+ v.Op, v.Args[1].Type.String())
+ }
+ }
+
+ // TODO: check for cycles in values
+ }
+ }
+
+ // Check to make sure all Blocks referenced are in the function.
+ if !blockMark[f.Entry.ID] {
+ f.Fatalf("entry block %v is missing", f.Entry)
+ }
+ for _, b := range f.Blocks {
+ for _, c := range b.Preds {
+ if !blockMark[c.b.ID] {
+ f.Fatalf("predecessor block %v for %v is missing", c, b)
+ }
+ }
+ for _, c := range b.Succs {
+ if !blockMark[c.b.ID] {
+ f.Fatalf("successor block %v for %v is missing", c, b)
+ }
+ }
+ }
+
+ if len(f.Entry.Preds) > 0 {
+ f.Fatalf("entry block %s of %s has predecessor(s) %v", f.Entry, f.Name, f.Entry.Preds)
+ }
+
+ // Check to make sure all Values referenced are in the function.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if !valueMark[a.ID] {
+ f.Fatalf("%v, arg %d of %s, is missing", a, i, v.LongString())
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !valueMark[c.ID] {
+ f.Fatalf("control value for %s is missing: %v", b, c)
+ }
+ }
+ }
+ for b := f.freeBlocks; b != nil; b = b.succstorage[0].b {
+ if blockMark[b.ID] {
+ f.Fatalf("used block b%d in free list", b.ID)
+ }
+ }
+ for v := f.freeValues; v != nil; v = v.argstorage[0] {
+ if valueMark[v.ID] {
+ f.Fatalf("used value v%d in free list", v.ID)
+ }
+ }
+
+ // Check to make sure all args dominate uses.
+ if f.RegAlloc == nil {
+ // Note: regalloc introduces non-dominating args.
+ // See TODO in regalloc.go.
+ sdom := f.Sdom()
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, arg := range v.Args {
+ x := arg.Block
+ y := b
+ if v.Op == OpPhi {
+ y = b.Preds[i].b
+ }
+ if !domCheck(f, sdom, x, y) {
+ f.Fatalf("arg %d of value %s does not dominate, arg=%s", i, v.LongString(), arg.LongString())
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !domCheck(f, sdom, c.Block, b) {
+ f.Fatalf("control value %s for %s doesn't dominate", c, b)
+ }
+ }
+ }
+ }
+
+ // Check loop construction
+ if f.RegAlloc == nil && f.pass != nil { // non-nil pass allows better-targeted debug printing
+ ln := f.loopnest()
+ if !ln.hasIrreducible {
+ po := f.postorder() // use po to avoid unreachable blocks.
+ for _, b := range po {
+ for _, s := range b.Succs {
+ bb := s.Block()
+ if ln.b2l[b.ID] == nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header {
+ f.Fatalf("block %s not in loop branches to non-header block %s in loop", b.String(), bb.String())
+ }
+ if ln.b2l[b.ID] != nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header && !ln.b2l[b.ID].isWithinOrEq(ln.b2l[bb.ID]) {
+ f.Fatalf("block %s in loop branches to non-header block %s in non-containing loop", b.String(), bb.String())
+ }
+ }
+ }
+ }
+ }
+
+ // Check use counts
+ uses := make([]int32, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ uses[a.ID]++
+ }
+ }
+ for _, c := range b.ControlValues() {
+ uses[c.ID]++
+ }
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Uses != uses[v.ID] {
+ f.Fatalf("%s has %d uses, but has Uses=%d", v, uses[v.ID], v.Uses)
+ }
+ }
+ }
+
+ memCheck(f)
+}
+
+func memCheck(f *Func) {
+ // Check that if a tuple has a memory type, it is second.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsTuple() && v.Type.FieldType(0).IsMemory() {
+ f.Fatalf("memory is first in a tuple: %s\n", v.LongString())
+ }
+ }
+ }
+
+ // Single live memory checks.
+ // These checks only work if there are no memory copies.
+ // (Memory copies introduce ambiguity about which mem value is really live.
+ // probably fixable, but it's easier to avoid the problem.)
+ // For the same reason, disable this check if some memory ops are unused.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if (v.Op == OpCopy || v.Uses == 0) && v.Type.IsMemory() {
+ return
+ }
+ }
+ if b != f.Entry && len(b.Preds) == 0 {
+ return
+ }
+ }
+
+ // Compute live memory at the end of each block.
+ lastmem := make([]*Value, f.NumBlocks())
+ ss := newSparseSet(f.NumValues())
+ for _, b := range f.Blocks {
+ // Mark overwritten memory values. Those are args of other
+ // ops that generate memory values.
+ ss.clear()
+ for _, v := range b.Values {
+ if v.Op == OpPhi || !v.Type.IsMemory() {
+ continue
+ }
+ if m := v.MemoryArg(); m != nil {
+ ss.add(m.ID)
+ }
+ }
+ // There should be at most one remaining unoverwritten memory value.
+ for _, v := range b.Values {
+ if !v.Type.IsMemory() {
+ continue
+ }
+ if ss.contains(v.ID) {
+ continue
+ }
+ if lastmem[b.ID] != nil {
+ f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], v)
+ }
+ lastmem[b.ID] = v
+ }
+ // If there is no remaining memory value, that means there was no memory update.
+ // Take any memory arg.
+ if lastmem[b.ID] == nil {
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ continue
+ }
+ m := v.MemoryArg()
+ if m == nil {
+ continue
+ }
+ if lastmem[b.ID] != nil && lastmem[b.ID] != m {
+ f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], m)
+ }
+ lastmem[b.ID] = m
+ }
+ }
+ }
+ // Propagate last live memory through storeless blocks.
+ for {
+ changed := false
+ for _, b := range f.Blocks {
+ if lastmem[b.ID] != nil {
+ continue
+ }
+ for _, e := range b.Preds {
+ p := e.b
+ if lastmem[p.ID] != nil {
+ lastmem[b.ID] = lastmem[p.ID]
+ changed = true
+ break
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Check merge points.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpPhi && v.Type.IsMemory() {
+ for i, a := range v.Args {
+ if a != lastmem[b.Preds[i].b.ID] {
+ f.Fatalf("inconsistent memory phi %s %d %s %s", v.LongString(), i, a, lastmem[b.Preds[i].b.ID])
+ }
+ }
+ }
+ }
+ }
+
+ // Check that only one memory is live at any point.
+ if f.scheduled {
+ for _, b := range f.Blocks {
+ var mem *Value // the current live memory in the block
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ continue
+ }
+ if mem == nil && len(b.Preds) > 0 {
+ // If no mem phi, take mem of any predecessor.
+ mem = lastmem[b.Preds[0].b.ID]
+ }
+ for _, a := range v.Args {
+ if a.Type.IsMemory() && a != mem {
+ f.Fatalf("two live mems @ %s: %s and %s", v, mem, a)
+ }
+ }
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ }
+ }
+ }
+
+ // Check that after scheduling, phis are always first in the block.
+ if f.scheduled {
+ for _, b := range f.Blocks {
+ seenNonPhi := false
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpPhi:
+ if seenNonPhi {
+ f.Fatalf("phi after non-phi @ %s: %s", b, v)
+ }
+ default:
+ seenNonPhi = true
+ }
+ }
+ }
+ }
+}
+
+// domCheck reports whether x dominates y (including x==y).
+func domCheck(f *Func, sdom SparseTree, x, y *Block) bool {
+ if !sdom.IsAncestorEq(f.Entry, y) {
+ // unreachable - ignore
+ return true
+ }
+ return sdom.IsAncestorEq(x, y)
+}
+
+// isExactFloat32 reports whether x can be exactly represented as a float32.
+func isExactFloat32(x float64) bool {
+ // Check the mantissa is in range.
+ if bits.TrailingZeros64(math.Float64bits(x)) < 52-23 {
+ return false
+ }
+ // Check the exponent is in range. The mantissa check above is sufficient for NaN values.
+ return math.IsNaN(x) || x == float64(float32(x))
+}
diff --git a/src/cmd/compile/internal/ssa/checkbce.go b/src/cmd/compile/internal/ssa/checkbce.go
new file mode 100644
index 0000000..6a9ce2b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/checkbce.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/logopt"
+
+// checkbce prints all bounds checks that are present in the function.
+// Useful to find regressions. checkbce is only activated when with
+// corresponding debug options, so it's off by default.
+// See test/checkbce.go
+func checkbce(f *Func) {
+ if f.pass.debug <= 0 && !logopt.Enabled() {
+ return
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
+ if f.pass.debug > 0 {
+ f.Warnl(v.Pos, "Found %v", v.Op)
+ }
+ if logopt.Enabled() {
+ if v.Op == OpIsInBounds {
+ logopt.LogOpt(v.Pos, "isInBounds", "checkbce", f.Name)
+ }
+ if v.Op == OpIsSliceInBounds {
+ logopt.LogOpt(v.Pos, "isSliceInBounds", "checkbce", f.Name)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
new file mode 100644
index 0000000..d125891
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -0,0 +1,613 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+ "hash/crc32"
+ "internal/buildcfg"
+ "io"
+ "log"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+// Compile is the main entry point for this package.
+// Compile modifies f so that on return:
+// - all Values in f map to 0 or 1 assembly instructions of the target architecture
+// - the order of f.Blocks is the order to emit the Blocks
+// - the order of b.Values is the order to emit the Values in each Block
+// - f has a non-nil regAlloc field
+func Compile(f *Func) {
+ // TODO: debugging - set flags to control verbosity of compiler,
+ // which phases to dump IR before/after, etc.
+ if f.Log() {
+ f.Logf("compiling %s\n", f.Name)
+ }
+
+ var rnd *rand.Rand
+ if checkEnabled {
+ seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed)
+ rnd = rand.New(rand.NewSource(seed))
+ }
+
+ // hook to print function & phase if panic happens
+ phaseName := "init"
+ defer func() {
+ if phaseName != "" {
+ err := recover()
+ stack := make([]byte, 16384)
+ n := runtime.Stack(stack, false)
+ stack = stack[:n]
+ if f.HTMLWriter != nil {
+ f.HTMLWriter.flushPhases()
+ }
+ f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack)
+ }
+ }()
+
+ // Run all the passes
+ if f.Log() {
+ printFunc(f)
+ }
+ f.HTMLWriter.WritePhase("start", "start")
+ if BuildDump[f.Name] {
+ f.dumpFile("build")
+ }
+ if checkEnabled {
+ checkFunc(f)
+ }
+ const logMemStats = false
+ for _, p := range passes {
+ if !f.Config.optimize && !p.required || p.disabled {
+ continue
+ }
+ f.pass = &p
+ phaseName = p.name
+ if f.Log() {
+ f.Logf(" pass %s begin\n", p.name)
+ }
+ // TODO: capture logging during this pass, add it to the HTML
+ var mStart runtime.MemStats
+ if logMemStats || p.mem {
+ runtime.ReadMemStats(&mStart)
+ }
+
+ if checkEnabled && !f.scheduled {
+ // Test that we don't depend on the value order, by randomizing
+ // the order of values in each block. See issue 18169.
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values)-1; i++ {
+ j := i + rnd.Intn(len(b.Values)-i)
+ b.Values[i], b.Values[j] = b.Values[j], b.Values[i]
+ }
+ }
+ }
+
+ tStart := time.Now()
+ p.fn(f)
+ tEnd := time.Now()
+
+ // Need something less crude than "Log the whole intermediate result".
+ if f.Log() || f.HTMLWriter != nil {
+ time := tEnd.Sub(tStart).Nanoseconds()
+ var stats string
+ if logMemStats {
+ var mEnd runtime.MemStats
+ runtime.ReadMemStats(&mEnd)
+ nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
+ nAllocs := mEnd.Mallocs - mStart.Mallocs
+ stats = fmt.Sprintf("[%d ns %d allocs %d bytes]", time, nAllocs, nBytes)
+ } else {
+ stats = fmt.Sprintf("[%d ns]", time)
+ }
+
+ if f.Log() {
+ f.Logf(" pass %s end %s\n", p.name, stats)
+ printFunc(f)
+ }
+ f.HTMLWriter.WritePhase(phaseName, fmt.Sprintf("%s <span class=\"stats\">%s</span>", phaseName, stats))
+ }
+ if p.time || p.mem {
+ // Surround timing information w/ enough context to allow comparisons.
+ time := tEnd.Sub(tStart).Nanoseconds()
+ if p.time {
+ f.LogStat("TIME(ns)", time)
+ }
+ if p.mem {
+ var mEnd runtime.MemStats
+ runtime.ReadMemStats(&mEnd)
+ nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
+ nAllocs := mEnd.Mallocs - mStart.Mallocs
+ f.LogStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs)
+ }
+ }
+ if p.dump != nil && p.dump[f.Name] {
+ // Dump function to appropriately named file
+ f.dumpFile(phaseName)
+ }
+ if checkEnabled {
+ checkFunc(f)
+ }
+ }
+
+ if f.HTMLWriter != nil {
+ // Ensure we write any pending phases to the html
+ f.HTMLWriter.flushPhases()
+ }
+
+ if f.ruleMatches != nil {
+ var keys []string
+ for key := range f.ruleMatches {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ buf := new(strings.Builder)
+ fmt.Fprintf(buf, "%s: ", f.Name)
+ for _, key := range keys {
+ fmt.Fprintf(buf, "%s=%d ", key, f.ruleMatches[key])
+ }
+ fmt.Fprint(buf, "\n")
+ fmt.Print(buf.String())
+ }
+
+ // Squash error printing defer
+ phaseName = ""
+}
+
+// DumpFileForPhase creates a file from the function name and phase name,
+// warning and returning nil if this is not possible.
+func (f *Func) DumpFileForPhase(phaseName string) io.WriteCloser {
+ f.dumpFileSeq++
+ fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, int(f.dumpFileSeq), phaseName)
+ fname = strings.Replace(fname, " ", "_", -1)
+ fname = strings.Replace(fname, "/", "_", -1)
+ fname = strings.Replace(fname, ":", "_", -1)
+
+ if ssaDir := os.Getenv("GOSSADIR"); ssaDir != "" {
+ fname = filepath.Join(ssaDir, fname)
+ }
+
+ fi, err := os.Create(fname)
+ if err != nil {
+ f.Warnl(src.NoXPos, "Unable to create after-phase dump file %s", fname)
+ return nil
+ }
+ return fi
+}
+
+// dumpFile creates a file from the phase name and function name
+// Dumping is done to files to avoid buffering huge strings before
+// output.
+func (f *Func) dumpFile(phaseName string) {
+ fi := f.DumpFileForPhase(phaseName)
+ if fi != nil {
+ p := stringFuncPrinter{w: fi}
+ fprintFunc(p, f)
+ fi.Close()
+ }
+}
+
+type pass struct {
+ name string
+ fn func(*Func)
+ required bool
+ disabled bool
+ time bool // report time to run pass
+ mem bool // report mem stats to run pass
+ stats int // pass reports own "stats" (e.g., branches removed)
+ debug int // pass performs some debugging. =1 should be in error-testing-friendly Warnl format.
+ test int // pass-specific ad-hoc option, perhaps useful in development
+ dump map[string]bool // dump if function name matches
+}
+
+func (p *pass) addDump(s string) {
+ if p.dump == nil {
+ p.dump = make(map[string]bool)
+ }
+ p.dump[s] = true
+}
+
+func (p *pass) String() string {
+ if p == nil {
+ return "nil pass"
+ }
+ return p.name
+}
+
+// Run consistency checker between each phase
+var (
+ checkEnabled = false
+ checkRandSeed = 0
+)
+
+// Debug output
+var IntrinsicsDebug int
+var IntrinsicsDisable bool
+
+var BuildDebug int
+var BuildTest int
+var BuildStats int
+var BuildDump map[string]bool = make(map[string]bool) // names of functions to dump after initial build of ssa
+
+var GenssaDump map[string]bool = make(map[string]bool) // names of functions to dump after ssa has been converted to asm
+
+// PhaseOption sets the specified flag in the specified ssa phase,
+// returning empty string if this was successful or a string explaining
+// the error if it was not.
+// A version of the phase name with "_" replaced by " " is also checked for a match.
+// If the phase name begins a '~' then the rest of the underscores-replaced-with-blanks
+// version is used as a regular expression to match the phase name(s).
+//
+// Special cases that have turned out to be useful:
+// - ssa/check/on enables checking after each phase
+// - ssa/all/time enables time reporting for all phases
+//
+// See gc/lex.go for dissection of the option string.
+// Example uses:
+//
+// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash
+//
+// BOOT_GO_GCFLAGS=-d='ssa/~^.*scc$/off' GO_GCFLAGS='-d=ssa/~^.*scc$/off' ./make.bash
+func PhaseOption(phase, flag string, val int, valString string) string {
+ switch phase {
+ case "", "help":
+ lastcr := 0
+ phasenames := " check, all, build, intrinsics, genssa"
+ for _, p := range passes {
+ pn := strings.Replace(p.name, " ", "_", -1)
+ if len(pn)+len(phasenames)-lastcr > 70 {
+ phasenames += "\n "
+ lastcr = len(phasenames)
+ phasenames += pn
+ } else {
+ phasenames += ", " + pn
+ }
+ }
+ return `PhaseOptions usage:
+
+ go tool compile -d=ssa/<phase>/<flag>[=<value>|<function_name>]
+
+where:
+
+- <phase> is one of:
+` + phasenames + `
+
+- <flag> is one of:
+ on, off, debug, mem, time, test, stats, dump, seed
+
+- <value> defaults to 1
+
+- <function_name> is required for the "dump" flag, and specifies the
+ name of function to dump after <phase>
+
+Phase "all" supports flags "time", "mem", and "dump".
+Phase "intrinsics" supports flags "on", "off", and "debug".
+Phase "genssa" (assembly generation) supports the flag "dump".
+
+If the "dump" flag is specified, the output is written on a file named
+<phase>__<function_name>_<seq>.dump; otherwise it is directed to stdout.
+
+Examples:
+
+ -d=ssa/check/on
+enables checking after each phase
+
+ -d=ssa/check/seed=1234
+enables checking after each phase, using 1234 to seed the PRNG
+used for value order randomization
+
+ -d=ssa/all/time
+enables time reporting for all phases
+
+ -d=ssa/prove/debug=2
+sets debugging level to 2 in the prove pass
+
+Be aware that when "/debug=X" is applied to a pass, some passes
+will emit debug output for all functions, and other passes will
+only emit debug output for functions that match the current
+GOSSAFUNC value.
+
+Multiple flags can be passed at once, by separating them with
+commas. For example:
+
+ -d=ssa/check/on,ssa/all/time
+`
+ }
+
+ if phase == "check" {
+ switch flag {
+ case "on":
+ checkEnabled = val != 0
+ debugPoset = checkEnabled // also turn on advanced self-checking in prove's data structure
+ return ""
+ case "off":
+ checkEnabled = val == 0
+ debugPoset = checkEnabled
+ return ""
+ case "seed":
+ checkEnabled = true
+ checkRandSeed = val
+ debugPoset = checkEnabled
+ return ""
+ }
+ }
+
+ alltime := false
+ allmem := false
+ alldump := false
+ if phase == "all" {
+ switch flag {
+ case "time":
+ alltime = val != 0
+ case "mem":
+ allmem = val != 0
+ case "dump":
+ alldump = val != 0
+ if alldump {
+ BuildDump[valString] = true
+ GenssaDump[valString] = true
+ }
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/all/{time,mem,dump=function_name})", flag, phase)
+ }
+ }
+
+ if phase == "intrinsics" {
+ switch flag {
+ case "on":
+ IntrinsicsDisable = val == 0
+ case "off":
+ IntrinsicsDisable = val != 0
+ case "debug":
+ IntrinsicsDebug = val
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/intrinsics/{on,off,debug})", flag, phase)
+ }
+ return ""
+ }
+ if phase == "build" {
+ switch flag {
+ case "debug":
+ BuildDebug = val
+ case "test":
+ BuildTest = val
+ case "stats":
+ BuildStats = val
+ case "dump":
+ BuildDump[valString] = true
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/build/{debug,test,stats,dump=function_name})", flag, phase)
+ }
+ return ""
+ }
+ if phase == "genssa" {
+ switch flag {
+ case "dump":
+ GenssaDump[valString] = true
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/genssa/dump=function_name)", flag, phase)
+ }
+ return ""
+ }
+
+ underphase := strings.Replace(phase, "_", " ", -1)
+ var re *regexp.Regexp
+ if phase[0] == '~' {
+ r, ok := regexp.Compile(underphase[1:])
+ if ok != nil {
+ return fmt.Sprintf("Error %s in regexp for phase %s, flag %s", ok.Error(), phase, flag)
+ }
+ re = r
+ }
+ matchedOne := false
+ for i, p := range passes {
+ if phase == "all" {
+ p.time = alltime
+ p.mem = allmem
+ if alldump {
+ p.addDump(valString)
+ }
+ passes[i] = p
+ matchedOne = true
+ } else if p.name == phase || p.name == underphase || re != nil && re.MatchString(p.name) {
+ switch flag {
+ case "on":
+ p.disabled = val == 0
+ case "off":
+ p.disabled = val != 0
+ case "time":
+ p.time = val != 0
+ case "mem":
+ p.mem = val != 0
+ case "debug":
+ p.debug = val
+ case "stats":
+ p.stats = val
+ case "test":
+ p.test = val
+ case "dump":
+ p.addDump(valString)
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ }
+ if p.disabled && p.required {
+ return fmt.Sprintf("Cannot disable required SSA phase %s using -d=ssa/%s debug option", phase, phase)
+ }
+ passes[i] = p
+ matchedOne = true
+ }
+ }
+ if matchedOne {
+ return ""
+ }
+ return fmt.Sprintf("Did not find a phase matching %s in -d=ssa/... debug option", phase)
+}
+
+// list of passes for the compiler
+var passes = [...]pass{
+ // TODO: combine phielim and copyelim into a single pass?
+ {name: "number lines", fn: numberLines, required: true},
+ {name: "early phielim", fn: phielim},
+ {name: "early copyelim", fn: copyelim},
+ {name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
+ {name: "short circuit", fn: shortcircuit},
+ {name: "decompose user", fn: decomposeUser, required: true},
+ {name: "pre-opt deadcode", fn: deadcode},
+ {name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
+ {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values
+ {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt
+ {name: "generic cse", fn: cse},
+ {name: "phiopt", fn: phiopt},
+ {name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
+ {name: "nilcheckelim", fn: nilcheckelim},
+ {name: "prove", fn: prove},
+ {name: "early fuse", fn: fuseEarly},
+ {name: "expand calls", fn: expandCalls, required: true},
+ {name: "decompose builtin", fn: postExpandCallsDecompose, required: true},
+ {name: "softfloat", fn: softfloat, required: true},
+ {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
+ {name: "dead auto elim", fn: elimDeadAutosGeneric},
+ {name: "sccp", fn: sccp},
+ {name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
+ {name: "check bce", fn: checkbce},
+ {name: "branchelim", fn: branchelim},
+ {name: "late fuse", fn: fuseLate},
+ {name: "dse", fn: dse},
+ {name: "memcombine", fn: memcombine},
+ {name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
+ {name: "insert resched checks", fn: insertLoopReschedChecks,
+ disabled: !buildcfg.Experiment.PreemptibleLoops}, // insert resched checks in loops.
+ {name: "lower", fn: lower, required: true},
+ {name: "addressing modes", fn: addressingModes, required: false},
+ {name: "late lower", fn: lateLower, required: true},
+ {name: "lowered deadcode for cse", fn: deadcode}, // deadcode immediately before CSE avoids CSE making dead values live again
+ {name: "lowered cse", fn: cse},
+ {name: "elim unread autos", fn: elimUnreadAutos},
+ {name: "tighten tuple selectors", fn: tightenTupleSelectors, required: true},
+ {name: "lowered deadcode", fn: deadcode, required: true},
+ {name: "checkLower", fn: checkLower, required: true},
+ {name: "late phielim", fn: phielim},
+ {name: "late copyelim", fn: copyelim},
+ {name: "tighten", fn: tighten, required: true}, // move values closer to their uses
+ {name: "late deadcode", fn: deadcode},
+ {name: "critical", fn: critical, required: true}, // remove critical edges
+ {name: "phi tighten", fn: phiTighten}, // place rematerializable phi args near uses to reduce value lifetimes
+ {name: "likelyadjust", fn: likelyadjust},
+ {name: "layout", fn: layout, required: true}, // schedule blocks
+ {name: "schedule", fn: schedule, required: true}, // schedule values
+ {name: "late nilcheck", fn: nilcheckelim2},
+ {name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
+ {name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots
+ {name: "loop rotate", fn: loopRotate},
+ {name: "trim", fn: trim}, // remove empty blocks
+}
+
+// Double-check phase ordering constraints.
+// This code is intended to document the ordering requirements
+// between different phases. It does not override the passes
+// list above.
+type constraint struct {
+ a, b string // a must come before b
+}
+
+var passOrder = [...]constraint{
+ // "insert resched checks" uses mem, better to clean out stores first.
+ {"dse", "insert resched checks"},
+ // insert resched checks adds new blocks containing generic instructions
+ {"insert resched checks", "lower"},
+ {"insert resched checks", "tighten"},
+
+ // prove relies on common-subexpression elimination for maximum benefits.
+ {"generic cse", "prove"},
+ // deadcode after prove to eliminate all new dead blocks.
+ {"prove", "generic deadcode"},
+ // common-subexpression before dead-store elim, so that we recognize
+ // when two address expressions are the same.
+ {"generic cse", "dse"},
+ // cse substantially improves nilcheckelim efficacy
+ {"generic cse", "nilcheckelim"},
+ // allow deadcode to clean up after nilcheckelim
+ {"nilcheckelim", "generic deadcode"},
+ // nilcheckelim generates sequences of plain basic blocks
+ {"nilcheckelim", "late fuse"},
+ // nilcheckelim relies on opt to rewrite user nil checks
+ {"opt", "nilcheckelim"},
+ // tighten will be most effective when as many values have been removed as possible
+ {"generic deadcode", "tighten"},
+ {"generic cse", "tighten"},
+ // checkbce needs the values removed
+ {"generic deadcode", "check bce"},
+ // decompose builtin now also cleans up after expand calls
+ {"expand calls", "decompose builtin"},
+ // don't run optimization pass until we've decomposed builtin objects
+ {"decompose builtin", "late opt"},
+ // decompose builtin is the last pass that may introduce new float ops, so run softfloat after it
+ {"decompose builtin", "softfloat"},
+ // tuple selectors must be tightened to generators and de-duplicated before scheduling
+ {"tighten tuple selectors", "schedule"},
+ // remove critical edges before phi tighten, so that phi args get better placement
+ {"critical", "phi tighten"},
+ // don't layout blocks until critical edges have been removed
+ {"critical", "layout"},
+ // regalloc requires the removal of all critical edges
+ {"critical", "regalloc"},
+ // regalloc requires all the values in a block to be scheduled
+ {"schedule", "regalloc"},
+ // the rules in late lower run after the general rules.
+ {"lower", "late lower"},
+ // late lower may generate some values that need to be CSEed.
+ {"late lower", "lowered cse"},
+ // checkLower must run after lowering & subsequent dead code elim
+ {"lower", "checkLower"},
+ {"lowered deadcode", "checkLower"},
+ {"late lower", "checkLower"},
+ // late nilcheck needs instructions to be scheduled.
+ {"schedule", "late nilcheck"},
+ // flagalloc needs instructions to be scheduled.
+ {"schedule", "flagalloc"},
+ // regalloc needs flags to be allocated first.
+ {"flagalloc", "regalloc"},
+ // loopRotate will confuse regalloc.
+ {"regalloc", "loop rotate"},
+ // trim needs regalloc to be done first.
+ {"regalloc", "trim"},
+ // memcombine works better if fuse happens first, to help merge stores.
+ {"late fuse", "memcombine"},
+ // memcombine is a arch-independent pass.
+ {"memcombine", "lower"},
+}
+
+func init() {
+ for _, c := range passOrder {
+ a, b := c.a, c.b
+ i := -1
+ j := -1
+ for k, p := range passes {
+ if p.name == a {
+ i = k
+ }
+ if p.name == b {
+ j = k
+ }
+ }
+ if i < 0 {
+ log.Panicf("pass %s not found", a)
+ }
+ if j < 0 {
+ log.Panicf("pass %s not found", b)
+ }
+ if i >= j {
+ log.Panicf("passes %s and %s out of order", a, b)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
new file mode 100644
index 0000000..debcf1a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -0,0 +1,420 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "internal/buildcfg"
+)
+
+// A Config holds readonly compilation information.
+// It is created once, early during compilation,
+// and shared across all compilations.
+type Config struct {
+ arch string // "amd64", etc.
+ PtrSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.PtrSize
+ RegSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.RegSize
+ Types Types
+ lowerBlock blockRewriter // block lowering function, first round
+ lowerValue valueRewriter // value lowering function, first round
+ lateLowerBlock blockRewriter // block lowering function that needs to be run after the first round; only used on some architectures
+ lateLowerValue valueRewriter // value lowering function that needs to be run after the first round; only used on some architectures
+ splitLoad valueRewriter // function for splitting merged load ops; only used on some architectures
+ registers []Register // machine registers
+ gpRegMask regMask // general purpose integer register mask
+ fpRegMask regMask // floating point register mask
+ fp32RegMask regMask // floating point register mask
+ fp64RegMask regMask // floating point register mask
+ specialRegMask regMask // special register mask
+ intParamRegs []int8 // register numbers of integer param (in/out) registers
+ floatParamRegs []int8 // register numbers of floating param (in/out) registers
+ ABI1 *abi.ABIConfig // "ABIInternal" under development // TODO change comment when this becomes current
+ ABI0 *abi.ABIConfig
+ GCRegMap []*Register // garbage collector register map, by GC register index
+ FPReg int8 // register number of frame pointer, -1 if not used
+ LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
+ hasGReg bool // has hardware g register
+ ctxt *obj.Link // Generic arch information
+ optimize bool // Do optimization
+ noDuffDevice bool // Don't use Duff's device
+ useSSE bool // Use SSE for non-float operations
+ useAvg bool // Use optimizations that need Avg* operations
+ useHmul bool // Use optimizations that need Hmul* operations
+ SoftFloat bool //
+ Race bool // race detector enabled
+ BigEndian bool //
+ UseFMA bool // Use hardware FMA operation
+ unalignedOK bool // Unaligned loads/stores are ok
+ haveBswap64 bool // architecture implements Bswap64
+ haveBswap32 bool // architecture implements Bswap32
+ haveBswap16 bool // architecture implements Bswap16
+}
+
+type (
+ blockRewriter func(*Block) bool
+ valueRewriter func(*Value) bool
+)
+
+type Types struct {
+ Bool *types.Type
+ Int8 *types.Type
+ Int16 *types.Type
+ Int32 *types.Type
+ Int64 *types.Type
+ UInt8 *types.Type
+ UInt16 *types.Type
+ UInt32 *types.Type
+ UInt64 *types.Type
+ Int *types.Type
+ Float32 *types.Type
+ Float64 *types.Type
+ UInt *types.Type
+ Uintptr *types.Type
+ String *types.Type
+ BytePtr *types.Type // TODO: use unsafe.Pointer instead?
+ Int32Ptr *types.Type
+ UInt32Ptr *types.Type
+ IntPtr *types.Type
+ UintptrPtr *types.Type
+ Float32Ptr *types.Type
+ Float64Ptr *types.Type
+ BytePtrPtr *types.Type
+}
+
+// NewTypes creates and populates a Types.
+func NewTypes() *Types {
+ t := new(Types)
+ t.SetTypPtrs()
+ return t
+}
+
+// SetTypPtrs populates t.
+func (t *Types) SetTypPtrs() {
+ t.Bool = types.Types[types.TBOOL]
+ t.Int8 = types.Types[types.TINT8]
+ t.Int16 = types.Types[types.TINT16]
+ t.Int32 = types.Types[types.TINT32]
+ t.Int64 = types.Types[types.TINT64]
+ t.UInt8 = types.Types[types.TUINT8]
+ t.UInt16 = types.Types[types.TUINT16]
+ t.UInt32 = types.Types[types.TUINT32]
+ t.UInt64 = types.Types[types.TUINT64]
+ t.Int = types.Types[types.TINT]
+ t.Float32 = types.Types[types.TFLOAT32]
+ t.Float64 = types.Types[types.TFLOAT64]
+ t.UInt = types.Types[types.TUINT]
+ t.Uintptr = types.Types[types.TUINTPTR]
+ t.String = types.Types[types.TSTRING]
+ t.BytePtr = types.NewPtr(types.Types[types.TUINT8])
+ t.Int32Ptr = types.NewPtr(types.Types[types.TINT32])
+ t.UInt32Ptr = types.NewPtr(types.Types[types.TUINT32])
+ t.IntPtr = types.NewPtr(types.Types[types.TINT])
+ t.UintptrPtr = types.NewPtr(types.Types[types.TUINTPTR])
+ t.Float32Ptr = types.NewPtr(types.Types[types.TFLOAT32])
+ t.Float64Ptr = types.NewPtr(types.Types[types.TFLOAT64])
+ t.BytePtrPtr = types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))
+}
+
+type Logger interface {
+ // Logf logs a message from the compiler.
+ Logf(string, ...interface{})
+
+ // Log reports whether logging is not a no-op
+ // some logging calls account for more than a few heap allocations.
+ Log() bool
+
+ // Fatal reports a compiler error and exits.
+ Fatalf(pos src.XPos, msg string, args ...interface{})
+
+ // Warnl writes compiler messages in the form expected by "errorcheck" tests
+ Warnl(pos src.XPos, fmt_ string, args ...interface{})
+
+ // Forwards the Debug flags from gc
+ Debug_checknil() bool
+}
+
+type Frontend interface {
+ Logger
+
+ // StringData returns a symbol pointing to the given string's contents.
+ StringData(string) *obj.LSym
+
+ // Given the name for a compound type, returns the name we should use
+ // for the parts of that compound type.
+ SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
+
+ // Syslook returns a symbol of the runtime function/variable with the
+ // given name.
+ Syslook(string) *obj.LSym
+
+ // UseWriteBarrier reports whether write barrier is enabled
+ UseWriteBarrier() bool
+
+ // Func returns the ir.Func of the function being compiled.
+ Func() *ir.Func
+}
+
+// NewConfig returns a new configuration object for the given architecture.
+func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config {
+ c := &Config{arch: arch, Types: types}
+ c.useAvg = true
+ c.useHmul = true
+ switch arch {
+ case "amd64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockAMD64
+ c.lowerValue = rewriteValueAMD64
+ c.lateLowerBlock = rewriteBlockAMD64latelower
+ c.lateLowerValue = rewriteValueAMD64latelower
+ c.splitLoad = rewriteValueAMD64splitload
+ c.registers = registersAMD64[:]
+ c.gpRegMask = gpRegMaskAMD64
+ c.fpRegMask = fpRegMaskAMD64
+ c.specialRegMask = specialRegMaskAMD64
+ c.intParamRegs = paramIntRegAMD64
+ c.floatParamRegs = paramFloatRegAMD64
+ c.FPReg = framepointerRegAMD64
+ c.LinkReg = linkRegAMD64
+ c.hasGReg = true
+ c.unalignedOK = true
+ c.haveBswap64 = true
+ c.haveBswap32 = true
+ c.haveBswap16 = true
+ case "386":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlock386
+ c.lowerValue = rewriteValue386
+ c.splitLoad = rewriteValue386splitload
+ c.registers = registers386[:]
+ c.gpRegMask = gpRegMask386
+ c.fpRegMask = fpRegMask386
+ c.FPReg = framepointerReg386
+ c.LinkReg = linkReg386
+ c.hasGReg = false
+ c.unalignedOK = true
+ c.haveBswap32 = true
+ c.haveBswap16 = true
+ case "arm":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlockARM
+ c.lowerValue = rewriteValueARM
+ c.registers = registersARM[:]
+ c.gpRegMask = gpRegMaskARM
+ c.fpRegMask = fpRegMaskARM
+ c.FPReg = framepointerRegARM
+ c.LinkReg = linkRegARM
+ c.hasGReg = true
+ case "arm64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockARM64
+ c.lowerValue = rewriteValueARM64
+ c.lateLowerBlock = rewriteBlockARM64latelower
+ c.lateLowerValue = rewriteValueARM64latelower
+ c.registers = registersARM64[:]
+ c.gpRegMask = gpRegMaskARM64
+ c.fpRegMask = fpRegMaskARM64
+ c.intParamRegs = paramIntRegARM64
+ c.floatParamRegs = paramFloatRegARM64
+ c.FPReg = framepointerRegARM64
+ c.LinkReg = linkRegARM64
+ c.hasGReg = true
+ c.unalignedOK = true
+ c.haveBswap64 = true
+ c.haveBswap32 = true
+ c.haveBswap16 = true
+ case "ppc64":
+ c.BigEndian = true
+ fallthrough
+ case "ppc64le":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockPPC64
+ c.lowerValue = rewriteValuePPC64
+ c.lateLowerBlock = rewriteBlockPPC64latelower
+ c.lateLowerValue = rewriteValuePPC64latelower
+ c.registers = registersPPC64[:]
+ c.gpRegMask = gpRegMaskPPC64
+ c.fpRegMask = fpRegMaskPPC64
+ c.specialRegMask = specialRegMaskPPC64
+ c.intParamRegs = paramIntRegPPC64
+ c.floatParamRegs = paramFloatRegPPC64
+ c.FPReg = framepointerRegPPC64
+ c.LinkReg = linkRegPPC64
+ c.hasGReg = true
+ c.unalignedOK = true
+ // Note: ppc64 has register bswap ops only when GOPPC64>=10.
+ // But it has bswap+load and bswap+store ops for all ppc64 variants.
+ // That is the sense we're using them here - they are only used
+ // in contexts where they can be merged with a load or store.
+ c.haveBswap64 = true
+ c.haveBswap32 = true
+ c.haveBswap16 = true
+ case "mips64":
+ c.BigEndian = true
+ fallthrough
+ case "mips64le":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockMIPS64
+ c.lowerValue = rewriteValueMIPS64
+ c.registers = registersMIPS64[:]
+ c.gpRegMask = gpRegMaskMIPS64
+ c.fpRegMask = fpRegMaskMIPS64
+ c.specialRegMask = specialRegMaskMIPS64
+ c.FPReg = framepointerRegMIPS64
+ c.LinkReg = linkRegMIPS64
+ c.hasGReg = true
+ case "loong64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockLOONG64
+ c.lowerValue = rewriteValueLOONG64
+ c.registers = registersLOONG64[:]
+ c.gpRegMask = gpRegMaskLOONG64
+ c.fpRegMask = fpRegMaskLOONG64
+ c.intParamRegs = paramIntRegLOONG64
+ c.floatParamRegs = paramFloatRegLOONG64
+ c.FPReg = framepointerRegLOONG64
+ c.LinkReg = linkRegLOONG64
+ c.hasGReg = true
+ case "s390x":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockS390X
+ c.lowerValue = rewriteValueS390X
+ c.registers = registersS390X[:]
+ c.gpRegMask = gpRegMaskS390X
+ c.fpRegMask = fpRegMaskS390X
+ c.FPReg = framepointerRegS390X
+ c.LinkReg = linkRegS390X
+ c.hasGReg = true
+ c.noDuffDevice = true
+ c.BigEndian = true
+ c.unalignedOK = true
+ c.haveBswap64 = true
+ c.haveBswap32 = true
+ c.haveBswap16 = true // only for loads&stores, see ppc64 comment
+ case "mips":
+ c.BigEndian = true
+ fallthrough
+ case "mipsle":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlockMIPS
+ c.lowerValue = rewriteValueMIPS
+ c.registers = registersMIPS[:]
+ c.gpRegMask = gpRegMaskMIPS
+ c.fpRegMask = fpRegMaskMIPS
+ c.specialRegMask = specialRegMaskMIPS
+ c.FPReg = framepointerRegMIPS
+ c.LinkReg = linkRegMIPS
+ c.hasGReg = true
+ c.noDuffDevice = true
+ case "riscv64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockRISCV64
+ c.lowerValue = rewriteValueRISCV64
+ c.lateLowerBlock = rewriteBlockRISCV64latelower
+ c.lateLowerValue = rewriteValueRISCV64latelower
+ c.registers = registersRISCV64[:]
+ c.gpRegMask = gpRegMaskRISCV64
+ c.fpRegMask = fpRegMaskRISCV64
+ c.intParamRegs = paramIntRegRISCV64
+ c.floatParamRegs = paramFloatRegRISCV64
+ c.FPReg = framepointerRegRISCV64
+ c.hasGReg = true
+ case "wasm":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockWasm
+ c.lowerValue = rewriteValueWasm
+ c.registers = registersWasm[:]
+ c.gpRegMask = gpRegMaskWasm
+ c.fpRegMask = fpRegMaskWasm
+ c.fp32RegMask = fp32RegMaskWasm
+ c.fp64RegMask = fp64RegMaskWasm
+ c.FPReg = framepointerRegWasm
+ c.LinkReg = linkRegWasm
+ c.hasGReg = true
+ c.noDuffDevice = true
+ c.useAvg = false
+ c.useHmul = false
+ default:
+ ctxt.Diag("arch %s not implemented", arch)
+ }
+ c.ctxt = ctxt
+ c.optimize = optimize
+ c.useSSE = true
+ c.UseFMA = true
+ c.SoftFloat = softfloat
+ if softfloat {
+ c.floatParamRegs = nil // no FP registers in softfloat mode
+ }
+
+ c.ABI0 = abi.NewABIConfig(0, 0, ctxt.Arch.FixedFrameSize, 0)
+ c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.Arch.FixedFrameSize, 1)
+
+ // On Plan 9, floating point operations are not allowed in note handler.
+ if buildcfg.GOOS == "plan9" {
+ // Don't use FMA on Plan 9
+ c.UseFMA = false
+
+ // Don't use Duff's device and SSE on Plan 9 AMD64.
+ if arch == "amd64" {
+ c.noDuffDevice = true
+ c.useSSE = false
+ }
+ }
+
+ if ctxt.Flag_shared {
+ // LoweredWB is secretly a CALL and CALLs on 386 in
+ // shared mode get rewritten by obj6.go to go through
+ // the GOT, which clobbers BX.
+ opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 3 // BX
+ }
+
+ // Create the GC register map index.
+ // TODO: This is only used for debug printing. Maybe export config.registers?
+ gcRegMapSize := int16(0)
+ for _, r := range c.registers {
+ if r.gcNum+1 > gcRegMapSize {
+ gcRegMapSize = r.gcNum + 1
+ }
+ }
+ c.GCRegMap = make([]*Register, gcRegMapSize)
+ for i, r := range c.registers {
+ if r.gcNum != -1 {
+ c.GCRegMap[r.gcNum] = &c.registers[i]
+ }
+ }
+
+ return c
+}
+
+func (c *Config) Ctxt() *obj.Link { return c.ctxt }
+
+func (c *Config) haveByteSwap(size int64) bool {
+ switch size {
+ case 8:
+ return c.haveBswap64
+ case 4:
+ return c.haveBswap32
+ case 2:
+ return c.haveBswap16
+ default:
+ base.Fatalf("bad size %d\n", size)
+ return false
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/copyelim.go b/src/cmd/compile/internal/ssa/copyelim.go
new file mode 100644
index 0000000..17f6512
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/copyelim.go
@@ -0,0 +1,84 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// copyelim removes all uses of OpCopy values from f.
+// A subsequent deadcode pass is needed to actually remove the copies.
+func copyelim(f *Func) {
+ // Modify all values so no arg (including args
+ // of OpCopy) is a copy.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ copyelimValue(v)
+ }
+ }
+
+ // Update block control values.
+ for _, b := range f.Blocks {
+ for i, v := range b.ControlValues() {
+ if v.Op == OpCopy {
+ b.ReplaceControl(i, v.Args[0])
+ }
+ }
+ }
+
+ // Update named values.
+ for _, name := range f.Names {
+ values := f.NamedValues[*name]
+ for i, v := range values {
+ if v.Op == OpCopy {
+ values[i] = v.Args[0]
+ }
+ }
+ }
+}
+
+// copySource returns the (non-copy) op which is the
+// ultimate source of v. v must be a copy op.
+func copySource(v *Value) *Value {
+ w := v.Args[0]
+
+ // This loop is just:
+ // for w.Op == OpCopy {
+ // w = w.Args[0]
+ // }
+ // but we take some extra care to make sure we
+ // don't get stuck in an infinite loop.
+ // Infinite copy loops may happen in unreachable code.
+ // (TODO: or can they? Needs a test.)
+ slow := w
+ var advance bool
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ if w == slow {
+ w.reset(OpUnknown)
+ break
+ }
+ if advance {
+ slow = slow.Args[0]
+ }
+ advance = !advance
+ }
+
+ // The answer is w. Update all the copies we saw
+ // to point directly to w. Doing this update makes
+ // sure that we don't end up doing O(n^2) work
+ // for a chain of n copies.
+ for v != w {
+ x := v.Args[0]
+ v.SetArg(0, w)
+ v = x
+ }
+ return w
+}
+
+// copyelimValue ensures that no args of v are copies.
+func copyelimValue(v *Value) {
+ for i, a := range v.Args {
+ if a.Op == OpCopy {
+ v.SetArg(i, copySource(a))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/copyelim_test.go b/src/cmd/compile/internal/ssa/copyelim_test.go
new file mode 100644
index 0000000..fe31b12
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/copyelim_test.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "testing"
+)
+
+func BenchmarkCopyElim1(b *testing.B) { benchmarkCopyElim(b, 1) }
+func BenchmarkCopyElim10(b *testing.B) { benchmarkCopyElim(b, 10) }
+func BenchmarkCopyElim100(b *testing.B) { benchmarkCopyElim(b, 100) }
+func BenchmarkCopyElim1000(b *testing.B) { benchmarkCopyElim(b, 1000) }
+func BenchmarkCopyElim10000(b *testing.B) { benchmarkCopyElim(b, 10000) }
+func BenchmarkCopyElim100000(b *testing.B) { benchmarkCopyElim(b, 100000) }
+
+func benchmarkCopyElim(b *testing.B, n int) {
+ c := testConfig(b)
+
+ values := make([]interface{}, 0, n+2)
+ values = append(values, Valu("mem", OpInitMem, types.TypeMem, 0, nil))
+ last := "mem"
+ for i := 0; i < n; i++ {
+ name := fmt.Sprintf("copy%d", i)
+ values = append(values, Valu(name, OpCopy, types.TypeMem, 0, nil, last))
+ last = name
+ }
+ values = append(values, Exit(last))
+ // Reverse values array to make it hard
+ for i := 0; i < len(values)/2; i++ {
+ values[i], values[len(values)-1-i] = values[len(values)-1-i], values[i]
+ }
+
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", Bloc("entry", values...))
+ Copyelim(fun.f)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/critical.go b/src/cmd/compile/internal/ssa/critical.go
new file mode 100644
index 0000000..f14bb93
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/critical.go
@@ -0,0 +1,111 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// critical splits critical edges (those that go from a block with
+// more than one outedge to a block with more than one inedge).
+// Regalloc wants a critical-edge-free CFG so it can implement phi values.
+func critical(f *Func) {
+ // maps from phi arg ID to the new block created for that argument
+ blocks := f.Cache.allocBlockSlice(f.NumValues())
+ defer f.Cache.freeBlockSlice(blocks)
+ // need to iterate over f.Blocks without range, as we might
+ // need to split critical edges on newly constructed blocks
+ for j := 0; j < len(f.Blocks); j++ {
+ b := f.Blocks[j]
+ if len(b.Preds) <= 1 {
+ continue
+ }
+
+ var phi *Value
+ // determine if we've only got a single phi in this
+ // block, this is easier to handle than the general
+ // case of a block with multiple phi values.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if phi != nil {
+ phi = nil
+ break
+ }
+ phi = v
+ }
+ }
+
+ // reset our block map
+ if phi != nil {
+ for _, v := range phi.Args {
+ blocks[v.ID] = nil
+ }
+ }
+
+ // split input edges coming from multi-output blocks.
+ for i := 0; i < len(b.Preds); {
+ e := b.Preds[i]
+ p := e.b
+ pi := e.i
+ if p.Kind == BlockPlain {
+ i++
+ continue // only single output block
+ }
+
+ var d *Block // new block used to remove critical edge
+ reusedBlock := false // if true, then this is not the first use of this block
+ if phi != nil {
+ argID := phi.Args[i].ID
+ // find or record the block that we used to split
+ // critical edges for this argument
+ if d = blocks[argID]; d == nil {
+ // splitting doesn't necessarily remove the critical edge,
+ // since we're iterating over len(f.Blocks) above, this forces
+ // the new blocks to be re-examined.
+ d = f.NewBlock(BlockPlain)
+ d.Pos = p.Pos
+ blocks[argID] = d
+ if f.pass.debug > 0 {
+ f.Warnl(p.Pos, "split critical edge")
+ }
+ } else {
+ reusedBlock = true
+ }
+ } else {
+ // no existing block, so allocate a new block
+ // to place on the edge
+ d = f.NewBlock(BlockPlain)
+ d.Pos = p.Pos
+ if f.pass.debug > 0 {
+ f.Warnl(p.Pos, "split critical edge")
+ }
+ }
+
+ // if this not the first argument for the
+ // block, then we need to remove the
+ // corresponding elements from the block
+ // predecessors and phi args
+ if reusedBlock {
+ // Add p->d edge
+ p.Succs[pi] = Edge{d, len(d.Preds)}
+ d.Preds = append(d.Preds, Edge{p, pi})
+
+ // Remove p as a predecessor from b.
+ b.removePred(i)
+
+ // Update corresponding phi args
+ b.removePhiArg(phi, i)
+
+ // splitting occasionally leads to a phi having
+ // a single argument (occurs with -N)
+ // Don't increment i in this case because we moved
+ // an unprocessed predecessor down into slot i.
+ } else {
+ // splice it in
+ p.Succs[pi] = Edge{d, 0}
+ b.Preds[i] = Edge{d, 0}
+ d.Preds = append(d.Preds, Edge{p, pi})
+ d.Succs = append(d.Succs, Edge{b, i})
+ i++
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
new file mode 100644
index 0000000..d649797
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -0,0 +1,378 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+// cse does common-subexpression elimination on the Function.
+// Values are just relinked, nothing is deleted. A subsequent deadcode
+// pass is required to actually remove duplicate expressions.
+func cse(f *Func) {
+ // Two values are equivalent if they satisfy the following definition:
+ // equivalent(v, w):
+ // v.op == w.op
+ // v.type == w.type
+ // v.aux == w.aux
+ // v.auxint == w.auxint
+ // len(v.args) == len(w.args)
+ // v.block == w.block if v.op == OpPhi
+ // equivalent(v.args[i], w.args[i]) for i in 0..len(v.args)-1
+
+ // The algorithm searches for a partition of f's values into
+ // equivalence classes using the above definition.
+ // It starts with a coarse partition and iteratively refines it
+ // until it reaches a fixed point.
+
+ // Make initial coarse partitions by using a subset of the conditions above.
+ a := f.Cache.allocValueSlice(f.NumValues())
+ defer func() { f.Cache.freeValueSlice(a) }() // inside closure to use final value of a
+ a = a[:0]
+ if f.auxmap == nil {
+ f.auxmap = auxmap{}
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsMemory() {
+ continue // memory values can never cse
+ }
+ if f.auxmap[v.Aux] == 0 {
+ f.auxmap[v.Aux] = int32(len(f.auxmap)) + 1
+ }
+ a = append(a, v)
+ }
+ }
+ partition := partitionValues(a, f.auxmap)
+
+ // map from value id back to eqclass id
+ valueEqClass := f.Cache.allocIDSlice(f.NumValues())
+ defer f.Cache.freeIDSlice(valueEqClass)
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // Use negative equivalence class #s for unique values.
+ valueEqClass[v.ID] = -v.ID
+ }
+ }
+ var pNum ID = 1
+ for _, e := range partition {
+ if f.pass.debug > 1 && len(e) > 500 {
+ fmt.Printf("CSE.large partition (%d): ", len(e))
+ for j := 0; j < 3; j++ {
+ fmt.Printf("%s ", e[j].LongString())
+ }
+ fmt.Println()
+ }
+
+ for _, v := range e {
+ valueEqClass[v.ID] = pNum
+ }
+ if f.pass.debug > 2 && len(e) > 1 {
+ fmt.Printf("CSE.partition #%d:", pNum)
+ for _, v := range e {
+ fmt.Printf(" %s", v.String())
+ }
+ fmt.Printf("\n")
+ }
+ pNum++
+ }
+
+ // Split equivalence classes at points where they have
+ // non-equivalent arguments. Repeat until we can't find any
+ // more splits.
+ var splitPoints []int
+ byArgClass := new(partitionByArgClass) // reusable partitionByArgClass to reduce allocations
+ for {
+ changed := false
+
+ // partition can grow in the loop. By not using a range loop here,
+ // we process new additions as they arrive, avoiding O(n^2) behavior.
+ for i := 0; i < len(partition); i++ {
+ e := partition[i]
+
+ if opcodeTable[e[0].Op].commutative {
+ // Order the first two args before comparison.
+ for _, v := range e {
+ if valueEqClass[v.Args[0].ID] > valueEqClass[v.Args[1].ID] {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ }
+ }
+
+ // Sort by eq class of arguments.
+ byArgClass.a = e
+ byArgClass.eqClass = valueEqClass
+ sort.Sort(byArgClass)
+
+ // Find split points.
+ splitPoints = append(splitPoints[:0], 0)
+ for j := 1; j < len(e); j++ {
+ v, w := e[j-1], e[j]
+ // Note: commutative args already correctly ordered by byArgClass.
+ eqArgs := true
+ for k, a := range v.Args {
+ b := w.Args[k]
+ if valueEqClass[a.ID] != valueEqClass[b.ID] {
+ eqArgs = false
+ break
+ }
+ }
+ if !eqArgs {
+ splitPoints = append(splitPoints, j)
+ }
+ }
+ if len(splitPoints) == 1 {
+ continue // no splits, leave equivalence class alone.
+ }
+
+ // Move another equivalence class down in place of e.
+ partition[i] = partition[len(partition)-1]
+ partition = partition[:len(partition)-1]
+ i--
+
+ // Add new equivalence classes for the parts of e we found.
+ splitPoints = append(splitPoints, len(e))
+ for j := 0; j < len(splitPoints)-1; j++ {
+ f := e[splitPoints[j]:splitPoints[j+1]]
+ if len(f) == 1 {
+ // Don't add singletons.
+ valueEqClass[f[0].ID] = -f[0].ID
+ continue
+ }
+ for _, v := range f {
+ valueEqClass[v.ID] = pNum
+ }
+ pNum++
+ partition = append(partition, f)
+ }
+ changed = true
+ }
+
+ if !changed {
+ break
+ }
+ }
+
+ sdom := f.Sdom()
+
+ // Compute substitutions we would like to do. We substitute v for w
+ // if v and w are in the same equivalence class and v dominates w.
+ rewrite := f.Cache.allocValueSlice(f.NumValues())
+ defer f.Cache.freeValueSlice(rewrite)
+ byDom := new(partitionByDom) // reusable partitionByDom to reduce allocs
+ for _, e := range partition {
+ byDom.a = e
+ byDom.sdom = sdom
+ sort.Sort(byDom)
+ for i := 0; i < len(e)-1; i++ {
+ // e is sorted by domorder, so a maximal dominant element is first in the slice
+ v := e[i]
+ if v == nil {
+ continue
+ }
+
+ e[i] = nil
+ // Replace all elements of e which v dominates
+ for j := i + 1; j < len(e); j++ {
+ w := e[j]
+ if w == nil {
+ continue
+ }
+ if sdom.IsAncestorEq(v.Block, w.Block) {
+ rewrite[w.ID] = v
+ e[j] = nil
+ } else {
+ // e is sorted by domorder, so v.Block doesn't dominate any subsequent blocks in e
+ break
+ }
+ }
+ }
+ }
+
+ rewrites := int64(0)
+
+ // Apply substitutions
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, w := range v.Args {
+ if x := rewrite[w.ID]; x != nil {
+ if w.Pos.IsStmt() == src.PosIsStmt {
+ // about to lose a statement marker, w
+ // w is an input to v; if they're in the same block
+ // and the same line, v is a good-enough new statement boundary.
+ if w.Block == v.Block && w.Pos.Line() == v.Pos.Line() {
+ v.Pos = v.Pos.WithIsStmt()
+ w.Pos = w.Pos.WithNotStmt()
+ } // TODO and if this fails?
+ }
+ v.SetArg(i, x)
+ rewrites++
+ }
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if x := rewrite[v.ID]; x != nil {
+ if v.Op == OpNilCheck {
+ // nilcheck pass will remove the nil checks and log
+ // them appropriately, so don't mess with them here.
+ continue
+ }
+ b.ReplaceControl(i, x)
+ }
+ }
+ }
+
+ if f.pass.stats > 0 {
+ f.LogStat("CSE REWRITES", rewrites)
+ }
+}
+
+// An eqclass approximates an equivalence class. During the
+// algorithm it may represent the union of several of the
+// final equivalence classes.
+type eqclass []*Value
+
+// partitionValues partitions the values into equivalence classes
+// based on having all the following features match:
+// - opcode
+// - type
+// - auxint
+// - aux
+// - nargs
+// - block # if a phi op
+// - first two arg's opcodes and auxint
+// - NOT first two arg's aux; that can break CSE.
+//
+// partitionValues returns a list of equivalence classes, each
+// being a sorted by ID list of *Values. The eqclass slices are
+// backed by the same storage as the input slice.
+// Equivalence classes of size 1 are ignored.
+func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
+ sort.Sort(sortvalues{a, auxIDs})
+
+ var partition []eqclass
+ for len(a) > 0 {
+ v := a[0]
+ j := 1
+ for ; j < len(a); j++ {
+ w := a[j]
+ if cmpVal(v, w, auxIDs) != types.CMPeq {
+ break
+ }
+ }
+ if j > 1 {
+ partition = append(partition, a[:j])
+ }
+ a = a[j:]
+ }
+
+ return partition
+}
+func lt2Cmp(isLt bool) types.Cmp {
+ if isLt {
+ return types.CMPlt
+ }
+ return types.CMPgt
+}
+
+type auxmap map[Aux]int32
+
+func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
+ // Try to order these comparison by cost (cheaper first)
+ if v.Op != w.Op {
+ return lt2Cmp(v.Op < w.Op)
+ }
+ if v.AuxInt != w.AuxInt {
+ return lt2Cmp(v.AuxInt < w.AuxInt)
+ }
+ if len(v.Args) != len(w.Args) {
+ return lt2Cmp(len(v.Args) < len(w.Args))
+ }
+ if v.Op == OpPhi && v.Block != w.Block {
+ return lt2Cmp(v.Block.ID < w.Block.ID)
+ }
+ if v.Type.IsMemory() {
+ // We will never be able to CSE two values
+ // that generate memory.
+ return lt2Cmp(v.ID < w.ID)
+ }
+ // OpSelect is a pseudo-op. We need to be more aggressive
+ // regarding CSE to keep multiple OpSelect's of the same
+ // argument from existing.
+ if v.Op != OpSelect0 && v.Op != OpSelect1 && v.Op != OpSelectN {
+ if tc := v.Type.Compare(w.Type); tc != types.CMPeq {
+ return tc
+ }
+ }
+
+ if v.Aux != w.Aux {
+ if v.Aux == nil {
+ return types.CMPlt
+ }
+ if w.Aux == nil {
+ return types.CMPgt
+ }
+ return lt2Cmp(auxIDs[v.Aux] < auxIDs[w.Aux])
+ }
+
+ return types.CMPeq
+}
+
+// Sort values to make the initial partition.
+type sortvalues struct {
+ a []*Value // array of values
+ auxIDs auxmap // aux -> aux ID map
+}
+
+func (sv sortvalues) Len() int { return len(sv.a) }
+func (sv sortvalues) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv sortvalues) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ if cmp := cmpVal(v, w, sv.auxIDs); cmp != types.CMPeq {
+ return cmp == types.CMPlt
+ }
+
+ // Sort by value ID last to keep the sort result deterministic.
+ return v.ID < w.ID
+}
+
+type partitionByDom struct {
+ a []*Value // array of values
+ sdom SparseTree
+}
+
+func (sv partitionByDom) Len() int { return len(sv.a) }
+func (sv partitionByDom) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv partitionByDom) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ return sv.sdom.domorder(v.Block) < sv.sdom.domorder(w.Block)
+}
+
+type partitionByArgClass struct {
+ a []*Value // array of values
+ eqClass []ID // equivalence class IDs of values
+}
+
+func (sv partitionByArgClass) Len() int { return len(sv.a) }
+func (sv partitionByArgClass) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv partitionByArgClass) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ for i, a := range v.Args {
+ b := w.Args[i]
+ if sv.eqClass[a.ID] < sv.eqClass[b.ID] {
+ return true
+ }
+ if sv.eqClass[a.ID] > sv.eqClass[b.ID] {
+ return false
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
new file mode 100644
index 0000000..7d3e44f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -0,0 +1,130 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+type tstAux struct {
+ s string
+}
+
+func (*tstAux) CanBeAnSSAAux() {}
+
+// This tests for a bug found when partitioning, but not sorting by the Aux value.
+func TestCSEAuxPartitionBug(t *testing.T) {
+ c := testConfig(t)
+ arg1Aux := &tstAux{"arg1-aux"}
+ arg2Aux := &tstAux{"arg2-aux"}
+ arg3Aux := &tstAux{"arg3-aux"}
+ a := c.Temp(c.config.Types.Int8.PtrTo())
+
+ // construct lots of values with args that have aux values and place
+ // them in an order that triggers the bug
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("r7", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg1"),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, arg1Aux),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, arg2Aux),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, arg3Aux),
+ Valu("r9", OpAdd64, c.config.Types.Int64, 0, nil, "r7", "r8"),
+ Valu("r4", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("r8", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg2"),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"),
+ Valu("r6", OpAdd64, c.config.Types.Int64, 0, nil, "r4", "r5"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("r5", OpAdd64, c.config.Types.Int64, 0, nil, "r2", "r3"),
+ Valu("r10", OpAdd64, c.config.Types.Int64, 0, nil, "r6", "r9"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r10", "raddrdef"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("rstore")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ s1Cnt := 2
+ // r1 == r2 == r3, needs to remove two of this set
+ s2Cnt := 1
+ // r4 == r5, needs to remove one of these
+ for k, v := range fun.values {
+ if v.Op == OpInvalid {
+ switch k {
+ case "r1":
+ fallthrough
+ case "r2":
+ fallthrough
+ case "r3":
+ if s1Cnt == 0 {
+ t.Errorf("cse removed all of r1,r2,r3")
+ }
+ s1Cnt--
+
+ case "r4":
+ fallthrough
+ case "r5":
+ if s2Cnt == 0 {
+ t.Errorf("cse removed all of r4,r5")
+ }
+ s2Cnt--
+ default:
+ t.Errorf("cse removed %s, but shouldn't have", k)
+ }
+ }
+ }
+
+ if s1Cnt != 0 || s2Cnt != 0 {
+ t.Errorf("%d values missed during cse", s1Cnt+s2Cnt)
+ }
+}
+
+// TestZCSE tests the zero arg cse.
+func TestZCSE(t *testing.T) {
+ c := testConfig(t)
+ a := c.Temp(c.config.Types.Int8.PtrTo())
+
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("sb1", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sb2", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("addr1", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb1"),
+ Valu("addr2", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb2"),
+ Valu("a1ld", OpLoad, c.config.Types.Int64, 0, nil, "addr1", "start"),
+ Valu("a2ld", OpLoad, c.config.Types.Int64, 0, nil, "addr2", "start"),
+ Valu("c1", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "a1ld", "c1"),
+ Valu("c2", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "a2ld", "c2"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r3", "raddrdef"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("rstore")))
+
+ CheckFunc(fun.f)
+ zcse(fun.f)
+ deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.values["c1"].Op != OpInvalid && fun.values["c2"].Op != OpInvalid {
+ t.Errorf("zsce should have removed c1 or c2")
+ }
+ if fun.values["sb1"].Op != OpInvalid && fun.values["sb2"].Op != OpInvalid {
+ t.Errorf("zsce should have removed sb1 or sb2")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
new file mode 100644
index 0000000..3bd1737
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -0,0 +1,366 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+)
+
+// findlive returns the reachable blocks and live values in f.
+// The caller should call f.Cache.freeBoolSlice(live) when it is done with it.
+func findlive(f *Func) (reachable []bool, live []bool) {
+ reachable = ReachableBlocks(f)
+ var order []*Value
+ live, order = liveValues(f, reachable)
+ f.Cache.freeValueSlice(order)
+ return
+}
+
+// ReachableBlocks returns the reachable blocks in f.
+func ReachableBlocks(f *Func) []bool {
+ reachable := make([]bool, f.NumBlocks())
+ reachable[f.Entry.ID] = true
+ p := make([]*Block, 0, 64) // stack-like worklist
+ p = append(p, f.Entry)
+ for len(p) > 0 {
+ // Pop a reachable block
+ b := p[len(p)-1]
+ p = p[:len(p)-1]
+ // Mark successors as reachable
+ s := b.Succs
+ if b.Kind == BlockFirst {
+ s = s[:1]
+ }
+ for _, e := range s {
+ c := e.b
+ if int(c.ID) >= len(reachable) {
+ f.Fatalf("block %s >= f.NumBlocks()=%d?", c, len(reachable))
+ }
+ if !reachable[c.ID] {
+ reachable[c.ID] = true
+ p = append(p, c) // push
+ }
+ }
+ }
+ return reachable
+}
+
+// liveValues returns the live values in f and a list of values that are eligible
+// to be statements in reversed data flow order.
+// The second result is used to help conserve statement boundaries for debugging.
+// reachable is a map from block ID to whether the block is reachable.
+// The caller should call f.Cache.freeBoolSlice(live) and f.Cache.freeValueSlice(liveOrderStmts).
+// when they are done with the return values.
+func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value) {
+ live = f.Cache.allocBoolSlice(f.NumValues())
+ liveOrderStmts = f.Cache.allocValueSlice(f.NumValues())[:0]
+
+ // After regalloc, consider all values to be live.
+ // See the comment at the top of regalloc.go and in deadcode for details.
+ if f.RegAlloc != nil {
+ for i := range live {
+ live[i] = true
+ }
+ return
+ }
+
+ // Record all the inline indexes we need
+ var liveInlIdx map[int]bool
+ pt := f.Config.ctxt.PosTable
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ i := pt.Pos(v.Pos).Base().InliningIndex()
+ if i < 0 {
+ continue
+ }
+ if liveInlIdx == nil {
+ liveInlIdx = map[int]bool{}
+ }
+ liveInlIdx[i] = true
+ }
+ i := pt.Pos(b.Pos).Base().InliningIndex()
+ if i < 0 {
+ continue
+ }
+ if liveInlIdx == nil {
+ liveInlIdx = map[int]bool{}
+ }
+ liveInlIdx[i] = true
+ }
+
+ // Find all live values
+ q := f.Cache.allocValueSlice(f.NumValues())[:0]
+ defer f.Cache.freeValueSlice(q)
+
+ // Starting set: all control values of reachable blocks are live.
+ // Calls are live (because callee can observe the memory state).
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ for _, v := range b.ControlValues() {
+ if !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ }
+ for _, v := range b.Values {
+ if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ if v.Op == OpInlMark {
+ if !liveInlIdx[int(v.AuxInt)] {
+ // We don't need marks for bodies that
+ // have been completely optimized away.
+ // TODO: save marks only for bodies which
+ // have a faulting instruction or a call?
+ continue
+ }
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ }
+ }
+
+ // Compute transitive closure of live values.
+ for len(q) > 0 {
+ // pop a reachable value
+ v := q[len(q)-1]
+ q[len(q)-1] = nil
+ q = q[:len(q)-1]
+ for i, x := range v.Args {
+ if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] {
+ continue
+ }
+ if !live[x.ID] {
+ live[x.ID] = true
+ q = append(q, x) // push
+ if x.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, x)
+ }
+ }
+ }
+ }
+
+ return
+}
+
+// deadcode removes dead code from f.
+func deadcode(f *Func) {
+ // deadcode after regalloc is forbidden for now. Regalloc
+ // doesn't quite generate legal SSA which will lead to some
+ // required moves being eliminated. See the comment at the
+ // top of regalloc.go for details.
+ if f.RegAlloc != nil {
+ f.Fatalf("deadcode after regalloc")
+ }
+
+ // Find reachable blocks.
+ reachable := ReachableBlocks(f)
+
+ // Get rid of edges from dead to live code.
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ continue
+ }
+ for i := 0; i < len(b.Succs); {
+ e := b.Succs[i]
+ if reachable[e.b.ID] {
+ b.removeEdge(i)
+ } else {
+ i++
+ }
+ }
+ }
+
+ // Get rid of dead edges from live code.
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ if b.Kind != BlockFirst {
+ continue
+ }
+ b.removeEdge(1)
+ b.Kind = BlockPlain
+ b.Likely = BranchUnknown
+ }
+
+ // Splice out any copies introduced during dead block removal.
+ copyelim(f)
+
+ // Find live values.
+ live, order := liveValues(f, reachable)
+ defer func() { f.Cache.freeBoolSlice(live) }()
+ defer func() { f.Cache.freeValueSlice(order) }()
+
+ // Remove dead & duplicate entries from namedValues map.
+ s := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(s)
+ i := 0
+ for _, name := range f.Names {
+ j := 0
+ s.clear()
+ values := f.NamedValues[*name]
+ for _, v := range values {
+ if live[v.ID] && !s.contains(v.ID) {
+ values[j] = v
+ j++
+ s.add(v.ID)
+ }
+ }
+ if j == 0 {
+ delete(f.NamedValues, *name)
+ } else {
+ f.Names[i] = name
+ i++
+ for k := len(values) - 1; k >= j; k-- {
+ values[k] = nil
+ }
+ f.NamedValues[*name] = values[:j]
+ }
+ }
+ clearNames := f.Names[i:]
+ for j := range clearNames {
+ clearNames[j] = nil
+ }
+ f.Names = f.Names[:i]
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+
+ // Unlink values and conserve statement boundaries
+ for i, b := range f.Blocks {
+ if !reachable[b.ID] {
+ // TODO what if control is statement boundary? Too late here.
+ b.ResetControls()
+ }
+ for _, v := range b.Values {
+ if !live[v.ID] {
+ v.resetArgs()
+ if v.Pos.IsStmt() == src.PosIsStmt && reachable[b.ID] {
+ pendingLines.set(v.Pos, int32(i)) // TODO could be more than one pos for a line
+ }
+ }
+ }
+ }
+
+ // Find new homes for lost lines -- require earliest in data flow with same line that is also in same block
+ for i := len(order) - 1; i >= 0; i-- {
+ w := order[i]
+ if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block {
+ w.Pos = w.Pos.WithIsStmt()
+ pendingLines.remove(w.Pos)
+ }
+ }
+
+ // Any boundary that failed to match a live value can move to a block end
+ pendingLines.foreachEntry(func(j int32, l uint, bi int32) {
+ b := f.Blocks[bi]
+ if b.Pos.Line() == l && b.Pos.FileIndex() == j {
+ b.Pos = b.Pos.WithIsStmt()
+ }
+ })
+
+ // Remove dead values from blocks' value list. Return dead
+ // values to the allocator.
+ for _, b := range f.Blocks {
+ i := 0
+ for _, v := range b.Values {
+ if live[v.ID] {
+ b.Values[i] = v
+ i++
+ } else {
+ f.freeValue(v)
+ }
+ }
+ b.truncateValues(i)
+ }
+
+ // Remove unreachable blocks. Return dead blocks to allocator.
+ i = 0
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ f.Blocks[i] = b
+ i++
+ } else {
+ if len(b.Values) > 0 {
+ b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
+ }
+ f.freeBlock(b)
+ }
+ }
+ // zero remainder to help GC
+ tail := f.Blocks[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ f.Blocks = f.Blocks[:i]
+}
+
+// removeEdge removes the i'th outgoing edge from b (and
+// the corresponding incoming edge from b.Succs[i].b).
+// Note that this potentially reorders successors of b, so it
+// must be used very carefully.
+func (b *Block) removeEdge(i int) {
+ e := b.Succs[i]
+ c := e.b
+ j := e.i
+
+ // Adjust b.Succs
+ b.removeSucc(i)
+
+ // Adjust c.Preds
+ c.removePred(j)
+
+ // Remove phi args from c's phis.
+ for _, v := range c.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ c.removePhiArg(v, j)
+ // Note: this is trickier than it looks. Replacing
+ // a Phi with a Copy can in general cause problems because
+ // Phi and Copy don't have exactly the same semantics.
+ // Phi arguments always come from a predecessor block,
+ // whereas copies don't. This matters in loops like:
+ // 1: x = (Phi y)
+ // y = (Add x 1)
+ // goto 1
+ // If we replace Phi->Copy, we get
+ // 1: x = (Copy y)
+ // y = (Add x 1)
+ // goto 1
+ // (Phi y) refers to the *previous* value of y, whereas
+ // (Copy y) refers to the *current* value of y.
+ // The modified code has a cycle and the scheduler
+ // will barf on it.
+ //
+ // Fortunately, this situation can only happen for dead
+ // code loops. We know the code we're working with is
+ // not dead, so we're ok.
+ // Proof: If we have a potential bad cycle, we have a
+ // situation like this:
+ // x = (Phi z)
+ // y = (op1 x ...)
+ // z = (op2 y ...)
+ // Where opX are not Phi ops. But such a situation
+ // implies a cycle in the dominator graph. In the
+ // example, x.Block dominates y.Block, y.Block dominates
+ // z.Block, and z.Block dominates x.Block (treating
+ // "dominates" as reflexive). Cycles in the dominator
+ // graph can only happen in an unreachable cycle.
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadcode_test.go b/src/cmd/compile/internal/ssa/deadcode_test.go
new file mode 100644
index 0000000..5777b84
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadcode_test.go
@@ -0,0 +1,161 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestDeadLoop(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")),
+ // dead loop
+ Bloc("deadblock",
+ // dead value in dead block
+ Valu("deadval", OpConstBool, c.config.Types.Bool, 1, nil),
+ If("deadval", "deadblock", "exit")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["deadblock"] {
+ t.Errorf("dead block not removed")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("control value of dead block not removed")
+ }
+ }
+ }
+}
+
+func TestDeadValue(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("deadval", OpConst64, c.config.Types.Int64, 37, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("dead value not removed")
+ }
+ }
+ }
+}
+
+func TestNeverTaken(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ If("cond", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] {
+ t.Errorf("then block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+
+}
+
+func TestNestedDeadBlocks(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ If("cond", "b2", "b4")),
+ Bloc("b2",
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ If("cond", "b3", "b4")),
+ Bloc("b4",
+ If("cond", "b3", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["b2"] {
+ t.Errorf("b2 block still present")
+ }
+ if b == fun.blocks["b3"] {
+ t.Errorf("b3 block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+}
+
+func BenchmarkDeadCode(b *testing.B) {
+ for _, n := range [...]int{1, 10, 100, 1000, 10000, 100000, 200000} {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ c := testConfig(b)
+ blocks := make([]bloc, 0, n+2)
+ blocks = append(blocks,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")))
+ blocks = append(blocks, Bloc("exit", Exit("mem")))
+ for i := 0; i < n; i++ {
+ blocks = append(blocks, Bloc(fmt.Sprintf("dead%d", i), Goto("exit")))
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", blocks...)
+ Deadcode(fun.f)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
new file mode 100644
index 0000000..cb34271
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -0,0 +1,397 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// dse does dead-store elimination on the Function.
+// Dead stores are those which are unconditionally followed by
+// another store to the same location, with no intervening load.
+// This implementation only works within a basic block. TODO: use something more global.
+func dse(f *Func) {
+ var stores []*Value
+ loadUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(loadUse)
+ storeUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(storeUse)
+ shadowed := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(shadowed)
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // loadUse contains stores which are used by a subsequent load.
+ // storeUse contains stores which are used by a subsequent store.
+ loadUse.clear()
+ storeUse.clear()
+ stores = stores[:0]
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // Ignore phis - they will always be first and can't be eliminated
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef {
+ // CALL, DUFFCOPY, etc. are both
+ // reads and writes.
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ } else {
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ continue
+ }
+
+ // find last store in the block
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString())
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+
+ // Walk backwards looking for dead stores. Keep track of shadowed addresses.
+ // A "shadowed address" is a pointer, offset, and size describing a memory region that
+ // is known to be written. We keep track of shadowed addresses in the shadowed map,
+ // mapping the ID of the address to a shadowRange where future writes will happen.
+ // Since we're walking backwards, writes to a shadowed region are useless,
+ // as they will be immediately overwritten.
+ shadowed.clear()
+ v := last
+
+ walkloop:
+ if loadUse.contains(v.ID) {
+ // Someone might be reading this memory state.
+ // Clear all shadowed addresses.
+ shadowed.clear()
+ }
+ if v.Op == OpStore || v.Op == OpZero {
+ ptr := v.Args[0]
+ var off int64
+ for ptr.Op == OpOffPtr { // Walk to base pointer
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ var sz int64
+ if v.Op == OpStore {
+ sz = v.Aux.(*types.Type).Size()
+ } else { // OpZero
+ sz = v.AuxInt
+ }
+ sr := shadowRange(shadowed.get(ptr.ID))
+ if sr.contains(off, off+sz) {
+ // Modify the store/zero into a copy of the memory state,
+ // effectively eliding the store operation.
+ if v.Op == OpStore {
+ // store addr value mem
+ v.SetArgs1(v.Args[2])
+ } else {
+ // zero addr mem
+ v.SetArgs1(v.Args[1])
+ }
+ v.Aux = nil
+ v.AuxInt = 0
+ v.Op = OpCopy
+ } else {
+ // Extend shadowed region.
+ shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
+ }
+ }
+ // walk to previous store
+ if v.Op == OpPhi {
+ // At start of block. Move on to next block.
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ // (Even if it isn't the first in the current b.Values order.)
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ v = a
+ goto walkloop
+ }
+ }
+ }
+}
+
+// A shadowRange encodes a set of byte offsets [lo():hi()] from
+// a given pointer that will be written to later in the block.
+// A zero shadowRange encodes an empty shadowed range (and so
+// does a -1 shadowRange, which is what sparsemap.get returns
+// on a failed lookup).
+type shadowRange int32
+
+func (sr shadowRange) lo() int64 {
+ return int64(sr & 0xffff)
+}
+func (sr shadowRange) hi() int64 {
+ return int64((sr >> 16) & 0xffff)
+}
+
+// contains reports whether [lo:hi] is completely within sr.
+func (sr shadowRange) contains(lo, hi int64) bool {
+ return lo >= sr.lo() && hi <= sr.hi()
+}
+
+// merge returns the union of sr and [lo:hi].
+// merge is allowed to return something smaller than the union.
+func (sr shadowRange) merge(lo, hi int64) shadowRange {
+ if lo < 0 || hi > 0xffff {
+ // Ignore offsets that are too large or small.
+ return sr
+ }
+ if sr.lo() == sr.hi() {
+ // Old range is empty - use new one.
+ return shadowRange(lo + hi<<16)
+ }
+ if hi < sr.lo() || lo > sr.hi() {
+ // The two regions don't overlap or abut, so we would
+ // have to keep track of multiple disjoint ranges.
+ // Because we can only keep one, keep the larger one.
+ if sr.hi()-sr.lo() >= hi-lo {
+ return sr
+ }
+ return shadowRange(lo + hi<<16)
+ }
+ // Regions overlap or abut - compute the union.
+ return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
+}
+
+// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
+// we track the operations that the address of each auto reaches and if it only
+// reaches stores then we delete all the stores. The other operations will then
+// be eliminated by the dead code elimination pass.
+func elimDeadAutosGeneric(f *Func) {
+ addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
+ elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
+ var used ir.NameSet // used autos that must be kept
+
+ // visit the value and report whether any of the maps are updated
+ visit := func(v *Value) (changed bool) {
+ args := v.Args
+ switch v.Op {
+ case OpAddr, OpLocalAddr:
+ // Propagate the address if it points to an auto.
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class != ir.PAUTO {
+ return
+ }
+ if addr[v] == nil {
+ addr[v] = n
+ changed = true
+ }
+ return
+ case OpVarDef:
+ // v should be eliminated if we eliminate the auto.
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class != ir.PAUTO {
+ return
+ }
+ if elim[v] == nil {
+ elim[v] = n
+ changed = true
+ }
+ return
+ case OpVarLive:
+ // Don't delete the auto if it needs to be kept alive.
+
+ // We depend on this check to keep the autotmp stack slots
+ // for open-coded defers from being removed (since they
+ // may not be used by the inline code, but will be used by
+ // panic processing).
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class != ir.PAUTO {
+ return
+ }
+ if !used.Has(n) {
+ used.Add(n)
+ changed = true
+ }
+ return
+ case OpStore, OpMove, OpZero:
+ // v should be eliminated if we eliminate the auto.
+ n, ok := addr[args[0]]
+ if ok && elim[v] == nil {
+ elim[v] = n
+ changed = true
+ }
+ // Other args might hold pointers to autos.
+ args = args[1:]
+ }
+
+ // The code below assumes that we have handled all the ops
+ // with sym effects already. Sanity check that here.
+ // Ignore Args since they can't be autos.
+ if v.Op.SymEffect() != SymNone && v.Op != OpArg {
+ panic("unhandled op with sym effect")
+ }
+
+ if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
+ // We need to keep nil checks even if they have no use.
+ // Also keep calls and values that have side effects.
+ return
+ }
+
+ // If the address of the auto reaches a memory or control
+ // operation not covered above then we probably need to keep it.
+ // We also need to keep autos if they reach Phis (issue #26153).
+ if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
+ for _, a := range args {
+ if n, ok := addr[a]; ok {
+ if !used.Has(n) {
+ used.Add(n)
+ changed = true
+ }
+ }
+ }
+ return
+ }
+
+ // Propagate any auto addresses through v.
+ var node *ir.Name
+ for _, a := range args {
+ if n, ok := addr[a]; ok && !used.Has(n) {
+ if node == nil {
+ node = n
+ } else if node != n {
+ // Most of the time we only see one pointer
+ // reaching an op, but some ops can take
+ // multiple pointers (e.g. NeqPtr, Phi etc.).
+ // This is rare, so just propagate the first
+ // value to keep things simple.
+ used.Add(n)
+ changed = true
+ }
+ }
+ }
+ if node == nil {
+ return
+ }
+ if addr[v] == nil {
+ // The address of an auto reaches this op.
+ addr[v] = node
+ changed = true
+ return
+ }
+ if addr[v] != node {
+ // This doesn't happen in practice, but catch it just in case.
+ used.Add(node)
+ changed = true
+ }
+ return
+ }
+
+ iterations := 0
+ for {
+ if iterations == 4 {
+ // give up
+ return
+ }
+ iterations++
+ changed := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ changed = visit(v) || changed
+ }
+ // keep the auto if its address reaches a control value
+ for _, c := range b.ControlValues() {
+ if n, ok := addr[c]; ok && !used.Has(n) {
+ used.Add(n)
+ changed = true
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ // Eliminate stores to unread autos.
+ for v, n := range elim {
+ if used.Has(n) {
+ continue
+ }
+ // replace with OpCopy
+ v.SetArgs1(v.MemoryArg())
+ v.Aux = nil
+ v.AuxInt = 0
+ v.Op = OpCopy
+ }
+}
+
+// elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill)
+// to autos that are never read from.
+func elimUnreadAutos(f *Func) {
+ // Loop over all ops that affect autos taking note of which
+ // autos we need and also stores that we might be able to
+ // eliminate.
+ var seen ir.NameSet
+ var stores []*Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ n, ok := v.Aux.(*ir.Name)
+ if !ok {
+ continue
+ }
+ if n.Class != ir.PAUTO {
+ continue
+ }
+
+ effect := v.Op.SymEffect()
+ switch effect {
+ case SymNone, SymWrite:
+ // If we haven't seen the auto yet
+ // then this might be a store we can
+ // eliminate.
+ if !seen.Has(n) {
+ stores = append(stores, v)
+ }
+ default:
+ // Assume the auto is needed (loaded,
+ // has its address taken, etc.).
+ // Note we have to check the uses
+ // because dead loads haven't been
+ // eliminated yet.
+ if v.Uses > 0 {
+ seen.Add(n)
+ }
+ }
+ }
+ }
+
+ // Eliminate stores to unread autos.
+ for _, store := range stores {
+ n, _ := store.Aux.(*ir.Name)
+ if seen.Has(n) {
+ continue
+ }
+
+ // replace store with OpCopy
+ store.SetArgs1(store.MemoryArg())
+ store.Aux = nil
+ store.AuxInt = 0
+ store.Op = OpCopy
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadstore_test.go b/src/cmd/compile/internal/ssa/deadstore_test.go
new file mode 100644
index 0000000..33cb4b9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadstore_test.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestDeadStore(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ t.Logf("PTRTYPE %v", ptrType)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr2", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr3", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("zero1", OpZero, types.TypeMem, 1, c.config.Types.Bool, "addr3", "start"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "zero1"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Valu("store3", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store2"),
+ Valu("store4", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr3", "v", "store3"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store3")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v1 := fun.values["store1"]
+ if v1.Op != OpCopy {
+ t.Errorf("dead store not removed")
+ }
+
+ v2 := fun.values["zero1"]
+ if v2.Op != OpCopy {
+ t.Errorf("dead store (zero) not removed")
+ }
+}
+func TestDeadStorePhi(t *testing.T) {
+ // make sure we don't get into an infinite loop with phi values.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "store"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr", "v", "phi"),
+ If("v", "loop", "exit")),
+ Bloc("exit",
+ Exit("store")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+}
+
+func TestDeadStoreTypes(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. We test an even
+ // stronger restriction, that one store can't shadow another unless the
+ // types of the address fields are identical (where identicalness is
+ // decided by the CSE pass).
+ c := testConfig(t)
+ t1 := c.config.Types.UInt64.PtrTo()
+ t2 := c.config.Types.UInt32.PtrTo()
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, t1, 0, nil, "sb"),
+ Valu("addr2", OpAddr, t2, 0, nil, "sb"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "start"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
+
+func TestDeadStoreUnsafe(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. The test above
+ // covers the case of two different types, but unsafe pointer casting
+ // can get to a point where the size is changed but type unchanged.
+ c := testConfig(t)
+ ptrType := c.config.Types.UInt64.PtrTo()
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "addr1", "v", "start"), // store 8 bytes
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store1"), // store 1 byte
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
new file mode 100644
index 0000000..05a7278
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -0,0 +1,1886 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/abt"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "encoding/hex"
+ "fmt"
+ "internal/buildcfg"
+ "math/bits"
+ "sort"
+ "strings"
+)
+
+type SlotID int32
+type VarID int32
+
+// A FuncDebug contains all the debug information for the variables in a
+// function. Variables are identified by their LocalSlot, which may be
+// the result of decomposing a larger variable.
+type FuncDebug struct {
+ // Slots is all the slots used in the debug info, indexed by their SlotID.
+ Slots []LocalSlot
+ // The user variables, indexed by VarID.
+ Vars []*ir.Name
+ // The slots that make up each variable, indexed by VarID.
+ VarSlots [][]SlotID
+ // The location list data, indexed by VarID. Must be processed by PutLocationList.
+ LocationLists [][]byte
+ // Register-resident output parameters for the function. This is filled in at
+ // SSA generation time.
+ RegOutputParams []*ir.Name
+ // Variable declarations that were removed during optimization
+ OptDcl []*ir.Name
+
+ // Filled in by the user. Translates Block and Value ID to PC.
+ //
+ // NOTE: block is only used if value is BlockStart.ID or BlockEnd.ID.
+ // Otherwise, it is ignored.
+ GetPC func(block, value ID) int64
+}
+
+type BlockDebug struct {
+ // State at the start and end of the block. These are initialized,
+ // and updated from new information that flows on back edges.
+ startState, endState abt.T
+ // Use these to avoid excess work in the merge. If none of the
+ // predecessors has changed since the last check, the old answer is
+ // still good.
+ lastCheckedTime, lastChangedTime int32
+ // Whether the block had any changes to user variables at all.
+ relevant bool
+ // false until the block has been processed at least once. This
+ // affects how the merge is done; the goal is to maximize sharing
+ // and avoid allocation.
+ everProcessed bool
+}
+
+// A liveSlot is a slot that's live in loc at entry/exit of a block.
+type liveSlot struct {
+ VarLoc
+}
+
+func (ls *liveSlot) String() string {
+ return fmt.Sprintf("0x%x.%d.%d", ls.Registers, ls.stackOffsetValue(), int32(ls.StackOffset)&1)
+}
+
+func (ls liveSlot) absent() bool {
+ return ls.Registers == 0 && !ls.onStack()
+}
+
+// StackOffset encodes whether a value is on the stack and if so, where.
+// It is a 31-bit integer followed by a presence flag at the low-order
+// bit.
+type StackOffset int32
+
+func (s StackOffset) onStack() bool {
+ return s != 0
+}
+
+func (s StackOffset) stackOffsetValue() int32 {
+ return int32(s) >> 1
+}
+
+// stateAtPC is the current state of all variables at some point.
+type stateAtPC struct {
+ // The location of each known slot, indexed by SlotID.
+ slots []VarLoc
+ // The slots present in each register, indexed by register number.
+ registers [][]SlotID
+}
+
+// reset fills state with the live variables from live.
+func (state *stateAtPC) reset(live abt.T) {
+ slots, registers := state.slots, state.registers
+ for i := range slots {
+ slots[i] = VarLoc{}
+ }
+ for i := range registers {
+ registers[i] = registers[i][:0]
+ }
+ for it := live.Iterator(); !it.Done(); {
+ k, d := it.Next()
+ live := d.(*liveSlot)
+ slots[k] = live.VarLoc
+ if live.VarLoc.Registers == 0 {
+ continue
+ }
+
+ mask := uint64(live.VarLoc.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+
+ registers[reg] = append(registers[reg], SlotID(k))
+ }
+ }
+ state.slots, state.registers = slots, registers
+}
+
+func (s *debugState) LocString(loc VarLoc) string {
+ if loc.absent() {
+ return "<nil>"
+ }
+
+ var storage []string
+ if loc.onStack() {
+ storage = append(storage, fmt.Sprintf("@%+d", loc.stackOffsetValue()))
+ }
+
+ mask := uint64(loc.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+
+ storage = append(storage, s.registers[reg].String())
+ }
+ return strings.Join(storage, ",")
+}
+
+// A VarLoc describes the storage for part of a user variable.
+type VarLoc struct {
+ // The registers this variable is available in. There can be more than
+ // one in various situations, e.g. it's being moved between registers.
+ Registers RegisterSet
+
+ StackOffset
+}
+
+func (loc VarLoc) absent() bool {
+ return loc.Registers == 0 && !loc.onStack()
+}
+
+func (loc VarLoc) intersect(other VarLoc) VarLoc {
+ if !loc.onStack() || !other.onStack() || loc.StackOffset != other.StackOffset {
+ loc.StackOffset = 0
+ }
+ loc.Registers &= other.Registers
+ return loc
+}
+
+var BlockStart = &Value{
+ ID: -10000,
+ Op: OpInvalid,
+ Aux: StringToAux("BlockStart"),
+}
+
+var BlockEnd = &Value{
+ ID: -20000,
+ Op: OpInvalid,
+ Aux: StringToAux("BlockEnd"),
+}
+
+var FuncEnd = &Value{
+ ID: -30000,
+ Op: OpInvalid,
+ Aux: StringToAux("FuncEnd"),
+}
+
+// RegisterSet is a bitmap of registers, indexed by Register.num.
+type RegisterSet uint64
+
+// logf prints debug-specific logging to stdout (always stdout) if the
+// current function is tagged by GOSSAFUNC (for ssa output directed
+// either to stdout or html).
+func (s *debugState) logf(msg string, args ...interface{}) {
+ if s.f.PrintOrHtmlSSA {
+ fmt.Printf(msg, args...)
+ }
+}
+
+type debugState struct {
+ // See FuncDebug.
+ slots []LocalSlot
+ vars []*ir.Name
+ varSlots [][]SlotID
+ lists [][]byte
+
+ // The user variable that each slot rolls up to, indexed by SlotID.
+ slotVars []VarID
+
+ f *Func
+ loggingLevel int
+ convergeCount int // testing; iterate over block debug state this many times
+ registers []Register
+ stackOffset func(LocalSlot) int32
+ ctxt *obj.Link
+
+ // The names (slots) associated with each value, indexed by Value ID.
+ valueNames [][]SlotID
+
+ // The current state of whatever analysis is running.
+ currentState stateAtPC
+ changedVars *sparseSet
+ changedSlots *sparseSet
+
+ // The pending location list entry for each user variable, indexed by VarID.
+ pendingEntries []pendingEntry
+
+ varParts map[*ir.Name][]SlotID
+ blockDebug []BlockDebug
+ pendingSlotLocs []VarLoc
+ partsByVarOffset sort.Interface
+}
+
+func (state *debugState) initializeCache(f *Func, numVars, numSlots int) {
+ // One blockDebug per block. Initialized in allocBlock.
+ if cap(state.blockDebug) < f.NumBlocks() {
+ state.blockDebug = make([]BlockDebug, f.NumBlocks())
+ } else {
+ // This local variable, and the ones like it below, enable compiler
+ // optimizations. Don't inline them.
+ b := state.blockDebug[:f.NumBlocks()]
+ for i := range b {
+ b[i] = BlockDebug{}
+ }
+ }
+
+ // A list of slots per Value. Reuse the previous child slices.
+ if cap(state.valueNames) < f.NumValues() {
+ old := state.valueNames
+ state.valueNames = make([][]SlotID, f.NumValues())
+ copy(state.valueNames, old)
+ }
+ vn := state.valueNames[:f.NumValues()]
+ for i := range vn {
+ vn[i] = vn[i][:0]
+ }
+
+ // Slot and register contents for currentState. Cleared by reset().
+ if cap(state.currentState.slots) < numSlots {
+ state.currentState.slots = make([]VarLoc, numSlots)
+ } else {
+ state.currentState.slots = state.currentState.slots[:numSlots]
+ }
+ if cap(state.currentState.registers) < len(state.registers) {
+ state.currentState.registers = make([][]SlotID, len(state.registers))
+ } else {
+ state.currentState.registers = state.currentState.registers[:len(state.registers)]
+ }
+
+ // A relatively small slice, but used many times as the return from processValue.
+ state.changedVars = newSparseSet(numVars)
+ state.changedSlots = newSparseSet(numSlots)
+
+ // A pending entry per user variable, with space to track each of its pieces.
+ numPieces := 0
+ for i := range state.varSlots {
+ numPieces += len(state.varSlots[i])
+ }
+ if cap(state.pendingSlotLocs) < numPieces {
+ state.pendingSlotLocs = make([]VarLoc, numPieces)
+ } else {
+ psl := state.pendingSlotLocs[:numPieces]
+ for i := range psl {
+ psl[i] = VarLoc{}
+ }
+ }
+ if cap(state.pendingEntries) < numVars {
+ state.pendingEntries = make([]pendingEntry, numVars)
+ }
+ pe := state.pendingEntries[:numVars]
+ freePieceIdx := 0
+ for varID, slots := range state.varSlots {
+ pe[varID] = pendingEntry{
+ pieces: state.pendingSlotLocs[freePieceIdx : freePieceIdx+len(slots)],
+ }
+ freePieceIdx += len(slots)
+ }
+ state.pendingEntries = pe
+
+ if cap(state.lists) < numVars {
+ state.lists = make([][]byte, numVars)
+ } else {
+ state.lists = state.lists[:numVars]
+ for i := range state.lists {
+ state.lists[i] = nil
+ }
+ }
+}
+
+func (state *debugState) allocBlock(b *Block) *BlockDebug {
+ return &state.blockDebug[b.ID]
+}
+
+func (s *debugState) blockEndStateString(b *BlockDebug) string {
+ endState := stateAtPC{slots: make([]VarLoc, len(s.slots)), registers: make([][]SlotID, len(s.registers))}
+ endState.reset(b.endState)
+ return s.stateString(endState)
+}
+
+func (s *debugState) stateString(state stateAtPC) string {
+ var strs []string
+ for slotID, loc := range state.slots {
+ if !loc.absent() {
+ strs = append(strs, fmt.Sprintf("\t%v = %v\n", s.slots[slotID], s.LocString(loc)))
+ }
+ }
+
+ strs = append(strs, "\n")
+ for reg, slots := range state.registers {
+ if len(slots) != 0 {
+ var slotStrs []string
+ for _, slot := range slots {
+ slotStrs = append(slotStrs, s.slots[slot].String())
+ }
+ strs = append(strs, fmt.Sprintf("\t%v = %v\n", &s.registers[reg], slotStrs))
+ }
+ }
+
+ if len(strs) == 1 {
+ return "(no vars)\n"
+ }
+ return strings.Join(strs, "")
+}
+
+// slotCanonicalizer is a table used to lookup and canonicalize
+// LocalSlot's in a type insensitive way (e.g. taking into account the
+// base name, offset, and width of the slot, but ignoring the slot
+// type).
+type slotCanonicalizer struct {
+ slmap map[slotKey]SlKeyIdx
+ slkeys []LocalSlot
+}
+
+func newSlotCanonicalizer() *slotCanonicalizer {
+ return &slotCanonicalizer{
+ slmap: make(map[slotKey]SlKeyIdx),
+ slkeys: []LocalSlot{LocalSlot{N: nil}},
+ }
+}
+
+type SlKeyIdx uint32
+
+const noSlot = SlKeyIdx(0)
+
+// slotKey is a type-insensitive encapsulation of a LocalSlot; it
+// is used to key a map within slotCanonicalizer.
+type slotKey struct {
+ name *ir.Name
+ offset int64
+ width int64
+ splitOf SlKeyIdx // idx in slkeys slice in slotCanonicalizer
+ splitOffset int64
+}
+
+// lookup looks up a LocalSlot in the slot canonicalizer "sc", returning
+// a canonical index for the slot, and adding it to the table if need
+// be. Return value is the canonical slot index, and a boolean indicating
+// whether the slot was found in the table already (TRUE => found).
+func (sc *slotCanonicalizer) lookup(ls LocalSlot) (SlKeyIdx, bool) {
+ split := noSlot
+ if ls.SplitOf != nil {
+ split, _ = sc.lookup(*ls.SplitOf)
+ }
+ k := slotKey{
+ name: ls.N, offset: ls.Off, width: ls.Type.Size(),
+ splitOf: split, splitOffset: ls.SplitOffset,
+ }
+ if idx, ok := sc.slmap[k]; ok {
+ return idx, true
+ }
+ rv := SlKeyIdx(len(sc.slkeys))
+ sc.slkeys = append(sc.slkeys, ls)
+ sc.slmap[k] = rv
+ return rv, false
+}
+
+func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
+ return sc.slkeys[idx]
+}
+
+// PopulateABIInRegArgOps examines the entry block of the function
+// and looks for incoming parameters that have missing or partial
+// OpArg{Int,Float}Reg values, inserting additional values in
+// cases where they are missing. Example:
+//
+// func foo(s string, used int, notused int) int {
+// return len(s) + used
+// }
+//
+// In the function above, the incoming parameter "used" is fully live,
+// "notused" is not live, and "s" is partially live (only the length
+// field of the string is used). At the point where debug value
+// analysis runs, we might expect to see an entry block with:
+//
+// b1:
+// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+// v5 = ArgIntReg <int> {used} [0] : CX
+//
+// While this is an accurate picture of the live incoming params,
+// we also want to have debug locations for non-live params (or
+// their non-live pieces), e.g. something like
+//
+// b1:
+// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
+// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+// v5 = ArgIntReg <int> {used} [0] : CX
+// v10 = ArgIntReg <int> {unused} [0] : DI
+//
+// This function examines the live OpArg{Int,Float}Reg values and
+// synthesizes new (dead) values for the non-live params or the
+// non-live pieces of partially live params.
+func PopulateABIInRegArgOps(f *Func) {
+ pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
+
+ // When manufacturing new slots that correspond to splits of
+ // composite parameters, we want to avoid creating a new sub-slot
+ // that differs from some existing sub-slot only by type, since
+ // the debug location analysis will treat that slot as a separate
+ // entity. To achieve this, create a lookup table of existing
+ // slots that is type-insenstitive.
+ sc := newSlotCanonicalizer()
+ for _, sl := range f.Names {
+ sc.lookup(*sl)
+ }
+
+ // Add slot -> value entry to f.NamedValues if not already present.
+ addToNV := func(v *Value, sl LocalSlot) {
+ values, ok := f.NamedValues[sl]
+ if !ok {
+ // Haven't seen this slot yet.
+ sla := f.localSlotAddr(sl)
+ f.Names = append(f.Names, sla)
+ } else {
+ for _, ev := range values {
+ if v == ev {
+ return
+ }
+ }
+ }
+ values = append(values, v)
+ f.NamedValues[sl] = values
+ }
+
+ newValues := []*Value{}
+
+ abiRegIndexToRegister := func(reg abi.RegIndex) int8 {
+ i := f.ABISelf.FloatIndexFor(reg)
+ if i >= 0 { // float PR
+ return f.Config.floatParamRegs[i]
+ } else {
+ return f.Config.intParamRegs[reg]
+ }
+ }
+
+ // Helper to construct a new OpArg{Float,Int}Reg op value.
+ var pos src.XPos
+ if len(f.Entry.Values) != 0 {
+ pos = f.Entry.Values[0].Pos
+ }
+ synthesizeOpIntFloatArg := func(n *ir.Name, t *types.Type, reg abi.RegIndex, sl LocalSlot) *Value {
+ aux := &AuxNameOffset{n, sl.Off}
+ op, auxInt := ArgOpAndRegisterFor(reg, f.ABISelf)
+ v := f.newValueNoBlock(op, t, pos)
+ v.AuxInt = auxInt
+ v.Aux = aux
+ v.Args = nil
+ v.Block = f.Entry
+ newValues = append(newValues, v)
+ addToNV(v, sl)
+ f.setHome(v, &f.Config.registers[abiRegIndexToRegister(reg)])
+ return v
+ }
+
+ // Make a pass through the entry block looking for
+ // OpArg{Int,Float}Reg ops. Record the slots they use in a table
+ // ("sc"). We use a type-insensitive lookup for the slot table,
+ // since the type we get from the ABI analyzer won't always match
+ // what the compiler uses when creating OpArg{Int,Float}Reg ops.
+ for _, v := range f.Entry.Values {
+ if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ aux := v.Aux.(*AuxNameOffset)
+ sl := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset}
+ // install slot in lookup table
+ idx, _ := sc.lookup(sl)
+ // add to f.NamedValues if not already present
+ addToNV(v, sc.canonSlot(idx))
+ } else if v.Op.IsCall() {
+ // if we hit a call, we've gone too far.
+ break
+ }
+ }
+
+ // Now make a pass through the ABI in-params, looking for params
+ // or pieces of params that we didn't encounter in the loop above.
+ for _, inp := range pri.InParams() {
+ if !isNamedRegParam(inp) {
+ continue
+ }
+ n := inp.Name
+
+ // Param is spread across one or more registers. Walk through
+ // each piece to see whether we've seen an arg reg op for it.
+ types, offsets := inp.RegisterTypesAndOffsets()
+ for k, t := range types {
+ // Note: this recipe for creating a LocalSlot is designed
+ // to be compatible with the one used in expand_calls.go
+ // as opposed to decompose.go. The expand calls code just
+ // takes the base name and creates an offset into it,
+ // without using the SplitOf/SplitOffset fields. The code
+ // in decompose.go does the opposite -- it creates a
+ // LocalSlot object with "Off" set to zero, but with
+ // SplitOf pointing to a parent slot, and SplitOffset
+ // holding the offset into the parent object.
+ pieceSlot := LocalSlot{N: n, Type: t, Off: offsets[k]}
+
+ // Look up this piece to see if we've seen a reg op
+ // for it. If not, create one.
+ _, found := sc.lookup(pieceSlot)
+ if !found {
+ // This slot doesn't appear in the map, meaning it
+ // corresponds to an in-param that is not live, or
+ // a portion of an in-param that is not live/used.
+ // Add a new dummy OpArg{Int,Float}Reg for it.
+ synthesizeOpIntFloatArg(n, t, inp.Registers[k],
+ pieceSlot)
+ }
+ }
+ }
+
+ // Insert the new values into the head of the block.
+ f.Entry.Values = append(newValues, f.Entry.Values...)
+}
+
+// BuildFuncDebug debug information for f, placing the results
+// in "rval". f must be fully processed, so that each Value is where it
+// will be when machine code is emitted.
+func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingLevel int, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
+ if f.RegAlloc == nil {
+ f.Fatalf("BuildFuncDebug on func %v that has not been fully processed", f)
+ }
+ state := &f.Cache.debugState
+ state.loggingLevel = loggingLevel % 1000
+
+ // A specific number demands exactly that many iterations. Under
+ // particular circumstances it make require more than the total of
+ // 2 passes implied by a single run through liveness and a single
+ // run through location list generation.
+ state.convergeCount = loggingLevel / 1000
+ state.f = f
+ state.registers = f.Config.registers
+ state.stackOffset = stackOffset
+ state.ctxt = ctxt
+
+ if buildcfg.Experiment.RegabiArgs {
+ PopulateABIInRegArgOps(f)
+ }
+
+ if state.loggingLevel > 0 {
+ state.logf("Generating location lists for function %q\n", f.Name)
+ }
+
+ if state.varParts == nil {
+ state.varParts = make(map[*ir.Name][]SlotID)
+ } else {
+ for n := range state.varParts {
+ delete(state.varParts, n)
+ }
+ }
+
+ // Recompose any decomposed variables, and establish the canonical
+ // IDs for each var and slot by filling out state.vars and state.slots.
+
+ state.slots = state.slots[:0]
+ state.vars = state.vars[:0]
+ for i, slot := range f.Names {
+ state.slots = append(state.slots, *slot)
+ if ir.IsSynthetic(slot.N) {
+ continue
+ }
+
+ topSlot := slot
+ for topSlot.SplitOf != nil {
+ topSlot = topSlot.SplitOf
+ }
+ if _, ok := state.varParts[topSlot.N]; !ok {
+ state.vars = append(state.vars, topSlot.N)
+ }
+ state.varParts[topSlot.N] = append(state.varParts[topSlot.N], SlotID(i))
+ }
+
+ // Recreate the LocalSlot for each stack-only variable.
+ // This would probably be better as an output from stackframe.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpVarDef {
+ n := v.Aux.(*ir.Name)
+ if ir.IsSynthetic(n) {
+ continue
+ }
+
+ if _, ok := state.varParts[n]; !ok {
+ slot := LocalSlot{N: n, Type: v.Type, Off: 0}
+ state.slots = append(state.slots, slot)
+ state.varParts[n] = []SlotID{SlotID(len(state.slots) - 1)}
+ state.vars = append(state.vars, n)
+ }
+ }
+ }
+ }
+
+ // Fill in the var<->slot mappings.
+ if cap(state.varSlots) < len(state.vars) {
+ state.varSlots = make([][]SlotID, len(state.vars))
+ } else {
+ state.varSlots = state.varSlots[:len(state.vars)]
+ for i := range state.varSlots {
+ state.varSlots[i] = state.varSlots[i][:0]
+ }
+ }
+ if cap(state.slotVars) < len(state.slots) {
+ state.slotVars = make([]VarID, len(state.slots))
+ } else {
+ state.slotVars = state.slotVars[:len(state.slots)]
+ }
+
+ if state.partsByVarOffset == nil {
+ state.partsByVarOffset = &partsByVarOffset{}
+ }
+ for varID, n := range state.vars {
+ parts := state.varParts[n]
+ state.varSlots[varID] = parts
+ for _, slotID := range parts {
+ state.slotVars[slotID] = VarID(varID)
+ }
+ *state.partsByVarOffset.(*partsByVarOffset) = partsByVarOffset{parts, state.slots}
+ sort.Sort(state.partsByVarOffset)
+ }
+
+ state.initializeCache(f, len(state.varParts), len(state.slots))
+
+ for i, slot := range f.Names {
+ if ir.IsSynthetic(slot.N) {
+ continue
+ }
+ for _, value := range f.NamedValues[*slot] {
+ state.valueNames[value.ID] = append(state.valueNames[value.ID], SlotID(i))
+ }
+ }
+
+ blockLocs := state.liveness()
+ state.buildLocationLists(blockLocs)
+
+ // Populate "rval" with what we've computed.
+ rval.Slots = state.slots
+ rval.VarSlots = state.varSlots
+ rval.Vars = state.vars
+ rval.LocationLists = state.lists
+}
+
+// liveness walks the function in control flow order, calculating the start
+// and end state of each block.
+func (state *debugState) liveness() []*BlockDebug {
+ blockLocs := make([]*BlockDebug, state.f.NumBlocks())
+ counterTime := int32(1)
+
+ // Reverse postorder: visit a block after as many as possible of its
+ // predecessors have been visited.
+ po := state.f.Postorder()
+ converged := false
+
+ // The iteration rule is that by default, run until converged, but
+ // if a particular iteration count is specified, run that many
+ // iterations, no more, no less. A count is specified as the
+ // thousands digit of the location lists debug flag,
+ // e.g. -d=locationlists=4000
+ keepGoing := func(k int) bool {
+ if state.convergeCount == 0 {
+ return !converged
+ }
+ return k < state.convergeCount
+ }
+ for k := 0; keepGoing(k); k++ {
+ if state.loggingLevel > 0 {
+ state.logf("Liveness pass %d\n", k)
+ }
+ converged = true
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+ locs := blockLocs[b.ID]
+ if locs == nil {
+ locs = state.allocBlock(b)
+ blockLocs[b.ID] = locs
+ }
+
+ // Build the starting state for the block from the final
+ // state of its predecessors.
+ startState, blockChanged := state.mergePredecessors(b, blockLocs, nil, false)
+ locs.lastCheckedTime = counterTime
+ counterTime++
+ if state.loggingLevel > 1 {
+ state.logf("Processing %v, block changed %v, initial state:\n%v", b, blockChanged, state.stateString(state.currentState))
+ }
+
+ if blockChanged {
+ // If the start did not change, then the old endState is good
+ converged = false
+ changed := false
+ state.changedSlots.clear()
+
+ // Update locs/registers with the effects of each Value.
+ for _, v := range b.Values {
+ slots := state.valueNames[v.ID]
+
+ // Loads and stores inherit the names of their sources.
+ var source *Value
+ switch v.Op {
+ case OpStoreReg:
+ source = v.Args[0]
+ case OpLoadReg:
+ switch a := v.Args[0]; a.Op {
+ case OpArg, OpPhi:
+ source = a
+ case OpStoreReg:
+ source = a.Args[0]
+ default:
+ if state.loggingLevel > 1 {
+ state.logf("at %v: load with unexpected source op: %v (%v)\n", v, a.Op, a)
+ }
+ }
+ }
+ // Update valueNames with the source so that later steps
+ // don't need special handling.
+ if source != nil && k == 0 {
+ // limit to k == 0 otherwise there are duplicates.
+ slots = append(slots, state.valueNames[source.ID]...)
+ state.valueNames[v.ID] = slots
+ }
+
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ c := state.processValue(v, slots, reg)
+ changed = changed || c
+ }
+
+ if state.loggingLevel > 1 {
+ state.logf("Block %v done, locs:\n%v", b, state.stateString(state.currentState))
+ }
+
+ locs.relevant = locs.relevant || changed
+ if !changed {
+ locs.endState = startState
+ } else {
+ for _, id := range state.changedSlots.contents() {
+ slotID := SlotID(id)
+ slotLoc := state.currentState.slots[slotID]
+ if slotLoc.absent() {
+ startState.Delete(int32(slotID))
+ continue
+ }
+ old := startState.Find(int32(slotID)) // do NOT replace existing values
+ if oldLS, ok := old.(*liveSlot); !ok || oldLS.VarLoc != slotLoc {
+ startState.Insert(int32(slotID),
+ &liveSlot{VarLoc: slotLoc})
+ }
+ }
+ locs.endState = startState
+ }
+ locs.lastChangedTime = counterTime
+ }
+ counterTime++
+ }
+ }
+ return blockLocs
+}
+
+// mergePredecessors takes the end state of each of b's predecessors and
+// intersects them to form the starting state for b. It puts that state
+// in blockLocs[b.ID].startState, and fills state.currentState with it.
+// It returns the start state and whether this is changed from the
+// previously approximated value of startState for this block. After
+// the first call, subsequent calls can only shrink startState.
+//
+// Passing forLocationLists=true enables additional side-effects that
+// are necessary for building location lists but superfluous while still
+// iterating to an answer.
+//
+// If previousBlock is non-nil, it registers changes vs. that block's
+// end state in state.changedVars. Note that previousBlock will often
+// not be a predecessor.
+//
+// Note that mergePredecessors behaves slightly differently between
+// first and subsequent calls for a block. For the first call, the
+// starting state is approximated by taking the state from the
+// predecessor whose state is smallest, and removing any elements not
+// in all the other predecessors; this makes the smallest number of
+// changes and shares the most state. On subsequent calls the old
+// value of startState is adjusted with new information; this is judged
+// to do the least amount of extra work.
+//
+// To improve performance, each block's state information is marked with
+// lastChanged and lastChecked "times" so unchanged predecessors can be
+// skipped on after-the-first iterations. Doing this allows extra
+// iterations by the caller to be almost free.
+//
+// It is important to know that the set representation used for
+// startState, endState, and merges can share data for two sets where
+// one is a small delta from the other. Doing this does require a
+// little care in how sets are updated, both in mergePredecessors, and
+// using its result.
+func (state *debugState) mergePredecessors(b *Block, blockLocs []*BlockDebug, previousBlock *Block, forLocationLists bool) (abt.T, bool) {
+ // Filter out back branches.
+ var predsBuf [10]*Block
+
+ preds := predsBuf[:0]
+ locs := blockLocs[b.ID]
+
+ blockChanged := !locs.everProcessed // the first time it always changes.
+ updating := locs.everProcessed
+
+ // For the first merge, exclude predecessors that have not been seen yet.
+ // I.e., backedges.
+ for _, pred := range b.Preds {
+ if bl := blockLocs[pred.b.ID]; bl != nil && bl.everProcessed {
+ // crucially, a self-edge has bl != nil, but bl.everProcessed is false the first time.
+ preds = append(preds, pred.b)
+ }
+ }
+
+ locs.everProcessed = true
+
+ if state.loggingLevel > 1 {
+ // The logf below would cause preds to be heap-allocated if
+ // it were passed directly.
+ preds2 := make([]*Block, len(preds))
+ copy(preds2, preds)
+ state.logf("Merging %v into %v (changed=%d, checked=%d)\n", preds2, b, locs.lastChangedTime, locs.lastCheckedTime)
+ }
+
+ state.changedVars.clear()
+
+ markChangedVars := func(slots, merged abt.T) {
+ if !forLocationLists {
+ return
+ }
+ // Fill changedVars with those that differ between the previous
+ // block (in the emit order, not necessarily a flow predecessor)
+ // and the start state for this block.
+ for it := slots.Iterator(); !it.Done(); {
+ k, v := it.Next()
+ m := merged.Find(k)
+ if m == nil || v.(*liveSlot).VarLoc != m.(*liveSlot).VarLoc {
+ state.changedVars.add(ID(state.slotVars[k]))
+ }
+ }
+ }
+
+ reset := func(ourStartState abt.T) {
+ if !(forLocationLists || blockChanged) {
+ // there is no change and this is not for location lists, do
+ // not bother to reset currentState because it will not be
+ // examined.
+ return
+ }
+ state.currentState.reset(ourStartState)
+ }
+
+ // Zero predecessors
+ if len(preds) == 0 {
+ if previousBlock != nil {
+ state.f.Fatalf("Function %v, block %s with no predecessors is not first block, has previous %s", state.f, b.String(), previousBlock.String())
+ }
+ // startState is empty
+ reset(abt.T{})
+ return abt.T{}, blockChanged
+ }
+
+ // One predecessor
+ l0 := blockLocs[preds[0].ID]
+ p0 := l0.endState
+ if len(preds) == 1 {
+ if previousBlock != nil && preds[0].ID != previousBlock.ID {
+ // Change from previous block is its endState minus the predecessor's endState
+ markChangedVars(blockLocs[previousBlock.ID].endState, p0)
+ }
+ locs.startState = p0
+ blockChanged = blockChanged || l0.lastChangedTime > locs.lastCheckedTime
+ reset(p0)
+ return p0, blockChanged
+ }
+
+ // More than one predecessor
+
+ if updating {
+ // After the first approximation, i.e., when updating, results
+ // can only get smaller, because initially backedge
+ // predecessors do not participate in the intersection. This
+ // means that for the update, given the prior approximation of
+ // startState, there is no need to re-intersect with unchanged
+ // blocks. Therefore remove unchanged blocks from the
+ // predecessor list.
+ for i := len(preds) - 1; i >= 0; i-- {
+ pred := preds[i]
+ if blockLocs[pred.ID].lastChangedTime > locs.lastCheckedTime {
+ continue // keep this predecessor
+ }
+ preds[i] = preds[len(preds)-1]
+ preds = preds[:len(preds)-1]
+ if state.loggingLevel > 2 {
+ state.logf("Pruned b%d, lastChanged was %d but b%d lastChecked is %d\n", pred.ID, blockLocs[pred.ID].lastChangedTime, b.ID, locs.lastCheckedTime)
+ }
+ }
+ // Check for an early out; this should always hit for the update
+ // if there are no cycles.
+ if len(preds) == 0 {
+ blockChanged = false
+
+ reset(locs.startState)
+ if state.loggingLevel > 2 {
+ state.logf("Early out, no predecessors changed since last check\n")
+ }
+ if previousBlock != nil {
+ markChangedVars(blockLocs[previousBlock.ID].endState, locs.startState)
+ }
+ return locs.startState, blockChanged
+ }
+ }
+
+ baseID := preds[0].ID
+ baseState := p0
+
+ // Choose the predecessor with the smallest endState for intersection work
+ for _, pred := range preds[1:] {
+ if blockLocs[pred.ID].endState.Size() < baseState.Size() {
+ baseState = blockLocs[pred.ID].endState
+ baseID = pred.ID
+ }
+ }
+
+ if state.loggingLevel > 2 {
+ state.logf("Starting %v with state from b%v:\n%v", b, baseID, state.blockEndStateString(blockLocs[baseID]))
+ for _, pred := range preds {
+ if pred.ID == baseID {
+ continue
+ }
+ state.logf("Merging in state from %v:\n%v", pred, state.blockEndStateString(blockLocs[pred.ID]))
+ }
+ }
+
+ state.currentState.reset(abt.T{})
+ // The normal logic of "reset" is included in the intersection loop below.
+
+ slotLocs := state.currentState.slots
+
+ // If this is the first call, do updates on the "baseState"; if this
+ // is a subsequent call, tweak the startState instead. Note that
+ // these "set" values are values; there are no side effects to
+ // other values as these are modified.
+ newState := baseState
+ if updating {
+ newState = blockLocs[b.ID].startState
+ }
+
+ for it := newState.Iterator(); !it.Done(); {
+ k, d := it.Next()
+ thisSlot := d.(*liveSlot)
+ x := thisSlot.VarLoc
+ x0 := x // initial value in newState
+
+ // Intersect this slot with the slot in all the predecessors
+ for _, other := range preds {
+ if !updating && other.ID == baseID {
+ continue
+ }
+ otherSlot := blockLocs[other.ID].endState.Find(k)
+ if otherSlot == nil {
+ x = VarLoc{}
+ break
+ }
+ y := otherSlot.(*liveSlot).VarLoc
+ x = x.intersect(y)
+ if x.absent() {
+ x = VarLoc{}
+ break
+ }
+ }
+
+ // Delete if necessary, but not otherwise (in order to maximize sharing).
+ if x.absent() {
+ if !x0.absent() {
+ blockChanged = true
+ newState.Delete(k)
+ }
+ slotLocs[k] = VarLoc{}
+ continue
+ }
+ if x != x0 {
+ blockChanged = true
+ newState.Insert(k, &liveSlot{VarLoc: x})
+ }
+
+ slotLocs[k] = x
+ mask := uint64(x.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+ state.currentState.registers[reg] = append(state.currentState.registers[reg], SlotID(k))
+ }
+ }
+
+ if previousBlock != nil {
+ markChangedVars(blockLocs[previousBlock.ID].endState, newState)
+ }
+ locs.startState = newState
+ return newState, blockChanged
+}
+
+// processValue updates locs and state.registerContents to reflect v, a
+// value with the names in vSlots and homed in vReg. "v" becomes
+// visible after execution of the instructions evaluating it. It
+// returns which VarIDs were modified by the Value's execution.
+func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) bool {
+ locs := state.currentState
+ changed := false
+ setSlot := func(slot SlotID, loc VarLoc) {
+ changed = true
+ state.changedVars.add(ID(state.slotVars[slot]))
+ state.changedSlots.add(ID(slot))
+ state.currentState.slots[slot] = loc
+ }
+
+ // Handle any register clobbering. Call operations, for example,
+ // clobber all registers even though they don't explicitly write to
+ // them.
+ clobbers := uint64(opcodeTable[v.Op].reg.clobbers)
+ for {
+ if clobbers == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(clobbers))
+ clobbers &^= 1 << reg
+
+ for _, slot := range locs.registers[reg] {
+ if state.loggingLevel > 1 {
+ state.logf("at %v: %v clobbered out of %v\n", v, state.slots[slot], &state.registers[reg])
+ }
+
+ last := locs.slots[slot]
+ if last.absent() {
+ state.f.Fatalf("at %v: slot %v in register %v with no location entry", v, state.slots[slot], &state.registers[reg])
+ continue
+ }
+ regs := last.Registers &^ (1 << reg)
+ setSlot(slot, VarLoc{regs, last.StackOffset})
+ }
+
+ locs.registers[reg] = locs.registers[reg][:0]
+ }
+
+ switch {
+ case v.Op == OpVarDef:
+ n := v.Aux.(*ir.Name)
+ if ir.IsSynthetic(n) {
+ break
+ }
+
+ slotID := state.varParts[n][0]
+ var stackOffset StackOffset
+ if v.Op == OpVarDef {
+ stackOffset = StackOffset(state.stackOffset(state.slots[slotID])<<1 | 1)
+ }
+ setSlot(slotID, VarLoc{0, stackOffset})
+ if state.loggingLevel > 1 {
+ if v.Op == OpVarDef {
+ state.logf("at %v: stack-only var %v now live\n", v, state.slots[slotID])
+ } else {
+ state.logf("at %v: stack-only var %v now dead\n", v, state.slots[slotID])
+ }
+ }
+
+ case v.Op == OpArg:
+ home := state.f.getHome(v.ID).(LocalSlot)
+ stackOffset := state.stackOffset(home)<<1 | 1
+ for _, slot := range vSlots {
+ if state.loggingLevel > 1 {
+ state.logf("at %v: arg %v now on stack in location %v\n", v, state.slots[slot], home)
+ if last := locs.slots[slot]; !last.absent() {
+ state.logf("at %v: unexpected arg op on already-live slot %v\n", v, state.slots[slot])
+ }
+ }
+
+ setSlot(slot, VarLoc{0, StackOffset(stackOffset)})
+ }
+
+ case v.Op == OpStoreReg:
+ home := state.f.getHome(v.ID).(LocalSlot)
+ stackOffset := state.stackOffset(home)<<1 | 1
+ for _, slot := range vSlots {
+ last := locs.slots[slot]
+ if last.absent() {
+ if state.loggingLevel > 1 {
+ state.logf("at %v: unexpected spill of unnamed register %s\n", v, vReg)
+ }
+ break
+ }
+
+ setSlot(slot, VarLoc{last.Registers, StackOffset(stackOffset)})
+ if state.loggingLevel > 1 {
+ state.logf("at %v: %v spilled to stack location %v@%d\n", v, state.slots[slot], home, state.stackOffset(home))
+ }
+ }
+
+ case vReg != nil:
+ if state.loggingLevel > 1 {
+ newSlots := make([]bool, len(state.slots))
+ for _, slot := range vSlots {
+ newSlots[slot] = true
+ }
+
+ for _, slot := range locs.registers[vReg.num] {
+ if !newSlots[slot] {
+ state.logf("at %v: overwrote %v in register %v\n", v, state.slots[slot], vReg)
+ }
+ }
+ }
+
+ for _, slot := range locs.registers[vReg.num] {
+ last := locs.slots[slot]
+ setSlot(slot, VarLoc{last.Registers &^ (1 << uint8(vReg.num)), last.StackOffset})
+ }
+ locs.registers[vReg.num] = locs.registers[vReg.num][:0]
+ locs.registers[vReg.num] = append(locs.registers[vReg.num], vSlots...)
+ for _, slot := range vSlots {
+ if state.loggingLevel > 1 {
+ state.logf("at %v: %v now in %s\n", v, state.slots[slot], vReg)
+ }
+
+ last := locs.slots[slot]
+ setSlot(slot, VarLoc{1<<uint8(vReg.num) | last.Registers, last.StackOffset})
+ }
+ }
+ return changed
+}
+
+// varOffset returns the offset of slot within the user variable it was
+// decomposed from. This has nothing to do with its stack offset.
+func varOffset(slot LocalSlot) int64 {
+ offset := slot.Off
+ s := &slot
+ for ; s.SplitOf != nil; s = s.SplitOf {
+ offset += s.SplitOffset
+ }
+ return offset
+}
+
+type partsByVarOffset struct {
+ slotIDs []SlotID
+ slots []LocalSlot
+}
+
+func (a partsByVarOffset) Len() int { return len(a.slotIDs) }
+func (a partsByVarOffset) Less(i, j int) bool {
+ return varOffset(a.slots[a.slotIDs[i]]) < varOffset(a.slots[a.slotIDs[j]])
+}
+func (a partsByVarOffset) Swap(i, j int) { a.slotIDs[i], a.slotIDs[j] = a.slotIDs[j], a.slotIDs[i] }
+
+// A pendingEntry represents the beginning of a location list entry, missing
+// only its end coordinate.
+type pendingEntry struct {
+ present bool
+ startBlock, startValue ID
+ // The location of each piece of the variable, in the same order as the
+ // SlotIDs in varParts.
+ pieces []VarLoc
+}
+
+func (e *pendingEntry) clear() {
+ e.present = false
+ e.startBlock = 0
+ e.startValue = 0
+ for i := range e.pieces {
+ e.pieces[i] = VarLoc{}
+ }
+}
+
+// canMerge reports whether a new location description is a superset
+// of the (non-empty) pending location description, if so, the two
+// can be merged (i.e., pending is still a valid and useful location
+// description).
+func canMerge(pending, new VarLoc) bool {
+ if pending.absent() && new.absent() {
+ return true
+ }
+ if pending.absent() || new.absent() {
+ return false
+ }
+ // pending is not absent, therefore it has either a stack mapping,
+ // or registers, or both.
+ if pending.onStack() && pending.StackOffset != new.StackOffset {
+ // if pending has a stack offset, then new must also, and it
+ // must be the same (StackOffset encodes onStack).
+ return false
+ }
+ if pending.Registers&new.Registers != pending.Registers {
+ // There is at least one register in pending not mentioned in new.
+ return false
+ }
+ return true
+}
+
+// firstReg returns the first register in set that is present.
+func firstReg(set RegisterSet) uint8 {
+ if set == 0 {
+ // This is wrong, but there seem to be some situations where we
+ // produce locations with no storage.
+ return 0
+ }
+ return uint8(bits.TrailingZeros64(uint64(set)))
+}
+
+// buildLocationLists builds location lists for all the user variables
+// in state.f, using the information about block state in blockLocs.
+// The returned location lists are not fully complete. They are in
+// terms of SSA values rather than PCs, and have no base address/end
+// entries. They will be finished by PutLocationList.
+func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
+ // Run through the function in program text order, building up location
+ // lists as we go. The heavy lifting has mostly already been done.
+
+ var prevBlock *Block
+ for _, b := range state.f.Blocks {
+ state.mergePredecessors(b, blockLocs, prevBlock, true)
+
+ // Handle any differences among predecessor blocks and previous block (perhaps not a predecessor)
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), b, BlockStart)
+ }
+ state.changedVars.clear()
+
+ if !blockLocs[b.ID].relevant {
+ continue
+ }
+
+ mustBeFirst := func(v *Value) bool {
+ return v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() ||
+ v.Op == OpArgIntReg || v.Op == OpArgFloatReg
+ }
+
+ blockPrologComplete := func(v *Value) bool {
+ if b.ID != state.f.Entry.ID {
+ return !opcodeTable[v.Op].zeroWidth
+ } else {
+ return v.Op == OpInitMem
+ }
+ }
+
+ // Examine the prolog portion of the block to process special
+ // zero-width ops such as Arg, Phi, LoweredGetClosurePtr (etc)
+ // whose lifetimes begin at the block starting point. In an
+ // entry block, allow for the possibility that we may see Arg
+ // ops that appear _after_ other non-zero-width operations.
+ // Example:
+ //
+ // v33 = ArgIntReg <uintptr> {foo+0} [0] : AX (foo)
+ // v34 = ArgIntReg <uintptr> {bar+0} [0] : BX (bar)
+ // ...
+ // v77 = StoreReg <unsafe.Pointer> v67 : ctx+8[unsafe.Pointer]
+ // v78 = StoreReg <unsafe.Pointer> v68 : ctx[unsafe.Pointer]
+ // v79 = Arg <*uint8> {args} : args[*uint8] (args[*uint8])
+ // v80 = Arg <int> {args} [8] : args+8[int] (args+8[int])
+ // ...
+ // v1 = InitMem <mem>
+ //
+ // We can stop scanning the initial portion of the block when
+ // we either see the InitMem op (for entry blocks) or the
+ // first non-zero-width op (for other blocks).
+ for idx := 0; idx < len(b.Values); idx++ {
+ v := b.Values[idx]
+ if blockPrologComplete(v) {
+ break
+ }
+ // Consider only "lifetime begins at block start" ops.
+ if !mustBeFirst(v) && v.Op != OpArg {
+ continue
+ }
+ slots := state.valueNames[v.ID]
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
+ if changed {
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), v.Block, BlockStart)
+ }
+ state.changedVars.clear()
+ }
+ }
+
+ // Now examine the block again, handling things other than the
+ // "begins at block start" lifetimes.
+ zeroWidthPending := false
+ prologComplete := false
+ // expect to see values in pattern (apc)* (zerowidth|real)*
+ for _, v := range b.Values {
+ if blockPrologComplete(v) {
+ prologComplete = true
+ }
+ slots := state.valueNames[v.ID]
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
+
+ if opcodeTable[v.Op].zeroWidth {
+ if prologComplete && mustBeFirst(v) {
+ panic(fmt.Errorf("Unexpected placement of op '%s' appearing after non-pseudo-op at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func))
+ }
+ if changed {
+ if mustBeFirst(v) || v.Op == OpArg {
+ // already taken care of above
+ continue
+ }
+ zeroWidthPending = true
+ }
+ continue
+ }
+ if !changed && !zeroWidthPending {
+ continue
+ }
+
+ // Not zero-width; i.e., a "real" instruction.
+ zeroWidthPending = false
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), v.Block, v)
+ }
+ state.changedVars.clear()
+ }
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), b, BlockEnd)
+ }
+
+ prevBlock = b
+ }
+
+ if state.loggingLevel > 0 {
+ state.logf("location lists:\n")
+ }
+
+ // Flush any leftover entries live at the end of the last block.
+ for varID := range state.lists {
+ state.writePendingEntry(VarID(varID), -1, FuncEnd.ID)
+ list := state.lists[varID]
+ if state.loggingLevel > 0 {
+ if len(list) == 0 {
+ state.logf("\t%v : empty list\n", state.vars[varID])
+ } else {
+ state.logf("\t%v : %q\n", state.vars[varID], hex.EncodeToString(state.lists[varID]))
+ }
+ }
+ }
+}
+
+// updateVar updates the pending location list entry for varID to
+// reflect the new locations in curLoc, beginning at v in block b.
+// v may be one of the special values indicating block start or end.
+func (state *debugState) updateVar(varID VarID, b *Block, v *Value) {
+ curLoc := state.currentState.slots
+ // Assemble the location list entry with whatever's live.
+ empty := true
+ for _, slotID := range state.varSlots[varID] {
+ if !curLoc[slotID].absent() {
+ empty = false
+ break
+ }
+ }
+ pending := &state.pendingEntries[varID]
+ if empty {
+ state.writePendingEntry(varID, b.ID, v.ID)
+ pending.clear()
+ return
+ }
+
+ // Extend the previous entry if possible.
+ if pending.present {
+ merge := true
+ for i, slotID := range state.varSlots[varID] {
+ if !canMerge(pending.pieces[i], curLoc[slotID]) {
+ merge = false
+ break
+ }
+ }
+ if merge {
+ return
+ }
+ }
+
+ state.writePendingEntry(varID, b.ID, v.ID)
+ pending.present = true
+ pending.startBlock = b.ID
+ pending.startValue = v.ID
+ for i, slot := range state.varSlots[varID] {
+ pending.pieces[i] = curLoc[slot]
+ }
+}
+
+// writePendingEntry writes out the pending entry for varID, if any,
+// terminated at endBlock/Value.
+func (state *debugState) writePendingEntry(varID VarID, endBlock, endValue ID) {
+ pending := state.pendingEntries[varID]
+ if !pending.present {
+ return
+ }
+
+ // Pack the start/end coordinates into the start/end addresses
+ // of the entry, for decoding by PutLocationList.
+ start, startOK := encodeValue(state.ctxt, pending.startBlock, pending.startValue)
+ end, endOK := encodeValue(state.ctxt, endBlock, endValue)
+ if !startOK || !endOK {
+ // If someone writes a function that uses >65K values,
+ // they get incomplete debug info on 32-bit platforms.
+ return
+ }
+ if start == end {
+ if state.loggingLevel > 1 {
+ // Printf not logf so not gated by GOSSAFUNC; this should fire very rarely.
+ // TODO this fires a lot, need to figure out why.
+ state.logf("Skipping empty location list for %v in %s\n", state.vars[varID], state.f.Name)
+ }
+ return
+ }
+
+ list := state.lists[varID]
+ list = appendPtr(state.ctxt, list, start)
+ list = appendPtr(state.ctxt, list, end)
+ // Where to write the length of the location description once
+ // we know how big it is.
+ sizeIdx := len(list)
+ list = list[:len(list)+2]
+
+ if state.loggingLevel > 1 {
+ var partStrs []string
+ for i, slot := range state.varSlots[varID] {
+ partStrs = append(partStrs, fmt.Sprintf("%v@%v", state.slots[slot], state.LocString(pending.pieces[i])))
+ }
+ state.logf("Add entry for %v: \tb%vv%v-b%vv%v = \t%v\n", state.vars[varID], pending.startBlock, pending.startValue, endBlock, endValue, strings.Join(partStrs, " "))
+ }
+
+ for i, slotID := range state.varSlots[varID] {
+ loc := pending.pieces[i]
+ slot := state.slots[slotID]
+
+ if !loc.absent() {
+ if loc.onStack() {
+ if loc.stackOffsetValue() == 0 {
+ list = append(list, dwarf.DW_OP_call_frame_cfa)
+ } else {
+ list = append(list, dwarf.DW_OP_fbreg)
+ list = dwarf.AppendSleb128(list, int64(loc.stackOffsetValue()))
+ }
+ } else {
+ regnum := state.ctxt.Arch.DWARFRegisters[state.registers[firstReg(loc.Registers)].ObjNum()]
+ if regnum < 32 {
+ list = append(list, dwarf.DW_OP_reg0+byte(regnum))
+ } else {
+ list = append(list, dwarf.DW_OP_regx)
+ list = dwarf.AppendUleb128(list, uint64(regnum))
+ }
+ }
+ }
+
+ if len(state.varSlots[varID]) > 1 {
+ list = append(list, dwarf.DW_OP_piece)
+ list = dwarf.AppendUleb128(list, uint64(slot.Type.Size()))
+ }
+ }
+ state.ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+ state.lists[varID] = list
+}
+
+// PutLocationList adds list (a location list in its intermediate representation) to listSym.
+func (debugInfo *FuncDebug) PutLocationList(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) {
+ getPC := debugInfo.GetPC
+
+ if ctxt.UseBASEntries {
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, ^0)
+ listSym.WriteAddr(ctxt, listSym.Size, ctxt.Arch.PtrSize, startPC, 0)
+ }
+
+ // Re-read list, translating its address from block/value ID to PC.
+ for i := 0; i < len(list); {
+ begin := getPC(decodeValue(ctxt, readPtr(ctxt, list[i:])))
+ end := getPC(decodeValue(ctxt, readPtr(ctxt, list[i+ctxt.Arch.PtrSize:])))
+
+ // Horrible hack. If a range contains only zero-width
+ // instructions, e.g. an Arg, and it's at the beginning of the
+ // function, this would be indistinguishable from an
+ // end entry. Fudge it.
+ if begin == 0 && end == 0 {
+ end = 1
+ }
+
+ if ctxt.UseBASEntries {
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin))
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end))
+ } else {
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin))
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end))
+ }
+
+ i += 2 * ctxt.Arch.PtrSize
+ datalen := 2 + int(ctxt.Arch.ByteOrder.Uint16(list[i:]))
+ listSym.WriteBytes(ctxt, listSym.Size, list[i:i+datalen]) // copy datalen and location encoding
+ i += datalen
+ }
+
+ // Location list contents, now with real PCs.
+ // End entry.
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
+}
+
+// Pack a value and block ID into an address-sized uint, returning
+// encoded value and boolean indicating whether the encoding succeeded.
+// For 32-bit architectures the process may fail for very large
+// procedures(the theory being that it's ok to have degraded debug
+// quality in this case).
+func encodeValue(ctxt *obj.Link, b, v ID) (uint64, bool) {
+ if ctxt.Arch.PtrSize == 8 {
+ result := uint64(b)<<32 | uint64(uint32(v))
+ //ctxt.Logf("b %#x (%d) v %#x (%d) -> %#x\n", b, b, v, v, result)
+ return result, true
+ }
+ if ctxt.Arch.PtrSize != 4 {
+ panic("unexpected pointer size")
+ }
+ if ID(int16(b)) != b || ID(int16(v)) != v {
+ return 0, false
+ }
+ return uint64(b)<<16 | uint64(uint16(v)), true
+}
+
+// Unpack a value and block ID encoded by encodeValue.
+func decodeValue(ctxt *obj.Link, word uint64) (ID, ID) {
+ if ctxt.Arch.PtrSize == 8 {
+ b, v := ID(word>>32), ID(word)
+ //ctxt.Logf("%#x -> b %#x (%d) v %#x (%d)\n", word, b, b, v, v)
+ return b, v
+ }
+ if ctxt.Arch.PtrSize != 4 {
+ panic("unexpected pointer size")
+ }
+ return ID(word >> 16), ID(int16(word))
+}
+
+// Append a pointer-sized uint to buf.
+func appendPtr(ctxt *obj.Link, buf []byte, word uint64) []byte {
+ if cap(buf) < len(buf)+20 {
+ b := make([]byte, len(buf), 20+cap(buf)*2)
+ copy(b, buf)
+ buf = b
+ }
+ writeAt := len(buf)
+ buf = buf[0 : len(buf)+ctxt.Arch.PtrSize]
+ writePtr(ctxt, buf[writeAt:], word)
+ return buf
+}
+
+// Write a pointer-sized uint to the beginning of buf.
+func writePtr(ctxt *obj.Link, buf []byte, word uint64) {
+ switch ctxt.Arch.PtrSize {
+ case 4:
+ ctxt.Arch.ByteOrder.PutUint32(buf, uint32(word))
+ case 8:
+ ctxt.Arch.ByteOrder.PutUint64(buf, word)
+ default:
+ panic("unexpected pointer size")
+ }
+
+}
+
+// Read a pointer-sized uint from the beginning of buf.
+func readPtr(ctxt *obj.Link, buf []byte) uint64 {
+ switch ctxt.Arch.PtrSize {
+ case 4:
+ return uint64(ctxt.Arch.ByteOrder.Uint32(buf))
+ case 8:
+ return ctxt.Arch.ByteOrder.Uint64(buf)
+ default:
+ panic("unexpected pointer size")
+ }
+
+}
+
+// setupLocList creates the initial portion of a location list for a
+// user variable. It emits the encoded start/end of the range and a
+// placeholder for the size. Return value is the new list plus the
+// slot in the list holding the size (to be updated later).
+func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int) {
+ start, startOK := encodeValue(ctxt, f.Entry.ID, st)
+ end, endOK := encodeValue(ctxt, f.Entry.ID, en)
+ if !startOK || !endOK {
+ // This could happen if someone writes a function that uses
+ // >65K values on a 32-bit platform. Hopefully a degraded debugging
+ // experience is ok in that case.
+ return nil, 0
+ }
+ list = appendPtr(ctxt, list, start)
+ list = appendPtr(ctxt, list, end)
+
+ // Where to write the length of the location description once
+ // we know how big it is.
+ sizeIdx := len(list)
+ list = list[:len(list)+2]
+ return list, sizeIdx
+}
+
+// locatePrologEnd walks the entry block of a function with incoming
+// register arguments and locates the last instruction in the prolog
+// that spills a register arg. It returns the ID of that instruction
+// Example:
+//
+// b1:
+// v3 = ArgIntReg <int> {p1+0} [0] : AX
+// ... more arg regs ..
+// v4 = ArgFloatReg <float32> {f1+0} [0] : X0
+// v52 = MOVQstore <mem> {p1} v2 v3 v1
+// ... more stores ...
+// v68 = MOVSSstore <mem> {f4} v2 v67 v66
+// v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
+//
+// Important: locatePrologEnd is expected to work properly only with
+// optimization turned off (e.g. "-N"). If optimization is enabled
+// we can't be assured of finding all input arguments spilled in the
+// entry block prolog.
+func locatePrologEnd(f *Func) ID {
+
+ // returns true if this instruction looks like it moves an ABI
+ // register to the stack, along with the value being stored.
+ isRegMoveLike := func(v *Value) (bool, ID) {
+ n, ok := v.Aux.(*ir.Name)
+ var r ID
+ if !ok || n.Class != ir.PPARAM {
+ return false, r
+ }
+ regInputs, memInputs, spInputs := 0, 0, 0
+ for _, a := range v.Args {
+ if a.Op == OpArgIntReg || a.Op == OpArgFloatReg {
+ regInputs++
+ r = a.ID
+ } else if a.Type.IsMemory() {
+ memInputs++
+ } else if a.Op == OpSP {
+ spInputs++
+ } else {
+ return false, r
+ }
+ }
+ return v.Type.IsMemory() && memInputs == 1 &&
+ regInputs == 1 && spInputs == 1, r
+ }
+
+ // OpArg*Reg values we've seen so far on our forward walk,
+ // for which we have not yet seen a corresponding spill.
+ regArgs := make([]ID, 0, 32)
+
+ // removeReg tries to remove a value from regArgs, returning true
+ // if found and removed, or false otherwise.
+ removeReg := func(r ID) bool {
+ for i := 0; i < len(regArgs); i++ {
+ if regArgs[i] == r {
+ regArgs = append(regArgs[:i], regArgs[i+1:]...)
+ return true
+ }
+ }
+ return false
+ }
+
+ // Walk forwards through the block. When we see OpArg*Reg, record
+ // the value it produces in the regArgs list. When see a store that uses
+ // the value, remove the entry. When we hit the last store (use)
+ // then we've arrived at the end of the prolog.
+ for k, v := range f.Entry.Values {
+ if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ regArgs = append(regArgs, v.ID)
+ continue
+ }
+ if ok, r := isRegMoveLike(v); ok {
+ if removed := removeReg(r); removed {
+ if len(regArgs) == 0 {
+ // Found our last spill; return the value after
+ // it. Note that it is possible that this spill is
+ // the last instruction in the block. If so, then
+ // return the "end of block" sentinel.
+ if k < len(f.Entry.Values)-1 {
+ return f.Entry.Values[k+1].ID
+ }
+ return BlockEnd.ID
+ }
+ }
+ }
+ if v.Op.IsCall() {
+ // if we hit a call, we've gone too far.
+ return v.ID
+ }
+ }
+ // nothing found
+ return ID(-1)
+}
+
+// isNamedRegParam returns true if the param corresponding to "p"
+// is a named, non-blank input parameter assigned to one or more
+// registers.
+func isNamedRegParam(p abi.ABIParamAssignment) bool {
+ if p.Name == nil {
+ return false
+ }
+ n := p.Name
+ if n.Sym() == nil || n.Sym().IsBlank() {
+ return false
+ }
+ if len(p.Registers) == 0 {
+ return false
+ }
+ return true
+}
+
+// BuildFuncDebugNoOptimized populates a FuncDebug object "rval" with
+// entries corresponding to the register-resident input parameters for
+// the function "f"; it is used when we are compiling without
+// optimization but the register ABI is enabled. For each reg param,
+// it constructs a 2-element location list: the first element holds
+// the input register, and the second element holds the stack location
+// of the param (the assumption being that when optimization is off,
+// each input param reg will be spilled in the prolog).
+func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
+
+ pri := f.ABISelf.ABIAnalyzeFuncType(f.Type)
+
+ // Look to see if we have any named register-promoted parameters.
+ // If there are none, bail early and let the caller sort things
+ // out for the remainder of the params/locals.
+ numRegParams := 0
+ for _, inp := range pri.InParams() {
+ if isNamedRegParam(inp) {
+ numRegParams++
+ }
+ }
+ if numRegParams == 0 {
+ return
+ }
+
+ state := debugState{f: f}
+
+ if loggingEnabled {
+ state.logf("generating -N reg param loc lists for func %q\n", f.Name)
+ }
+
+ // Allocate location lists.
+ rval.LocationLists = make([][]byte, numRegParams)
+
+ // Locate the value corresponding to the last spill of
+ // an input register.
+ afterPrologVal := locatePrologEnd(f)
+
+ // Walk the input params again and process the register-resident elements.
+ pidx := 0
+ for _, inp := range pri.InParams() {
+ if !isNamedRegParam(inp) {
+ // will be sorted out elsewhere
+ continue
+ }
+
+ n := inp.Name
+ sl := LocalSlot{N: n, Type: inp.Type, Off: 0}
+ rval.Vars = append(rval.Vars, n)
+ rval.Slots = append(rval.Slots, sl)
+ slid := len(rval.VarSlots)
+ rval.VarSlots = append(rval.VarSlots, []SlotID{SlotID(slid)})
+
+ if afterPrologVal == ID(-1) {
+ // This can happen for degenerate functions with infinite
+ // loops such as that in issue 45948. In such cases, leave
+ // the var/slot set up for the param, but don't try to
+ // emit a location list.
+ if loggingEnabled {
+ state.logf("locatePrologEnd failed, skipping %v\n", n)
+ }
+ pidx++
+ continue
+ }
+
+ // Param is arriving in one or more registers. We need a 2-element
+ // location expression for it. First entry in location list
+ // will correspond to lifetime in input registers.
+ list, sizeIdx := setupLocList(ctxt, f, rval.LocationLists[pidx],
+ BlockStart.ID, afterPrologVal)
+ if list == nil {
+ pidx++
+ continue
+ }
+ if loggingEnabled {
+ state.logf("param %v:\n [<entry>, %d]:\n", n, afterPrologVal)
+ }
+ rtypes, _ := inp.RegisterTypesAndOffsets()
+ padding := make([]uint64, 0, 32)
+ padding = inp.ComputePadding(padding)
+ for k, r := range inp.Registers {
+ reg := ObjRegForAbiReg(r, f.Config)
+ dwreg := ctxt.Arch.DWARFRegisters[reg]
+ if dwreg < 32 {
+ list = append(list, dwarf.DW_OP_reg0+byte(dwreg))
+ } else {
+ list = append(list, dwarf.DW_OP_regx)
+ list = dwarf.AppendUleb128(list, uint64(dwreg))
+ }
+ if loggingEnabled {
+ state.logf(" piece %d -> dwreg %d", k, dwreg)
+ }
+ if len(inp.Registers) > 1 {
+ list = append(list, dwarf.DW_OP_piece)
+ ts := rtypes[k].Size()
+ list = dwarf.AppendUleb128(list, uint64(ts))
+ if padding[k] > 0 {
+ if loggingEnabled {
+ state.logf(" [pad %d bytes]", padding[k])
+ }
+ list = append(list, dwarf.DW_OP_piece)
+ list = dwarf.AppendUleb128(list, padding[k])
+ }
+ }
+ if loggingEnabled {
+ state.logf("\n")
+ }
+ }
+ // fill in length of location expression element
+ ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+
+ // Second entry in the location list will be the stack home
+ // of the param, once it has been spilled. Emit that now.
+ list, sizeIdx = setupLocList(ctxt, f, list,
+ afterPrologVal, FuncEnd.ID)
+ if list == nil {
+ pidx++
+ continue
+ }
+ soff := stackOffset(sl)
+ if soff == 0 {
+ list = append(list, dwarf.DW_OP_call_frame_cfa)
+ } else {
+ list = append(list, dwarf.DW_OP_fbreg)
+ list = dwarf.AppendSleb128(list, int64(soff))
+ }
+ if loggingEnabled {
+ state.logf(" [%d, <end>): stackOffset=%d\n", afterPrologVal, soff)
+ }
+
+ // fill in size
+ ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+
+ rval.LocationLists[pidx] = list
+ pidx++
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
new file mode 100644
index 0000000..af9e2a3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -0,0 +1,269 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// Matches lines in genssa output that are marked "isstmt", and the parenthesized plus-prefixed line number is a submatch
+var asmLine *regexp.Regexp = regexp.MustCompile(`^\s[vb]\d+\s+\d+\s\(\+(\d+)\)`)
+
+// this matches e.g. ` v123456789 000007 (+9876654310) MOVUPS X15, ""..autotmp_2-32(SP)`
+
+// Matches lines in genssa output that describe an inlined file.
+// Note it expects an unadventurous choice of basename.
+var sepRE = regexp.QuoteMeta(string(filepath.Separator))
+var inlineLine *regexp.Regexp = regexp.MustCompile(`^#\s.*` + sepRE + `[-\w]+\.go:(\d+)`)
+
+// this matches e.g. # /pa/inline-dumpxxxx.go:6
+
+var testGoArchFlag = flag.String("arch", "", "run test for specified architecture")
+
+func testGoArch() string {
+ if *testGoArchFlag == "" {
+ return runtime.GOARCH
+ }
+ return *testGoArchFlag
+}
+
+func hasRegisterABI() bool {
+ switch testGoArch() {
+ case "amd64", "arm64", "loong64", "ppc64", "ppc64le", "riscv":
+ return true
+ }
+ return false
+}
+
+func unixOnly(t *testing.T) {
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { // in particular, it could be windows.
+ t.Skip("this test depends on creating a file with a wonky name, only works for sure on Linux and Darwin")
+ }
+}
+
+// testDebugLinesDefault removes the first wanted statement on architectures that are not (yet) register ABI.
+func testDebugLinesDefault(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) {
+ unixOnly(t)
+ if !hasRegisterABI() {
+ wantStmts = wantStmts[1:]
+ }
+ testDebugLines(t, gcflags, file, function, wantStmts, ignoreRepeats)
+}
+
+func TestDebugLinesSayHi(t *testing.T) {
+ // This test is potentially fragile, the goal is that debugging should step properly through "sayhi"
+ // If the blocks are reordered in a way that changes the statement order but execution flows correctly,
+ // then rearrange the expected numbers. Register abi and not-register-abi also have different sequences,
+ // at least for now.
+
+ testDebugLinesDefault(t, "-N -l", "sayhi.go", "sayhi", []int{8, 9, 10, 11}, false)
+}
+
+func TestDebugLinesPushback(t *testing.T) {
+ unixOnly(t)
+
+ switch testGoArch() {
+ default:
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64": // register ABI
+ fn := "(*List[go.shape.int_0]).PushBack"
+ if true /* was buildcfg.Experiment.Unified */ {
+ // Unified mangles differently
+ fn = "(*List[go.shape.int]).PushBack"
+ }
+ testDebugLines(t, "-N -l", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true)
+ }
+}
+
+func TestDebugLinesConvert(t *testing.T) {
+ unixOnly(t)
+
+ switch testGoArch() {
+ default:
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64": // register ABI
+ fn := "G[go.shape.int_0]"
+ if true /* was buildcfg.Experiment.Unified */ {
+ // Unified mangles differently
+ fn = "G[go.shape.int]"
+ }
+ testDebugLines(t, "-N -l", "convertline.go", fn, []int{9, 10, 11}, true)
+ }
+}
+
+func TestInlineLines(t *testing.T) {
+ if runtime.GOARCH != "amd64" && *testGoArchFlag == "" {
+ // As of september 2021, works for everything except mips64, but still potentially fragile
+ t.Skip("only runs for amd64 unless -arch explicitly supplied")
+ }
+
+ want := [][]int{{3}, {4, 10}, {4, 10, 16}, {4, 10}, {4, 11, 16}, {4, 11}, {4}, {5, 10}, {5, 10, 16}, {5, 10}, {5, 11, 16}, {5, 11}, {5}}
+ testInlineStack(t, "inline-dump.go", "f", want)
+}
+
+func TestDebugLines_53456(t *testing.T) {
+ testDebugLinesDefault(t, "-N -l", "b53456.go", "(*T).Inc", []int{15, 16, 17, 18}, true)
+}
+
+func compileAndDump(t *testing.T, file, function, moreGCFlags string) []byte {
+ testenv.MustHaveGoBuild(t)
+
+ tmpdir, err := os.MkdirTemp("", "debug_lines_test")
+ if err != nil {
+ panic(fmt.Sprintf("Problem creating TempDir, error %v", err))
+ }
+ if testing.Verbose() {
+ fmt.Printf("Preserving temporary directory %s\n", tmpdir)
+ } else {
+ defer os.RemoveAll(tmpdir)
+ }
+
+ source, err := filepath.Abs(filepath.Join("testdata", file))
+ if err != nil {
+ panic(fmt.Sprintf("Could not get abspath of testdata directory and file, %v", err))
+ }
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", "foo.o", "-gcflags=-d=ssa/genssa/dump="+function+" "+moreGCFlags, source)
+ cmd.Dir = tmpdir
+ cmd.Env = replaceEnv(cmd.Env, "GOSSADIR", tmpdir)
+ testGoos := "linux" // default to linux
+ if testGoArch() == "wasm" {
+ testGoos = "js"
+ }
+ cmd.Env = replaceEnv(cmd.Env, "GOOS", testGoos)
+ cmd.Env = replaceEnv(cmd.Env, "GOARCH", testGoArch())
+
+ if testing.Verbose() {
+ fmt.Printf("About to run %s\n", asCommandLine("", cmd))
+ }
+
+ var stdout, stderr strings.Builder
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("error running cmd %s: %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
+ }
+
+ if s := stderr.String(); s != "" {
+ t.Fatalf("Wanted empty stderr, instead got:\n%s\n", s)
+ }
+
+ dumpFile := filepath.Join(tmpdir, function+"_01__genssa.dump")
+ dumpBytes, err := os.ReadFile(dumpFile)
+ if err != nil {
+ t.Fatalf("Could not read dump file %s, err=%v", dumpFile, err)
+ }
+ return dumpBytes
+}
+
+func sortInlineStacks(x [][]int) {
+ sort.Slice(x, func(i, j int) bool {
+ if len(x[i]) != len(x[j]) {
+ return len(x[i]) < len(x[j])
+ }
+ for k := range x[i] {
+ if x[i][k] != x[j][k] {
+ return x[i][k] < x[j][k]
+ }
+ }
+ return false
+ })
+}
+
+// testInlineStack ensures that inlining is described properly in the comments in the dump file
+func testInlineStack(t *testing.T, file, function string, wantStacks [][]int) {
+ // this is an inlining reporting test, not an optimization test. -N makes it less fragile
+ dumpBytes := compileAndDump(t, file, function, "-N")
+ dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
+ dumpLineNum := 0
+ var gotStmts []int
+ var gotStacks [][]int
+ for dump.Scan() {
+ line := dump.Text()
+ dumpLineNum++
+ matches := inlineLine.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ stmt, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err)
+ }
+ if testing.Verbose() {
+ fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line)
+ }
+ gotStmts = append(gotStmts, int(stmt))
+ } else if len(gotStmts) > 0 {
+ gotStacks = append(gotStacks, gotStmts)
+ gotStmts = nil
+ }
+ }
+ if len(gotStmts) > 0 {
+ gotStacks = append(gotStacks, gotStmts)
+ gotStmts = nil
+ }
+ sortInlineStacks(gotStacks)
+ sortInlineStacks(wantStacks)
+ if !reflect.DeepEqual(wantStacks, gotStacks) {
+ t.Errorf("wanted inlines %+v but got %+v\n%s", wantStacks, gotStacks, dumpBytes)
+ }
+
+}
+
+// testDebugLines compiles testdata/<file> with flags -N -l and -d=ssa/genssa/dump=<function>
+// then verifies that the statement-marked lines in that file are the same as those in wantStmts
+// These files must all be short because this is super-fragile.
+// "go build" is run in a temporary directory that is normally deleted, unless -test.v
+func testDebugLines(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) {
+ dumpBytes := compileAndDump(t, file, function, gcflags)
+ dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
+ var gotStmts []int
+ dumpLineNum := 0
+ for dump.Scan() {
+ line := dump.Text()
+ dumpLineNum++
+ matches := asmLine.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ stmt, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err)
+ }
+ if testing.Verbose() {
+ fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line)
+ }
+ gotStmts = append(gotStmts, int(stmt))
+ }
+ }
+ if ignoreRepeats { // remove repeats from gotStmts
+ newGotStmts := []int{gotStmts[0]}
+ for _, x := range gotStmts {
+ if x != newGotStmts[len(newGotStmts)-1] {
+ newGotStmts = append(newGotStmts, x)
+ }
+ }
+ if !reflect.DeepEqual(wantStmts, newGotStmts) {
+ t.Errorf("wanted stmts %v but got %v (with repeats still in: %v)", wantStmts, newGotStmts, gotStmts)
+ }
+
+ } else {
+ if !reflect.DeepEqual(wantStmts, gotStmts) {
+ t.Errorf("wanted stmts %v but got %v", wantStmts, gotStmts)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug_test.go b/src/cmd/compile/internal/ssa/debug_test.go
new file mode 100644
index 0000000..9ac414c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug_test.go
@@ -0,0 +1,1016 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var (
+ update = flag.Bool("u", false, "update test reference files")
+ verbose = flag.Bool("v", false, "print debugger interactions (very verbose)")
+ dryrun = flag.Bool("n", false, "just print the command line and first debugging bits")
+ useGdb = flag.Bool("g", false, "use Gdb instead of Delve (dlv), use gdb reference files")
+ force = flag.Bool("f", false, "force run under not linux-amd64; also do not use tempdir")
+ repeats = flag.Bool("r", false, "detect repeats in debug steps and don't ignore them")
+ inlines = flag.Bool("i", false, "do inlining for gdb (makes testing flaky till inlining info is correct)")
+)
+
+var (
+ hexRe = regexp.MustCompile("0x[a-zA-Z0-9]+")
+ numRe = regexp.MustCompile("-?\\d+")
+ stringRe = regexp.MustCompile("\"([^\\\"]|(\\.))*\"")
+ leadingDollarNumberRe = regexp.MustCompile("^[$]\\d+")
+ optOutGdbRe = regexp.MustCompile("[<]optimized out[>]")
+ numberColonRe = regexp.MustCompile("^ *\\d+:")
+)
+
+var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb no longer part of XCode
+var debugger = "dlv" // For naming files, etc.
+
+var gogcflags = os.Getenv("GO_GCFLAGS")
+
+// optimizedLibs usually means "not running in a noopt test builder".
+var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l"))
+
+// TestNexting go-builds a file, then uses a debugger (default delve, optionally gdb)
+// to next through the generated executable, recording each line landed at, and
+// then compares those lines with reference file(s).
+// Flag -u updates the reference file(s).
+// Flag -g changes the debugger to gdb (and uses gdb-specific reference files)
+// Flag -v is ever-so-slightly verbose.
+// Flag -n is for dry-run, and prints the shell and first debug commands.
+//
+// Because this test (combined with existing compiler deficiencies) is flaky,
+// for gdb-based testing by default inlining is disabled
+// (otherwise output depends on library internals)
+// and for both gdb and dlv by default repeated lines in the next stream are ignored
+// (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv).
+//
+// Also by default, any source code outside of .../testdata/ is not mentioned
+// in the debugging histories. This deals both with inlined library code once
+// the compiler is generating clean inline records, and also deals with
+// runtime code between return from main and process exit. This is hidden
+// so that those files (in the runtime/library) can change without affecting
+// this test.
+//
+// These choices can be reversed with -i (inlining on) and -r (repeats detected) which
+// will also cause their own failures against the expected outputs. Note that if the compiler
+// and debugger were behaving properly, the inlined code and repeated lines would not appear,
+// so the expected output is closer to what we hope to see, though it also encodes all our
+// current bugs.
+//
+// The file being tested may contain comments of the form
+// //DBG-TAG=(v1,v2,v3)
+// where DBG = {gdb,dlv} and TAG={dbg,opt}
+// each variable may optionally be followed by a / and one or more of S,A,N,O
+// to indicate normalization of Strings, (hex) addresses, and numbers.
+// "O" is an explicit indication that we expect it to be optimized out.
+// For example:
+//
+// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
+//
+// TODO: not implemented for Delve yet, but this is the plan
+//
+// After a compiler change that causes a difference in the debug behavior, check
+// to see if it is sensible or not, and if it is, update the reference files with
+// go test debug_test.go -args -u
+// (for Delve)
+// go test debug_test.go -args -u -d
+func TestNexting(t *testing.T) {
+ testenv.SkipFlaky(t, 37404)
+
+ skipReasons := "" // Many possible skip reasons, list all that apply
+ if testing.Short() {
+ skipReasons = "not run in short mode; "
+ }
+ testenv.MustHaveGoBuild(t)
+
+ if *useGdb && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") {
+ // Running gdb on OSX/darwin is very flaky.
+ // Sometimes it is called ggdb, depending on how it is installed.
+ // It also sometimes requires an admin password typed into a dialog box.
+ // Various architectures tend to differ slightly sometimes, and keeping them
+ // all in sync is a pain for people who don't have them all at hand,
+ // so limit testing to amd64 (for now)
+ skipReasons += "not run when testing gdb (-g) unless forced (-f) or linux-amd64; "
+ }
+
+ if !*useGdb && !*force && testenv.Builder() == "linux-386-longtest" {
+ // The latest version of Delve does support linux/386. However, the version currently
+ // installed in the linux-386-longtest builder does not. See golang.org/issue/39309.
+ skipReasons += "not run when testing delve on linux-386-longtest builder unless forced (-f); "
+ }
+
+ if *useGdb {
+ debugger = "gdb"
+ _, err := exec.LookPath(gdb)
+ if err != nil {
+ if runtime.GOOS != "darwin" {
+ skipReasons += "not run because gdb not on path; "
+ } else {
+ // On Darwin, MacPorts installs gdb as "ggdb".
+ _, err = exec.LookPath("ggdb")
+ if err != nil {
+ skipReasons += "not run because gdb (and also ggdb) request by -g option not on path; "
+ } else {
+ gdb = "ggdb"
+ }
+ }
+ }
+ } else { // Delve
+ debugger = "dlv"
+ _, err := exec.LookPath("dlv")
+ if err != nil {
+ skipReasons += "not run because dlv not on path; "
+ }
+ }
+
+ if skipReasons != "" {
+ t.Skip(skipReasons[:len(skipReasons)-2])
+ }
+
+ optFlags := "" // Whatever flags are needed to test debugging of optimized code.
+ dbgFlags := "-N -l"
+ if *useGdb && !*inlines {
+ // For gdb (default), disable inlining so that a compiler test does not depend on library code.
+ // TODO: Technically not necessary in 1.10 and later, but it causes a largish regression that needs investigation.
+ optFlags += " -l"
+ }
+
+ moreargs := []string{}
+ if *useGdb && (runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
+ // gdb and lldb on Darwin do not deal with compressed dwarf.
+ // also, Windows.
+ moreargs = append(moreargs, "-ldflags=-compressdwarf=false")
+ }
+
+ subTest(t, debugger+"-dbg", "hist", dbgFlags, moreargs...)
+ subTest(t, debugger+"-dbg", "scopes", dbgFlags, moreargs...)
+ subTest(t, debugger+"-dbg", "i22558", dbgFlags, moreargs...)
+
+ subTest(t, debugger+"-dbg-race", "i22600", dbgFlags, append(moreargs, "-race")...)
+
+ optSubTest(t, debugger+"-opt", "hist", optFlags, 1000, moreargs...)
+ optSubTest(t, debugger+"-opt", "scopes", optFlags, 1000, moreargs...)
+
+ // Was optSubtest, this test is observed flaky on Linux in Docker on (busy) macOS, probably because of timing
+ // glitches in this harness.
+ // TODO get rid of timing glitches in this harness.
+ skipSubTest(t, debugger+"-opt", "infloop", optFlags, 10, moreargs...)
+
+}
+
+// subTest creates a subtest that compiles basename.go with the specified gcflags and additional compiler arguments,
+// then runs the debugger on the resulting binary, with any comment-specified actions matching tag triggered.
+func subTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) {
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if t.Name() == "TestNexting/gdb-dbg-i22558" {
+ testenv.SkipFlaky(t, 31263)
+ }
+ testNexting(t, basename, tag, gcflags, 1000, moreargs...)
+ })
+}
+
+// skipSubTest is the same as subTest except that it skips the test if execution is not forced (-f)
+func skipSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) {
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if *force {
+ testNexting(t, basename, tag, gcflags, count, moreargs...)
+ } else {
+ t.Skip("skipping flaky test becaused not forced (-f)")
+ }
+ })
+}
+
+// optSubTest is the same as subTest except that it skips the test if the runtime and libraries
+// were not compiled with optimization turned on. (The skip may not be necessary with Go 1.10 and later)
+func optSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) {
+ // If optimized test is run with unoptimized libraries (compiled with -N -l), it is very likely to fail.
+ // This occurs in the noopt builders (for example).
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if *force || optimizedLibs {
+ testNexting(t, basename, tag, gcflags, count, moreargs...)
+ } else {
+ t.Skip("skipping for unoptimized stdlib/runtime")
+ }
+ })
+}
+
+func testNexting(t *testing.T, base, tag, gcflags string, count int, moreArgs ...string) {
+ // (1) In testdata, build sample.go into test-sample.<tag>
+ // (2) Run debugger gathering a history
+ // (3) Read expected history from testdata/sample.<tag>.nexts
+ // optionally, write out testdata/sample.<tag>.nexts
+
+ testbase := filepath.Join("testdata", base) + "." + tag
+ tmpbase := filepath.Join("testdata", "test-"+base+"."+tag)
+
+ // Use a temporary directory unless -f is specified
+ if !*force {
+ tmpdir := t.TempDir()
+ tmpbase = filepath.Join(tmpdir, "test-"+base+"."+tag)
+ if *verbose {
+ fmt.Printf("Tempdir is %s\n", tmpdir)
+ }
+ }
+ exe := tmpbase
+
+ runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags}
+ runGoArgs = append(runGoArgs, moreArgs...)
+ runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go"))
+
+ runGo(t, "", runGoArgs...)
+
+ nextlog := testbase + ".nexts"
+ tmplog := tmpbase + ".nexts"
+ var dbg dbgr
+ if *useGdb {
+ dbg = newGdb(t, tag, exe)
+ } else {
+ dbg = newDelve(t, tag, exe)
+ }
+ h1 := runDbgr(dbg, count)
+ if *dryrun {
+ fmt.Printf("# Tag for above is %s\n", dbg.tag())
+ return
+ }
+ if *update {
+ h1.write(nextlog)
+ } else {
+ h0 := &nextHist{}
+ h0.read(nextlog)
+ if !h0.equals(h1) {
+ // Be very noisy about exactly what's wrong to simplify debugging.
+ h1.write(tmplog)
+ cmd := testenv.Command(t, "diff", "-u", nextlog, tmplog)
+ line := asCommandLine("", cmd)
+ bytes, err := cmd.CombinedOutput()
+ if err != nil && len(bytes) == 0 {
+ t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err)
+ }
+ t.Fatalf("step/next histories differ, diff=\n%s", string(bytes))
+ }
+ }
+}
+
+type dbgr interface {
+ start()
+ stepnext(s string) bool // step or next, possible with parameter, gets line etc. returns true for success, false for unsure response
+ quit()
+ hist() *nextHist
+ tag() string
+}
+
+func runDbgr(dbg dbgr, maxNext int) *nextHist {
+ dbg.start()
+ if *dryrun {
+ return nil
+ }
+ for i := 0; i < maxNext; i++ {
+ if !dbg.stepnext("n") {
+ break
+ }
+ }
+ dbg.quit()
+ h := dbg.hist()
+ return h
+}
+
+func runGo(t *testing.T, dir string, args ...string) string {
+ var stdout, stderr strings.Builder
+ cmd := testenv.Command(t, testenv.GoToolPath(t), args...)
+ cmd.Dir = dir
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", cmd))
+ return ""
+ }
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
+ }
+
+ if s := stderr.String(); s != "" {
+ t.Fatalf("Stderr = %s\nWant empty", s)
+ }
+
+ return stdout.String()
+}
+
+// tstring provides two strings, o (stdout) and e (stderr)
+type tstring struct {
+ o string
+ e string
+}
+
+func (t tstring) String() string {
+ return t.o + t.e
+}
+
+type pos struct {
+ line uint32
+ file uint8 // Artifact of plans to implement differencing instead of calling out to diff.
+}
+
+type nextHist struct {
+ f2i map[string]uint8
+ fs []string
+ ps []pos
+ texts []string
+ vars [][]string
+}
+
+func (h *nextHist) write(filename string) {
+ file, err := os.Create(filename)
+ if err != nil {
+ panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err))
+ }
+ defer file.Close()
+ var lastfile uint8
+ for i, x := range h.texts {
+ p := h.ps[i]
+ if lastfile != p.file {
+ fmt.Fprintf(file, " %s\n", h.fs[p.file-1])
+ lastfile = p.file
+ }
+ fmt.Fprintf(file, "%d:%s\n", p.line, x)
+ // TODO, normalize between gdb and dlv into a common, comparable format.
+ for _, y := range h.vars[i] {
+ y = strings.TrimSpace(y)
+ fmt.Fprintf(file, "%s\n", y)
+ }
+ }
+ file.Close()
+}
+
+func (h *nextHist) read(filename string) {
+ h.f2i = make(map[string]uint8)
+ bytes, err := os.ReadFile(filename)
+ if err != nil {
+ panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err))
+ }
+ var lastfile string
+ lines := strings.Split(string(bytes), "\n")
+ for i, l := range lines {
+ if len(l) > 0 && l[0] != '#' {
+ if l[0] == ' ' {
+ // file -- first two characters expected to be " "
+ lastfile = strings.TrimSpace(l)
+ } else if numberColonRe.MatchString(l) {
+ // line number -- <number>:<line>
+ colonPos := strings.Index(l, ":")
+ if colonPos == -1 {
+ panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain '<number>:' but does not.\n", i+1, l, filename))
+ }
+ h.add(lastfile, l[0:colonPos], l[colonPos+1:])
+ } else {
+ h.addVar(l)
+ }
+ }
+ }
+}
+
+// add appends file (name), line (number) and text (string) to the history,
+// provided that the file+line combo does not repeat the previous position,
+// and provided that the file is within the testdata directory. The return
+// value indicates whether the append occurred.
+func (h *nextHist) add(file, line, text string) bool {
+ // Only record source code in testdata unless the inlines flag is set
+ if !*inlines && !strings.Contains(file, "/testdata/") {
+ return false
+ }
+ fi := h.f2i[file]
+ if fi == 0 {
+ h.fs = append(h.fs, file)
+ fi = uint8(len(h.fs))
+ h.f2i[file] = fi
+ }
+
+ line = strings.TrimSpace(line)
+ var li int
+ var err error
+ if line != "" {
+ li, err = strconv.Atoi(line)
+ if err != nil {
+ panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err))
+ }
+ }
+ l := len(h.ps)
+ p := pos{line: uint32(li), file: fi}
+
+ if l == 0 || *repeats || h.ps[l-1] != p {
+ h.ps = append(h.ps, p)
+ h.texts = append(h.texts, text)
+ h.vars = append(h.vars, []string{})
+ return true
+ }
+ return false
+}
+
+func (h *nextHist) addVar(text string) {
+ l := len(h.texts)
+ h.vars[l-1] = append(h.vars[l-1], text)
+}
+
+func invertMapSU8(hf2i map[string]uint8) map[uint8]string {
+ hi2f := make(map[uint8]string)
+ for hs, i := range hf2i {
+ hi2f[i] = hs
+ }
+ return hi2f
+}
+
+func (h *nextHist) equals(k *nextHist) bool {
+ if len(h.f2i) != len(k.f2i) {
+ return false
+ }
+ if len(h.ps) != len(k.ps) {
+ return false
+ }
+ hi2f := invertMapSU8(h.f2i)
+ ki2f := invertMapSU8(k.f2i)
+
+ for i, hs := range hi2f {
+ if hs != ki2f[i] {
+ return false
+ }
+ }
+
+ for i, x := range h.ps {
+ if k.ps[i] != x {
+ return false
+ }
+ }
+
+ for i, hv := range h.vars {
+ kv := k.vars[i]
+ if len(hv) != len(kv) {
+ return false
+ }
+ for j, hvt := range hv {
+ if hvt != kv[j] {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// canonFileName strips everything before "/src/" from a filename.
+// This makes file names portable across different machines,
+// home directories, and temporary directories.
+func canonFileName(f string) string {
+ i := strings.Index(f, "/src/")
+ if i != -1 {
+ f = f[i+1:]
+ }
+ return f
+}
+
+/* Delve */
+
+type delveState struct {
+ cmd *exec.Cmd
+ tagg string
+ *ioState
+ atLineRe *regexp.Regexp // "\n =>"
+ funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)"
+ line string
+ file string
+ function string
+}
+
+func newDelve(t testing.TB, tag, executable string, args ...string) dbgr {
+ cmd := testenv.Command(t, "dlv", "exec", executable)
+ cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
+ if len(args) > 0 {
+ cmd.Args = append(cmd.Args, "--")
+ cmd.Args = append(cmd.Args, args...)
+ }
+ s := &delveState{tagg: tag, cmd: cmd}
+ // HAHA Delve has control characters embedded to change the color of the => and the line number
+ // that would be '(\\x1b\\[[0-9;]+m)?' OR TERM=dumb
+ s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)")
+ s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n")
+ s.ioState = newIoState(s.cmd)
+ return s
+}
+
+func (s *delveState) tag() string {
+ return s.tagg
+}
+
+func (s *delveState) stepnext(ss string) bool {
+ x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ")
+ excerpts := s.atLineRe.FindStringSubmatch(x.o)
+ locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
+ excerpt := ""
+ if len(excerpts) > 1 {
+ excerpt = excerpts[1]
+ }
+ if len(locations) > 0 {
+ fn := canonFileName(locations[2])
+ if *verbose {
+ if s.file != fn {
+ fmt.Printf("%s\n", locations[2]) // don't canonocalize verbose logging
+ }
+ fmt.Printf(" %s\n", locations[3])
+ }
+ s.line = locations[3]
+ s.file = fn
+ s.function = locations[1]
+ s.ioState.history.add(s.file, s.line, excerpt)
+ // TODO: here is where variable processing will be added. See gdbState.stepnext as a guide.
+ // Adding this may require some amount of normalization so that logs are comparable.
+ return true
+ }
+ if *verbose {
+ fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e)
+ }
+ return false
+}
+
+func (s *delveState) start() {
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", s.cmd))
+ fmt.Printf("b main.test\n")
+ fmt.Printf("c\n")
+ return
+ }
+ err := s.cmd.Start()
+ if err != nil {
+ line := asCommandLine("", s.cmd)
+ panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
+ }
+ s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.")
+ s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] ")
+ s.stepnext("c")
+}
+
+func (s *delveState) quit() {
+ expect("", s.ioState.writeRead("q\n"))
+}
+
+/* Gdb */
+
+type gdbState struct {
+ cmd *exec.Cmd
+ tagg string
+ args []string
+ *ioState
+ atLineRe *regexp.Regexp
+ funcFileLinePCre *regexp.Regexp
+ line string
+ file string
+ function string
+}
+
+func newGdb(t testing.TB, tag, executable string, args ...string) dbgr {
+ // Turn off shell, necessary for Darwin apparently
+ cmd := testenv.Command(t, gdb, "-nx",
+ "-iex", fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
+ "-ex", "set startup-with-shell off", executable)
+ cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
+ s := &gdbState{tagg: tag, cmd: cmd, args: args}
+ s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)")
+ s.funcFileLinePCre = regexp.MustCompile(
+ "([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)")
+ // runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201
+ // function file line
+ // Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18
+ s.ioState = newIoState(s.cmd)
+ return s
+}
+
+func (s *gdbState) tag() string {
+ return s.tagg
+}
+
+func (s *gdbState) start() {
+ run := "run"
+ for _, a := range s.args {
+ run += " " + a // Can't quote args for gdb, it will pass them through including the quotes
+ }
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", s.cmd))
+ fmt.Printf("tbreak main.test\n")
+ fmt.Printf("%s\n", run)
+ return
+ }
+ err := s.cmd.Start()
+ if err != nil {
+ line := asCommandLine("", s.cmd)
+ panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
+ }
+ s.ioState.readSimpleExpecting("[(]gdb[)] ")
+ x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ")
+ expect("Breakpoint [0-9]+ at", x)
+ s.stepnext(run)
+}
+
+func (s *gdbState) stepnext(ss string) bool {
+ x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ")
+ excerpts := s.atLineRe.FindStringSubmatch(x.o)
+ locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
+ excerpt := ""
+ addedLine := false
+ if len(excerpts) == 0 && len(locations) == 0 {
+ if *verbose {
+ fmt.Printf("DID NOT MATCH %s", x.o)
+ }
+ return false
+ }
+ if len(excerpts) > 0 {
+ excerpt = excerpts[3]
+ }
+ if len(locations) > 0 {
+ fn := canonFileName(locations[2])
+ if *verbose {
+ if s.file != fn {
+ fmt.Printf("%s\n", locations[2])
+ }
+ fmt.Printf(" %s\n", locations[3])
+ }
+ s.line = locations[3]
+ s.file = fn
+ s.function = locations[1]
+ addedLine = s.ioState.history.add(s.file, s.line, excerpt)
+ }
+ if len(excerpts) > 0 {
+ if *verbose {
+ fmt.Printf(" %s\n", excerpts[2])
+ }
+ s.line = excerpts[2]
+ addedLine = s.ioState.history.add(s.file, s.line, excerpt)
+ }
+
+ if !addedLine {
+ // True if this was a repeat line
+ return true
+ }
+ // Look for //gdb-<tag>=(v1,v2,v3) and print v1, v2, v3
+ vars := varsToPrint(excerpt, "//"+s.tag()+"=(")
+ for _, v := range vars {
+ response := printVariableAndNormalize(v, func(v string) string {
+ return s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String()
+ })
+ s.ioState.history.addVar(response)
+ }
+ return true
+}
+
+// printVariableAndNormalize extracts any slash-indicated normalizing requests from the variable
+// name, then uses printer to get the value of the variable from the debugger, and then
+// normalizes and returns the response.
+func printVariableAndNormalize(v string, printer func(v string) string) string {
+ slashIndex := strings.Index(v, "/")
+ substitutions := ""
+ if slashIndex != -1 {
+ substitutions = v[slashIndex:]
+ v = v[:slashIndex]
+ }
+ response := printer(v)
+ // expect something like "$1 = ..."
+ dollar := strings.Index(response, "$")
+ cr := strings.Index(response, "\n")
+
+ if dollar == -1 { // some not entirely expected response, whine and carry on.
+ if cr == -1 {
+ response = strings.TrimSpace(response) // discards trailing newline
+ response = strings.Replace(response, "\n", "<BR>", -1)
+ return "$ Malformed response " + response
+ }
+ response = strings.TrimSpace(response[:cr])
+ return "$ " + response
+ }
+ if cr == -1 {
+ cr = len(response)
+ }
+ // Convert the leading $<number> into the variable name to enhance readability
+ // and reduce scope of diffs if an earlier print-variable is added.
+ response = strings.TrimSpace(response[dollar:cr])
+ response = leadingDollarNumberRe.ReplaceAllString(response, v)
+
+ // Normalize value as requested.
+ if strings.Contains(substitutions, "A") {
+ response = hexRe.ReplaceAllString(response, "<A>")
+ }
+ if strings.Contains(substitutions, "N") {
+ response = numRe.ReplaceAllString(response, "<N>")
+ }
+ if strings.Contains(substitutions, "S") {
+ response = stringRe.ReplaceAllString(response, "<S>")
+ }
+ if strings.Contains(substitutions, "O") {
+ response = optOutGdbRe.ReplaceAllString(response, "<Optimized out, as expected>")
+ }
+ return response
+}
+
+// varsToPrint takes a source code line, and extracts the comma-separated variable names
+// found between lookfor and the next ")".
+// For example, if line includes "... //gdb-foo=(v1,v2,v3)" and
+// lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"]
+func varsToPrint(line, lookfor string) []string {
+ var vars []string
+ if strings.Contains(line, lookfor) {
+ x := line[strings.Index(line, lookfor)+len(lookfor):]
+ end := strings.Index(x, ")")
+ if end == -1 {
+ panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line))
+ }
+ vars = strings.Split(x[:end], ",")
+ for i, y := range vars {
+ vars[i] = strings.TrimSpace(y)
+ }
+ }
+ return vars
+}
+
+func (s *gdbState) quit() {
+ response := s.ioState.writeRead("q\n")
+ if strings.Contains(response.o, "Quit anyway? (y or n)") {
+ defer func() {
+ if r := recover(); r != nil {
+ if s, ok := r.(string); !(ok && strings.Contains(s, "'Y\n'")) {
+ // Not the panic that was expected.
+ fmt.Printf("Expected a broken pipe panic, but saw the following panic instead")
+ panic(r)
+ }
+ }
+ }()
+ s.ioState.writeRead("Y\n")
+ }
+}
+
+type ioState struct {
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ stdin io.WriteCloser
+ outChan chan string
+ errChan chan string
+ last tstring // Output of previous step
+ history *nextHist
+}
+
+func newIoState(cmd *exec.Cmd) *ioState {
+ var err error
+ s := &ioState{}
+ s.history = &nextHist{}
+ s.history.f2i = make(map[string]uint8)
+ s.stdout, err = cmd.StdoutPipe()
+ line := asCommandLine("", cmd)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err))
+ }
+ s.stderr, err = cmd.StderrPipe()
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err))
+ }
+ s.stdin, err = cmd.StdinPipe()
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err))
+ }
+
+ s.outChan = make(chan string, 1)
+ s.errChan = make(chan string, 1)
+ go func() {
+ buffer := make([]byte, 4096)
+ for {
+ n, err := s.stdout.Read(buffer)
+ if n > 0 {
+ s.outChan <- string(buffer[0:n])
+ }
+ if err == io.EOF || n == 0 {
+ break
+ }
+ if err != nil {
+ fmt.Printf("Saw an error forwarding stdout")
+ break
+ }
+ }
+ close(s.outChan)
+ s.stdout.Close()
+ }()
+
+ go func() {
+ buffer := make([]byte, 4096)
+ for {
+ n, err := s.stderr.Read(buffer)
+ if n > 0 {
+ s.errChan <- string(buffer[0:n])
+ }
+ if err == io.EOF || n == 0 {
+ break
+ }
+ if err != nil {
+ fmt.Printf("Saw an error forwarding stderr")
+ break
+ }
+ }
+ close(s.errChan)
+ s.stderr.Close()
+ }()
+ return s
+}
+
+func (s *ioState) hist() *nextHist {
+ return s.history
+}
+
+// writeRead writes ss, then reads stdout and stderr, waiting 500ms to
+// be sure all the output has appeared.
+func (s *ioState) writeRead(ss string) tstring {
+ if *verbose {
+ fmt.Printf("=> %s", ss)
+ }
+ _, err := io.WriteString(s.stdin, ss)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
+ }
+ return s.readExpecting(-1, 500, "")
+}
+
+// writeReadExpect writes ss, then reads stdout and stderr until something
+// that matches expectRE appears. expectRE should not be ""
+func (s *ioState) writeReadExpect(ss, expectRE string) tstring {
+ if *verbose {
+ fmt.Printf("=> %s", ss)
+ }
+ if expectRE == "" {
+ panic("expectRE should not be empty; use .* instead")
+ }
+ _, err := io.WriteString(s.stdin, ss)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
+ }
+ return s.readSimpleExpecting(expectRE)
+}
+
+func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring {
+ timeout := time.Millisecond * time.Duration(millis)
+ interline := time.Millisecond * time.Duration(interlineTimeout)
+ s.last = tstring{}
+ var re *regexp.Regexp
+ if expectedRE != "" {
+ re = regexp.MustCompile(expectedRE)
+ }
+loop:
+ for {
+ var timer <-chan time.Time
+ if timeout > 0 {
+ timer = time.After(timeout)
+ }
+ select {
+ case x, ok := <-s.outChan:
+ if !ok {
+ s.outChan = nil
+ }
+ s.last.o += x
+ case x, ok := <-s.errChan:
+ if !ok {
+ s.errChan = nil
+ }
+ s.last.e += x
+ case <-timer:
+ break loop
+ }
+ if re != nil {
+ if re.MatchString(s.last.o) {
+ break
+ }
+ if re.MatchString(s.last.e) {
+ break
+ }
+ }
+ timeout = interline
+ }
+ if *verbose {
+ fmt.Printf("<= %s%s", s.last.o, s.last.e)
+ }
+ return s.last
+}
+
+func (s *ioState) readSimpleExpecting(expectedRE string) tstring {
+ s.last = tstring{}
+ var re *regexp.Regexp
+ if expectedRE != "" {
+ re = regexp.MustCompile(expectedRE)
+ }
+ for {
+ select {
+ case x, ok := <-s.outChan:
+ if !ok {
+ s.outChan = nil
+ }
+ s.last.o += x
+ case x, ok := <-s.errChan:
+ if !ok {
+ s.errChan = nil
+ }
+ s.last.e += x
+ }
+ if re != nil {
+ if re.MatchString(s.last.o) {
+ break
+ }
+ if re.MatchString(s.last.e) {
+ break
+ }
+ }
+ }
+ if *verbose {
+ fmt.Printf("<= %s%s", s.last.o, s.last.e)
+ }
+ return s.last
+}
+
+// replaceEnv returns a new environment derived from env
+// by removing any existing definition of ev and adding ev=evv.
+func replaceEnv(env []string, ev string, evv string) []string {
+ if env == nil {
+ env = os.Environ()
+ }
+ evplus := ev + "="
+ var found bool
+ for i, v := range env {
+ if strings.HasPrefix(v, evplus) {
+ found = true
+ env[i] = evplus + evv
+ }
+ }
+ if !found {
+ env = append(env, evplus+evv)
+ }
+ return env
+}
+
+// asCommandLine renders cmd as something that could be copy-and-pasted into a command line
+// If cwd is not empty and different from the command's directory, prepend an appropriate "cd"
+func asCommandLine(cwd string, cmd *exec.Cmd) string {
+ s := "("
+ if cmd.Dir != "" && cmd.Dir != cwd {
+ s += "cd" + escape(cmd.Dir) + ";"
+ }
+ for _, e := range cmd.Env {
+ if !strings.HasPrefix(e, "PATH=") &&
+ !strings.HasPrefix(e, "HOME=") &&
+ !strings.HasPrefix(e, "USER=") &&
+ !strings.HasPrefix(e, "SHELL=") {
+ s += escape(e)
+ }
+ }
+ for _, a := range cmd.Args {
+ s += escape(a)
+ }
+ s += " )"
+ return s
+}
+
+// escape inserts escapes appropriate for use in a shell command line
+func escape(s string) string {
+ s = strings.Replace(s, "\\", "\\\\", -1)
+ s = strings.Replace(s, "'", "\\'", -1)
+ // Conservative guess at characters that will force quoting
+ if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") {
+ s = " '" + s + "'"
+ } else {
+ s = " " + s
+ }
+ return s
+}
+
+func expect(want string, got tstring) {
+ if want != "" {
+ match, err := regexp.MatchString(want, got.o)
+ if err != nil {
+ panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err))
+ }
+ if match {
+ return
+ }
+ // Ignore error as we have already checked for it before
+ match, _ = regexp.MatchString(want, got.e)
+ if match {
+ return
+ }
+ fmt.Printf("EXPECTED '%s'\n GOT O='%s'\nAND E='%s'\n", want, got.o, got.e)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
new file mode 100644
index 0000000..2293fc0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -0,0 +1,479 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "sort"
+)
+
+// decompose converts phi ops on compound builtin types into phi
+// ops on simple types, then invokes rewrite rules to decompose
+// other ops on those types.
+func decomposeBuiltIn(f *Func) {
+ // Decompose phis
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ decomposeBuiltInPhi(v)
+ }
+ }
+
+ // Decompose other values
+ // Note: Leave dead values because we need to keep the original
+ // values around so the name component resolution below can still work.
+ applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues)
+ if f.Config.RegSize == 4 {
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues)
+ }
+
+ // Split up named values into their components.
+ // accumulate old names for aggregates (that are decomposed) in toDelete for efficient bulk deletion,
+ // accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for
+ // builtin types with leaf components, and thus there is no need to reprocess the newly create LocalSlots.
+ var toDelete []namedVal
+ var newNames []*LocalSlot
+ for i, name := range f.Names {
+ t := name.Type
+ switch {
+ case t.IsInteger() && t.Size() > f.Config.RegSize:
+ hiName, loName := f.SplitInt64(name)
+ newNames = maybeAppend2(f, newNames, hiName, loName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpInt64Make {
+ continue
+ }
+ f.NamedValues[*hiName] = append(f.NamedValues[*hiName], v.Args[0])
+ f.NamedValues[*loName] = append(f.NamedValues[*loName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsComplex():
+ rName, iName := f.SplitComplex(name)
+ newNames = maybeAppend2(f, newNames, rName, iName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpComplexMake {
+ continue
+ }
+ f.NamedValues[*rName] = append(f.NamedValues[*rName], v.Args[0])
+ f.NamedValues[*iName] = append(f.NamedValues[*iName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsString():
+ ptrName, lenName := f.SplitString(name)
+ newNames = maybeAppend2(f, newNames, ptrName, lenName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpStringMake {
+ continue
+ }
+ f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0])
+ f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsSlice():
+ ptrName, lenName, capName := f.SplitSlice(name)
+ newNames = maybeAppend2(f, newNames, ptrName, lenName)
+ newNames = maybeAppend(f, newNames, capName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpSliceMake {
+ continue
+ }
+ f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0])
+ f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1])
+ f.NamedValues[*capName] = append(f.NamedValues[*capName], v.Args[2])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsInterface():
+ typeName, dataName := f.SplitInterface(name)
+ newNames = maybeAppend2(f, newNames, typeName, dataName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpIMake {
+ continue
+ }
+ f.NamedValues[*typeName] = append(f.NamedValues[*typeName], v.Args[0])
+ f.NamedValues[*dataName] = append(f.NamedValues[*dataName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsFloat():
+ // floats are never decomposed, even ones bigger than RegSize
+ case t.Size() > f.Config.RegSize:
+ f.Fatalf("undecomposed named type %s %v", name, t)
+ }
+ }
+
+ deleteNamedVals(f, toDelete)
+ f.Names = append(f.Names, newNames...)
+}
+
+func maybeAppend(f *Func, ss []*LocalSlot, s *LocalSlot) []*LocalSlot {
+ if _, ok := f.NamedValues[*s]; !ok {
+ f.NamedValues[*s] = nil
+ return append(ss, s)
+ }
+ return ss
+}
+
+func maybeAppend2(f *Func, ss []*LocalSlot, s1, s2 *LocalSlot) []*LocalSlot {
+ return maybeAppend(f, maybeAppend(f, ss, s1), s2)
+}
+
+func decomposeBuiltInPhi(v *Value) {
+ switch {
+ case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize:
+ decomposeInt64Phi(v)
+ case v.Type.IsComplex():
+ decomposeComplexPhi(v)
+ case v.Type.IsString():
+ decomposeStringPhi(v)
+ case v.Type.IsSlice():
+ decomposeSlicePhi(v)
+ case v.Type.IsInterface():
+ decomposeInterfacePhi(v)
+ case v.Type.IsFloat():
+ // floats are never decomposed, even ones bigger than RegSize
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ v.Fatalf("%v undecomposed type %v", v, v.Type)
+ }
+}
+
+func decomposeStringPhi(v *Value) {
+ types := &v.Block.Func.Config.Types
+ ptrType := types.BytePtr
+ lenType := types.Int
+
+ ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ for _, a := range v.Args {
+ ptr.AddArg(a.Block.NewValue1(v.Pos, OpStringPtr, ptrType, a))
+ len.AddArg(a.Block.NewValue1(v.Pos, OpStringLen, lenType, a))
+ }
+ v.reset(OpStringMake)
+ v.AddArg(ptr)
+ v.AddArg(len)
+}
+
+func decomposeSlicePhi(v *Value) {
+ types := &v.Block.Func.Config.Types
+ ptrType := v.Type.Elem().PtrTo()
+ lenType := types.Int
+
+ ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ cap := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ for _, a := range v.Args {
+ ptr.AddArg(a.Block.NewValue1(v.Pos, OpSlicePtr, ptrType, a))
+ len.AddArg(a.Block.NewValue1(v.Pos, OpSliceLen, lenType, a))
+ cap.AddArg(a.Block.NewValue1(v.Pos, OpSliceCap, lenType, a))
+ }
+ v.reset(OpSliceMake)
+ v.AddArg(ptr)
+ v.AddArg(len)
+ v.AddArg(cap)
+}
+
+func decomposeInt64Phi(v *Value) {
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
+ if v.Type.IsSigned() {
+ partType = cfgtypes.Int32
+ } else {
+ partType = cfgtypes.UInt32
+ }
+
+ hi := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ lo := v.Block.NewValue0(v.Pos, OpPhi, cfgtypes.UInt32)
+ for _, a := range v.Args {
+ hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a))
+ lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, cfgtypes.UInt32, a))
+ }
+ v.reset(OpInt64Make)
+ v.AddArg(hi)
+ v.AddArg(lo)
+}
+
+func decomposeComplexPhi(v *Value) {
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
+ switch z := v.Type.Size(); z {
+ case 8:
+ partType = cfgtypes.Float32
+ case 16:
+ partType = cfgtypes.Float64
+ default:
+ v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
+ }
+
+ real := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ imag := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ for _, a := range v.Args {
+ real.AddArg(a.Block.NewValue1(v.Pos, OpComplexReal, partType, a))
+ imag.AddArg(a.Block.NewValue1(v.Pos, OpComplexImag, partType, a))
+ }
+ v.reset(OpComplexMake)
+ v.AddArg(real)
+ v.AddArg(imag)
+}
+
+func decomposeInterfacePhi(v *Value) {
+ uintptrType := v.Block.Func.Config.Types.Uintptr
+ ptrType := v.Block.Func.Config.Types.BytePtr
+
+ itab := v.Block.NewValue0(v.Pos, OpPhi, uintptrType)
+ data := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ for _, a := range v.Args {
+ itab.AddArg(a.Block.NewValue1(v.Pos, OpITab, uintptrType, a))
+ data.AddArg(a.Block.NewValue1(v.Pos, OpIData, ptrType, a))
+ }
+ v.reset(OpIMake)
+ v.AddArg(itab)
+ v.AddArg(data)
+}
+
+func decomposeUser(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ decomposeUserPhi(v)
+ }
+ }
+ // Split up named values into their components.
+ i := 0
+ var newNames []*LocalSlot
+ for _, name := range f.Names {
+ t := name.Type
+ switch {
+ case t.IsStruct():
+ newNames = decomposeUserStructInto(f, name, newNames)
+ case t.IsArray():
+ newNames = decomposeUserArrayInto(f, name, newNames)
+ default:
+ f.Names[i] = name
+ i++
+ }
+ }
+ f.Names = f.Names[:i]
+ f.Names = append(f.Names, newNames...)
+}
+
+// decomposeUserArrayInto creates names for the element(s) of arrays referenced
+// by name where possible, and appends those new names to slots, which is then
+// returned.
+func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot {
+ t := name.Type
+ if t.NumElem() == 0 {
+ // TODO(khr): Not sure what to do here. Probably nothing.
+ // Names for empty arrays aren't important.
+ return slots
+ }
+ if t.NumElem() != 1 {
+ // shouldn't get here due to CanSSA
+ f.Fatalf("array not of size 1")
+ }
+ elemName := f.SplitArray(name)
+ var keep []*Value
+ for _, v := range f.NamedValues[*name] {
+ if v.Op != OpArrayMake1 {
+ keep = append(keep, v)
+ continue
+ }
+ f.NamedValues[*elemName] = append(f.NamedValues[*elemName], v.Args[0])
+ }
+ if len(keep) == 0 {
+ // delete the name for the array as a whole
+ delete(f.NamedValues, *name)
+ } else {
+ f.NamedValues[*name] = keep
+ }
+
+ if t.Elem().IsArray() {
+ return decomposeUserArrayInto(f, elemName, slots)
+ } else if t.Elem().IsStruct() {
+ return decomposeUserStructInto(f, elemName, slots)
+ }
+
+ return append(slots, elemName)
+}
+
+// decomposeUserStructInto creates names for the fields(s) of structs referenced
+// by name where possible, and appends those new names to slots, which is then
+// returned.
+func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot {
+ fnames := []*LocalSlot{} // slots for struct in name
+ t := name.Type
+ n := t.NumFields()
+
+ for i := 0; i < n; i++ {
+ fs := f.SplitStruct(name, i)
+ fnames = append(fnames, fs)
+ // arrays and structs will be decomposed further, so
+ // there's no need to record a name
+ if !fs.Type.IsArray() && !fs.Type.IsStruct() {
+ slots = maybeAppend(f, slots, fs)
+ }
+ }
+
+ makeOp := StructMakeOp(n)
+ var keep []*Value
+ // create named values for each struct field
+ for _, v := range f.NamedValues[*name] {
+ if v.Op != makeOp {
+ keep = append(keep, v)
+ continue
+ }
+ for i := 0; i < len(fnames); i++ {
+ f.NamedValues[*fnames[i]] = append(f.NamedValues[*fnames[i]], v.Args[i])
+ }
+ }
+ if len(keep) == 0 {
+ // delete the name for the struct as a whole
+ delete(f.NamedValues, *name)
+ } else {
+ f.NamedValues[*name] = keep
+ }
+
+ // now that this f.NamedValues contains values for the struct
+ // fields, recurse into nested structs
+ for i := 0; i < n; i++ {
+ if name.Type.FieldType(i).IsStruct() {
+ slots = decomposeUserStructInto(f, fnames[i], slots)
+ delete(f.NamedValues, *fnames[i])
+ } else if name.Type.FieldType(i).IsArray() {
+ slots = decomposeUserArrayInto(f, fnames[i], slots)
+ delete(f.NamedValues, *fnames[i])
+ }
+ }
+ return slots
+}
+func decomposeUserPhi(v *Value) {
+ switch {
+ case v.Type.IsStruct():
+ decomposeStructPhi(v)
+ case v.Type.IsArray():
+ decomposeArrayPhi(v)
+ }
+}
+
+// decomposeStructPhi replaces phi-of-struct with structmake(phi-for-each-field),
+// and then recursively decomposes the phis for each field.
+func decomposeStructPhi(v *Value) {
+ t := v.Type
+ n := t.NumFields()
+ var fields [MaxStruct]*Value
+ for i := 0; i < n; i++ {
+ fields[i] = v.Block.NewValue0(v.Pos, OpPhi, t.FieldType(i))
+ }
+ for _, a := range v.Args {
+ for i := 0; i < n; i++ {
+ fields[i].AddArg(a.Block.NewValue1I(v.Pos, OpStructSelect, t.FieldType(i), int64(i), a))
+ }
+ }
+ v.reset(StructMakeOp(n))
+ v.AddArgs(fields[:n]...)
+
+ // Recursively decompose phis for each field.
+ for _, f := range fields[:n] {
+ decomposeUserPhi(f)
+ }
+}
+
+// decomposeArrayPhi replaces phi-of-array with arraymake(phi-of-array-element),
+// and then recursively decomposes the element phi.
+func decomposeArrayPhi(v *Value) {
+ t := v.Type
+ if t.NumElem() == 0 {
+ v.reset(OpArrayMake0)
+ return
+ }
+ if t.NumElem() != 1 {
+ v.Fatalf("SSAable array must have no more than 1 element")
+ }
+ elem := v.Block.NewValue0(v.Pos, OpPhi, t.Elem())
+ for _, a := range v.Args {
+ elem.AddArg(a.Block.NewValue1I(v.Pos, OpArraySelect, t.Elem(), 0, a))
+ }
+ v.reset(OpArrayMake1)
+ v.AddArg(elem)
+
+ // Recursively decompose elem phi.
+ decomposeUserPhi(elem)
+}
+
+// MaxStruct is the maximum number of fields a struct
+// can have and still be SSAable.
+const MaxStruct = 4
+
+// StructMakeOp returns the opcode to construct a struct with the
+// given number of fields.
+func StructMakeOp(nf int) Op {
+ switch nf {
+ case 0:
+ return OpStructMake0
+ case 1:
+ return OpStructMake1
+ case 2:
+ return OpStructMake2
+ case 3:
+ return OpStructMake3
+ case 4:
+ return OpStructMake4
+ }
+ panic("too many fields in an SSAable struct")
+}
+
+type namedVal struct {
+ locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key
+}
+
+// deleteNamedVals removes particular values with debugger names from f's naming data structures,
+// removes all values with OpInvalid, and re-sorts the list of Names.
+func deleteNamedVals(f *Func, toDelete []namedVal) {
+ // Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
+ sort.Slice(toDelete, func(i, j int) bool {
+ if toDelete[i].locIndex != toDelete[j].locIndex {
+ return toDelete[i].locIndex > toDelete[j].locIndex
+ }
+ return toDelete[i].valIndex > toDelete[j].valIndex
+
+ })
+
+ // Get rid of obsolete names
+ for _, d := range toDelete {
+ loc := f.Names[d.locIndex]
+ vals := f.NamedValues[*loc]
+ l := len(vals) - 1
+ if l > 0 {
+ vals[d.valIndex] = vals[l]
+ }
+ vals[l] = nil
+ f.NamedValues[*loc] = vals[:l]
+ }
+ // Delete locations with no values attached.
+ end := len(f.Names)
+ for i := len(f.Names) - 1; i >= 0; i-- {
+ loc := f.Names[i]
+ vals := f.NamedValues[*loc]
+ last := len(vals)
+ for j := len(vals) - 1; j >= 0; j-- {
+ if vals[j].Op == OpInvalid {
+ last--
+ vals[j] = vals[last]
+ vals[last] = nil
+ }
+ }
+ if last < len(vals) {
+ f.NamedValues[*loc] = vals[:last]
+ }
+ if len(vals) == 0 {
+ delete(f.NamedValues, *loc)
+ end--
+ f.Names[i] = f.Names[end]
+ f.Names[end] = nil
+ }
+ }
+ f.Names = f.Names[:end]
+}
diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go
new file mode 100644
index 0000000..39ba4d1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/dom.go
@@ -0,0 +1,275 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file contains code to compute the dominator tree
+// of a control-flow graph.
+
+// postorder computes a postorder traversal ordering for the
+// basic blocks in f. Unreachable blocks will not appear.
+func postorder(f *Func) []*Block {
+ return postorderWithNumbering(f, nil)
+}
+
+type blockAndIndex struct {
+ b *Block
+ index int // index is the number of successor edges of b that have already been explored.
+}
+
+// postorderWithNumbering provides a DFS postordering.
+// This seems to make loop-finding more robust.
+func postorderWithNumbering(f *Func, ponums []int32) []*Block {
+ seen := f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(seen)
+
+ // result ordering
+ order := make([]*Block, 0, len(f.Blocks))
+
+ // stack of blocks and next child to visit
+ // A constant bound allows this to be stack-allocated. 32 is
+ // enough to cover almost every postorderWithNumbering call.
+ s := make([]blockAndIndex, 0, 32)
+ s = append(s, blockAndIndex{b: f.Entry})
+ seen[f.Entry.ID] = true
+ for len(s) > 0 {
+ tos := len(s) - 1
+ x := s[tos]
+ b := x.b
+ if i := x.index; i < len(b.Succs) {
+ s[tos].index++
+ bb := b.Succs[i].Block()
+ if !seen[bb.ID] {
+ seen[bb.ID] = true
+ s = append(s, blockAndIndex{b: bb})
+ }
+ continue
+ }
+ s = s[:tos]
+ if ponums != nil {
+ ponums[b.ID] = int32(len(order))
+ }
+ order = append(order, b)
+ }
+ return order
+}
+
+type linkedBlocks func(*Block) []Edge
+
+func dominators(f *Func) []*Block {
+ preds := func(b *Block) []Edge { return b.Preds }
+ succs := func(b *Block) []Edge { return b.Succs }
+
+ //TODO: benchmark and try to find criteria for swapping between
+ // dominatorsSimple and dominatorsLT
+ return f.dominatorsLTOrig(f.Entry, preds, succs)
+}
+
+// dominatorsLTOrig runs Lengauer-Tarjan to compute a dominator tree starting at
+// entry and using predFn/succFn to find predecessors/successors to allow
+// computing both dominator and post-dominator trees.
+func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block {
+ // Adapted directly from the original TOPLAS article's "simple" algorithm
+
+ maxBlockID := entry.Func.NumBlocks()
+ scratch := f.Cache.allocIDSlice(7 * maxBlockID)
+ defer f.Cache.freeIDSlice(scratch)
+ semi := scratch[0*maxBlockID : 1*maxBlockID]
+ vertex := scratch[1*maxBlockID : 2*maxBlockID]
+ label := scratch[2*maxBlockID : 3*maxBlockID]
+ parent := scratch[3*maxBlockID : 4*maxBlockID]
+ ancestor := scratch[4*maxBlockID : 5*maxBlockID]
+ bucketHead := scratch[5*maxBlockID : 6*maxBlockID]
+ bucketLink := scratch[6*maxBlockID : 7*maxBlockID]
+
+ // This version uses integers for most of the computation,
+ // to make the work arrays smaller and pointer-free.
+ // fromID translates from ID to *Block where that is needed.
+ fromID := f.Cache.allocBlockSlice(maxBlockID)
+ defer f.Cache.freeBlockSlice(fromID)
+ for _, v := range f.Blocks {
+ fromID[v.ID] = v
+ }
+ idom := make([]*Block, maxBlockID)
+
+ // Step 1. Carry out a depth first search of the problem graph. Number
+ // the vertices from 1 to n as they are reached during the search.
+ n := f.dfsOrig(entry, succFn, semi, vertex, label, parent)
+
+ for i := n; i >= 2; i-- {
+ w := vertex[i]
+
+ // step2 in TOPLAS paper
+ for _, e := range predFn(fromID[w]) {
+ v := e.b
+ if semi[v.ID] == 0 {
+ // skip unreachable predecessor
+ // not in original, but we're using existing pred instead of building one.
+ continue
+ }
+ u := evalOrig(v.ID, ancestor, semi, label)
+ if semi[u] < semi[w] {
+ semi[w] = semi[u]
+ }
+ }
+
+ // add w to bucket[vertex[semi[w]]]
+ // implement bucket as a linked list implemented
+ // in a pair of arrays.
+ vsw := vertex[semi[w]]
+ bucketLink[w] = bucketHead[vsw]
+ bucketHead[vsw] = w
+
+ linkOrig(parent[w], w, ancestor)
+
+ // step3 in TOPLAS paper
+ for v := bucketHead[parent[w]]; v != 0; v = bucketLink[v] {
+ u := evalOrig(v, ancestor, semi, label)
+ if semi[u] < semi[v] {
+ idom[v] = fromID[u]
+ } else {
+ idom[v] = fromID[parent[w]]
+ }
+ }
+ }
+ // step 4 in toplas paper
+ for i := ID(2); i <= n; i++ {
+ w := vertex[i]
+ if idom[w].ID != vertex[semi[w]] {
+ idom[w] = idom[idom[w].ID]
+ }
+ }
+
+ return idom
+}
+
+// dfsOrig performs a depth first search over the blocks starting at entry block
+// (in arbitrary order). This is a de-recursed version of dfs from the
+// original Tarjan-Lengauer TOPLAS article. It's important to return the
+// same values for parent as the original algorithm.
+func (f *Func) dfsOrig(entry *Block, succFn linkedBlocks, semi, vertex, label, parent []ID) ID {
+ n := ID(0)
+ s := make([]*Block, 0, 256)
+ s = append(s, entry)
+
+ for len(s) > 0 {
+ v := s[len(s)-1]
+ s = s[:len(s)-1]
+ // recursing on v
+
+ if semi[v.ID] != 0 {
+ continue // already visited
+ }
+ n++
+ semi[v.ID] = n
+ vertex[n] = v.ID
+ label[v.ID] = v.ID
+ // ancestor[v] already zero
+ for _, e := range succFn(v) {
+ w := e.b
+ // if it has a dfnum, we've already visited it
+ if semi[w.ID] == 0 {
+ // yes, w can be pushed multiple times.
+ s = append(s, w)
+ parent[w.ID] = v.ID // keep overwriting this till it is visited.
+ }
+ }
+ }
+ return n
+}
+
+// compressOrig is the "simple" compress function from LT paper.
+func compressOrig(v ID, ancestor, semi, label []ID) {
+ if ancestor[ancestor[v]] != 0 {
+ compressOrig(ancestor[v], ancestor, semi, label)
+ if semi[label[ancestor[v]]] < semi[label[v]] {
+ label[v] = label[ancestor[v]]
+ }
+ ancestor[v] = ancestor[ancestor[v]]
+ }
+}
+
+// evalOrig is the "simple" eval function from LT paper.
+func evalOrig(v ID, ancestor, semi, label []ID) ID {
+ if ancestor[v] == 0 {
+ return v
+ }
+ compressOrig(v, ancestor, semi, label)
+ return label[v]
+}
+
+func linkOrig(v, w ID, ancestor []ID) {
+ ancestor[w] = v
+}
+
+// dominatorsSimple computes the dominator tree for f. It returns a slice
+// which maps block ID to the immediate dominator of that block.
+// Unreachable blocks map to nil. The entry block maps to nil.
+func dominatorsSimple(f *Func) []*Block {
+ // A simple algorithm for now
+ // Cooper, Harvey, Kennedy
+ idom := make([]*Block, f.NumBlocks())
+
+ // Compute postorder walk
+ post := f.postorder()
+
+ // Make map from block id to order index (for intersect call)
+ postnum := f.Cache.allocIntSlice(f.NumBlocks())
+ defer f.Cache.freeIntSlice(postnum)
+ for i, b := range post {
+ postnum[b.ID] = i
+ }
+
+ // Make the entry block a self-loop
+ idom[f.Entry.ID] = f.Entry
+ if postnum[f.Entry.ID] != len(post)-1 {
+ f.Fatalf("entry block %v not last in postorder", f.Entry)
+ }
+
+ // Compute relaxation of idom entries
+ for {
+ changed := false
+
+ for i := len(post) - 2; i >= 0; i-- {
+ b := post[i]
+ var d *Block
+ for _, e := range b.Preds {
+ p := e.b
+ if idom[p.ID] == nil {
+ continue
+ }
+ if d == nil {
+ d = p
+ continue
+ }
+ d = intersect(d, p, postnum, idom)
+ }
+ if d != idom[b.ID] {
+ idom[b.ID] = d
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Set idom of entry block to nil instead of itself.
+ idom[f.Entry.ID] = nil
+ return idom
+}
+
+// intersect finds the closest dominator of both b and c.
+// It requires a postorder numbering of all the blocks.
+func intersect(b, c *Block, postnum []int, idom []*Block) *Block {
+ // TODO: This loop is O(n^2). It used to be used in nilcheck,
+ // see BenchmarkNilCheckDeep*.
+ for b != c {
+ if postnum[b.ID] < postnum[c.ID] {
+ b = idom[b.ID]
+ } else {
+ c = idom[c.ID]
+ }
+ }
+ return b
+}
diff --git a/src/cmd/compile/internal/ssa/dom_test.go b/src/cmd/compile/internal/ssa/dom_test.go
new file mode 100644
index 0000000..fa51718
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/dom_test.go
@@ -0,0 +1,608 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func BenchmarkDominatorsLinear(b *testing.B) { benchmarkDominators(b, 10000, genLinear) }
+func BenchmarkDominatorsFwdBack(b *testing.B) { benchmarkDominators(b, 10000, genFwdBack) }
+func BenchmarkDominatorsManyPred(b *testing.B) { benchmarkDominators(b, 10000, genManyPred) }
+func BenchmarkDominatorsMaxPred(b *testing.B) { benchmarkDominators(b, 10000, genMaxPred) }
+func BenchmarkDominatorsMaxPredVal(b *testing.B) { benchmarkDominators(b, 10000, genMaxPredValue) }
+
+type blockGen func(size int) []bloc
+
+// genLinear creates an array of blocks that succeed one another
+// b_n -> [b_n+1].
+func genLinear(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genLinear creates an array of blocks that alternate between
+// b_n -> [b_n+1], b_n -> [b_n+1, b_n-1] , b_n -> [b_n+1, b_n+2]
+func genFwdBack(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ switch i % 2 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i+2))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i-1))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genManyPred creates an array of blocks where 1/3rd have a successor of the
+// first block, 1/3rd the last block, and the remaining third are plain.
+func genManyPred(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ // We want predecessor lists to be long, so 2/3rds of the blocks have a
+ // successor of the first or last block.
+ for i := 0; i < size; i++ {
+ switch i % 3 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(i+1))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), blockn(0))))
+ case 2:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), blockn(size))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genMaxPred maximizes the size of the 'exit' predecessor list.
+func genMaxPred(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), "exit")))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genMaxPredValue is identical to genMaxPred but contains an
+// additional value.
+func genMaxPredValue(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), "exit")))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// sink for benchmark
+var domBenchRes []*Block
+
+func benchmarkDominators(b *testing.B, size int, bg blockGen) {
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(size)...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ domBenchRes = dominators(fun.f)
+ }
+}
+
+type domFunc func(f *Func) []*Block
+
+// verifyDominators verifies that the dominators of fut (function under test)
+// as determined by domFn, match the map node->dominator
+func verifyDominators(t *testing.T, fut fun, domFn domFunc, doms map[string]string) {
+ blockNames := map[*Block]string{}
+ for n, b := range fut.blocks {
+ blockNames[b] = n
+ }
+
+ calcDom := domFn(fut.f)
+
+ for n, d := range doms {
+ nblk, ok := fut.blocks[n]
+ if !ok {
+ t.Errorf("invalid block name %s", n)
+ }
+ dblk, ok := fut.blocks[d]
+ if !ok {
+ t.Errorf("invalid block name %s", d)
+ }
+
+ domNode := calcDom[nblk.ID]
+ switch {
+ case calcDom[nblk.ID] == dblk:
+ calcDom[nblk.ID] = nil
+ continue
+ case calcDom[nblk.ID] != dblk:
+ t.Errorf("expected %s as dominator of %s, found %s", d, n, blockNames[domNode])
+ default:
+ t.Fatal("unexpected dominator condition")
+ }
+ }
+
+ for id, d := range calcDom {
+ // If nil, we've already verified it
+ if d == nil {
+ continue
+ }
+ for _, b := range fut.blocks {
+ if int(b.ID) == id {
+ t.Errorf("unexpected dominator of %s for %s", blockNames[d], blockNames[b])
+ }
+ }
+ }
+
+}
+
+func TestDominatorsSingleBlock(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem")))
+
+ doms := map[string]string{}
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+func TestDominatorsSimple(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("a")),
+ Bloc("a",
+ Goto("b")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "a",
+ "c": "b",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+func TestDominatorsMultPredFwd(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "a", "c")),
+ Bloc("a",
+ If("p", "b", "c")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "a",
+ "c": "entry",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsDeadCode(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 0, nil),
+ If("p", "b3", "b5")),
+ Bloc("b2", Exit("mem")),
+ Bloc("b3", Goto("b2")),
+ Bloc("b4", Goto("b2")),
+ Bloc("b5", Goto("b2")))
+
+ doms := map[string]string{
+ "b2": "entry",
+ "b3": "entry",
+ "b5": "entry",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsMultPredRev(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Goto("first")),
+ Bloc("first",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("a")),
+ Bloc("a",
+ If("p", "b", "first")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ If("p", "exit", "b")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "first": "entry",
+ "a": "first",
+ "b": "a",
+ "c": "b",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsMultPred(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "a", "c")),
+ Bloc("a",
+ If("p", "b", "c")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ If("p", "b", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "entry",
+ "c": "entry",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestInfiniteLoop(t *testing.T) {
+ c := testConfig(t)
+ // note lack of an exit block
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("a")),
+ Bloc("a",
+ Goto("b")),
+ Bloc("b",
+ Goto("a")))
+
+ CheckFunc(fun.f)
+ doms := map[string]string{"a": "entry",
+ "b": "a"}
+ verifyDominators(t, fun, dominators, doms)
+}
+
+func TestDomTricky(t *testing.T) {
+ doms := map[string]string{
+ "4": "1",
+ "2": "4",
+ "5": "4",
+ "11": "4",
+ "15": "4", // the incorrect answer is "5"
+ "10": "15",
+ "19": "15",
+ }
+
+ if4 := [2]string{"2", "5"}
+ if5 := [2]string{"15", "11"}
+ if15 := [2]string{"19", "10"}
+
+ for i := 0; i < 8; i++ {
+ a := 1 & i
+ b := 1 & i >> 1
+ c := 1 & i >> 2
+
+ cfg := testConfig(t)
+ fun := cfg.Fun("1",
+ Bloc("1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("4")),
+ Bloc("2",
+ Goto("11")),
+ Bloc("4",
+ If("p", if4[a], if4[1-a])), // 2, 5
+ Bloc("5",
+ If("p", if5[b], if5[1-b])), //15, 11
+ Bloc("10",
+ Exit("mem")),
+ Bloc("11",
+ Goto("15")),
+ Bloc("15",
+ If("p", if15[c], if15[1-c])), //19, 10
+ Bloc("19",
+ Goto("10")))
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+ }
+}
+
+// generateDominatorMap uses dominatorsSimple to obtain a
+// reference dominator tree for testing faster algorithms.
+func generateDominatorMap(fut fun) map[string]string {
+ blockNames := map[*Block]string{}
+ for n, b := range fut.blocks {
+ blockNames[b] = n
+ }
+ referenceDom := dominatorsSimple(fut.f)
+ doms := make(map[string]string)
+ for _, b := range fut.f.Blocks {
+ if d := referenceDom[b.ID]; d != nil {
+ doms[blockNames[b]] = blockNames[d]
+ }
+ }
+ return doms
+}
+
+func TestDominatorsPostTrickyA(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyB(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyC(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyD(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyE(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyF(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyG(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyH(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b15", "b14")
+}
+
+func testDominatorsPostTricky(t *testing.T, b7then, b7else, b12then, b12else, b13then, b13else string) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "b3", "b2")),
+ Bloc("b3",
+ If("p", "b5", "b6")),
+ Bloc("b5",
+ Goto("b7")),
+ Bloc("b7",
+ If("p", b7then, b7else)),
+ Bloc("b8",
+ Goto("b13")),
+ Bloc("b13",
+ If("p", b13then, b13else)),
+ Bloc("b14",
+ Goto("b10")),
+ Bloc("b15",
+ Goto("b16")),
+ Bloc("b16",
+ Goto("b9")),
+ Bloc("b9",
+ Goto("b7")),
+ Bloc("b11",
+ Goto("b12")),
+ Bloc("b12",
+ If("p", b12then, b12else)),
+ Bloc("b10",
+ Goto("b6")),
+ Bloc("b6",
+ Goto("b17")),
+ Bloc("b17",
+ Goto("b18")),
+ Bloc("b18",
+ If("p", "b22", "b19")),
+ Bloc("b22",
+ Goto("b23")),
+ Bloc("b23",
+ If("p", "b21", "b19")),
+ Bloc("b19",
+ If("p", "b24", "b25")),
+ Bloc("b24",
+ Goto("b26")),
+ Bloc("b26",
+ Goto("b25")),
+ Bloc("b25",
+ If("p", "b27", "b29")),
+ Bloc("b27",
+ Goto("b30")),
+ Bloc("b30",
+ Goto("b28")),
+ Bloc("b29",
+ Goto("b31")),
+ Bloc("b31",
+ Goto("b28")),
+ Bloc("b28",
+ If("p", "b32", "b33")),
+ Bloc("b32",
+ Goto("b21")),
+ Bloc("b21",
+ Goto("b47")),
+ Bloc("b47",
+ If("p", "b45", "b46")),
+ Bloc("b45",
+ Goto("b48")),
+ Bloc("b48",
+ Goto("b49")),
+ Bloc("b49",
+ If("p", "b50", "b51")),
+ Bloc("b50",
+ Goto("b52")),
+ Bloc("b52",
+ Goto("b53")),
+ Bloc("b53",
+ Goto("b51")),
+ Bloc("b51",
+ Goto("b54")),
+ Bloc("b54",
+ Goto("b46")),
+ Bloc("b46",
+ Exit("mem")),
+ Bloc("b33",
+ Goto("b34")),
+ Bloc("b34",
+ Goto("b37")),
+ Bloc("b37",
+ If("p", "b35", "b36")),
+ Bloc("b35",
+ Goto("b38")),
+ Bloc("b38",
+ Goto("b39")),
+ Bloc("b39",
+ If("p", "b40", "b41")),
+ Bloc("b40",
+ Goto("b42")),
+ Bloc("b42",
+ Goto("b43")),
+ Bloc("b43",
+ Goto("b41")),
+ Bloc("b41",
+ Goto("b44")),
+ Bloc("b44",
+ Goto("b36")),
+ Bloc("b36",
+ Goto("b20")),
+ Bloc("b20",
+ Goto("b18")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b4",
+ Exit("mem")))
+ CheckFunc(fun.f)
+ doms := generateDominatorMap(fun)
+ verifyDominators(t, fun, dominators, doms)
+}
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
new file mode 100644
index 0000000..b0788f1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -0,0 +1,1035 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+func postExpandCallsDecompose(f *Func) {
+ decomposeUser(f) // redo user decompose to cleanup after expand calls
+ decomposeBuiltIn(f) // handles both regular decomposition and cleanup.
+}
+
+func expandCalls(f *Func) {
+ // Convert each aggregate arg to a call into "dismantle aggregate, store/pass parts"
+ // Convert each aggregate result from a call into "assemble aggregate from parts"
+ // Convert each multivalue exit into "dismantle aggregate, store/return parts"
+ // Convert incoming aggregate arg into assembly of parts.
+ // Feed modified AST to decompose.
+
+ sp, _ := f.spSb()
+
+ x := &expandState{
+ f: f,
+ debug: f.pass.debug,
+ regSize: f.Config.RegSize,
+ sp: sp,
+ typs: &f.Config.Types,
+ wideSelects: make(map[*Value]*Value),
+ commonArgs: make(map[selKey]*Value),
+ commonSelectors: make(map[selKey]*Value),
+ memForCall: make(map[ID]*Value),
+ }
+
+ // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+ if f.Config.BigEndian {
+ x.firstOp = OpInt64Hi
+ x.secondOp = OpInt64Lo
+ x.firstType = x.typs.Int32
+ x.secondType = x.typs.UInt32
+ } else {
+ x.firstOp = OpInt64Lo
+ x.secondOp = OpInt64Hi
+ x.firstType = x.typs.UInt32
+ x.secondType = x.typs.Int32
+ }
+
+ // Defer select processing until after all calls and selects are seen.
+ var selects []*Value
+ var calls []*Value
+ var args []*Value
+ var exitBlocks []*Block
+
+ var m0 *Value
+
+ // Accumulate lists of calls, args, selects, and exit blocks to process,
+ // note "wide" selects consumed by stores,
+ // rewrite mem for each call,
+ // rewrite each OpSelectNAddr.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpInitMem:
+ m0 = v
+
+ case OpClosureLECall, OpInterLECall, OpStaticLECall, OpTailLECall:
+ calls = append(calls, v)
+
+ case OpArg:
+ args = append(args, v)
+
+ case OpStore:
+ if a := v.Args[1]; a.Op == OpSelectN && !CanSSA(a.Type) {
+ if a.Uses > 1 {
+ panic(fmt.Errorf("Saw double use of wide SelectN %s operand of Store %s",
+ a.LongString(), v.LongString()))
+ }
+ x.wideSelects[a] = v
+ }
+
+ case OpSelectN:
+ if v.Type == types.TypeMem {
+ // rewrite the mem selector in place
+ call := v.Args[0]
+ aux := call.Aux.(*AuxCall)
+ mem := x.memForCall[call.ID]
+ if mem == nil {
+ v.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
+ x.memForCall[call.ID] = v
+ } else {
+ panic(fmt.Errorf("Saw two memories for call %v, %v and %v", call, mem, v))
+ }
+ } else {
+ selects = append(selects, v)
+ }
+
+ case OpSelectNAddr:
+ call := v.Args[0]
+ which := v.AuxInt
+ aux := call.Aux.(*AuxCall)
+ pt := v.Type
+ off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt)
+ v.copyOf(off)
+ }
+ }
+
+ // rewrite function results from an exit block
+ // values returned by function need to be split out into registers.
+ if isBlockMultiValueExit(b) {
+ exitBlocks = append(exitBlocks, b)
+ }
+ }
+
+ // Convert each aggregate arg into Make of its parts (and so on, to primitive types)
+ for _, v := range args {
+ var rc registerCursor
+ a := x.prAssignForArg(v)
+ aux := x.f.OwnAux
+ regs := a.Registers
+ var offset int64
+ if len(regs) == 0 {
+ offset = a.FrameOffset(aux.abiInfo)
+ }
+ auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+ rc.init(regs, aux.abiInfo, nil, auxBase, 0)
+ x.rewriteSelectOrArg(f.Entry.Pos, f.Entry, v, v, m0, v.Type, rc)
+ }
+
+ // Rewrite selects of results (which may be aggregates) into make-aggregates of register/memory-targeted selects
+ for _, v := range selects {
+ if v.Op == OpInvalid {
+ continue
+ }
+
+ call := v.Args[0]
+ aux := call.Aux.(*AuxCall)
+ mem := x.memForCall[call.ID]
+ if mem == nil {
+ mem = call.Block.NewValue1I(call.Pos, OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
+ x.memForCall[call.ID] = mem
+ }
+
+ i := v.AuxInt
+ regs := aux.RegsOfResult(i)
+
+ // If this select cannot fit into SSA and is stored, either disaggregate to register stores, or mem-mem move.
+ if store := x.wideSelects[v]; store != nil {
+ // Use the mem that comes from the store operation.
+ storeAddr := store.Args[0]
+ mem := store.Args[2]
+ if len(regs) > 0 {
+ // Cannot do a rewrite that builds up a result from pieces; instead, copy pieces to the store operation.
+ var rc registerCursor
+ rc.init(regs, aux.abiInfo, nil, storeAddr, 0)
+ mem = x.rewriteWideSelectToStores(call.Pos, call.Block, v, mem, v.Type, rc)
+ store.copyOf(mem)
+ } else {
+ // Move directly from AuxBase to store target; rewrite the store instruction.
+ offset := aux.OffsetOfResult(i)
+ auxBase := x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+ // was Store dst, v, mem
+ // now Move dst, auxBase, mem
+ move := store.Block.NewValue3A(store.Pos, OpMove, types.TypeMem, v.Type, storeAddr, auxBase, mem)
+ move.AuxInt = v.Type.Size()
+ store.copyOf(move)
+ }
+ continue
+ }
+
+ var auxBase *Value
+ if len(regs) == 0 {
+ offset := aux.OffsetOfResult(i)
+ auxBase = x.offsetFrom(x.f.Entry, x.sp, offset, types.NewPtr(v.Type))
+ }
+ var rc registerCursor
+ rc.init(regs, aux.abiInfo, nil, auxBase, 0)
+ x.rewriteSelectOrArg(call.Pos, call.Block, v, v, mem, v.Type, rc)
+ }
+
+ rewriteCall := func(v *Value, newOp Op, argStart int) {
+ // Break aggregate args passed to call into smaller pieces.
+ x.rewriteCallArgs(v, argStart)
+ v.Op = newOp
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
+ }
+
+ // Rewrite calls
+ for _, v := range calls {
+ switch v.Op {
+ case OpStaticLECall:
+ rewriteCall(v, OpStaticCall, 0)
+ case OpTailLECall:
+ rewriteCall(v, OpTailCall, 0)
+ case OpClosureLECall:
+ rewriteCall(v, OpClosureCall, 2)
+ case OpInterLECall:
+ rewriteCall(v, OpInterCall, 1)
+ }
+ }
+
+ // Rewrite results from exit blocks
+ for _, b := range exitBlocks {
+ v := b.Controls[0]
+ x.rewriteFuncResults(v, b, f.OwnAux)
+ b.SetControl(v)
+ }
+
+}
+
+func (x *expandState) rewriteFuncResults(v *Value, b *Block, aux *AuxCall) {
+ // This is very similar to rewriteCallArgs
+ // differences:
+ // firstArg + preArgs
+ // sp vs auxBase
+
+ m0 := v.MemoryArg()
+ mem := m0
+
+ allResults := []*Value{}
+ var oldArgs []*Value
+ argsWithoutMem := v.Args[:len(v.Args)-1]
+
+ for j, a := range argsWithoutMem {
+ oldArgs = append(oldArgs, a)
+ i := int64(j)
+ auxType := aux.TypeOfResult(i)
+ auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
+ auxOffset := int64(0)
+ aRegs := aux.RegsOfResult(int64(j))
+ if a.Op == OpDereference {
+ a.Op = OpLoad
+ }
+ var rc registerCursor
+ var result *[]*Value
+ if len(aRegs) > 0 {
+ result = &allResults
+ } else {
+ if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
+ addr := a.Args[0]
+ if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
+ continue // Self move to output parameter
+ }
+ }
+ }
+ rc.init(aRegs, aux.abiInfo, result, auxBase, auxOffset)
+ mem = x.decomposeAsNecessary(v.Pos, b, a, mem, rc)
+ }
+ v.resetArgs()
+ v.AddArgs(allResults...)
+ v.AddArg(mem)
+ for _, a := range oldArgs {
+ if a.Uses == 0 {
+ if x.debug > 1 {
+ x.Printf("...marking %v unused\n", a.LongString())
+ }
+ x.invalidateRecursively(a)
+ }
+ }
+ v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem))
+ return
+}
+
+func (x *expandState) rewriteCallArgs(v *Value, firstArg int) {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("rewriteCallArgs(%s; %d)\n", v.LongString(), firstArg)
+ }
+ // Thread the stores on the memory arg
+ aux := v.Aux.(*AuxCall)
+ m0 := v.MemoryArg()
+ mem := m0
+ allResults := []*Value{}
+ oldArgs := []*Value{}
+ argsWithoutMem := v.Args[firstArg : len(v.Args)-1] // Also strip closure/interface Op-specific args
+
+ sp := x.sp
+ if v.Op == OpTailLECall {
+ // For tail call, we unwind the frame before the call so we'll use the caller's
+ // SP.
+ sp = x.f.Entry.NewValue1(src.NoXPos, OpGetCallerSP, x.typs.Uintptr, mem)
+ }
+
+ for i, a := range argsWithoutMem { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
+ oldArgs = append(oldArgs, a)
+ auxI := int64(i)
+ aRegs := aux.RegsOfArg(auxI)
+ aType := aux.TypeOfArg(auxI)
+
+ if a.Op == OpDereference {
+ a.Op = OpLoad
+ }
+ var rc registerCursor
+ var result *[]*Value
+ var aOffset int64
+ if len(aRegs) > 0 {
+ result = &allResults
+ } else {
+ aOffset = aux.OffsetOfArg(auxI)
+ }
+ if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
+ // It's common for a tail call passing the same arguments (e.g. method wrapper),
+ // so this would be a self copy. Detect this and optimize it out.
+ n := a.Aux.(*ir.Name)
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.Arch.FixedFrameSize == aOffset {
+ continue
+ }
+ }
+ if x.debug > 1 {
+ x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
+ }
+
+ rc.init(aRegs, aux.abiInfo, result, sp, aOffset)
+ mem = x.decomposeAsNecessary(v.Pos, v.Block, a, mem, rc)
+ }
+ var preArgStore [2]*Value
+ preArgs := append(preArgStore[:0], v.Args[0:firstArg]...)
+ v.resetArgs()
+ v.AddArgs(preArgs...)
+ v.AddArgs(allResults...)
+ v.AddArg(mem)
+ for _, a := range oldArgs {
+ if a.Uses == 0 {
+ x.invalidateRecursively(a)
+ }
+ }
+
+ return
+}
+
+func (x *expandState) decomposePair(pos src.XPos, b *Block, a, mem *Value, t0, t1 *types.Type, o0, o1 Op, rc *registerCursor) *Value {
+ e := b.NewValue1(pos, o0, t0, a)
+ pos = pos.WithNotStmt()
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0))
+ e = b.NewValue1(pos, o1, t1, a)
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t1))
+ return mem
+}
+
+func (x *expandState) decomposeOne(pos src.XPos, b *Block, a, mem *Value, t0 *types.Type, o0 Op, rc *registerCursor) *Value {
+ e := b.NewValue1(pos, o0, t0, a)
+ pos = pos.WithNotStmt()
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(t0))
+ return mem
+}
+
+// decomposeAsNecessary converts a value (perhaps an aggregate) passed to a call or returned by a function,
+// into the appropriate sequence of stores and register assignments to transmit that value in a given ABI, and
+// returns the current memory after this convert/rewrite (it may be the input memory, perhaps stores were needed.)
+// 'pos' is the source position all this is tied to
+// 'b' is the enclosing block
+// 'a' is the value to decompose
+// 'm0' is the input memory arg used for the first store (or returned if there are no stores)
+// 'rc' is a registerCursor which identifies the register/memory destination for the value
+func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, rc registerCursor) *Value {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ }
+ at := a.Type
+ if at.Size() == 0 {
+ return m0
+ }
+ if a.Op == OpDereference {
+ a.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load.
+ }
+
+ if !rc.hasRegs() && !CanSSA(at) {
+ dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+ if x.debug > 1 {
+ x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString())
+ }
+ if a.Op == OpLoad {
+ m0 = b.NewValue3A(pos, OpMove, types.TypeMem, at, dst, a.Args[0], m0)
+ m0.AuxInt = at.Size()
+ return m0
+ } else {
+ panic(fmt.Errorf("Store of not a load"))
+ }
+ }
+
+ mem := m0
+ switch at.Kind() {
+ case types.TARRAY:
+ et := at.Elem()
+ for i := int64(0); i < at.NumElem(); i++ {
+ e := b.NewValue1I(pos, OpArraySelect, et, i, a)
+ pos = pos.WithNotStmt()
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et))
+ }
+ return mem
+
+ case types.TSTRUCT:
+ for i := 0; i < at.NumFields(); i++ {
+ et := at.Field(i).Type // might need to read offsets from the fields
+ e := b.NewValue1I(pos, OpStructSelect, et, int64(i), a)
+ pos = pos.WithNotStmt()
+ if x.debug > 1 {
+ x.Printf("...recur decompose %s, %v\n", e.LongString(), et)
+ }
+ mem = x.decomposeAsNecessary(pos, b, e, mem, rc.next(et))
+ }
+ return mem
+
+ case types.TSLICE:
+ mem = x.decomposeOne(pos, b, a, mem, at.Elem().PtrTo(), OpSlicePtr, &rc)
+ pos = pos.WithNotStmt()
+ mem = x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceLen, &rc)
+ return x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceCap, &rc)
+
+ case types.TSTRING:
+ return x.decomposePair(pos, b, a, mem, x.typs.BytePtr, x.typs.Int, OpStringPtr, OpStringLen, &rc)
+
+ case types.TINTER:
+ mem = x.decomposeOne(pos, b, a, mem, x.typs.Uintptr, OpITab, &rc)
+ pos = pos.WithNotStmt()
+ // Immediate interfaces cause so many headaches.
+ if a.Op == OpIMake {
+ data := a.Args[1]
+ for data.Op == OpStructMake1 || data.Op == OpArrayMake1 {
+ data = data.Args[0]
+ }
+ return x.decomposeAsNecessary(pos, b, data, mem, rc.next(data.Type))
+ }
+ return x.decomposeOne(pos, b, a, mem, x.typs.BytePtr, OpIData, &rc)
+
+ case types.TCOMPLEX64:
+ return x.decomposePair(pos, b, a, mem, x.typs.Float32, x.typs.Float32, OpComplexReal, OpComplexImag, &rc)
+
+ case types.TCOMPLEX128:
+ return x.decomposePair(pos, b, a, mem, x.typs.Float64, x.typs.Float64, OpComplexReal, OpComplexImag, &rc)
+
+ case types.TINT64:
+ if at.Size() > x.regSize {
+ return x.decomposePair(pos, b, a, mem, x.firstType, x.secondType, x.firstOp, x.secondOp, &rc)
+ }
+ case types.TUINT64:
+ if at.Size() > x.regSize {
+ return x.decomposePair(pos, b, a, mem, x.typs.UInt32, x.typs.UInt32, x.firstOp, x.secondOp, &rc)
+ }
+ }
+
+ // An atomic type, either record the register or store it and update the memory.
+
+ if rc.hasRegs() {
+ if x.debug > 1 {
+ x.Printf("...recur addArg %s\n", a.LongString())
+ }
+ rc.addArg(a)
+ } else {
+ dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+ if x.debug > 1 {
+ x.Printf("...recur store %s at %s\n", a.LongString(), dst.LongString())
+ }
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, mem)
+ }
+
+ return mem
+}
+
+// Convert scalar OpArg into the proper OpWhateverArg instruction
+// Convert scalar OpSelectN into perhaps-differently-indexed OpSelectN
+// Convert aggregate OpArg into Make of its parts (which are eventually scalars)
+// Convert aggregate OpSelectN into Make of its parts (which are eventually scalars)
+// Returns the converted value.
+//
+// - "pos" the position for any generated instructions
+// - "b" the block for any generated instructions
+// - "container" the outermost OpArg/OpSelectN
+// - "a" the instruction to overwrite, if any (only the outermost caller)
+// - "m0" the memory arg for any loads that are necessary
+// - "at" the type of the Arg/part
+// - "rc" the register/memory cursor locating the various parts of the Arg.
+func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m0 *Value, at *types.Type, rc registerCursor) *Value {
+
+ if at == types.TypeMem {
+ a.copyOf(m0)
+ return a
+ }
+
+ makeOf := func(a *Value, op Op, args []*Value) *Value {
+ if a == nil {
+ a = b.NewValue0(pos, op, at)
+ a.AddArgs(args...)
+ } else {
+ a.resetArgs()
+ a.Aux, a.AuxInt = nil, 0
+ a.Pos, a.Op, a.Type = pos, op, at
+ a.AddArgs(args...)
+ }
+ return a
+ }
+
+ if at.Size() == 0 {
+ // For consistency, create these values even though they'll ultimately be unused
+ if at.IsArray() {
+ return makeOf(a, OpArrayMake0, nil)
+ }
+ if at.IsStruct() {
+ return makeOf(a, OpStructMake0, nil)
+ }
+ return a
+ }
+
+ sk := selKey{from: container, size: 0, offsetOrIndex: rc.storeOffset, typ: at}
+ dupe := x.commonSelectors[sk]
+ if dupe != nil {
+ if a == nil {
+ return dupe
+ }
+ a.copyOf(dupe)
+ return a
+ }
+
+ var argStore [10]*Value
+ args := argStore[:0]
+
+ addArg := func(a0 *Value) {
+ if a0 == nil {
+ as := "<nil>"
+ if a != nil {
+ as = a.LongString()
+ }
+ panic(fmt.Errorf("a0 should not be nil, a=%v, container=%v, at=%v", as, container.LongString(), at))
+ }
+ args = append(args, a0)
+ }
+
+ switch at.Kind() {
+ case types.TARRAY:
+ et := at.Elem()
+ for i := int64(0); i < at.NumElem(); i++ {
+ e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et))
+ addArg(e)
+ }
+ a = makeOf(a, OpArrayMake1, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TSTRUCT:
+ // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here.
+ for i := 0; i < at.NumFields(); i++ {
+ et := at.Field(i).Type
+ e := x.rewriteSelectOrArg(pos, b, container, nil, m0, et, rc.next(et))
+ if e == nil {
+ panic(fmt.Errorf("nil e, et=%v, et.Size()=%d, i=%d", et, et.Size(), i))
+ }
+ addArg(e)
+ pos = pos.WithNotStmt()
+ }
+ if at.NumFields() > 4 {
+ panic(fmt.Errorf("Too many fields (%d, %d bytes), container=%s", at.NumFields(), at.Size(), container.LongString()))
+ }
+ a = makeOf(a, StructMakeOp(at.NumFields()), args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TSLICE:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+ a = makeOf(a, OpSliceMake, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TSTRING:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int)))
+ a = makeOf(a, OpStringMake, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TINTER:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)))
+ a = makeOf(a, OpIMake, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TCOMPLEX64:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float32, rc.next(x.typs.Float32)))
+ a = makeOf(a, OpComplexMake, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TCOMPLEX128:
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Float64, rc.next(x.typs.Float64)))
+ a = makeOf(a, OpComplexMake, args)
+ x.commonSelectors[sk] = a
+ return a
+
+ case types.TINT64:
+ if at.Size() > x.regSize {
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.firstType, rc.next(x.firstType)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.secondType, rc.next(x.secondType)))
+ if !x.f.Config.BigEndian {
+ // Int64Make args are big, little
+ args[0], args[1] = args[1], args[0]
+ }
+ a = makeOf(a, OpInt64Make, args)
+ x.commonSelectors[sk] = a
+ return a
+ }
+ case types.TUINT64:
+ if at.Size() > x.regSize {
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32)))
+ pos = pos.WithNotStmt()
+ addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.UInt32, rc.next(x.typs.UInt32)))
+ if !x.f.Config.BigEndian {
+ // Int64Make args are big, little
+ args[0], args[1] = args[1], args[0]
+ }
+ a = makeOf(a, OpInt64Make, args)
+ x.commonSelectors[sk] = a
+ return a
+ }
+ }
+
+ // An atomic type, either record the register or store it and update the memory.
+
+ // Depending on the container Op, the leaves are either OpSelectN or OpArg{Int,Float}Reg
+
+ if container.Op == OpArg {
+ if rc.hasRegs() {
+ op, i := rc.ArgOpAndRegisterFor()
+ name := container.Aux.(*ir.Name)
+ a = makeOf(a, op, nil)
+ a.AuxInt = i
+ a.Aux = &AuxNameOffset{name, rc.storeOffset}
+ } else {
+ key := selKey{container, rc.storeOffset, at.Size(), at}
+ w := x.commonArgs[key]
+ if w != nil && w.Uses != 0 {
+ if a == nil {
+ a = w
+ } else {
+ a.copyOf(w)
+ }
+ } else {
+ if a == nil {
+ aux := container.Aux
+ auxInt := container.AuxInt + rc.storeOffset
+ a = container.Block.NewValue0IA(container.Pos, OpArg, at, auxInt, aux)
+ } else {
+ // do nothing, the original should be okay.
+ }
+ x.commonArgs[key] = a
+ }
+ }
+ } else if container.Op == OpSelectN {
+ call := container.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := container.AuxInt
+
+ if at == types.TypeMem {
+ if a != m0 || a != x.memForCall[call.ID] {
+ panic(fmt.Errorf("Memories %s, %s, and %s should all be equal after %s", a.LongString(), m0.LongString(), x.memForCall[call.ID], call.LongString()))
+ }
+ } else if rc.hasRegs() {
+ firstReg := uint32(0)
+ for i := 0; i < int(which); i++ {
+ firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
+ }
+ reg := int64(rc.nextSlice + Abi1RO(firstReg))
+ a = makeOf(a, OpSelectN, []*Value{call})
+ a.AuxInt = reg
+ } else {
+ off := x.offsetFrom(x.f.Entry, x.sp, rc.storeOffset+aux.OffsetOfResult(which), types.NewPtr(at))
+ a = makeOf(a, OpLoad, []*Value{off, m0})
+ }
+
+ } else {
+ panic(fmt.Errorf("Expected container OpArg or OpSelectN, saw %v instead", container.LongString()))
+ }
+
+ x.commonSelectors[sk] = a
+ return a
+}
+
+// rewriteWideSelectToStores handles the case of a SelectN'd result from a function call that is too large for SSA,
+// but is transferred in registers. In this case the register cursor tracks both operands; the register sources and
+// the memory destinations.
+// This returns the memory flowing out of the last store
+func (x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, container, m0 *Value, at *types.Type, rc registerCursor) *Value {
+
+ if at.Size() == 0 {
+ return m0
+ }
+
+ switch at.Kind() {
+ case types.TARRAY:
+ et := at.Elem()
+ for i := int64(0); i < at.NumElem(); i++ {
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et))
+ }
+ return m0
+
+ case types.TSTRUCT:
+ // Assume ssagen/ssa.go (in buildssa) spills large aggregates so they won't appear here.
+ for i := 0; i < at.NumFields(); i++ {
+ et := at.Field(i).Type
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, et, rc.next(et))
+ pos = pos.WithNotStmt()
+ }
+ return m0
+
+ case types.TSLICE:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+ return m0
+
+ case types.TSTRING:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int))
+ return m0
+
+ case types.TINTER:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Uintptr, rc.next(x.typs.Uintptr))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))
+ return m0
+
+ case types.TCOMPLEX64:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float32, rc.next(x.typs.Float32))
+ return m0
+
+ case types.TCOMPLEX128:
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Float64, rc.next(x.typs.Float64))
+ return m0
+
+ case types.TINT64:
+ if at.Size() > x.regSize {
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.firstType, rc.next(x.firstType))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.secondType, rc.next(x.secondType))
+ return m0
+ }
+ case types.TUINT64:
+ if at.Size() > x.regSize {
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32))
+ pos = pos.WithNotStmt()
+ m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.UInt32, rc.next(x.typs.UInt32))
+ return m0
+ }
+ }
+
+ // TODO could change treatment of too-large OpArg, would deal with it here.
+ if container.Op == OpSelectN {
+ call := container.Args[0]
+ aux := call.Aux.(*AuxCall)
+ which := container.AuxInt
+
+ if rc.hasRegs() {
+ firstReg := uint32(0)
+ for i := 0; i < int(which); i++ {
+ firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
+ }
+ reg := int64(rc.nextSlice + Abi1RO(firstReg))
+ a := b.NewValue1I(pos, OpSelectN, at, reg, call)
+ dst := x.offsetFrom(b, rc.storeDest, rc.storeOffset, types.NewPtr(at))
+ m0 = b.NewValue3A(pos, OpStore, types.TypeMem, at, dst, a, m0)
+ } else {
+ panic(fmt.Errorf("Expected rc to have registers"))
+ }
+ } else {
+ panic(fmt.Errorf("Expected container OpSelectN, saw %v instead", container.LongString()))
+ }
+ return m0
+}
+
+func isBlockMultiValueExit(b *Block) bool {
+ return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && b.Controls[0] != nil && b.Controls[0].Op == OpMakeResult
+}
+
+type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1.
+
+// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such.
+type registerCursor struct {
+ storeDest *Value // if there are no register targets, then this is the base of the store.
+ storeOffset int64
+ regs []abi.RegIndex // the registers available for this Arg/result (which is all in registers or not at all)
+ nextSlice Abi1RO // the next register/register-slice offset
+ config *abi.ABIConfig
+ regValues *[]*Value // values assigned to registers accumulate here
+}
+
+func (c *registerCursor) String() string {
+ dest := "<none>"
+ if c.storeDest != nil {
+ dest = fmt.Sprintf("%s+%d", c.storeDest.String(), c.storeOffset)
+ }
+ regs := "<none>"
+ if c.regValues != nil {
+ regs = ""
+ for i, x := range *c.regValues {
+ if i > 0 {
+ regs = regs + "; "
+ }
+ regs = regs + x.LongString()
+ }
+ }
+
+ // not printing the config because that has not been useful
+ return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, len(c.regs), c.nextSlice, regs)
+}
+
+// next effectively post-increments the register cursor; the receiver is advanced,
+// the (aligned) old value is returned.
+func (c *registerCursor) next(t *types.Type) registerCursor {
+ c.storeOffset = types.RoundUp(c.storeOffset, t.Alignment())
+ rc := *c
+ c.storeOffset = types.RoundUp(c.storeOffset+t.Size(), t.Alignment())
+ if int(c.nextSlice) < len(c.regs) {
+ w := c.config.NumParamRegs(t)
+ c.nextSlice += Abi1RO(w)
+ }
+ return rc
+}
+
+// plus returns a register cursor offset from the original, without modifying the original.
+func (c *registerCursor) plus(regWidth Abi1RO) registerCursor {
+ rc := *c
+ rc.nextSlice += regWidth
+ return rc
+}
+
+// at returns the register cursor for component i of t, where the first
+// component is numbered 0.
+func (c *registerCursor) at(t *types.Type, i int) registerCursor {
+ rc := *c
+ if i == 0 || len(c.regs) == 0 {
+ return rc
+ }
+ if t.IsArray() {
+ w := c.config.NumParamRegs(t.Elem())
+ rc.nextSlice += Abi1RO(i * w)
+ return rc
+ }
+ if t.IsStruct() {
+ for j := 0; j < i; j++ {
+ rc.next(t.FieldType(j))
+ }
+ return rc
+ }
+ panic("Haven't implemented this case yet, do I need to?")
+}
+
+func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value, storeOffset int64) {
+ c.regs = regs
+ c.nextSlice = 0
+ c.storeOffset = storeOffset
+ c.storeDest = storeDest
+ c.config = info.Config()
+ c.regValues = result
+}
+
+func (c *registerCursor) addArg(v *Value) {
+ *c.regValues = append(*c.regValues, v)
+}
+
+func (c *registerCursor) hasRegs() bool {
+ return len(c.regs) > 0
+}
+
+func (c *registerCursor) ArgOpAndRegisterFor() (Op, int64) {
+ r := c.regs[c.nextSlice]
+ return ArgOpAndRegisterFor(r, c.config)
+}
+
+// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
+// arg register index.
+func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) {
+ i := abiConfig.FloatIndexFor(r)
+ if i >= 0 { // float PR
+ return OpArgFloatReg, i
+ }
+ return OpArgIntReg, int64(r)
+}
+
+type selKey struct {
+ from *Value // what is selected from
+ offsetOrIndex int64 // whatever is appropriate for the selector
+ size int64
+ typ *types.Type
+}
+
+type expandState struct {
+ f *Func
+ debug int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
+ regSize int64
+ sp *Value
+ typs *Types
+
+ firstOp Op // for 64-bit integers on 32-bit machines, first word in memory
+ secondOp Op // for 64-bit integers on 32-bit machines, second word in memory
+ firstType *types.Type // first half type, for Int64
+ secondType *types.Type // second half type, for Int64
+
+ wideSelects map[*Value]*Value // Selects that are not SSA-able, mapped to consuming stores.
+ commonSelectors map[selKey]*Value // used to de-dupe selectors
+ commonArgs map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg
+ memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem.
+ indentLevel int // Indentation for debugging recursion
+}
+
+// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+// that has no 64-bit integer registers.
+func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
+ tHi = x.typs.UInt32
+ if et == types.TINT64 {
+ tHi = x.typs.Int32
+ }
+ tLo = x.typs.UInt32
+ return
+}
+
+// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value {
+ ft := from.Type
+ if offset == 0 {
+ if ft == pt {
+ return from
+ }
+ // This captures common, (apparently) safe cases. The unsafe cases involve ft == uintptr
+ if (ft.IsPtr() || ft.IsUnsafePtr()) && pt.IsPtr() {
+ return from
+ }
+ }
+ // Simplify, canonicalize
+ for from.Op == OpOffPtr {
+ offset += from.AuxInt
+ from = from.Args[0]
+ }
+ if from == x.sp {
+ return x.f.ConstOffPtrSP(pt, offset, x.sp)
+ }
+ return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
+}
+
+func (x *expandState) regWidth(t *types.Type) Abi1RO {
+ return Abi1RO(x.f.ABI1.NumParamRegs(t))
+}
+
+// regOffset returns the register offset of the i'th element of type t
+func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
+ // TODO maybe cache this in a map if profiling recommends.
+ if i == 0 {
+ return 0
+ }
+ if t.IsArray() {
+ return Abi1RO(i) * x.regWidth(t.Elem())
+ }
+ if t.IsStruct() {
+ k := Abi1RO(0)
+ for j := 0; j < i; j++ {
+ k += x.regWidth(t.FieldType(j))
+ }
+ return k
+ }
+ panic("Haven't implemented this case yet, do I need to?")
+}
+
+// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
+func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment {
+ if v.Op != OpArg {
+ panic(fmt.Errorf("Wanted OpArg, instead saw %s", v.LongString()))
+ }
+ return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name))
+}
+
+// ParamAssignmentForArgName returns the ABIParamAssignment for f's arg with matching name.
+func ParamAssignmentForArgName(f *Func, name *ir.Name) *abi.ABIParamAssignment {
+ abiInfo := f.OwnAux.abiInfo
+ ip := abiInfo.InParams()
+ for i, a := range ip {
+ if a.Name == name {
+ return &ip[i]
+ }
+ }
+ panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, abiInfo.InParams()))
+}
+
+// indent increments (or decrements) the indentation.
+func (x *expandState) indent(n int) {
+ x.indentLevel += n
+}
+
+// Printf does an indented fmt.Printf on the format and args.
+func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) {
+ if x.indentLevel > 0 {
+ fmt.Printf("%[1]*s", x.indentLevel, "")
+ }
+ return fmt.Printf(format, a...)
+}
+
+func (x *expandState) invalidateRecursively(a *Value) {
+ var s string
+ if x.debug > 0 {
+ plus := " "
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ plus = " +"
+ }
+ s = a.String() + plus + a.Pos.LineNumber() + " " + a.LongString()
+ if x.debug > 1 {
+ x.Printf("...marking %v unused\n", s)
+ }
+ }
+ lost := a.invalidateRecursively()
+ if x.debug&1 != 0 && lost { // For odd values of x.debug, do this.
+ x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
new file mode 100644
index 0000000..b2c4b19
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -0,0 +1,120 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "testing"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/x86"
+ "cmd/internal/src"
+)
+
+var CheckFunc = checkFunc
+var Opt = opt
+var Deadcode = deadcode
+var Copyelim = copyelim
+
+var testCtxts = map[string]*obj.Link{
+ "amd64": obj.Linknew(&x86.Linkamd64),
+ "s390x": obj.Linknew(&s390x.Links390x),
+ "arm64": obj.Linknew(&arm64.Linkarm64),
+}
+
+func testConfig(tb testing.TB) *Conf { return testConfigArch(tb, "amd64") }
+func testConfigS390X(tb testing.TB) *Conf { return testConfigArch(tb, "s390x") }
+func testConfigARM64(tb testing.TB) *Conf { return testConfigArch(tb, "arm64") }
+
+func testConfigArch(tb testing.TB, arch string) *Conf {
+ ctxt, ok := testCtxts[arch]
+ if !ok {
+ tb.Fatalf("unknown arch %s", arch)
+ }
+ if ctxt.Arch.PtrSize != 8 {
+ tb.Fatal("testTypes is 64-bit only")
+ }
+ c := &Conf{
+ config: NewConfig(arch, testTypes, ctxt, true, false),
+ tb: tb,
+ }
+ return c
+}
+
+type Conf struct {
+ config *Config
+ tb testing.TB
+ fe Frontend
+}
+
+func (c *Conf) Frontend() Frontend {
+ if c.fe == nil {
+ pkg := types.NewPkg("my/import/path", "path")
+ fn := ir.NewFunc(src.NoXPos, src.NoXPos, pkg.Lookup("function"), types.NewSignature(nil, nil, nil))
+ fn.DeclareParams(true)
+ fn.LSym = &obj.LSym{Name: "my/import/path.function"}
+
+ c.fe = TestFrontend{
+ t: c.tb,
+ ctxt: c.config.ctxt,
+ f: fn,
+ }
+ }
+ return c.fe
+}
+
+func (c *Conf) Temp(typ *types.Type) *ir.Name {
+ n := ir.NewNameAt(src.NoXPos, &types.Sym{Name: "aFakeAuto"}, typ)
+ n.Class = ir.PAUTO
+ return n
+}
+
+// TestFrontend is a test-only frontend.
+// It assumes 64 bit integers and pointers.
+type TestFrontend struct {
+ t testing.TB
+ ctxt *obj.Link
+ f *ir.Func
+}
+
+func (TestFrontend) StringData(s string) *obj.LSym {
+ return nil
+}
+func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+ return LocalSlot{N: parent.N, Type: t, Off: offset}
+}
+func (d TestFrontend) Syslook(s string) *obj.LSym {
+ return d.ctxt.Lookup(s)
+}
+func (TestFrontend) UseWriteBarrier() bool {
+ return true // only writebarrier_test cares
+}
+
+func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Log() bool { return true }
+
+func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Debug_checknil() bool { return false }
+
+func (d TestFrontend) Func() *ir.Func {
+ return d.f
+}
+
+var testTypes Types
+
+func init() {
+ // TODO(mdempsky): Push into types.InitUniverse or typecheck.InitUniverse.
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+
+ typecheck.InitUniverse()
+ testTypes.SetTypPtrs()
+}
diff --git a/src/cmd/compile/internal/ssa/flagalloc.go b/src/cmd/compile/internal/ssa/flagalloc.go
new file mode 100644
index 0000000..cf2c9a0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flagalloc.go
@@ -0,0 +1,270 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// flagalloc allocates the flag register among all the flag-generating
+// instructions. Flag values are recomputed if they need to be
+// spilled/restored.
+func flagalloc(f *Func) {
+ // Compute the in-register flag value we want at the end of
+ // each block. This is basically a best-effort live variable
+ // analysis, so it can be much simpler than a full analysis.
+ end := f.Cache.allocValueSlice(f.NumBlocks())
+ defer f.Cache.freeValueSlice(end)
+ po := f.postorder()
+ for n := 0; n < 2; n++ {
+ for _, b := range po {
+ // Walk values backwards to figure out what flag
+ // value we want in the flag register at the start
+ // of the block.
+ var flag *Value
+ for _, c := range b.ControlValues() {
+ if c.Type.IsFlags() {
+ if flag != nil {
+ panic("cannot have multiple controls using flags")
+ }
+ flag = c
+ }
+ }
+ if flag == nil {
+ flag = end[b.ID]
+ }
+ for j := len(b.Values) - 1; j >= 0; j-- {
+ v := b.Values[j]
+ if v == flag {
+ flag = nil
+ }
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flag = a
+ }
+ }
+ }
+ if flag != nil {
+ for _, e := range b.Preds {
+ p := e.b
+ end[p.ID] = flag
+ }
+ }
+ }
+ }
+
+ // For blocks which have a flags control value, that's the only value
+ // we can leave in the flags register at the end of the block. (There
+ // is no place to put a flag regeneration instruction.)
+ for _, b := range f.Blocks {
+ if b.Kind == BlockDefer {
+ // Defer blocks internally use/clobber the flags value.
+ end[b.ID] = nil
+ continue
+ }
+ for _, v := range b.ControlValues() {
+ if v.Type.IsFlags() && end[b.ID] != v {
+ end[b.ID] = nil
+ }
+ }
+ }
+
+ // Compute which flags values will need to be spilled.
+ spill := map[ID]bool{}
+ for _, b := range f.Blocks {
+ var flag *Value
+ if len(b.Preds) > 0 {
+ flag = end[b.Preds[0].b.ID]
+ }
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ if !a.Type.IsFlags() {
+ continue
+ }
+ if a == flag {
+ continue
+ }
+ // a will need to be restored here.
+ spill[a.ID] = true
+ flag = a
+ }
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ if v.Type.IsFlags() {
+ flag = v
+ }
+ }
+ for _, v := range b.ControlValues() {
+ if v != flag && v.Type.IsFlags() {
+ spill[v.ID] = true
+ }
+ }
+ if v := end[b.ID]; v != nil && v != flag {
+ spill[v.ID] = true
+ }
+ }
+
+ // Add flag spill and recomputation where they are needed.
+ var remove []*Value // values that should be checked for possible removal
+ var oldSched []*Value
+ for _, b := range f.Blocks {
+ oldSched = append(oldSched[:0], b.Values...)
+ b.Values = b.Values[:0]
+ // The current live flag value (the pre-flagalloc copy).
+ var flag *Value
+ if len(b.Preds) > 0 {
+ flag = end[b.Preds[0].b.ID]
+ // Note: the following condition depends on the lack of critical edges.
+ for _, e := range b.Preds[1:] {
+ p := e.b
+ if end[p.ID] != flag {
+ f.Fatalf("live flag in %s's predecessors not consistent", b)
+ }
+ }
+ }
+ for _, v := range oldSched {
+ if v.Op == OpPhi && v.Type.IsFlags() {
+ f.Fatalf("phi of flags not supported: %s", v.LongString())
+ }
+
+ // If v will be spilled, and v uses memory, then we must split it
+ // into a load + a flag generator.
+ if spill[v.ID] && v.MemoryArg() != nil {
+ remove = append(remove, v)
+ if !f.Config.splitLoad(v) {
+ f.Fatalf("can't split flag generator: %s", v.LongString())
+ }
+ }
+
+ // Make sure any flag arg of v is in the flags register.
+ // If not, recompute it.
+ for i, a := range v.Args {
+ if !a.Type.IsFlags() {
+ continue
+ }
+ if a == flag {
+ continue
+ }
+ // Recalculate a
+ c := copyFlags(a, b)
+ // Update v.
+ v.SetArg(i, c)
+ // Remember the most-recently computed flag value.
+ flag = a
+ }
+ // Issue v.
+ b.Values = append(b.Values, v)
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ if v.Type.IsFlags() {
+ flag = v
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if v != flag && v.Type.IsFlags() {
+ // Recalculate control value.
+ remove = append(remove, v)
+ c := copyFlags(v, b)
+ b.ReplaceControl(i, c)
+ flag = v
+ }
+ }
+ if v := end[b.ID]; v != nil && v != flag {
+ // Need to reissue flag generator for use by
+ // subsequent blocks.
+ remove = append(remove, v)
+ copyFlags(v, b)
+ // Note: this flag generator is not properly linked up
+ // with the flag users. This breaks the SSA representation.
+ // We could fix up the users with another pass, but for now
+ // we'll just leave it. (Regalloc has the same issue for
+ // standard regs, and it runs next.)
+ // For this reason, take care not to add this flag
+ // generator to the remove list.
+ }
+ }
+
+ // Save live flag state for later.
+ for _, b := range f.Blocks {
+ b.FlagsLiveAtEnd = end[b.ID] != nil
+ }
+
+ // Remove any now-dead values.
+ // The number of values to remove is likely small,
+ // and removing them requires processing all values in a block,
+ // so minimize the number of blocks that we touch.
+
+ // Shrink remove to contain only dead values, and clobber those dead values.
+ for i := 0; i < len(remove); i++ {
+ v := remove[i]
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ continue
+ }
+ // Remove v.
+ last := len(remove) - 1
+ remove[i] = remove[last]
+ remove[last] = nil
+ remove = remove[:last]
+ i-- // reprocess value at i
+ }
+
+ if len(remove) == 0 {
+ return
+ }
+
+ removeBlocks := f.newSparseSet(f.NumBlocks())
+ defer f.retSparseSet(removeBlocks)
+ for _, v := range remove {
+ removeBlocks.add(v.Block.ID)
+ }
+
+ // Process affected blocks, preserving value order.
+ for _, b := range f.Blocks {
+ if !removeBlocks.contains(b.ID) {
+ continue
+ }
+ i := 0
+ for j := 0; j < len(b.Values); j++ {
+ v := b.Values[j]
+ if v.Op == OpInvalid {
+ continue
+ }
+ b.Values[i] = v
+ i++
+ }
+ b.truncateValues(i)
+ }
+}
+
+func (v *Value) clobbersFlags() bool {
+ if opcodeTable[v.Op].clobberFlags {
+ return true
+ }
+ if v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags()) {
+ // This case handles the possibility where a flag value is generated but never used.
+ // In that case, there's no corresponding Select to overwrite the flags value,
+ // so we must consider flags clobbered by the tuple-generating instruction.
+ return true
+ }
+ return false
+}
+
+// copyFlags copies v (flag generator) into b, returns the copy.
+// If v's arg is also flags, copy recursively.
+func copyFlags(v *Value, b *Block) *Value {
+ flagsArgs := make(map[int]*Value)
+ for i, a := range v.Args {
+ if a.Type.IsFlags() || a.Type.IsTuple() {
+ flagsArgs[i] = copyFlags(a, b)
+ }
+ }
+ c := v.copyInto(b)
+ for i, a := range flagsArgs {
+ c.SetArg(i, a)
+ }
+ return c
+}
diff --git a/src/cmd/compile/internal/ssa/flags_amd64_test.s b/src/cmd/compile/internal/ssa/flags_amd64_test.s
new file mode 100644
index 0000000..7402f6b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_amd64_test.s
@@ -0,0 +1,29 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·asmAddFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ ADDQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT ·asmSubFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ SUBQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT ·asmAndFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ ANDQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
diff --git a/src/cmd/compile/internal/ssa/flags_arm64_test.s b/src/cmd/compile/internal/ssa/flags_arm64_test.s
new file mode 100644
index 0000000..639d7e3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_arm64_test.s
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·asmAddFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ CMN R0, R1
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT ·asmSubFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ CMP R1, R0
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT ·asmAndFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ TST R1, R0
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ BIC $0x30000000, R0 // clear C, V bits, as TST does not change those flags
+ MOVD R0, ret+16(FP)
+ RET
diff --git a/src/cmd/compile/internal/ssa/flags_test.go b/src/cmd/compile/internal/ssa/flags_test.go
new file mode 100644
index 0000000..d0079ac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_test.go
@@ -0,0 +1,108 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+
+package ssa
+
+// This file tests the functions addFlags64 and subFlags64 by comparing their
+// results to what the chip calculates.
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestAddFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := addFlags64(x, y)
+ b := flagRegister2flagConstant(asmAddFlags(x, y), false)
+ if a != b {
+ t.Errorf("asmAdd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 9 { // TODO: can we cover all outputs?
+ t.Errorf("coverage too small, got %d want 9", len(coverage))
+ }
+}
+
+func TestSubFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := subFlags64(x, y)
+ b := flagRegister2flagConstant(asmSubFlags(x, y), true)
+ if a != b {
+ t.Errorf("asmSub diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 7 { // TODO: can we cover all outputs?
+ t.Errorf("coverage too small, got %d want 7", len(coverage))
+ }
+}
+
+func TestAndFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := logicFlags64(x & y)
+ b := flagRegister2flagConstant(asmAndFlags(x, y), false)
+ if a != b {
+ t.Errorf("asmAnd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 3 {
+ t.Errorf("coverage too small, got %d want 3", len(coverage))
+ }
+}
+
+func asmAddFlags(x, y int64) int
+func asmSubFlags(x, y int64) int
+func asmAndFlags(x, y int64) int
+
+func flagRegister2flagConstant(x int, sub bool) flagConstant {
+ var fcb flagConstantBuilder
+ switch runtime.GOARCH {
+ case "amd64":
+ fcb.Z = x>>6&1 != 0
+ fcb.N = x>>7&1 != 0
+ fcb.C = x>>0&1 != 0
+ if sub {
+ // Convert from amd64-sense to arm-sense
+ fcb.C = !fcb.C
+ }
+ fcb.V = x>>11&1 != 0
+ case "arm64":
+ fcb.Z = x>>30&1 != 0
+ fcb.N = x>>31&1 != 0
+ fcb.C = x>>29&1 != 0
+ fcb.V = x>>28&1 != 0
+ default:
+ panic("unsupported architecture: " + runtime.GOARCH)
+ }
+ return fcb.encode()
+}
diff --git a/src/cmd/compile/internal/ssa/fmahash_test.go b/src/cmd/compile/internal/ssa/fmahash_test.go
new file mode 100644
index 0000000..dfa1aa1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fmahash_test.go
@@ -0,0 +1,52 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "internal/testenv"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "testing"
+)
+
+// TestFmaHash checks that the hash-test machinery works properly for a single case.
+// It also runs ssa/check and gccheck to be sure that those are checked at least a
+// little in each run.bash. It does not check or run the generated code.
+// The test file is however a useful example of fused-vs-cascaded multiply-add.
+func TestFmaHash(t *testing.T) {
+ switch runtime.GOOS {
+ case "linux", "darwin":
+ default:
+ t.Skipf("Slow test, usually avoid it, os=%s not linux or darwin", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "amd64", "arm64":
+ default:
+ t.Skipf("Slow test, usually avoid it, arch=%s not amd64 or arm64", runtime.GOARCH)
+ }
+
+ testenv.MustHaveGoBuild(t)
+ gocmd := testenv.GoToolPath(t)
+ tmpdir := t.TempDir()
+ source := filepath.Join("testdata", "fma.go")
+ output := filepath.Join(tmpdir, "fma.exe")
+ cmd := testenv.Command(t, gocmd, "build", "-o", output, source)
+ // The hash-dependence on file path name is dodged by specifying "all hashes ending in 1" plus "all hashes ending in 0"
+ // i.e., all hashes. This will print all the FMAs; this test is only interested in one of them (that should appear near the end).
+ cmd.Env = append(cmd.Env, "GOCOMPILEDEBUG=fmahash=1/0", "GOOS=linux", "GOARCH=arm64", "HOME="+tmpdir)
+ t.Logf("%v", cmd)
+ t.Logf("%v", cmd.Env)
+ b, e := cmd.CombinedOutput()
+ if e != nil {
+ t.Error(e)
+ }
+ s := string(b) // Looking for "GOFMAHASH triggered main.main:24"
+ re := "fmahash(0?) triggered .*fma.go:29:..;.*fma.go:18:.."
+ match := regexp.MustCompile(re)
+ if !match.MatchString(s) {
+ t.Errorf("Expected to match '%s' with \n-----\n%s-----", re, s)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
new file mode 100644
index 0000000..031d94f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -0,0 +1,842 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "math"
+ "strings"
+)
+
+// A Func represents a Go func declaration (or function literal) and its body.
+// This package compiles each Func independently.
+// Funcs are single-use; a new Func must be created for every compiled function.
+type Func struct {
+ Config *Config // architecture information
+ Cache *Cache // re-usable cache
+ fe Frontend // frontend state associated with this Func, callbacks into compiler frontend
+ pass *pass // current pass information (name, options, etc.)
+ Name string // e.g. NewFunc or (*Func).NumBlocks (no package prefix)
+ Type *types.Type // type signature of the function.
+ Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
+ Entry *Block // the entry basic block
+
+ bid idAlloc // block ID allocator
+ vid idAlloc // value ID allocator
+
+ HTMLWriter *HTMLWriter // html writer, for debugging
+ PrintOrHtmlSSA bool // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false. There's an odd dependence on this in debug.go for method logf.
+ ruleMatches map[string]int // number of times countRule was called during compilation for any given string
+ ABI0 *abi.ABIConfig // A copy, for no-sync access
+ ABI1 *abi.ABIConfig // A copy, for no-sync access
+ ABISelf *abi.ABIConfig // ABI for function being compiled
+ ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions.
+
+ scheduled bool // Values in Blocks are in final order
+ laidout bool // Blocks are ordered
+ NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
+ dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+
+ // when register allocation is done, maps value ids to locations
+ RegAlloc []Location
+
+ // temporary registers allocated to rare instructions
+ tempRegs map[ID]*Register
+
+ // map from LocalSlot to set of Values that we want to store in that slot.
+ NamedValues map[LocalSlot][]*Value
+ // Names is a copy of NamedValues.Keys. We keep a separate list
+ // of keys to make iteration order deterministic.
+ Names []*LocalSlot
+ // Canonicalize root/top-level local slots, and canonicalize their pieces.
+ // Because LocalSlot pieces refer to their parents with a pointer, this ensures that equivalent slots really are equal.
+ CanonicalLocalSlots map[LocalSlot]*LocalSlot
+ CanonicalLocalSplits map[LocalSlotSplitKey]*LocalSlot
+
+ // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
+ RegArgs []Spill
+ // OwnAux describes parameters and results for this function.
+ OwnAux *AuxCall
+
+ freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil.
+ freeBlocks *Block // free Blocks linked by succstorage[0].b. All other fields except ID are 0/nil.
+
+ cachedPostorder []*Block // cached postorder traversal
+ cachedIdom []*Block // cached immediate dominators
+ cachedSdom SparseTree // cached dominator tree
+ cachedLoopnest *loopnest // cached loop nest information
+ cachedLineStarts *xposmap // cached map/set of xpos to integers
+
+ auxmap auxmap // map from aux values to opaque ids used by CSE
+ constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type
+}
+
+type LocalSlotSplitKey struct {
+ parent *LocalSlot
+ Off int64 // offset of slot in N
+ Type *types.Type // type of slot
+}
+
+// NewFunc returns a new, empty function object.
+// Caller must reset cache before calling NewFunc.
+func (c *Config) NewFunc(fe Frontend, cache *Cache) *Func {
+ return &Func{
+ fe: fe,
+ Config: c,
+ Cache: cache,
+
+ NamedValues: make(map[LocalSlot][]*Value),
+ CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot),
+ CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot),
+ }
+}
+
+// NumBlocks returns an integer larger than the id of any Block in the Func.
+func (f *Func) NumBlocks() int {
+ return f.bid.num()
+}
+
+// NumValues returns an integer larger than the id of any Value in the Func.
+func (f *Func) NumValues() int {
+ return f.vid.num()
+}
+
+// NameABI returns the function name followed by comma and the ABI number.
+// This is intended for use with GOSSAFUNC and HTML dumps, and differs from
+// the linker's "<1>" convention because "<" and ">" require shell quoting
+// and are not legal file names (for use with GOSSADIR) on Windows.
+func (f *Func) NameABI() string {
+ return FuncNameABI(f.Name, f.ABISelf.Which())
+}
+
+// FuncNameABI returns n followed by a comma and the value of a.
+// This is a separate function to allow a single point encoding
+// of the format, which is used in places where there's not a Func yet.
+func FuncNameABI(n string, a obj.ABI) string {
+ return fmt.Sprintf("%s,%d", n, a)
+}
+
+// newSparseSet returns a sparse set that can store at least up to n integers.
+func (f *Func) newSparseSet(n int) *sparseSet {
+ return f.Cache.allocSparseSet(n)
+}
+
+// retSparseSet returns a sparse set to the config's cache of sparse
+// sets to be reused by f.newSparseSet.
+func (f *Func) retSparseSet(ss *sparseSet) {
+ f.Cache.freeSparseSet(ss)
+}
+
+// newSparseMap returns a sparse map that can store at least up to n integers.
+func (f *Func) newSparseMap(n int) *sparseMap {
+ return f.Cache.allocSparseMap(n)
+}
+
+// retSparseMap returns a sparse map to the config's cache of sparse
+// sets to be reused by f.newSparseMap.
+func (f *Func) retSparseMap(ss *sparseMap) {
+ f.Cache.freeSparseMap(ss)
+}
+
+// newSparseMapPos returns a sparse map that can store at least up to n integers.
+func (f *Func) newSparseMapPos(n int) *sparseMapPos {
+ return f.Cache.allocSparseMapPos(n)
+}
+
+// retSparseMapPos returns a sparse map to the config's cache of sparse
+// sets to be reused by f.newSparseMapPos.
+func (f *Func) retSparseMapPos(ss *sparseMapPos) {
+ f.Cache.freeSparseMapPos(ss)
+}
+
+// newPoset returns a new poset from the internal cache
+func (f *Func) newPoset() *poset {
+ if len(f.Cache.scrPoset) > 0 {
+ po := f.Cache.scrPoset[len(f.Cache.scrPoset)-1]
+ f.Cache.scrPoset = f.Cache.scrPoset[:len(f.Cache.scrPoset)-1]
+ return po
+ }
+ return newPoset()
+}
+
+// retPoset returns a poset to the internal cache
+func (f *Func) retPoset(po *poset) {
+ f.Cache.scrPoset = append(f.Cache.scrPoset, po)
+}
+
+func (f *Func) localSlotAddr(slot LocalSlot) *LocalSlot {
+ a, ok := f.CanonicalLocalSlots[slot]
+ if !ok {
+ a = new(LocalSlot)
+ *a = slot // don't escape slot
+ f.CanonicalLocalSlots[slot] = a
+ }
+ return a
+}
+
+func (f *Func) SplitString(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ ptrType := types.NewPtr(types.Types[types.TUINT8])
+ lenType := types.Types[types.TINT]
+ // Split this string up into two separate variables.
+ p := f.SplitSlot(name, ".ptr", 0, ptrType)
+ l := f.SplitSlot(name, ".len", ptrType.Size(), lenType)
+ return p, l
+}
+
+func (f *Func) SplitInterface(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ n := name.N
+ u := types.Types[types.TUINTPTR]
+ t := types.NewPtr(types.Types[types.TUINT8])
+ // Split this interface up into two separate variables.
+ sfx := ".itab"
+ if n.Type().IsEmptyInterface() {
+ sfx = ".type"
+ }
+ c := f.SplitSlot(name, sfx, 0, u) // see comment in typebits.Set
+ d := f.SplitSlot(name, ".data", u.Size(), t)
+ return c, d
+}
+
+func (f *Func) SplitSlice(name *LocalSlot) (*LocalSlot, *LocalSlot, *LocalSlot) {
+ ptrType := types.NewPtr(name.Type.Elem())
+ lenType := types.Types[types.TINT]
+ p := f.SplitSlot(name, ".ptr", 0, ptrType)
+ l := f.SplitSlot(name, ".len", ptrType.Size(), lenType)
+ c := f.SplitSlot(name, ".cap", ptrType.Size()+lenType.Size(), lenType)
+ return p, l, c
+}
+
+func (f *Func) SplitComplex(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ s := name.Type.Size() / 2
+ var t *types.Type
+ if s == 8 {
+ t = types.Types[types.TFLOAT64]
+ } else {
+ t = types.Types[types.TFLOAT32]
+ }
+ r := f.SplitSlot(name, ".real", 0, t)
+ i := f.SplitSlot(name, ".imag", t.Size(), t)
+ return r, i
+}
+
+func (f *Func) SplitInt64(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ var t *types.Type
+ if name.Type.IsSigned() {
+ t = types.Types[types.TINT32]
+ } else {
+ t = types.Types[types.TUINT32]
+ }
+ if f.Config.BigEndian {
+ return f.SplitSlot(name, ".hi", 0, t), f.SplitSlot(name, ".lo", t.Size(), types.Types[types.TUINT32])
+ }
+ return f.SplitSlot(name, ".hi", t.Size(), t), f.SplitSlot(name, ".lo", 0, types.Types[types.TUINT32])
+}
+
+func (f *Func) SplitStruct(name *LocalSlot, i int) *LocalSlot {
+ st := name.Type
+ return f.SplitSlot(name, st.FieldName(i), st.FieldOff(i), st.FieldType(i))
+}
+func (f *Func) SplitArray(name *LocalSlot) *LocalSlot {
+ n := name.N
+ at := name.Type
+ if at.NumElem() != 1 {
+ base.FatalfAt(n.Pos(), "bad array size")
+ }
+ et := at.Elem()
+ return f.SplitSlot(name, "[0]", 0, et)
+}
+
+func (f *Func) SplitSlot(name *LocalSlot, sfx string, offset int64, t *types.Type) *LocalSlot {
+ lssk := LocalSlotSplitKey{name, offset, t}
+ if als, ok := f.CanonicalLocalSplits[lssk]; ok {
+ return als
+ }
+ // Note: the _ field may appear several times. But
+ // have no fear, identically-named but distinct Autos are
+ // ok, albeit maybe confusing for a debugger.
+ ls := f.fe.SplitSlot(name, sfx, offset, t)
+ f.CanonicalLocalSplits[lssk] = &ls
+ return &ls
+}
+
+// newValue allocates a new Value with the given fields and places it at the end of b.Values.
+func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value {
+ var v *Value
+ if f.freeValues != nil {
+ v = f.freeValues
+ f.freeValues = v.argstorage[0]
+ v.argstorage[0] = nil
+ } else {
+ ID := f.vid.get()
+ if int(ID) < len(f.Cache.values) {
+ v = &f.Cache.values[ID]
+ v.ID = ID
+ } else {
+ v = &Value{ID: ID}
+ }
+ }
+ v.Op = op
+ v.Type = t
+ v.Block = b
+ if notStmtBoundary(op) {
+ pos = pos.WithNotStmt()
+ }
+ v.Pos = pos
+ b.Values = append(b.Values, v)
+ return v
+}
+
+// newValueNoBlock allocates a new Value with the given fields.
+// The returned value is not placed in any block. Once the caller
+// decides on a block b, it must set b.Block and append
+// the returned value to b.Values.
+func (f *Func) newValueNoBlock(op Op, t *types.Type, pos src.XPos) *Value {
+ var v *Value
+ if f.freeValues != nil {
+ v = f.freeValues
+ f.freeValues = v.argstorage[0]
+ v.argstorage[0] = nil
+ } else {
+ ID := f.vid.get()
+ if int(ID) < len(f.Cache.values) {
+ v = &f.Cache.values[ID]
+ v.ID = ID
+ } else {
+ v = &Value{ID: ID}
+ }
+ }
+ v.Op = op
+ v.Type = t
+ v.Block = nil // caller must fix this.
+ if notStmtBoundary(op) {
+ pos = pos.WithNotStmt()
+ }
+ v.Pos = pos
+ return v
+}
+
+// LogStat writes a string key and int value as a warning in a
+// tab-separated format easily handled by spreadsheets or awk.
+// file names, lines, and function names are included to provide enough (?)
+// context to allow item-by-item comparisons across runs.
+// For example:
+// awk 'BEGIN {FS="\t"} $3~/TIME/{sum+=$4} END{print "t(ns)=",sum}' t.log
+func (f *Func) LogStat(key string, args ...interface{}) {
+ value := ""
+ for _, a := range args {
+ value += fmt.Sprintf("\t%v", a)
+ }
+ n := "missing_pass"
+ if f.pass != nil {
+ n = strings.Replace(f.pass.name, " ", "_", -1)
+ }
+ f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name)
+}
+
+// unCacheLine removes v from f's constant cache "line" for aux,
+// resets v.InCache when it is found (and removed),
+// and returns whether v was found in that line.
+func (f *Func) unCacheLine(v *Value, aux int64) bool {
+ vv := f.constants[aux]
+ for i, cv := range vv {
+ if v == cv {
+ vv[i] = vv[len(vv)-1]
+ vv[len(vv)-1] = nil
+ f.constants[aux] = vv[0 : len(vv)-1]
+ v.InCache = false
+ return true
+ }
+ }
+ return false
+}
+
+// unCache removes v from f's constant cache.
+func (f *Func) unCache(v *Value) {
+ if v.InCache {
+ aux := v.AuxInt
+ if f.unCacheLine(v, aux) {
+ return
+ }
+ if aux == 0 {
+ switch v.Op {
+ case OpConstNil:
+ aux = constNilMagic
+ case OpConstSlice:
+ aux = constSliceMagic
+ case OpConstString:
+ aux = constEmptyStringMagic
+ case OpConstInterface:
+ aux = constInterfaceMagic
+ }
+ if aux != 0 && f.unCacheLine(v, aux) {
+ return
+ }
+ }
+ f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux)
+ }
+}
+
+// freeValue frees a value. It must no longer be referenced or have any args.
+func (f *Func) freeValue(v *Value) {
+ if v.Block == nil {
+ f.Fatalf("trying to free an already freed value")
+ }
+ if v.Uses != 0 {
+ f.Fatalf("value %s still has %d uses", v, v.Uses)
+ }
+ if len(v.Args) != 0 {
+ f.Fatalf("value %s still has %d args", v, len(v.Args))
+ }
+ // Clear everything but ID (which we reuse).
+ id := v.ID
+ if v.InCache {
+ f.unCache(v)
+ }
+ *v = Value{}
+ v.ID = id
+ v.argstorage[0] = f.freeValues
+ f.freeValues = v
+}
+
+// NewBlock allocates a new Block of the given kind and places it at the end of f.Blocks.
+func (f *Func) NewBlock(kind BlockKind) *Block {
+ var b *Block
+ if f.freeBlocks != nil {
+ b = f.freeBlocks
+ f.freeBlocks = b.succstorage[0].b
+ b.succstorage[0].b = nil
+ } else {
+ ID := f.bid.get()
+ if int(ID) < len(f.Cache.blocks) {
+ b = &f.Cache.blocks[ID]
+ b.ID = ID
+ } else {
+ b = &Block{ID: ID}
+ }
+ }
+ b.Kind = kind
+ b.Func = f
+ b.Preds = b.predstorage[:0]
+ b.Succs = b.succstorage[:0]
+ b.Values = b.valstorage[:0]
+ f.Blocks = append(f.Blocks, b)
+ f.invalidateCFG()
+ return b
+}
+
+func (f *Func) freeBlock(b *Block) {
+ if b.Func == nil {
+ f.Fatalf("trying to free an already freed block")
+ }
+ // Clear everything but ID (which we reuse).
+ id := b.ID
+ *b = Block{}
+ b.ID = id
+ b.succstorage[0].b = f.freeBlocks
+ f.freeBlocks = b
+}
+
+// NewValue0 returns a new value in the block with no arguments and zero aux values.
+func (b *Block) NewValue0(pos src.XPos, op Op, t *types.Type) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0I returns a new value in the block with no arguments and an auxint value.
+func (b *Block) NewValue0I(pos src.XPos, op Op, t *types.Type, auxint int64) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0A returns a new value in the block with no arguments and an aux value.
+func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux Aux) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0IA returns a new value in the block with no arguments and both an auxint and aux values.
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue1 returns a new value in the block with one argument and zero aux values.
+func (b *Block) NewValue1(pos src.XPos, op Op, t *types.Type, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1I returns a new value in the block with one argument and an auxint value.
+func (b *Block) NewValue1I(pos src.XPos, op Op, t *types.Type, auxint int64, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1A returns a new value in the block with one argument and an aux value.
+func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux Aux, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
+func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue2 returns a new value in the block with two arguments and zero aux values.
+func (b *Block) NewValue2(pos src.XPos, op Op, t *types.Type, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2A returns a new value in the block with two arguments and one aux values.
+func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2I returns a new value in the block with two arguments and an auxint value.
+func (b *Block) NewValue2I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values.
+func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue3 returns a new value in the block with three arguments and zero aux values.
+func (b *Block) NewValue3(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue3I returns a new value in the block with three arguments and an auxint value.
+func (b *Block) NewValue3I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue3A returns a new value in the block with three argument and an aux value.
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue4 returns a new value in the block with four arguments and zero aux values.
+func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, arg3 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = []*Value{arg0, arg1, arg2, arg3}
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ arg3.Uses++
+ return v
+}
+
+// NewValue4I returns a new value in the block with four arguments and auxint value.
+func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = []*Value{arg0, arg1, arg2, arg3}
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ arg3.Uses++
+ return v
+}
+
+// constVal returns a constant value for c.
+func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value {
+ if f.constants == nil {
+ f.constants = make(map[int64][]*Value)
+ }
+ vv := f.constants[c]
+ for _, v := range vv {
+ if v.Op == op && v.Type.Compare(t) == types.CMPeq {
+ if setAuxInt && v.AuxInt != c {
+ panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c))
+ }
+ return v
+ }
+ }
+ var v *Value
+ if setAuxInt {
+ v = f.Entry.NewValue0I(src.NoXPos, op, t, c)
+ } else {
+ v = f.Entry.NewValue0(src.NoXPos, op, t)
+ }
+ f.constants[c] = append(vv, v)
+ v.InCache = true
+ return v
+}
+
+// These magic auxint values let us easily cache non-numeric constants
+// using the same constants map while making collisions unlikely.
+// These values are unlikely to occur in regular code and
+// are easy to grep for in case of bugs.
+const (
+ constSliceMagic = 1122334455
+ constInterfaceMagic = 2233445566
+ constNilMagic = 3344556677
+ constEmptyStringMagic = 4455667788
+)
+
+// ConstBool returns an int constant representing its argument.
+func (f *Func) ConstBool(t *types.Type, c bool) *Value {
+ i := int64(0)
+ if c {
+ i = 1
+ }
+ return f.constVal(OpConstBool, t, i, true)
+}
+func (f *Func) ConstInt8(t *types.Type, c int8) *Value {
+ return f.constVal(OpConst8, t, int64(c), true)
+}
+func (f *Func) ConstInt16(t *types.Type, c int16) *Value {
+ return f.constVal(OpConst16, t, int64(c), true)
+}
+func (f *Func) ConstInt32(t *types.Type, c int32) *Value {
+ return f.constVal(OpConst32, t, int64(c), true)
+}
+func (f *Func) ConstInt64(t *types.Type, c int64) *Value {
+ return f.constVal(OpConst64, t, c, true)
+}
+func (f *Func) ConstFloat32(t *types.Type, c float64) *Value {
+ return f.constVal(OpConst32F, t, int64(math.Float64bits(float64(float32(c)))), true)
+}
+func (f *Func) ConstFloat64(t *types.Type, c float64) *Value {
+ return f.constVal(OpConst64F, t, int64(math.Float64bits(c)), true)
+}
+
+func (f *Func) ConstSlice(t *types.Type) *Value {
+ return f.constVal(OpConstSlice, t, constSliceMagic, false)
+}
+func (f *Func) ConstInterface(t *types.Type) *Value {
+ return f.constVal(OpConstInterface, t, constInterfaceMagic, false)
+}
+func (f *Func) ConstNil(t *types.Type) *Value {
+ return f.constVal(OpConstNil, t, constNilMagic, false)
+}
+func (f *Func) ConstEmptyString(t *types.Type) *Value {
+ v := f.constVal(OpConstString, t, constEmptyStringMagic, false)
+ v.Aux = StringToAux("")
+ return v
+}
+func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value {
+ v := f.constVal(OpOffPtr, t, c, true)
+ if len(v.Args) == 0 {
+ v.AddArg(sp)
+ }
+ return v
+}
+
+func (f *Func) Frontend() Frontend { return f.fe }
+func (f *Func) Warnl(pos src.XPos, msg string, args ...interface{}) { f.fe.Warnl(pos, msg, args...) }
+func (f *Func) Logf(msg string, args ...interface{}) { f.fe.Logf(msg, args...) }
+func (f *Func) Log() bool { return f.fe.Log() }
+
+func (f *Func) Fatalf(msg string, args ...interface{}) {
+ stats := "crashed"
+ if f.Log() {
+ f.Logf(" pass %s end %s\n", f.pass.name, stats)
+ printFunc(f)
+ }
+ if f.HTMLWriter != nil {
+ f.HTMLWriter.WritePhase(f.pass.name, fmt.Sprintf("%s <span class=\"stats\">%s</span>", f.pass.name, stats))
+ f.HTMLWriter.flushPhases()
+ }
+ f.fe.Fatalf(f.Entry.Pos, msg, args...)
+}
+
+// postorder returns the reachable blocks in f in a postorder traversal.
+func (f *Func) postorder() []*Block {
+ if f.cachedPostorder == nil {
+ f.cachedPostorder = postorder(f)
+ }
+ return f.cachedPostorder
+}
+
+func (f *Func) Postorder() []*Block {
+ return f.postorder()
+}
+
+// Idom returns a map from block ID to the immediate dominator of that block.
+// f.Entry.ID maps to nil. Unreachable blocks map to nil as well.
+func (f *Func) Idom() []*Block {
+ if f.cachedIdom == nil {
+ f.cachedIdom = dominators(f)
+ }
+ return f.cachedIdom
+}
+
+// Sdom returns a sparse tree representing the dominator relationships
+// among the blocks of f.
+func (f *Func) Sdom() SparseTree {
+ if f.cachedSdom == nil {
+ f.cachedSdom = newSparseTree(f, f.Idom())
+ }
+ return f.cachedSdom
+}
+
+// loopnest returns the loop nest information for f.
+func (f *Func) loopnest() *loopnest {
+ if f.cachedLoopnest == nil {
+ f.cachedLoopnest = loopnestfor(f)
+ }
+ return f.cachedLoopnest
+}
+
+// invalidateCFG tells f that its CFG has changed.
+func (f *Func) invalidateCFG() {
+ f.cachedPostorder = nil
+ f.cachedIdom = nil
+ f.cachedSdom = nil
+ f.cachedLoopnest = nil
+}
+
+// DebugHashMatch returns
+//
+// base.DebugHashMatch(this function's package.name)
+//
+// for use in bug isolation. The return value is true unless
+// environment variable GOSSAHASH is set, in which case "it depends".
+// See [base.DebugHashMatch] for more information.
+func (f *Func) DebugHashMatch() bool {
+ if !base.HasDebugHash() {
+ return true
+ }
+ sym := f.fe.Func().Sym()
+ return base.DebugHashMatchPkgFunc(sym.Pkg.Path, sym.Name)
+}
+
+func (f *Func) spSb() (sp, sb *Value) {
+ initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out then recreated, should be the same.
+ for _, v := range f.Entry.Values {
+ if v.Op == OpSB {
+ sb = v
+ }
+ if v.Op == OpSP {
+ sp = v
+ }
+ if sb != nil && sp != nil {
+ return
+ }
+ }
+ if sb == nil {
+ sb = f.Entry.NewValue0(initpos.WithNotStmt(), OpSB, f.Config.Types.Uintptr)
+ }
+ if sp == nil {
+ sp = f.Entry.NewValue0(initpos.WithNotStmt(), OpSP, f.Config.Types.Uintptr)
+ }
+ return
+}
+
+// useFMA allows targeted debugging w/ GOFMAHASH
+// If you have an architecture-dependent FP glitch, this will help you find it.
+func (f *Func) useFMA(v *Value) bool {
+ if !f.Config.UseFMA {
+ return false
+ }
+ if base.FmaHash == nil {
+ return true
+ }
+ return base.FmaHash.MatchPos(v.Pos, nil)
+}
+
+// NewLocal returns a new anonymous local variable of the given type.
+func (f *Func) NewLocal(pos src.XPos, typ *types.Type) *ir.Name {
+ return typecheck.TempAt(pos, f.fe.Func(), typ) // Note: adds new auto to fn.Dcl list
+}
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
new file mode 100644
index 0000000..6923aaa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -0,0 +1,482 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains some utility functions to help define Funcs for testing.
+// As an example, the following func
+//
+// b1:
+// v1 = InitMem <mem>
+// Plain -> b2
+// b2:
+// Exit v1
+// b3:
+// v2 = Const <bool> [true]
+// If v2 -> b3 b2
+//
+// can be defined as
+//
+// fun := Fun("entry",
+// Bloc("entry",
+// Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+// Goto("exit")),
+// Bloc("exit",
+// Exit("mem")),
+// Bloc("deadblock",
+// Valu("deadval", OpConstBool, c.config.Types.Bool, 0, true),
+// If("deadval", "deadblock", "exit")))
+//
+// and the Blocks or Values used in the Func can be accessed
+// like this:
+// fun.blocks["entry"] or fun.values["deadval"]
+
+package ssa
+
+// TODO(matloob): Choose better names for Fun, Bloc, Goto, etc.
+// TODO(matloob): Write a parser for the Func disassembly. Maybe
+// the parser can be used instead of Fun.
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+// Compare two Funcs for equivalence. Their CFGs must be isomorphic,
+// and their values must correspond.
+// Requires that values and predecessors are in the same order, even
+// though Funcs could be equivalent when they are not.
+// TODO(matloob): Allow values and predecessors to be in different
+// orders if the CFG are otherwise equivalent.
+func Equiv(f, g *Func) bool {
+ valcor := make(map[*Value]*Value)
+ var checkVal func(fv, gv *Value) bool
+ checkVal = func(fv, gv *Value) bool {
+ if fv == nil && gv == nil {
+ return true
+ }
+ if valcor[fv] == nil && valcor[gv] == nil {
+ valcor[fv] = gv
+ valcor[gv] = fv
+ // Ignore ids. Ops and Types are compared for equality.
+ // TODO(matloob): Make sure types are canonical and can
+ // be compared for equality.
+ if fv.Op != gv.Op || fv.Type != gv.Type || fv.AuxInt != gv.AuxInt {
+ return false
+ }
+ if !reflect.DeepEqual(fv.Aux, gv.Aux) {
+ // This makes the assumption that aux values can be compared
+ // using DeepEqual.
+ // TODO(matloob): Aux values may be *gc.Sym pointers in the near
+ // future. Make sure they are canonical.
+ return false
+ }
+ if len(fv.Args) != len(gv.Args) {
+ return false
+ }
+ for i := range fv.Args {
+ if !checkVal(fv.Args[i], gv.Args[i]) {
+ return false
+ }
+ }
+ }
+ return valcor[fv] == gv && valcor[gv] == fv
+ }
+ blkcor := make(map[*Block]*Block)
+ var checkBlk func(fb, gb *Block) bool
+ checkBlk = func(fb, gb *Block) bool {
+ if blkcor[fb] == nil && blkcor[gb] == nil {
+ blkcor[fb] = gb
+ blkcor[gb] = fb
+ // ignore ids
+ if fb.Kind != gb.Kind {
+ return false
+ }
+ if len(fb.Values) != len(gb.Values) {
+ return false
+ }
+ for i := range fb.Values {
+ if !checkVal(fb.Values[i], gb.Values[i]) {
+ return false
+ }
+ }
+ if len(fb.Succs) != len(gb.Succs) {
+ return false
+ }
+ for i := range fb.Succs {
+ if !checkBlk(fb.Succs[i].b, gb.Succs[i].b) {
+ return false
+ }
+ }
+ if len(fb.Preds) != len(gb.Preds) {
+ return false
+ }
+ for i := range fb.Preds {
+ if !checkBlk(fb.Preds[i].b, gb.Preds[i].b) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return blkcor[fb] == gb && blkcor[gb] == fb
+ }
+
+ return checkBlk(f.Entry, g.Entry)
+}
+
+// fun is the return type of Fun. It contains the created func
+// itself as well as indexes from block and value names into the
+// corresponding Blocks and Values.
+type fun struct {
+ f *Func
+ blocks map[string]*Block
+ values map[string]*Value
+}
+
+var emptyPass pass = pass{
+ name: "empty pass",
+}
+
+// AuxCallLSym returns an AuxCall initialized with an LSym that should pass "check"
+// as the Aux of a static call.
+func AuxCallLSym(name string) *AuxCall {
+ return &AuxCall{Fn: &obj.LSym{}}
+}
+
+// Fun takes the name of an entry bloc and a series of Bloc calls, and
+// returns a fun containing the composed Func. entry must be a name
+// supplied to one of the Bloc functions. Each of the bloc names and
+// valu names should be unique across the Fun.
+func (c *Conf) Fun(entry string, blocs ...bloc) fun {
+ // TODO: Either mark some SSA tests as t.Parallel,
+ // or set up a shared Cache and Reset it between tests.
+ // But not both.
+ f := c.config.NewFunc(c.Frontend(), new(Cache))
+ f.pass = &emptyPass
+ f.cachedLineStarts = newXposmap(map[int]lineRange{0: {0, 100}, 1: {0, 100}, 2: {0, 100}, 3: {0, 100}, 4: {0, 100}})
+
+ blocks := make(map[string]*Block)
+ values := make(map[string]*Value)
+ // Create all the blocks and values.
+ for _, bloc := range blocs {
+ b := f.NewBlock(bloc.control.kind)
+ blocks[bloc.name] = b
+ for _, valu := range bloc.valus {
+ // args are filled in the second pass.
+ values[valu.name] = b.NewValue0IA(src.NoXPos, valu.op, valu.t, valu.auxint, valu.aux)
+ }
+ }
+ // Connect the blocks together and specify control values.
+ f.Entry = blocks[entry]
+ for _, bloc := range blocs {
+ b := blocks[bloc.name]
+ c := bloc.control
+ // Specify control values.
+ if c.control != "" {
+ cval, ok := values[c.control]
+ if !ok {
+ f.Fatalf("control value for block %s missing", bloc.name)
+ }
+ b.SetControl(cval)
+ }
+ // Fill in args.
+ for _, valu := range bloc.valus {
+ v := values[valu.name]
+ for _, arg := range valu.args {
+ a, ok := values[arg]
+ if !ok {
+ b.Fatalf("arg %s missing for value %s in block %s",
+ arg, valu.name, bloc.name)
+ }
+ v.AddArg(a)
+ }
+ }
+ // Connect to successors.
+ for _, succ := range c.succs {
+ b.AddEdgeTo(blocks[succ])
+ }
+ }
+ return fun{f, blocks, values}
+}
+
+// Bloc defines a block for Fun. The bloc name should be unique
+// across the containing Fun. entries should consist of calls to valu,
+// as well as one call to Goto, If, or Exit to specify the block kind.
+func Bloc(name string, entries ...interface{}) bloc {
+ b := bloc{}
+ b.name = name
+ seenCtrl := false
+ for _, e := range entries {
+ switch v := e.(type) {
+ case ctrl:
+ // there should be exactly one Ctrl entry.
+ if seenCtrl {
+ panic(fmt.Sprintf("already seen control for block %s", name))
+ }
+ b.control = v
+ seenCtrl = true
+ case valu:
+ b.valus = append(b.valus, v)
+ }
+ }
+ if !seenCtrl {
+ panic(fmt.Sprintf("block %s doesn't have control", b.name))
+ }
+ return b
+}
+
+// Valu defines a value in a block.
+func Valu(name string, op Op, t *types.Type, auxint int64, aux Aux, args ...string) valu {
+ return valu{name, op, t, auxint, aux, args}
+}
+
+// Goto specifies that this is a BlockPlain and names the single successor.
+// TODO(matloob): choose a better name.
+func Goto(succ string) ctrl {
+ return ctrl{BlockPlain, "", []string{succ}}
+}
+
+// If specifies a BlockIf.
+func If(cond, sub, alt string) ctrl {
+ return ctrl{BlockIf, cond, []string{sub, alt}}
+}
+
+// Exit specifies a BlockExit.
+func Exit(arg string) ctrl {
+ return ctrl{BlockExit, arg, []string{}}
+}
+
+// Eq specifies a BlockAMD64EQ.
+func Eq(cond, sub, alt string) ctrl {
+ return ctrl{BlockAMD64EQ, cond, []string{sub, alt}}
+}
+
+// bloc, ctrl, and valu are internal structures used by Bloc, Valu, Goto,
+// If, and Exit to help define blocks.
+
+type bloc struct {
+ name string
+ control ctrl
+ valus []valu
+}
+
+type ctrl struct {
+ kind BlockKind
+ control string
+ succs []string
+}
+
+type valu struct {
+ name string
+ op Op
+ t *types.Type
+ auxint int64
+ aux Aux
+ args []string
+}
+
+func TestArgs(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, c.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, c.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+ sum := fun.values["sum"]
+ for i, name := range []string{"a", "b"} {
+ if sum.Args[i] != fun.values[name] {
+ t.Errorf("arg %d for sum is incorrect: want %s, got %s",
+ i, sum.Args[i], fun.values[name])
+ }
+ }
+}
+
+func TestEquiv(t *testing.T) {
+ cfg := testConfig(t)
+ equivalentCases := []struct{ f, g fun }{
+ // simple case
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ },
+ // block order changed
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("exit",
+ Exit("mem")),
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit"))),
+ },
+ }
+ for _, c := range equivalentCases {
+ if !Equiv(c.f.f, c.g.f) {
+ t.Error("expected equivalence. Func definitions:")
+ t.Error(c.f.f)
+ t.Error(c.g.f)
+ }
+ }
+
+ differentCases := []struct{ f, g fun }{
+ // different shape
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem"))),
+ },
+ // value order changed
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Exit("mem"))),
+ },
+ // value auxint different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Exit("mem"))),
+ },
+ // value aux different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("foo")),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("bar")),
+ Exit("mem"))),
+ },
+ // value args different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "b", "a"),
+ Exit("mem"))),
+ },
+ }
+ for _, c := range differentCases {
+ if Equiv(c.f.f, c.g.f) {
+ t.Error("expected difference. Func definitions:")
+ t.Error(c.f.f)
+ t.Error(c.g.f)
+ }
+ }
+}
+
+// TestConstCache ensures that the cache will not return
+// reused free'd values with a non-matching AuxInt
+func TestConstCache(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem")))
+ v1 := f.f.ConstBool(c.config.Types.Bool, false)
+ v2 := f.f.ConstBool(c.config.Types.Bool, true)
+ f.f.freeValue(v1)
+ f.f.freeValue(v2)
+ v3 := f.f.ConstBool(c.config.Types.Bool, false)
+ v4 := f.f.ConstBool(c.config.Types.Bool, true)
+ if v3.AuxInt != 0 {
+ t.Errorf("expected %s to have auxint of 0\n", v3.LongString())
+ }
+ if v4.AuxInt != 1 {
+ t.Errorf("expected %s to have auxint of 1\n", v4.LongString())
+ }
+
+}
+
+// opcodeMap returns a map from opcode to the number of times that opcode
+// appears in the function.
+func opcodeMap(f *Func) map[Op]int {
+ m := map[Op]int{}
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ m[v.Op]++
+ }
+ }
+ return m
+}
+
+// opcodeCounts checks that the number of opcodes listed in m agree with the
+// number of opcodes that appear in the function.
+func checkOpcodeCounts(t *testing.T, f *Func, m map[Op]int) {
+ n := opcodeMap(f)
+ for op, cnt := range m {
+ if n[op] != cnt {
+ t.Errorf("%s appears %d times, want %d times", op, n[op], cnt)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
new file mode 100644
index 0000000..68defde
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -0,0 +1,333 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
+
+// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect).
+func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) }
+
+type fuseType uint8
+
+const (
+ fuseTypePlain fuseType = 1 << iota
+ fuseTypeIf
+ fuseTypeIntInRange
+ fuseTypeBranchRedirect
+ fuseTypeShortCircuit
+)
+
+// fuse simplifies control flow by joining basic blocks.
+func fuse(f *Func, typ fuseType) {
+ for changed := true; changed; {
+ changed = false
+ // Be sure to avoid quadratic behavior in fuseBlockPlain. See issue 13554.
+ // Previously this was dealt with using backwards iteration, now fuseBlockPlain
+ // handles large runs of blocks.
+ for i := len(f.Blocks) - 1; i >= 0; i-- {
+ b := f.Blocks[i]
+ if typ&fuseTypeIf != 0 {
+ changed = fuseBlockIf(b) || changed
+ }
+ if typ&fuseTypeIntInRange != 0 {
+ changed = fuseIntegerComparisons(b) || changed
+ }
+ if typ&fuseTypePlain != 0 {
+ changed = fuseBlockPlain(b) || changed
+ }
+ if typ&fuseTypeShortCircuit != 0 {
+ changed = shortcircuitBlock(b) || changed
+ }
+ }
+
+ if typ&fuseTypeBranchRedirect != 0 {
+ changed = fuseBranchRedirect(f) || changed
+ }
+ if changed {
+ f.invalidateCFG()
+ }
+ }
+}
+
+// fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
+//
+// b b b b
+// \ / \ / | \ / \ / | | |
+// s0 s1 | s1 s0 | | |
+// \ / | / \ | | |
+// ss ss ss ss
+//
+// If all Phi ops in ss have identical variables for slots corresponding to
+// s0, s1 and b then the branch can be dropped.
+// This optimization often comes up in switch statements with multiple
+// expressions in a case clause:
+//
+// switch n {
+// case 1,2,3: return 4
+// }
+//
+// TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway.
+func fuseBlockIf(b *Block) bool {
+ if b.Kind != BlockIf {
+ return false
+ }
+ // It doesn't matter how much Preds does s0 or s1 have.
+ var ss0, ss1 *Block
+ s0 := b.Succs[0].b
+ i0 := b.Succs[0].i
+ if s0.Kind != BlockPlain || !isEmpty(s0) {
+ s0, ss0 = b, s0
+ } else {
+ ss0 = s0.Succs[0].b
+ i0 = s0.Succs[0].i
+ }
+ s1 := b.Succs[1].b
+ i1 := b.Succs[1].i
+ if s1.Kind != BlockPlain || !isEmpty(s1) {
+ s1, ss1 = b, s1
+ } else {
+ ss1 = s1.Succs[0].b
+ i1 = s1.Succs[0].i
+ }
+ if ss0 != ss1 {
+ if s0.Kind == BlockPlain && isEmpty(s0) && s1.Kind == BlockPlain && isEmpty(s1) {
+ // Two special cases where both s0, s1 and ss are empty blocks.
+ if s0 == ss1 {
+ s0, ss0 = b, ss1
+ } else if ss0 == s1 {
+ s1, ss1 = b, ss0
+ } else {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ ss := ss0
+
+ // s0 and s1 are equal with b if the corresponding block is missing
+ // (2nd, 3rd and 4th case in the figure).
+
+ for _, v := range ss.Values {
+ if v.Op == OpPhi && v.Uses > 0 && v.Args[i0] != v.Args[i1] {
+ return false
+ }
+ }
+
+ // We do not need to redirect the Preds of s0 and s1 to ss,
+ // the following optimization will do this.
+ b.removeEdge(0)
+ if s0 != b && len(s0.Preds) == 0 {
+ s0.removeEdge(0)
+ // Move any (dead) values in s0 to b,
+ // where they will be eliminated by the next deadcode pass.
+ for _, v := range s0.Values {
+ v.Block = b
+ }
+ b.Values = append(b.Values, s0.Values...)
+ // Clear s0.
+ s0.Kind = BlockInvalid
+ s0.Values = nil
+ s0.Succs = nil
+ s0.Preds = nil
+ }
+
+ b.Kind = BlockPlain
+ b.Likely = BranchUnknown
+ b.ResetControls()
+ // The values in b may be dead codes, and clearing them in time may
+ // obtain new optimization opportunities.
+ // First put dead values that can be deleted into a slice walkValues.
+ // Then put their arguments in walkValues before resetting the dead values
+ // in walkValues, because the arguments may also become dead values.
+ walkValues := []*Value{}
+ for _, v := range b.Values {
+ if v.Uses == 0 && v.removeable() {
+ walkValues = append(walkValues, v)
+ }
+ }
+ for len(walkValues) != 0 {
+ v := walkValues[len(walkValues)-1]
+ walkValues = walkValues[:len(walkValues)-1]
+ if v.Uses == 0 && v.removeable() {
+ walkValues = append(walkValues, v.Args...)
+ v.reset(OpInvalid)
+ }
+ }
+ return true
+}
+
+// isEmpty reports whether b contains any live values.
+// There may be false positives.
+func isEmpty(b *Block) bool {
+ for _, v := range b.Values {
+ if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck {
+ return false
+ }
+ }
+ return true
+}
+
+// fuseBlockPlain handles a run of blocks with length >= 2,
+// whose interior has single predecessors and successors,
+// b must be BlockPlain, allowing it to be any node except the
+// last (multiple successors means not BlockPlain).
+// Cycles are handled and merged into b's successor.
+func fuseBlockPlain(b *Block) bool {
+ if b.Kind != BlockPlain {
+ return false
+ }
+
+ c := b.Succs[0].b
+ if len(c.Preds) != 1 || c == b { // At least 2 distinct blocks.
+ return false
+ }
+
+ // find earliest block in run. Avoid simple cycles.
+ for len(b.Preds) == 1 && b.Preds[0].b != c && b.Preds[0].b.Kind == BlockPlain {
+ b = b.Preds[0].b
+ }
+
+ // find latest block in run. Still beware of simple cycles.
+ for {
+ if c.Kind != BlockPlain {
+ break
+ } // Has exactly 1 successor
+ cNext := c.Succs[0].b
+ if cNext == b {
+ break
+ } // not a cycle
+ if len(cNext.Preds) != 1 {
+ break
+ } // no other incoming edge
+ c = cNext
+ }
+
+ // Try to preserve any statement marks on the ends of blocks; move values to C
+ var b_next *Block
+ for bx := b; bx != c; bx = b_next {
+ // For each bx with an end-of-block statement marker,
+ // try to move it to a value in the next block,
+ // or to the next block's end, if possible.
+ b_next = bx.Succs[0].b
+ if bx.Pos.IsStmt() == src.PosIsStmt {
+ l := bx.Pos.Line() // looking for another place to mark for line l
+ outOfOrder := false
+ for _, v := range b_next.Values {
+ if v.Pos.IsStmt() == src.PosNotStmt {
+ continue
+ }
+ if l == v.Pos.Line() { // Found a Value with same line, therefore done.
+ v.Pos = v.Pos.WithIsStmt()
+ l = 0
+ break
+ }
+ if l < v.Pos.Line() {
+ // The order of values in a block is not specified so OOO in a block is not interesting,
+ // but they do all come before the end of the block, so this disqualifies attaching to end of b_next.
+ outOfOrder = true
+ }
+ }
+ if l != 0 && !outOfOrder && (b_next.Pos.Line() == l || b_next.Pos.IsStmt() != src.PosIsStmt) {
+ b_next.Pos = bx.Pos.WithIsStmt()
+ }
+ }
+ // move all of bx's values to c (note containing loop excludes c)
+ for _, v := range bx.Values {
+ v.Block = c
+ }
+ }
+
+ // Compute the total number of values and find the largest value slice in the run, to maximize chance of storage reuse.
+ total := 0
+ totalBeforeMax := 0 // number of elements preceding the maximum block (i.e. its position in the result).
+ max_b := b // block with maximum capacity
+
+ for bx := b; ; bx = bx.Succs[0].b {
+ if cap(bx.Values) > cap(max_b.Values) {
+ totalBeforeMax = total
+ max_b = bx
+ }
+ total += len(bx.Values)
+ if bx == c {
+ break
+ }
+ }
+
+ // Use c's storage if fused blocks will fit, else use the max if that will fit, else allocate new storage.
+
+ // Take care to avoid c.Values pointing to b.valstorage.
+ // See golang.org/issue/18602.
+
+ // It's important to keep the elements in the same order; maintenance of
+ // debugging information depends on the order of *Values in Blocks.
+ // This can also cause changes in the order (which may affect other
+ // optimizations and possibly compiler output) for 32-vs-64 bit compilation
+ // platforms (word size affects allocation bucket size affects slice capacity).
+
+ // figure out what slice will hold the values,
+ // preposition the destination elements if not allocating new storage
+ var t []*Value
+ if total <= len(c.valstorage) {
+ t = c.valstorage[:total]
+ max_b = c
+ totalBeforeMax = total - len(c.Values)
+ copy(t[totalBeforeMax:], c.Values)
+ } else if total <= cap(max_b.Values) { // in place, somewhere
+ t = max_b.Values[0:total]
+ copy(t[totalBeforeMax:], max_b.Values)
+ } else {
+ t = make([]*Value, total)
+ max_b = nil
+ }
+
+ // copy the values
+ copyTo := 0
+ for bx := b; ; bx = bx.Succs[0].b {
+ if bx != max_b {
+ copy(t[copyTo:], bx.Values)
+ } else if copyTo != totalBeforeMax { // trust but verify.
+ panic(fmt.Errorf("totalBeforeMax (%d) != copyTo (%d), max_b=%v, b=%v, c=%v", totalBeforeMax, copyTo, max_b, b, c))
+ }
+ if bx == c {
+ break
+ }
+ copyTo += len(bx.Values)
+ }
+ c.Values = t
+
+ // replace b->c edge with preds(b) -> c
+ c.predstorage[0] = Edge{}
+ if len(b.Preds) > len(b.predstorage) {
+ c.Preds = b.Preds
+ } else {
+ c.Preds = append(c.predstorage[:0], b.Preds...)
+ }
+ for i, e := range c.Preds {
+ p := e.b
+ p.Succs[e.i] = Edge{c, i}
+ }
+ f := b.Func
+ if f.Entry == b {
+ f.Entry = c
+ }
+
+ // trash b's fields, just in case
+ for bx := b; bx != c; bx = b_next {
+ b_next = bx.Succs[0].b
+
+ bx.Kind = BlockInvalid
+ bx.Values = nil
+ bx.Preds = nil
+ bx.Succs = nil
+ }
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_branchredirect.go b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
new file mode 100644
index 0000000..153c2a5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
@@ -0,0 +1,112 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseBranchRedirect checks for a CFG in which the outbound branch
+// of an If block can be derived from its predecessor If block, in
+// some such cases, we can redirect the predecessor If block to the
+// corresponding successor block directly. For example:
+//
+// p:
+// v11 = Less64 <bool> v10 v8
+// If v11 goto b else u
+// b: <- p ...
+// v17 = Leq64 <bool> v10 v8
+// If v17 goto s else o
+//
+// We can redirect p to s directly.
+//
+// The implementation here borrows the framework of the prove pass.
+//
+// 1, Traverse all blocks of function f to find If blocks.
+// 2, For any If block b, traverse all its predecessors to find If blocks.
+// 3, For any If block predecessor p, update relationship p->b.
+// 4, Traverse all successors of b.
+// 5, For any successor s of b, try to update relationship b->s, if a
+// contradiction is found then redirect p to another successor of b.
+func fuseBranchRedirect(f *Func) bool {
+ ft := newFactsTable(f)
+ ft.checkpoint()
+
+ changed := false
+ for i := len(f.Blocks) - 1; i >= 0; i-- {
+ b := f.Blocks[i]
+ if b.Kind != BlockIf {
+ continue
+ }
+ // b is either empty or only contains the control value.
+ // TODO: if b contains only OpCopy or OpNot related to b.Controls,
+ // such as Copy(Not(Copy(Less64(v1, v2)))), perhaps it can be optimized.
+ bCtl := b.Controls[0]
+ if bCtl.Block != b && len(b.Values) != 0 || (len(b.Values) != 1 || bCtl.Uses != 1) && bCtl.Block == b {
+ continue
+ }
+
+ for k := 0; k < len(b.Preds); k++ {
+ pk := b.Preds[k]
+ p := pk.b
+ if p.Kind != BlockIf || p == b {
+ continue
+ }
+ pbranch := positive
+ if pk.i == 1 {
+ pbranch = negative
+ }
+ ft.checkpoint()
+ // Assume branch p->b is taken.
+ addBranchRestrictions(ft, p, pbranch)
+ // Check if any outgoing branch is unreachable based on the above condition.
+ parent := b
+ for j, bbranch := range [...]branch{positive, negative} {
+ ft.checkpoint()
+ // Try to update relationship b->child, and check if the contradiction occurs.
+ addBranchRestrictions(ft, parent, bbranch)
+ unsat := ft.unsat
+ ft.restore()
+ if !unsat {
+ continue
+ }
+ // This branch is impossible,so redirect p directly to another branch.
+ out := 1 ^ j
+ child := parent.Succs[out].b
+ if child == b {
+ continue
+ }
+ b.removePred(k)
+ p.Succs[pk.i] = Edge{child, len(child.Preds)}
+ // Fix up Phi value in b to have one less argument.
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ b.removePhiArg(v, k)
+ }
+ // Fix up child to have one more predecessor.
+ child.Preds = append(child.Preds, Edge{p, pk.i})
+ ai := b.Succs[out].i
+ for _, v := range child.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.AddArg(v.Args[ai])
+ }
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(b.Controls[0].Pos, "Redirect %s based on %s", b.Controls[0].Op, p.Controls[0].Op)
+ }
+ changed = true
+ k--
+ break
+ }
+ ft.restore()
+ }
+ if len(b.Preds) == 0 && b != f.Entry {
+ // Block is now dead.
+ b.Kind = BlockInvalid
+ }
+ }
+ ft.restore()
+ ft.cleanup(f)
+ return changed
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
new file mode 100644
index 0000000..f5fb84b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
+// which can be optimized to 'unsigned(x-1) < 4'.
+//
+// Look for branch structure like:
+//
+// p
+// |\
+// | b
+// |/ \
+// s0 s1
+//
+// In our example, p has control '1 <= x', b has control 'x < 5',
+// and s0 and s1 are the if and else results of the comparison.
+//
+// This will be optimized into:
+//
+// p
+// \
+// b
+// / \
+// s0 s1
+//
+// where b has the combined control value 'unsigned(x-1) < 4'.
+// Later passes will then fuse p and b.
+func fuseIntegerComparisons(b *Block) bool {
+ if len(b.Preds) != 1 {
+ return false
+ }
+ p := b.Preds[0].Block()
+ if b.Kind != BlockIf || p.Kind != BlockIf {
+ return false
+ }
+
+ // Don't merge control values if b is likely to be bypassed anyway.
+ if p.Likely == BranchLikely && p.Succs[0].Block() != b {
+ return false
+ }
+ if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
+ return false
+ }
+
+ // Check if the control values combine to make an integer inequality that
+ // can be further optimized later.
+ bc := b.Controls[0]
+ pc := p.Controls[0]
+ if !areMergeableInequalities(bc, pc) {
+ return false
+ }
+
+ // If the first (true) successors match then we have a disjunction (||).
+ // If the second (false) successors match then we have a conjunction (&&).
+ for i, op := range [2]Op{OpOrB, OpAndB} {
+ if p.Succs[i].Block() != b.Succs[i].Block() {
+ continue
+ }
+
+ // TODO(mundaym): should we also check the cost of executing b?
+ // Currently we might speculatively execute b even if b contains
+ // a lot of instructions. We could just check that len(b.Values)
+ // is lower than a fixed amount. Bear in mind however that the
+ // other optimization passes might yet reduce the cost of b
+ // significantly so we shouldn't be overly conservative.
+ if !canSpeculativelyExecute(b) {
+ return false
+ }
+
+ // Logically combine the control values for p and b.
+ v := b.NewValue0(bc.Pos, op, bc.Type)
+ v.AddArg(pc)
+ v.AddArg(bc)
+
+ // Set the combined control value as the control value for b.
+ b.SetControl(v)
+
+ // Modify p so that it jumps directly to b.
+ p.removeEdge(i)
+ p.Kind = BlockPlain
+ p.Likely = BranchUnknown
+ p.ResetControls()
+
+ return true
+ }
+
+ // TODO: could negate condition(s) to merge controls.
+ return false
+}
+
+// getConstIntArgIndex returns the index of the first argument that is a
+// constant integer or -1 if no such argument exists.
+func getConstIntArgIndex(v *Value) int {
+ for i, a := range v.Args {
+ switch a.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ return i
+ }
+ }
+ return -1
+}
+
+// isSignedInequality reports whether op represents the inequality < or ≤
+// in the signed domain.
+func isSignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+ return true
+ }
+ return false
+}
+
+// isUnsignedInequality reports whether op represents the inequality < or ≤
+// in the unsigned domain.
+func isUnsignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U:
+ return true
+ }
+ return false
+}
+
+func areMergeableInequalities(x, y *Value) bool {
+ // We need both inequalities to be either in the signed or unsigned domain.
+ // TODO(mundaym): it would also be good to merge when we have an Eq op that
+ // could be transformed into a Less/Leq. For example in the unsigned
+ // domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
+ inequalityChecks := [...]func(*Value) bool{
+ isSignedInequality,
+ isUnsignedInequality,
+ }
+ for _, f := range inequalityChecks {
+ if !f(x) || !f(y) {
+ continue
+ }
+
+ // Check that both inequalities are comparisons with constants.
+ xi := getConstIntArgIndex(x)
+ if xi < 0 {
+ return false
+ }
+ yi := getConstIntArgIndex(y)
+ if yi < 0 {
+ return false
+ }
+
+ // Check that the non-constant arguments to the inequalities
+ // are the same.
+ return x.Args[xi^1] == y.Args[yi^1]
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
new file mode 100644
index 0000000..2f89938
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -0,0 +1,305 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestFuseEliminatesOneBranch(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "exit")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseEliminatesBothBranches(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ if b == fun.blocks["else"] && b.Kind != BlockInvalid {
+ t.Errorf("else was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseHandlesPhis(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr1"),
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ if b == fun.blocks["else"] && b.Kind != BlockInvalid {
+ t.Errorf("else was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseEliminatesEmptyBlocks(t *testing.T) {
+ c := testConfig(t)
+ // Case 1, plain type empty blocks z0 ~ z3 will be eliminated.
+ // entry
+ // |
+ // z0
+ // |
+ // z1
+ // |
+ // z2
+ // |
+ // z3
+ // |
+ // exit
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("z0")),
+ Bloc("z1",
+ Goto("z2")),
+ Bloc("z3",
+ Goto("exit")),
+ Bloc("z2",
+ Goto("z3")),
+ Bloc("z0",
+ Goto("z1")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("case1 %s was not eliminated, but should have", k)
+ }
+ }
+
+ // Case 2, empty blocks with If branch, z0 and z1 will be eliminated.
+ // entry
+ // / \
+ // z0 z1
+ // \ /
+ // exit
+ fun = c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("c", OpArg, c.config.Types.Bool, 0, nil),
+ If("c", "z0", "z1")),
+ Bloc("z0",
+ Goto("exit")),
+ Bloc("z1",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("case2 %s was not eliminated, but should have", k)
+ }
+ }
+
+ // Case 3, empty blocks with multiple predecessors, z0 and z1 will be eliminated.
+ // entry
+ // | \
+ // | b0
+ // | / \
+ // z0 z1
+ // \ /
+ // exit
+ fun = c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("c1", OpArg, c.config.Types.Bool, 0, nil),
+ If("c1", "b0", "z0")),
+ Bloc("b0",
+ Valu("c2", OpArg, c.config.Types.Bool, 0, nil),
+ If("c2", "z1", "z0")),
+ Bloc("z0",
+ Goto("exit")),
+ Bloc("z1",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("case3 %s was not eliminated, but should have", k)
+ }
+ }
+}
+
+func TestFuseSideEffects(t *testing.T) {
+ c := testConfig(t)
+ // Case1, test that we don't fuse branches that have side effects but
+ // have no use (e.g. followed by infinite loop).
+ // See issue #36005.
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpArg, c.config.Types.Bool, 0, nil),
+ If("b", "then", "else")),
+ Bloc("then",
+ Valu("call1", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Goto("empty")),
+ Bloc("else",
+ Valu("call2", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Goto("empty")),
+ Bloc("empty",
+ Goto("loop")),
+ Bloc("loop",
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind == BlockInvalid {
+ t.Errorf("then is eliminated, but should not")
+ }
+ if b == fun.blocks["else"] && b.Kind == BlockInvalid {
+ t.Errorf("else is eliminated, but should not")
+ }
+ }
+
+ // Case2, z0 contains a value that has side effect, z0 shouldn't be eliminated.
+ // entry
+ // | \
+ // | z0
+ // | /
+ // exit
+ fun = c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("c1", OpArg, c.config.Types.Bool, 0, nil),
+ Valu("p", OpArg, c.config.Types.IntPtr, 0, nil),
+ If("c1", "z0", "exit")),
+ Bloc("z0",
+ Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+ z0, ok := fun.blocks["z0"]
+ if !ok || z0.Kind == BlockInvalid {
+ t.Errorf("case2 z0 is eliminated, but should not")
+ }
+}
+
+func BenchmarkFuse(b *testing.B) {
+ for _, n := range [...]int{1, 10, 100, 1000, 10000} {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ c := testConfig(b)
+
+ blocks := make([]bloc, 0, 2*n+3)
+ blocks = append(blocks,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, nil),
+ Goto("exit")))
+
+ phiArgs := make([]string, 0, 2*n)
+ for i := 0; i < n; i++ {
+ cname := fmt.Sprintf("c%d", i)
+ blocks = append(blocks,
+ Bloc(fmt.Sprintf("b%d", i), If("cond", cname, "merge")),
+ Bloc(cname, Goto("merge")))
+ phiArgs = append(phiArgs, "x", "x")
+ }
+ blocks = append(blocks,
+ Bloc("merge",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, phiArgs...),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", blocks...)
+ fuseLate(fun.f)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/generate.go b/src/cmd/compile/internal/ssa/generate.go
new file mode 100644
index 0000000..74c5b31
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/generate.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package ssa
+
+//go:generate go run -C=_gen .
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
new file mode 100644
index 0000000..ea170fb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -0,0 +1,1319 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "cmd/internal/src"
+ "fmt"
+ "html"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+type HTMLWriter struct {
+ w io.WriteCloser
+ Func *Func
+ path string
+ dot *dotWriter
+ prevHash []byte
+ pendingPhases []string
+ pendingTitles []string
+}
+
+func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter {
+ path = strings.Replace(path, "/", string(filepath.Separator), -1)
+ out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath := path
+ if !filepath.IsAbs(reportPath) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath = filepath.Join(pwd, path)
+ }
+ html := HTMLWriter{
+ w: out,
+ Func: f,
+ path: reportPath,
+ dot: newDotWriter(cfgMask),
+ }
+ html.start()
+ return &html
+}
+
+// Fatalf reports an error and exits.
+func (w *HTMLWriter) Fatalf(msg string, args ...interface{}) {
+ fe := w.Func.Frontend()
+ fe.Fatalf(src.NoXPos, msg, args...)
+}
+
+// Logf calls the (w *HTMLWriter).Func's Logf method passing along a msg and args.
+func (w *HTMLWriter) Logf(msg string, args ...interface{}) {
+ w.Func.Logf(msg, args...)
+}
+
+func (w *HTMLWriter) start() {
+ if w == nil {
+ return
+ }
+ w.WriteString("<html>")
+ w.WriteString(`<head>
+<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
+<style>
+
+body {
+ font-size: 14px;
+ font-family: Arial, sans-serif;
+}
+
+h1 {
+ font-size: 18px;
+ display: inline-block;
+ margin: 0 1em .5em 0;
+}
+
+#helplink {
+ display: inline-block;
+}
+
+#help {
+ display: none;
+}
+
+.stats {
+ font-size: 60%;
+}
+
+table {
+ border: 1px solid black;
+ table-layout: fixed;
+ width: 300px;
+}
+
+th, td {
+ border: 1px solid black;
+ overflow: hidden;
+ width: 400px;
+ vertical-align: top;
+ padding: 5px;
+}
+
+td > h2 {
+ cursor: pointer;
+ font-size: 120%;
+ margin: 5px 0px 5px 0px;
+}
+
+td.collapsed {
+ font-size: 12px;
+ width: 12px;
+ border: 1px solid white;
+ padding: 2px;
+ cursor: pointer;
+ background: #fafafa;
+}
+
+td.collapsed div {
+ text-align: right;
+ transform: rotate(180deg);
+ writing-mode: vertical-lr;
+ white-space: pre;
+}
+
+code, pre, .lines, .ast {
+ font-family: Menlo, monospace;
+ font-size: 12px;
+}
+
+pre {
+ -moz-tab-size: 4;
+ -o-tab-size: 4;
+ tab-size: 4;
+}
+
+.allow-x-scroll {
+ overflow-x: scroll;
+}
+
+.lines {
+ float: left;
+ overflow: hidden;
+ text-align: right;
+ margin-top: 7px;
+}
+
+.lines div {
+ padding-right: 10px;
+ color: gray;
+}
+
+div.line-number {
+ font-size: 12px;
+}
+
+.ast {
+ white-space: nowrap;
+}
+
+td.ssa-prog {
+ width: 600px;
+ word-wrap: break-word;
+}
+
+li {
+ list-style-type: none;
+}
+
+li.ssa-long-value {
+ text-indent: -2em; /* indent wrapped lines */
+}
+
+li.ssa-value-list {
+ display: inline;
+}
+
+li.ssa-start-block {
+ padding: 0;
+ margin: 0;
+}
+
+li.ssa-end-block {
+ padding: 0;
+ margin: 0;
+}
+
+ul.ssa-print-func {
+ padding-left: 0;
+}
+
+li.ssa-start-block button {
+ padding: 0 1em;
+ margin: 0;
+ border: none;
+ display: inline;
+ font-size: 14px;
+ float: right;
+}
+
+button:hover {
+ background-color: #eee;
+ cursor: pointer;
+}
+
+dl.ssa-gen {
+ padding-left: 0;
+}
+
+dt.ssa-prog-src {
+ padding: 0;
+ margin: 0;
+ float: left;
+ width: 4em;
+}
+
+dd.ssa-prog {
+ padding: 0;
+ margin-right: 0;
+ margin-left: 4em;
+}
+
+.dead-value {
+ color: gray;
+}
+
+.dead-block {
+ opacity: 0.5;
+}
+
+.depcycle {
+ font-style: italic;
+}
+
+.line-number {
+ font-size: 11px;
+}
+
+.no-line-number {
+ font-size: 11px;
+ color: gray;
+}
+
+.zoom {
+ position: absolute;
+ float: left;
+ white-space: nowrap;
+ background-color: #eee;
+}
+
+.zoom a:link, .zoom a:visited {
+ text-decoration: none;
+ color: blue;
+ font-size: 16px;
+ padding: 4px 2px;
+}
+
+svg {
+ cursor: default;
+ outline: 1px solid #eee;
+ width: 100%;
+}
+
+body.darkmode {
+ background-color: rgb(21, 21, 21);
+ color: rgb(230, 255, 255);
+ opacity: 100%;
+}
+
+td.darkmode {
+ background-color: rgb(21, 21, 21);
+ border: 1px solid gray;
+}
+
+body.darkmode table, th {
+ border: 1px solid gray;
+}
+
+body.darkmode text {
+ fill: white;
+}
+
+body.darkmode svg polygon:first-child {
+ fill: rgb(21, 21, 21);
+}
+
+.highlight-aquamarine { background-color: aquamarine; color: black; }
+.highlight-coral { background-color: coral; color: black; }
+.highlight-lightpink { background-color: lightpink; color: black; }
+.highlight-lightsteelblue { background-color: lightsteelblue; color: black; }
+.highlight-palegreen { background-color: palegreen; color: black; }
+.highlight-skyblue { background-color: skyblue; color: black; }
+.highlight-lightgray { background-color: lightgray; color: black; }
+.highlight-yellow { background-color: yellow; color: black; }
+.highlight-lime { background-color: lime; color: black; }
+.highlight-khaki { background-color: khaki; color: black; }
+.highlight-aqua { background-color: aqua; color: black; }
+.highlight-salmon { background-color: salmon; color: black; }
+
+/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */
+.dead-value span.highlight-aquamarine,
+.dead-block.highlight-aquamarine,
+.dead-value span.highlight-coral,
+.dead-block.highlight-coral,
+.dead-value span.highlight-lightpink,
+.dead-block.highlight-lightpink,
+.dead-value span.highlight-lightsteelblue,
+.dead-block.highlight-lightsteelblue,
+.dead-value span.highlight-palegreen,
+.dead-block.highlight-palegreen,
+.dead-value span.highlight-skyblue,
+.dead-block.highlight-skyblue,
+.dead-value span.highlight-lightgray,
+.dead-block.highlight-lightgray,
+.dead-value span.highlight-yellow,
+.dead-block.highlight-yellow,
+.dead-value span.highlight-lime,
+.dead-block.highlight-lime,
+.dead-value span.highlight-khaki,
+.dead-block.highlight-khaki,
+.dead-value span.highlight-aqua,
+.dead-block.highlight-aqua,
+.dead-value span.highlight-salmon,
+.dead-block.highlight-salmon {
+ color: gray;
+}
+
+.outline-blue { outline: #2893ff solid 2px; }
+.outline-red { outline: red solid 2px; }
+.outline-blueviolet { outline: blueviolet solid 2px; }
+.outline-darkolivegreen { outline: darkolivegreen solid 2px; }
+.outline-fuchsia { outline: fuchsia solid 2px; }
+.outline-sienna { outline: sienna solid 2px; }
+.outline-gold { outline: gold solid 2px; }
+.outline-orangered { outline: orangered solid 2px; }
+.outline-teal { outline: teal solid 2px; }
+.outline-maroon { outline: maroon solid 2px; }
+.outline-black { outline: black solid 2px; }
+
+ellipse.outline-blue { stroke-width: 2px; stroke: #2893ff; }
+ellipse.outline-red { stroke-width: 2px; stroke: red; }
+ellipse.outline-blueviolet { stroke-width: 2px; stroke: blueviolet; }
+ellipse.outline-darkolivegreen { stroke-width: 2px; stroke: darkolivegreen; }
+ellipse.outline-fuchsia { stroke-width: 2px; stroke: fuchsia; }
+ellipse.outline-sienna { stroke-width: 2px; stroke: sienna; }
+ellipse.outline-gold { stroke-width: 2px; stroke: gold; }
+ellipse.outline-orangered { stroke-width: 2px; stroke: orangered; }
+ellipse.outline-teal { stroke-width: 2px; stroke: teal; }
+ellipse.outline-maroon { stroke-width: 2px; stroke: maroon; }
+ellipse.outline-black { stroke-width: 2px; stroke: black; }
+
+/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */
+body.darkmode .outline-black { outline: gray solid 2px; }
+body.darkmode ellipse.outline-black { outline: gray solid 2px; }
+
+</style>
+
+<script type="text/javascript">
+
+// Contains phase names which are expanded by default. Other columns are collapsed.
+let expandedDefault = [
+ "start",
+ "deadcode",
+ "opt",
+ "lower",
+ "late-deadcode",
+ "regalloc",
+ "genssa",
+];
+if (history.state === null) {
+ history.pushState({expandedDefault}, "", location.href);
+}
+
+// ordered list of all available highlight colors
+var highlights = [
+ "highlight-aquamarine",
+ "highlight-coral",
+ "highlight-lightpink",
+ "highlight-lightsteelblue",
+ "highlight-palegreen",
+ "highlight-skyblue",
+ "highlight-lightgray",
+ "highlight-yellow",
+ "highlight-lime",
+ "highlight-khaki",
+ "highlight-aqua",
+ "highlight-salmon"
+];
+
+// state: which value is highlighted this color?
+var highlighted = {};
+for (var i = 0; i < highlights.length; i++) {
+ highlighted[highlights[i]] = "";
+}
+
+// ordered list of all available outline colors
+var outlines = [
+ "outline-blue",
+ "outline-red",
+ "outline-blueviolet",
+ "outline-darkolivegreen",
+ "outline-fuchsia",
+ "outline-sienna",
+ "outline-gold",
+ "outline-orangered",
+ "outline-teal",
+ "outline-maroon",
+ "outline-black"
+];
+
+// state: which value is outlined this color?
+var outlined = {};
+for (var i = 0; i < outlines.length; i++) {
+ outlined[outlines[i]] = "";
+}
+
+window.onload = function() {
+ if (history.state !== null) {
+ expandedDefault = history.state.expandedDefault;
+ }
+ if (window.matchMedia && window.matchMedia("(prefers-color-scheme: dark)").matches) {
+ toggleDarkMode();
+ document.getElementById("dark-mode-button").checked = true;
+ }
+
+ var ssaElemClicked = function(elem, event, selections, selected) {
+ event.stopPropagation();
+
+ // find all values with the same name
+ var c = elem.classList.item(0);
+ var x = document.getElementsByClassName(c);
+
+ // if selected, remove selections from all of them
+ // otherwise, attempt to add
+
+ var remove = "";
+ for (var i = 0; i < selections.length; i++) {
+ var color = selections[i];
+ if (selected[color] == c) {
+ remove = color;
+ break;
+ }
+ }
+
+ if (remove != "") {
+ for (var i = 0; i < x.length; i++) {
+ x[i].classList.remove(remove);
+ }
+ selected[remove] = "";
+ return;
+ }
+
+ // we're adding a selection
+ // find first available color
+ var avail = "";
+ for (var i = 0; i < selections.length; i++) {
+ var color = selections[i];
+ if (selected[color] == "") {
+ avail = color;
+ break;
+ }
+ }
+ if (avail == "") {
+ alert("out of selection colors; go add more");
+ return;
+ }
+
+ // set that as the selection
+ for (var i = 0; i < x.length; i++) {
+ x[i].classList.add(avail);
+ }
+ selected[avail] = c;
+ };
+
+ var ssaValueClicked = function(event) {
+ ssaElemClicked(this, event, highlights, highlighted);
+ };
+
+ var ssaBlockClicked = function(event) {
+ ssaElemClicked(this, event, outlines, outlined);
+ };
+
+ var ssavalues = document.getElementsByClassName("ssa-value");
+ for (var i = 0; i < ssavalues.length; i++) {
+ ssavalues[i].addEventListener('click', ssaValueClicked);
+ }
+
+ var ssalongvalues = document.getElementsByClassName("ssa-long-value");
+ for (var i = 0; i < ssalongvalues.length; i++) {
+ // don't attach listeners to li nodes, just the spans they contain
+ if (ssalongvalues[i].nodeName == "SPAN") {
+ ssalongvalues[i].addEventListener('click', ssaValueClicked);
+ }
+ }
+
+ var ssablocks = document.getElementsByClassName("ssa-block");
+ for (var i = 0; i < ssablocks.length; i++) {
+ ssablocks[i].addEventListener('click', ssaBlockClicked);
+ }
+
+ var lines = document.getElementsByClassName("line-number");
+ for (var i = 0; i < lines.length; i++) {
+ lines[i].addEventListener('click', ssaValueClicked);
+ }
+
+
+ function toggler(phase) {
+ return function() {
+ toggle_cell(phase+'-col');
+ toggle_cell(phase+'-exp');
+ const i = expandedDefault.indexOf(phase);
+ if (i !== -1) {
+ expandedDefault.splice(i, 1);
+ } else {
+ expandedDefault.push(phase);
+ }
+ history.pushState({expandedDefault}, "", location.href);
+ };
+ }
+
+ function toggle_cell(id) {
+ var e = document.getElementById(id);
+ if (e.style.display == 'table-cell') {
+ e.style.display = 'none';
+ } else {
+ e.style.display = 'table-cell';
+ }
+ }
+
+ // Go through all columns and collapse needed phases.
+ const td = document.getElementsByTagName("td");
+ for (let i = 0; i < td.length; i++) {
+ const id = td[i].id;
+ const phase = id.substr(0, id.length-4);
+ let show = expandedDefault.indexOf(phase) !== -1
+
+ // If show == false, check to see if this is a combined column (multiple phases).
+ // If combined, check each of the phases to see if they are in our expandedDefaults.
+ // If any are found, that entire combined column gets shown.
+ if (!show) {
+ const combined = phase.split('--+--');
+ const len = combined.length;
+ if (len > 1) {
+ for (let i = 0; i < len; i++) {
+ const num = expandedDefault.indexOf(combined[i]);
+ if (num !== -1) {
+ expandedDefault.splice(num, 1);
+ if (expandedDefault.indexOf(phase) === -1) {
+ expandedDefault.push(phase);
+ show = true;
+ }
+ }
+ }
+ }
+ }
+ if (id.endsWith("-exp")) {
+ const h2Els = td[i].getElementsByTagName("h2");
+ const len = h2Els.length;
+ if (len > 0) {
+ for (let i = 0; i < len; i++) {
+ h2Els[i].addEventListener('click', toggler(phase));
+ }
+ }
+ } else {
+ td[i].addEventListener('click', toggler(phase));
+ }
+ if (id.endsWith("-col") && show || id.endsWith("-exp") && !show) {
+ td[i].style.display = 'none';
+ continue;
+ }
+ td[i].style.display = 'table-cell';
+ }
+
+ // find all svg block nodes, add their block classes
+ var nodes = document.querySelectorAll('*[id^="graph_node_"]');
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ var name = node.id.toString();
+ var block = name.substring(name.lastIndexOf("_")+1);
+ node.classList.remove("node");
+ node.classList.add(block);
+ node.addEventListener('click', ssaBlockClicked);
+ var ellipse = node.getElementsByTagName('ellipse')[0];
+ ellipse.classList.add(block);
+ ellipse.addEventListener('click', ssaBlockClicked);
+ }
+
+ // make big graphs smaller
+ var targetScale = 0.5;
+ var nodes = document.querySelectorAll('*[id^="svg_graph_"]');
+ // TODO: Implement smarter auto-zoom using the viewBox attribute
+ // and in case of big graphs set the width and height of the svg graph to
+ // maximum allowed.
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ var name = node.id.toString();
+ var phase = name.substring(name.lastIndexOf("_")+1);
+ var gNode = document.getElementById("g_graph_"+phase);
+ var scale = gNode.transform.baseVal.getItem(0).matrix.a;
+ if (scale > targetScale) {
+ node.width.baseVal.value *= targetScale / scale;
+ node.height.baseVal.value *= targetScale / scale;
+ }
+ }
+};
+
+function toggle_visibility(id) {
+ var e = document.getElementById(id);
+ if (e.style.display == 'block') {
+ e.style.display = 'none';
+ } else {
+ e.style.display = 'block';
+ }
+}
+
+function hideBlock(el) {
+ var es = el.parentNode.parentNode.getElementsByClassName("ssa-value-list");
+ if (es.length===0)
+ return;
+ var e = es[0];
+ if (e.style.display === 'block' || e.style.display === '') {
+ e.style.display = 'none';
+ el.innerHTML = '+';
+ } else {
+ e.style.display = 'block';
+ el.innerHTML = '-';
+ }
+}
+
+// TODO: scale the graph with the viewBox attribute.
+function graphReduce(id) {
+ var node = document.getElementById(id);
+ if (node) {
+ node.width.baseVal.value *= 0.9;
+ node.height.baseVal.value *= 0.9;
+ }
+ return false;
+}
+
+function graphEnlarge(id) {
+ var node = document.getElementById(id);
+ if (node) {
+ node.width.baseVal.value *= 1.1;
+ node.height.baseVal.value *= 1.1;
+ }
+ return false;
+}
+
+function makeDraggable(event) {
+ var svg = event.target;
+ if (window.PointerEvent) {
+ svg.addEventListener('pointerdown', startDrag);
+ svg.addEventListener('pointermove', drag);
+ svg.addEventListener('pointerup', endDrag);
+ svg.addEventListener('pointerleave', endDrag);
+ } else {
+ svg.addEventListener('mousedown', startDrag);
+ svg.addEventListener('mousemove', drag);
+ svg.addEventListener('mouseup', endDrag);
+ svg.addEventListener('mouseleave', endDrag);
+ }
+
+ var point = svg.createSVGPoint();
+ var isPointerDown = false;
+ var pointerOrigin;
+ var viewBox = svg.viewBox.baseVal;
+
+ function getPointFromEvent (event) {
+ point.x = event.clientX;
+ point.y = event.clientY;
+
+ // We get the current transformation matrix of the SVG and we inverse it
+ var invertedSVGMatrix = svg.getScreenCTM().inverse();
+ return point.matrixTransform(invertedSVGMatrix);
+ }
+
+ function startDrag(event) {
+ isPointerDown = true;
+ pointerOrigin = getPointFromEvent(event);
+ }
+
+ function drag(event) {
+ if (!isPointerDown) {
+ return;
+ }
+ event.preventDefault();
+
+ var pointerPosition = getPointFromEvent(event);
+ viewBox.x -= (pointerPosition.x - pointerOrigin.x);
+ viewBox.y -= (pointerPosition.y - pointerOrigin.y);
+ }
+
+ function endDrag(event) {
+ isPointerDown = false;
+ }
+}
+
+function toggleDarkMode() {
+ document.body.classList.toggle('darkmode');
+
+ // Collect all of the "collapsed" elements and apply dark mode on each collapsed column
+ const collapsedEls = document.getElementsByClassName('collapsed');
+ const len = collapsedEls.length;
+
+ for (let i = 0; i < len; i++) {
+ collapsedEls[i].classList.toggle('darkmode');
+ }
+
+ // Collect and spread the appropriate elements from all of the svgs on the page into one array
+ const svgParts = [
+ ...document.querySelectorAll('path'),
+ ...document.querySelectorAll('ellipse'),
+ ...document.querySelectorAll('polygon'),
+ ];
+
+ // Iterate over the svgParts specifically looking for white and black fill/stroke to be toggled.
+ // The verbose conditional is intentional here so that we do not mutate any svg path, ellipse, or polygon that is of any color other than white or black.
+ svgParts.forEach(el => {
+ if (el.attributes.stroke.value === 'white') {
+ el.attributes.stroke.value = 'black';
+ } else if (el.attributes.stroke.value === 'black') {
+ el.attributes.stroke.value = 'white';
+ }
+ if (el.attributes.fill.value === 'white') {
+ el.attributes.fill.value = 'black';
+ } else if (el.attributes.fill.value === 'black') {
+ el.attributes.fill.value = 'white';
+ }
+ });
+}
+
+</script>
+
+</head>`)
+ w.WriteString("<body>")
+ w.WriteString("<h1>")
+ w.WriteString(html.EscapeString(w.Func.NameABI()))
+ w.WriteString("</h1>")
+ w.WriteString(`
+<a href="#" onclick="toggle_visibility('help');return false;" id="helplink">help</a>
+<div id="help">
+
+<p>
+Click on a value or block to toggle highlighting of that value/block
+and its uses. (Values and blocks are highlighted by ID, and IDs of
+dead items may be reused, so not all highlights necessarily correspond
+to the clicked item.)
+</p>
+
+<p>
+Faded out values and blocks are dead code that has not been eliminated.
+</p>
+
+<p>
+Values printed in italics have a dependency cycle.
+</p>
+
+<p>
+<b>CFG</b>: Dashed edge is for unlikely branches. Blue color is for backward edges.
+Edge with a dot means that this edge follows the order in which blocks were laidout.
+</p>
+
+</div>
+<label for="dark-mode-button" style="margin-left: 15px; cursor: pointer;">darkmode</label>
+<input type="checkbox" onclick="toggleDarkMode();" id="dark-mode-button" style="cursor: pointer" />
+`)
+ w.WriteString("<table>")
+ w.WriteString("<tr>")
+}
+
+func (w *HTMLWriter) Close() {
+ if w == nil {
+ return
+ }
+ io.WriteString(w.w, "</tr>")
+ io.WriteString(w.w, "</table>")
+ io.WriteString(w.w, "</body>")
+ io.WriteString(w.w, "</html>")
+ w.w.Close()
+ fmt.Printf("dumped SSA for %s to %v\n", w.Func.NameABI(), w.path)
+}
+
+// WritePhase writes f in a column headed by title.
+// phase is used for collapsing columns and should be unique across the table.
+func (w *HTMLWriter) WritePhase(phase, title string) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ hash := hashFunc(w.Func)
+ w.pendingPhases = append(w.pendingPhases, phase)
+ w.pendingTitles = append(w.pendingTitles, title)
+ if !bytes.Equal(hash, w.prevHash) {
+ w.flushPhases()
+ }
+ w.prevHash = hash
+}
+
+// flushPhases collects any pending phases and titles, writes them to the html, and resets the pending slices.
+func (w *HTMLWriter) flushPhases() {
+ phaseLen := len(w.pendingPhases)
+ if phaseLen == 0 {
+ return
+ }
+ phases := strings.Join(w.pendingPhases, " + ")
+ w.WriteMultiTitleColumn(
+ phases,
+ w.pendingTitles,
+ fmt.Sprintf("hash-%x", w.prevHash),
+ w.Func.HTML(w.pendingPhases[phaseLen-1], w.dot),
+ )
+ w.pendingPhases = w.pendingPhases[:0]
+ w.pendingTitles = w.pendingTitles[:0]
+}
+
+// FuncLines contains source code for a function to be displayed
+// in sources column.
+type FuncLines struct {
+ Filename string
+ StartLineno uint
+ Lines []string
+}
+
+// ByTopo sorts topologically: target function is on top,
+// followed by inlined functions sorted by filename and line numbers.
+type ByTopo []*FuncLines
+
+func (x ByTopo) Len() int { return len(x) }
+func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByTopo) Less(i, j int) bool {
+ a := x[i]
+ b := x[j]
+ if a.Filename == b.Filename {
+ return a.StartLineno < b.StartLineno
+ }
+ return a.Filename < b.Filename
+}
+
+// WriteSources writes lines as source code in a column headed by title.
+// phase is used for collapsing columns and should be unique across the table.
+func (w *HTMLWriter) WriteSources(phase string, all []*FuncLines) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ var buf strings.Builder
+ fmt.Fprint(&buf, "<div class=\"lines\" style=\"width: 8%\">")
+ filename := ""
+ for _, fl := range all {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ if filename != fl.Filename {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ filename = fl.Filename
+ }
+ for i := range fl.Lines {
+ ln := int(fl.StartLineno) + i
+ fmt.Fprintf(&buf, "<div class=\"l%v line-number\">%v</div>", ln, ln)
+ }
+ }
+ fmt.Fprint(&buf, "</div><div style=\"width: 92%\"><pre>")
+ filename = ""
+ for _, fl := range all {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ if filename != fl.Filename {
+ fmt.Fprintf(&buf, "<div><strong>%v</strong></div>", fl.Filename)
+ filename = fl.Filename
+ }
+ for i, line := range fl.Lines {
+ ln := int(fl.StartLineno) + i
+ var escaped string
+ if strings.TrimSpace(line) == "" {
+ escaped = "&nbsp;"
+ } else {
+ escaped = html.EscapeString(line)
+ }
+ fmt.Fprintf(&buf, "<div class=\"l%v line-number\">%v</div>", ln, escaped)
+ }
+ }
+ fmt.Fprint(&buf, "</pre></div>")
+ w.WriteColumn(phase, phase, "allow-x-scroll", buf.String())
+}
+
+func (w *HTMLWriter) WriteAST(phase string, buf *bytes.Buffer) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ lines := strings.Split(buf.String(), "\n")
+ var out strings.Builder
+
+ fmt.Fprint(&out, "<div>")
+ for _, l := range lines {
+ l = strings.TrimSpace(l)
+ var escaped string
+ var lineNo string
+ if l == "" {
+ escaped = "&nbsp;"
+ } else {
+ if strings.HasPrefix(l, "buildssa") {
+ escaped = fmt.Sprintf("<b>%v</b>", l)
+ } else {
+ // Parse the line number from the format file:line:col.
+ // See the implementation in ir/fmt.go:dumpNodeHeader.
+ sl := strings.Split(l, ":")
+ if len(sl) >= 3 {
+ if _, err := strconv.Atoi(sl[len(sl)-2]); err == nil {
+ lineNo = sl[len(sl)-2]
+ }
+ }
+ escaped = html.EscapeString(l)
+ }
+ }
+ if lineNo != "" {
+ fmt.Fprintf(&out, "<div class=\"l%v line-number ast\">%v</div>", lineNo, escaped)
+ } else {
+ fmt.Fprintf(&out, "<div class=\"ast\">%v</div>", escaped)
+ }
+ }
+ fmt.Fprint(&out, "</div>")
+ w.WriteColumn(phase, phase, "allow-x-scroll", out.String())
+}
+
+// WriteColumn writes raw HTML in a column headed by title.
+// It is intended for pre- and post-compilation log output.
+func (w *HTMLWriter) WriteColumn(phase, title, class, html string) {
+ w.WriteMultiTitleColumn(phase, []string{title}, class, html)
+}
+
+func (w *HTMLWriter) WriteMultiTitleColumn(phase string, titles []string, class, html string) {
+ if w == nil {
+ return
+ }
+ id := strings.Replace(phase, " ", "-", -1)
+ // collapsed column
+ w.Printf("<td id=\"%v-col\" class=\"collapsed\"><div>%v</div></td>", id, phase)
+
+ if class == "" {
+ w.Printf("<td id=\"%v-exp\">", id)
+ } else {
+ w.Printf("<td id=\"%v-exp\" class=\"%v\">", id, class)
+ }
+ for _, title := range titles {
+ w.WriteString("<h2>" + title + "</h2>")
+ }
+ w.WriteString(html)
+ w.WriteString("</td>\n")
+}
+
+func (w *HTMLWriter) Printf(msg string, v ...interface{}) {
+ if _, err := fmt.Fprintf(w.w, msg, v...); err != nil {
+ w.Fatalf("%v", err)
+ }
+}
+
+func (w *HTMLWriter) WriteString(s string) {
+ if _, err := io.WriteString(w.w, s); err != nil {
+ w.Fatalf("%v", err)
+ }
+}
+
+func (v *Value) HTML() string {
+ // TODO: Using the value ID as the class ignores the fact
+ // that value IDs get recycled and that some values
+ // are transmuted into other values.
+ s := v.String()
+ return fmt.Sprintf("<span class=\"%s ssa-value\">%s</span>", s, s)
+}
+
+func (v *Value) LongHTML() string {
+ // TODO: Any intra-value formatting?
+ // I'm wary of adding too much visual noise,
+ // but a little bit might be valuable.
+ // We already have visual noise in the form of punctuation
+ // maybe we could replace some of that with formatting.
+ s := fmt.Sprintf("<span class=\"%s ssa-long-value\">", v.String())
+
+ linenumber := "<span class=\"no-line-number\">(?)</span>"
+ if v.Pos.IsKnown() {
+ linenumber = fmt.Sprintf("<span class=\"l%v line-number\">(%s)</span>", v.Pos.LineNumber(), v.Pos.LineNumberHTML())
+ }
+
+ s += fmt.Sprintf("%s %s = %s", v.HTML(), linenumber, v.Op.String())
+
+ s += " &lt;" + html.EscapeString(v.Type.String()) + "&gt;"
+ s += html.EscapeString(v.auxString())
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %s", a.HTML())
+ }
+ r := v.Block.Func.RegAlloc
+ if int(v.ID) < len(r) && r[v.ID] != nil {
+ s += " : " + html.EscapeString(r[v.ID].String())
+ }
+ if reg := v.Block.Func.tempRegs[v.ID]; reg != nil {
+ s += " tmp=" + reg.String()
+ }
+ var names []string
+ for name, values := range v.Block.Func.NamedValues {
+ for _, value := range values {
+ if value == v {
+ names = append(names, name.String())
+ break // drop duplicates.
+ }
+ }
+ }
+ if len(names) != 0 {
+ s += " (" + strings.Join(names, ", ") + ")"
+ }
+
+ s += "</span>"
+ return s
+}
+
+func (b *Block) HTML() string {
+ // TODO: Using the value ID as the class ignores the fact
+ // that value IDs get recycled and that some values
+ // are transmuted into other values.
+ s := html.EscapeString(b.String())
+ return fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", s, s)
+}
+
+func (b *Block) LongHTML() string {
+ // TODO: improve this for HTML?
+ s := fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", html.EscapeString(b.String()), html.EscapeString(b.Kind.String()))
+ if b.Aux != nil {
+ s += html.EscapeString(fmt.Sprintf(" {%v}", b.Aux))
+ }
+ if t := b.AuxIntString(); t != "" {
+ s += html.EscapeString(fmt.Sprintf(" [%v]", t))
+ }
+ for _, c := range b.ControlValues() {
+ s += fmt.Sprintf(" %s", c.HTML())
+ }
+ if len(b.Succs) > 0 {
+ s += " &#8594;" // right arrow
+ for _, e := range b.Succs {
+ c := e.b
+ s += " " + c.HTML()
+ }
+ }
+ switch b.Likely {
+ case BranchUnlikely:
+ s += " (unlikely)"
+ case BranchLikely:
+ s += " (likely)"
+ }
+ if b.Pos.IsKnown() {
+ // TODO does not begin to deal with the full complexity of line numbers.
+ // Maybe we want a string/slice instead, of outer-inner when inlining.
+ s += fmt.Sprintf(" <span class=\"l%v line-number\">(%s)</span>", b.Pos.LineNumber(), b.Pos.LineNumberHTML())
+ }
+ return s
+}
+
+func (f *Func) HTML(phase string, dot *dotWriter) string {
+ buf := new(strings.Builder)
+ if dot != nil {
+ dot.writeFuncSVG(buf, phase, f)
+ }
+ fmt.Fprint(buf, "<code>")
+ p := htmlFuncPrinter{w: buf}
+ fprintFunc(p, f)
+
+ // fprintFunc(&buf, f) // TODO: HTML, not text, <br> for line breaks, etc.
+ fmt.Fprint(buf, "</code>")
+ return buf.String()
+}
+
+func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Func) {
+ if d.broken {
+ return
+ }
+ if _, ok := d.phases[phase]; !ok {
+ return
+ }
+ cmd := exec.Command(d.path, "-Tsvg")
+ pipe, err := cmd.StdinPipe()
+ if err != nil {
+ d.broken = true
+ fmt.Println(err)
+ return
+ }
+ buf := new(bytes.Buffer)
+ cmd.Stdout = buf
+ bufErr := new(strings.Builder)
+ cmd.Stderr = bufErr
+ err = cmd.Start()
+ if err != nil {
+ d.broken = true
+ fmt.Println(err)
+ return
+ }
+ fmt.Fprint(pipe, `digraph "" { margin=0; ranksep=.2; `)
+ id := strings.Replace(phase, " ", "-", -1)
+ fmt.Fprintf(pipe, `id="g_graph_%s";`, id)
+ fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`)
+ fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`)
+ for i, b := range f.Blocks {
+ if b.Kind == BlockInvalid {
+ continue
+ }
+ layout := ""
+ if f.laidout {
+ layout = fmt.Sprintf(" #%d", i)
+ }
+ fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v",tooltip="%v"];`, b, b, layout, b.Kind.String(), id, b, b.LongString())
+ }
+ indexOf := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ indexOf[b.ID] = i
+ }
+ layoutDrawn := make([]bool, f.NumBlocks())
+
+ ponums := make([]int32, f.NumBlocks())
+ _ = postorderWithNumbering(f, ponums)
+ isBackEdge := func(from, to ID) bool {
+ return ponums[from] <= ponums[to]
+ }
+
+ for _, b := range f.Blocks {
+ for i, s := range b.Succs {
+ style := "solid"
+ color := "black"
+ arrow := "vee"
+ if b.unlikelyIndex() == i {
+ style = "dashed"
+ }
+ if f.laidout && indexOf[s.b.ID] == indexOf[b.ID]+1 {
+ // Red color means ordered edge. It overrides other colors.
+ arrow = "dotvee"
+ layoutDrawn[s.b.ID] = true
+ } else if isBackEdge(b.ID, s.b.ID) {
+ color = "#2893ff"
+ }
+ fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s.b, i, style, color, arrow)
+ }
+ }
+ if f.laidout {
+ fmt.Fprintln(pipe, `edge[constraint=false,color=gray,style=solid,arrowhead=dot];`)
+ colors := [...]string{"#eea24f", "#f38385", "#f4d164", "#ca89fc", "gray"}
+ ci := 0
+ for i := 1; i < len(f.Blocks); i++ {
+ if layoutDrawn[f.Blocks[i].ID] {
+ continue
+ }
+ fmt.Fprintf(pipe, `%s -> %s [color="%s"];`, f.Blocks[i-1], f.Blocks[i], colors[ci])
+ ci = (ci + 1) % len(colors)
+ }
+ }
+ fmt.Fprint(pipe, "}")
+ pipe.Close()
+ err = cmd.Wait()
+ if err != nil {
+ d.broken = true
+ fmt.Printf("dot: %v\n%v\n", err, bufErr.String())
+ return
+ }
+
+ svgID := "svg_graph_" + id
+ fmt.Fprintf(w, `<div class="zoom"><button onclick="return graphReduce('%s');">-</button> <button onclick="return graphEnlarge('%s');">+</button></div>`, svgID, svgID)
+ // For now, an awful hack: edit the html as it passes through
+ // our fingers, finding '<svg ' and injecting needed attributes after it.
+ err = d.copyUntil(w, buf, `<svg `)
+ if err != nil {
+ fmt.Printf("injecting attributes: %v\n", err)
+ return
+ }
+ fmt.Fprintf(w, ` id="%s" onload="makeDraggable(evt)" `, svgID)
+ io.Copy(w, buf)
+}
+
+func (b *Block) unlikelyIndex() int {
+ switch b.Likely {
+ case BranchLikely:
+ return 1
+ case BranchUnlikely:
+ return 0
+ }
+ return -1
+}
+
+func (d *dotWriter) copyUntil(w io.Writer, buf *bytes.Buffer, sep string) error {
+ i := bytes.Index(buf.Bytes(), []byte(sep))
+ if i == -1 {
+ return fmt.Errorf("couldn't find dot sep %q", sep)
+ }
+ _, err := io.CopyN(w, buf, int64(i+len(sep)))
+ return err
+}
+
+type htmlFuncPrinter struct {
+ w io.Writer
+}
+
+func (p htmlFuncPrinter) header(f *Func) {}
+
+func (p htmlFuncPrinter) startBlock(b *Block, reachable bool) {
+ var dead string
+ if !reachable {
+ dead = "dead-block"
+ }
+ fmt.Fprintf(p.w, "<ul class=\"%s ssa-print-func %s\">", b, dead)
+ fmt.Fprintf(p.w, "<li class=\"ssa-start-block\">%s:", b.HTML())
+ if len(b.Preds) > 0 {
+ io.WriteString(p.w, " &#8592;") // left arrow
+ for _, e := range b.Preds {
+ pred := e.b
+ fmt.Fprintf(p.w, " %s", pred.HTML())
+ }
+ }
+ if len(b.Values) > 0 {
+ io.WriteString(p.w, `<button onclick="hideBlock(this)">-</button>`)
+ }
+ io.WriteString(p.w, "</li>")
+ if len(b.Values) > 0 { // start list of values
+ io.WriteString(p.w, "<li class=\"ssa-value-list\">")
+ io.WriteString(p.w, "<ul>")
+ }
+}
+
+func (p htmlFuncPrinter) endBlock(b *Block, reachable bool) {
+ if len(b.Values) > 0 { // end list of values
+ io.WriteString(p.w, "</ul>")
+ io.WriteString(p.w, "</li>")
+ }
+ io.WriteString(p.w, "<li class=\"ssa-end-block\">")
+ fmt.Fprint(p.w, b.LongHTML())
+ io.WriteString(p.w, "</li>")
+ io.WriteString(p.w, "</ul>")
+}
+
+func (p htmlFuncPrinter) value(v *Value, live bool) {
+ var dead string
+ if !live {
+ dead = "dead-value"
+ }
+ fmt.Fprintf(p.w, "<li class=\"ssa-long-value %s\">", dead)
+ fmt.Fprint(p.w, v.LongHTML())
+ io.WriteString(p.w, "</li>")
+}
+
+func (p htmlFuncPrinter) startDepCycle() {
+ fmt.Fprintln(p.w, "<span class=\"depcycle\">")
+}
+
+func (p htmlFuncPrinter) endDepCycle() {
+ fmt.Fprintln(p.w, "</span>")
+}
+
+func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) {
+ fmt.Fprintf(p.w, "<li>name %s: ", n)
+ for _, val := range vals {
+ fmt.Fprintf(p.w, "%s ", val.HTML())
+ }
+ fmt.Fprintf(p.w, "</li>")
+}
+
+type dotWriter struct {
+ path string
+ broken bool
+ phases map[string]bool // keys specify phases with CFGs
+}
+
+// newDotWriter returns non-nil value when mask is valid.
+// dotWriter will generate SVGs only for the phases specified in the mask.
+// mask can contain following patterns and combinations of them:
+// * - all of them;
+// x-y - x through y, inclusive;
+// x,y - x and y, but not the passes between.
+func newDotWriter(mask string) *dotWriter {
+ if mask == "" {
+ return nil
+ }
+ // User can specify phase name with _ instead of spaces.
+ mask = strings.Replace(mask, "_", " ", -1)
+ ph := make(map[string]bool)
+ ranges := strings.Split(mask, ",")
+ for _, r := range ranges {
+ spl := strings.Split(r, "-")
+ if len(spl) > 2 {
+ fmt.Printf("range is not valid: %v\n", mask)
+ return nil
+ }
+ var first, last int
+ if mask == "*" {
+ first = 0
+ last = len(passes) - 1
+ } else {
+ first = passIdxByName(spl[0])
+ last = passIdxByName(spl[len(spl)-1])
+ }
+ if first < 0 || last < 0 || first > last {
+ fmt.Printf("range is not valid: %v\n", r)
+ return nil
+ }
+ for p := first; p <= last; p++ {
+ ph[passes[p].name] = true
+ }
+ }
+
+ path, err := exec.LookPath("dot")
+ if err != nil {
+ fmt.Println(err)
+ return nil
+ }
+ return &dotWriter{path: path, phases: ph}
+}
+
+func passIdxByName(name string) int {
+ for i, p := range passes {
+ if p.name == name {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/src/cmd/compile/internal/ssa/id.go b/src/cmd/compile/internal/ssa/id.go
new file mode 100644
index 0000000..725279e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/id.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+type ID int32
+
+// idAlloc provides an allocator for unique integers.
+type idAlloc struct {
+ last ID
+}
+
+// get allocates an ID and returns it. IDs are always > 0.
+func (a *idAlloc) get() ID {
+ x := a.last
+ x++
+ if x == 1<<31-1 {
+ panic("too many ids for this function")
+ }
+ a.last = x
+ return x
+}
+
+// num returns the maximum ID ever returned + 1.
+func (a *idAlloc) num() int {
+ return int(a.last + 1)
+}
diff --git a/src/cmd/compile/internal/ssa/layout.go b/src/cmd/compile/internal/ssa/layout.go
new file mode 100644
index 0000000..e4a8c6f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/layout.go
@@ -0,0 +1,185 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// layout orders basic blocks in f with the goal of minimizing control flow instructions.
+// After this phase returns, the order of f.Blocks matters and is the order
+// in which those blocks will appear in the assembly output.
+func layout(f *Func) {
+ f.Blocks = layoutOrder(f)
+}
+
+// Register allocation may use a different order which has constraints
+// imposed by the linear-scan algorithm.
+func layoutRegallocOrder(f *Func) []*Block {
+ // remnant of an experiment; perhaps there will be another.
+ return layoutOrder(f)
+}
+
+func layoutOrder(f *Func) []*Block {
+ order := make([]*Block, 0, f.NumBlocks())
+ scheduled := f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(scheduled)
+ idToBlock := f.Cache.allocBlockSlice(f.NumBlocks())
+ defer f.Cache.freeBlockSlice(idToBlock)
+ indegree := f.Cache.allocIntSlice(f.NumBlocks())
+ defer f.Cache.freeIntSlice(indegree)
+ posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree
+ defer f.retSparseSet(posdegree)
+ // blocks with zero remaining degree. Use slice to simulate a LIFO queue to implement
+ // the depth-first topology sorting algorithm.
+ var zerodegree []ID
+ // LIFO queue. Track the successor blocks of the scheduled block so that when we
+ // encounter loops, we choose to schedule the successor block of the most recently
+ // scheduled block.
+ var succs []ID
+ exit := f.newSparseSet(f.NumBlocks()) // exit blocks
+ defer f.retSparseSet(exit)
+
+ // Populate idToBlock and find exit blocks.
+ for _, b := range f.Blocks {
+ idToBlock[b.ID] = b
+ if b.Kind == BlockExit {
+ exit.add(b.ID)
+ }
+ }
+
+ // Expand exit to include blocks post-dominated by exit blocks.
+ for {
+ changed := false
+ for _, id := range exit.contents() {
+ b := idToBlock[id]
+ NextPred:
+ for _, pe := range b.Preds {
+ p := pe.b
+ if exit.contains(p.ID) {
+ continue
+ }
+ for _, s := range p.Succs {
+ if !exit.contains(s.b.ID) {
+ continue NextPred
+ }
+ }
+ // All Succs are in exit; add p.
+ exit.add(p.ID)
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ // Initialize indegree of each block
+ for _, b := range f.Blocks {
+ if exit.contains(b.ID) {
+ // exit blocks are always scheduled last
+ continue
+ }
+ indegree[b.ID] = len(b.Preds)
+ if len(b.Preds) == 0 {
+ // Push an element to the tail of the queue.
+ zerodegree = append(zerodegree, b.ID)
+ } else {
+ posdegree.add(b.ID)
+ }
+ }
+
+ bid := f.Entry.ID
+blockloop:
+ for {
+ // add block to schedule
+ b := idToBlock[bid]
+ order = append(order, b)
+ scheduled[bid] = true
+ if len(order) == len(f.Blocks) {
+ break
+ }
+
+ // Here, the order of traversing the b.Succs affects the direction in which the topological
+ // sort advances in depth. Take the following cfg as an example, regardless of other factors.
+ // b1
+ // 0/ \1
+ // b2 b3
+ // Traverse b.Succs in order, the right child node b3 will be scheduled immediately after
+ // b1, traverse b.Succs in reverse order, the left child node b2 will be scheduled
+ // immediately after b1. The test results show that reverse traversal performs a little
+ // better.
+ // Note: You need to consider both layout and register allocation when testing performance.
+ for i := len(b.Succs) - 1; i >= 0; i-- {
+ c := b.Succs[i].b
+ indegree[c.ID]--
+ if indegree[c.ID] == 0 {
+ posdegree.remove(c.ID)
+ zerodegree = append(zerodegree, c.ID)
+ } else {
+ succs = append(succs, c.ID)
+ }
+ }
+
+ // Pick the next block to schedule
+ // Pick among the successor blocks that have not been scheduled yet.
+
+ // Use likely direction if we have it.
+ var likely *Block
+ switch b.Likely {
+ case BranchLikely:
+ likely = b.Succs[0].b
+ case BranchUnlikely:
+ likely = b.Succs[1].b
+ }
+ if likely != nil && !scheduled[likely.ID] {
+ bid = likely.ID
+ continue
+ }
+
+ // Use degree for now.
+ bid = 0
+ // TODO: improve this part
+ // No successor of the previously scheduled block works.
+ // Pick a zero-degree block if we can.
+ for len(zerodegree) > 0 {
+ // Pop an element from the tail of the queue.
+ cid := zerodegree[len(zerodegree)-1]
+ zerodegree = zerodegree[:len(zerodegree)-1]
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+
+ // Still nothing, pick the unscheduled successor block encountered most recently.
+ for len(succs) > 0 {
+ // Pop an element from the tail of the queue.
+ cid := succs[len(succs)-1]
+ succs = succs[:len(succs)-1]
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+
+ // Still nothing, pick any non-exit block.
+ for posdegree.size() > 0 {
+ cid := posdegree.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ // Pick any exit block.
+ // TODO: Order these to minimize jump distances?
+ for {
+ cid := exit.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ }
+ f.laidout = true
+ return order
+ //f.Blocks = order
+}
diff --git a/src/cmd/compile/internal/ssa/lca.go b/src/cmd/compile/internal/ssa/lca.go
new file mode 100644
index 0000000..6e7ad96
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lca.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/bits"
+)
+
+// Code to compute lowest common ancestors in the dominator tree.
+// https://en.wikipedia.org/wiki/Lowest_common_ancestor
+// https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space
+
+// lcaRange is a data structure that can compute lowest common ancestor queries
+// in O(n lg n) precomputed space and O(1) time per query.
+type lcaRange struct {
+ // Additional information about each block (indexed by block ID).
+ blocks []lcaRangeBlock
+
+ // Data structure for range minimum queries.
+ // rangeMin[k][i] contains the ID of the minimum depth block
+ // in the Euler tour from positions i to i+1<<k-1, inclusive.
+ rangeMin [][]ID
+}
+
+type lcaRangeBlock struct {
+ b *Block
+ parent ID // parent in dominator tree. 0 = no parent (entry or unreachable)
+ firstChild ID // first child in dominator tree
+ sibling ID // next child of parent
+ pos int32 // an index in the Euler tour where this block appears (any one of its occurrences)
+ depth int32 // depth in dominator tree (root=0, its children=1, etc.)
+}
+
+func makeLCArange(f *Func) *lcaRange {
+ dom := f.Idom()
+
+ // Build tree
+ blocks := make([]lcaRangeBlock, f.NumBlocks())
+ for _, b := range f.Blocks {
+ blocks[b.ID].b = b
+ if dom[b.ID] == nil {
+ continue // entry or unreachable
+ }
+ parent := dom[b.ID].ID
+ blocks[b.ID].parent = parent
+ blocks[b.ID].sibling = blocks[parent].firstChild
+ blocks[parent].firstChild = b.ID
+ }
+
+ // Compute euler tour ordering.
+ // Each reachable block will appear #children+1 times in the tour.
+ tour := make([]ID, 0, f.NumBlocks()*2-1)
+ type queueEntry struct {
+ bid ID // block to work on
+ cid ID // child we're already working on (0 = haven't started yet)
+ }
+ q := []queueEntry{{f.Entry.ID, 0}}
+ for len(q) > 0 {
+ n := len(q) - 1
+ bid := q[n].bid
+ cid := q[n].cid
+ q = q[:n]
+
+ // Add block to tour.
+ blocks[bid].pos = int32(len(tour))
+ tour = append(tour, bid)
+
+ // Proceed down next child edge (if any).
+ if cid == 0 {
+ // This is our first visit to b. Set its depth.
+ blocks[bid].depth = blocks[blocks[bid].parent].depth + 1
+ // Then explore its first child.
+ cid = blocks[bid].firstChild
+ } else {
+ // We've seen b before. Explore the next child.
+ cid = blocks[cid].sibling
+ }
+ if cid != 0 {
+ q = append(q, queueEntry{bid, cid}, queueEntry{cid, 0})
+ }
+ }
+
+ // Compute fast range-minimum query data structure
+ rangeMin := make([][]ID, 0, bits.Len64(uint64(len(tour))))
+ rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself.
+ for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 {
+ r := make([]ID, len(tour)-s+1)
+ for i := 0; i < len(tour)-s+1; i++ {
+ bid := rangeMin[logS-1][i]
+ bid2 := rangeMin[logS-1][i+s/2]
+ if blocks[bid2].depth < blocks[bid].depth {
+ bid = bid2
+ }
+ r[i] = bid
+ }
+ rangeMin = append(rangeMin, r)
+ }
+
+ return &lcaRange{blocks: blocks, rangeMin: rangeMin}
+}
+
+// find returns the lowest common ancestor of a and b.
+func (lca *lcaRange) find(a, b *Block) *Block {
+ if a == b {
+ return a
+ }
+ // Find the positions of a and b in the Euler tour.
+ p1 := lca.blocks[a.ID].pos
+ p2 := lca.blocks[b.ID].pos
+ if p1 > p2 {
+ p1, p2 = p2, p1
+ }
+
+ // The lowest common ancestor is the minimum depth block
+ // on the tour from p1 to p2. We've precomputed minimum
+ // depth blocks for powers-of-two subsequences of the tour.
+ // Combine the right two precomputed values to get the answer.
+ logS := uint(log64(int64(p2 - p1)))
+ bid1 := lca.rangeMin[logS][p1]
+ bid2 := lca.rangeMin[logS][p2-1<<logS+1]
+ if lca.blocks[bid1].depth < lca.blocks[bid2].depth {
+ return lca.blocks[bid1].b
+ }
+ return lca.blocks[bid2].b
+}
diff --git a/src/cmd/compile/internal/ssa/lca_test.go b/src/cmd/compile/internal/ssa/lca_test.go
new file mode 100644
index 0000000..8c8920c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lca_test.go
@@ -0,0 +1,88 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+func testLCAgen(t *testing.T, bg blockGen, size int) {
+ c := testConfig(t)
+ fun := c.Fun("entry", bg(size)...)
+ CheckFunc(fun.f)
+ if size == 4 {
+ t.Logf(fun.f.String())
+ }
+ lca1 := makeLCArange(fun.f)
+ lca2 := makeLCAeasy(fun.f)
+ for _, b := range fun.f.Blocks {
+ for _, c := range fun.f.Blocks {
+ l1 := lca1.find(b, c)
+ l2 := lca2.find(b, c)
+ if l1 != l2 {
+ t.Errorf("lca(%s,%s)=%s, want %s", b, c, l1, l2)
+ }
+ }
+ }
+}
+
+func TestLCALinear(t *testing.T) {
+ testLCAgen(t, genLinear, 10)
+ testLCAgen(t, genLinear, 100)
+}
+
+func TestLCAFwdBack(t *testing.T) {
+ testLCAgen(t, genFwdBack, 10)
+ testLCAgen(t, genFwdBack, 100)
+}
+
+func TestLCAManyPred(t *testing.T) {
+ testLCAgen(t, genManyPred, 10)
+ testLCAgen(t, genManyPred, 100)
+}
+
+func TestLCAMaxPred(t *testing.T) {
+ testLCAgen(t, genMaxPred, 10)
+ testLCAgen(t, genMaxPred, 100)
+}
+
+func TestLCAMaxPredValue(t *testing.T) {
+ testLCAgen(t, genMaxPredValue, 10)
+ testLCAgen(t, genMaxPredValue, 100)
+}
+
+// Simple implementation of LCA to compare against.
+type lcaEasy struct {
+ parent []*Block
+}
+
+func makeLCAeasy(f *Func) *lcaEasy {
+ return &lcaEasy{parent: dominators(f)}
+}
+
+func (lca *lcaEasy) find(a, b *Block) *Block {
+ da := lca.depth(a)
+ db := lca.depth(b)
+ for da > db {
+ da--
+ a = lca.parent[a.ID]
+ }
+ for da < db {
+ db--
+ b = lca.parent[b.ID]
+ }
+ for a != b {
+ a = lca.parent[a.ID]
+ b = lca.parent[b.ID]
+ }
+ return a
+}
+
+func (lca *lcaEasy) depth(b *Block) int {
+ n := 0
+ for b != nil {
+ b = lca.parent[b.ID]
+ n++
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go
new file mode 100644
index 0000000..1d0e53c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/likelyadjust.go
@@ -0,0 +1,580 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+)
+
+type loop struct {
+ header *Block // The header node of this (reducible) loop
+ outer *loop // loop containing this loop
+
+ // By default, children, exits, and depth are not initialized.
+ children []*loop // loops nested directly within this loop. Initialized by assembleChildren().
+ exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits().
+
+ // Next three fields used by regalloc and/or
+ // aid in computation of inner-ness and list of blocks.
+ nBlocks int32 // Number of blocks in this loop but not within inner loops
+ depth int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths().
+ isInner bool // True if never discovered to contain a loop
+
+ // register allocation uses this.
+ containsUnavoidableCall bool // True if all paths through the loop have a call
+}
+
+// outerinner records that outer contains inner
+func (sdom SparseTree) outerinner(outer, inner *loop) {
+ // There could be other outer loops found in some random order,
+ // locate the new outer loop appropriately among them.
+
+ // Outer loop headers dominate inner loop headers.
+ // Use this to put the "new" "outer" loop in the right place.
+ oldouter := inner.outer
+ for oldouter != nil && sdom.isAncestor(outer.header, oldouter.header) {
+ inner = oldouter
+ oldouter = inner.outer
+ }
+ if outer == oldouter {
+ return
+ }
+ if oldouter != nil {
+ sdom.outerinner(oldouter, outer)
+ }
+
+ inner.outer = outer
+ outer.isInner = false
+}
+
+func checkContainsCall(bb *Block) bool {
+ if bb.Kind == BlockDefer {
+ return true
+ }
+ for _, v := range bb.Values {
+ if opcodeTable[v.Op].call {
+ return true
+ }
+ }
+ return false
+}
+
+type loopnest struct {
+ f *Func
+ b2l []*loop
+ po []*Block
+ sdom SparseTree
+ loops []*loop
+ hasIrreducible bool // TODO current treatment of irreducible loops is very flaky, if accurate loops are needed, must punt at function level.
+
+ // Record which of the lazily initialized fields have actually been initialized.
+ initializedChildren, initializedDepth, initializedExits bool
+}
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+const (
+ blDEFAULT = 0
+ blMin = blDEFAULT
+ blCALL = 1
+ blRET = 2
+ blEXIT = 3
+)
+
+var bllikelies = [4]string{"default", "call", "ret", "exit"}
+
+func describePredictionAgrees(b *Block, prediction BranchPrediction) string {
+ s := ""
+ if prediction == b.Likely {
+ s = " (agrees with previous)"
+ } else if b.Likely != BranchUnknown {
+ s = " (disagrees with previous, ignored)"
+ }
+ return s
+}
+
+func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction BranchPrediction) {
+ f.Warnl(b.Pos, "Branch prediction rule %s < %s%s",
+ bllikelies[likely-blMin], bllikelies[not-blMin], describePredictionAgrees(b, prediction))
+}
+
+func likelyadjust(f *Func) {
+ // The values assigned to certain and local only matter
+ // in their rank order. 0 is default, more positive
+ // is less likely. It's possible to assign a negative
+ // unlikeliness (though not currently the case).
+ certain := f.Cache.allocInt8Slice(f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit
+ defer f.Cache.freeInt8Slice(certain)
+ local := f.Cache.allocInt8Slice(f.NumBlocks()) // for our immediate predecessors.
+ defer f.Cache.freeInt8Slice(local)
+
+ po := f.postorder()
+ nest := f.loopnest()
+ b2l := nest.b2l
+
+ for _, b := range po {
+ switch b.Kind {
+ case BlockExit:
+ // Very unlikely.
+ local[b.ID] = blEXIT
+ certain[b.ID] = blEXIT
+
+ // Ret, it depends.
+ case BlockRet, BlockRetJmp:
+ local[b.ID] = blRET
+ certain[b.ID] = blRET
+
+ // Calls. TODO not all calls are equal, names give useful clues.
+ // Any name-based heuristics are only relative to other calls,
+ // and less influential than inferences from loop structure.
+ case BlockDefer:
+ local[b.ID] = blCALL
+ certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
+
+ default:
+ if len(b.Succs) == 1 {
+ certain[b.ID] = certain[b.Succs[0].b.ID]
+ } else if len(b.Succs) == 2 {
+ // If successor is an unvisited backedge, it's in loop and we don't care.
+ // Its default unlikely is also zero which is consistent with favoring loop edges.
+ // Notice that this can act like a "reset" on unlikeliness at loops; the
+ // default "everything returns" unlikeliness is erased by min with the
+ // backedge likeliness; however a loop with calls on every path will be
+ // tagged with call cost. Net effect is that loop entry is favored.
+ b0 := b.Succs[0].b.ID
+ b1 := b.Succs[1].b.ID
+ certain[b.ID] = min8(certain[b0], certain[b1])
+
+ l := b2l[b.ID]
+ l0 := b2l[b0]
+ l1 := b2l[b1]
+
+ prediction := b.Likely
+ // Weak loop heuristic -- both source and at least one dest are in loops,
+ // and there is a difference in the destinations.
+ // TODO what is best arrangement for nested loops?
+ if l != nil && l0 != l1 {
+ noprediction := false
+ switch {
+ // prefer not to exit loops
+ case l1 == nil:
+ prediction = BranchLikely
+ case l0 == nil:
+ prediction = BranchUnlikely
+
+ // prefer to stay in loop, not exit to outer.
+ case l == l0:
+ prediction = BranchLikely
+ case l == l1:
+ prediction = BranchUnlikely
+ default:
+ noprediction = true
+ }
+ if f.pass.debug > 0 && !noprediction {
+ f.Warnl(b.Pos, "Branch prediction rule stay in loop%s",
+ describePredictionAgrees(b, prediction))
+ }
+
+ } else {
+ // Lacking loop structure, fall back on heuristics.
+ if certain[b1] > certain[b0] {
+ prediction = BranchLikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, certain[b0], certain[b1], prediction)
+ }
+ } else if certain[b0] > certain[b1] {
+ prediction = BranchUnlikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, certain[b1], certain[b0], prediction)
+ }
+ } else if local[b1] > local[b0] {
+ prediction = BranchLikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, local[b0], local[b1], prediction)
+ }
+ } else if local[b0] > local[b1] {
+ prediction = BranchUnlikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, local[b1], local[b0], prediction)
+ }
+ }
+ }
+ if b.Likely != prediction {
+ if b.Likely == BranchUnknown {
+ b.Likely = prediction
+ }
+ }
+ }
+ // Look for calls in the block. If there is one, make this block unlikely.
+ for _, v := range b.Values {
+ if opcodeTable[v.Op].call {
+ local[b.ID] = blCALL
+ certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
+ break
+ }
+ }
+ }
+ if f.pass.debug > 2 {
+ f.Warnl(b.Pos, "BP: Block %s, local=%s, certain=%s", b, bllikelies[local[b.ID]-blMin], bllikelies[certain[b.ID]-blMin])
+ }
+
+ }
+}
+
+func (l *loop) String() string {
+ return fmt.Sprintf("hdr:%s", l.header)
+}
+
+func (l *loop) LongString() string {
+ i := ""
+ o := ""
+ if l.isInner {
+ i = ", INNER"
+ }
+ if l.outer != nil {
+ o = ", o=" + l.outer.header.String()
+ }
+ return fmt.Sprintf("hdr:%s%s%s", l.header, i, o)
+}
+
+func (l *loop) isWithinOrEq(ll *loop) bool {
+ if ll == nil { // nil means whole program
+ return true
+ }
+ for ; l != nil; l = l.outer {
+ if l == ll {
+ return true
+ }
+ }
+ return false
+}
+
+// nearestOuterLoop returns the outer loop of loop most nearly
+// containing block b; the header must dominate b. loop itself
+// is assumed to not be that loop. For acceptable performance,
+// we're relying on loop nests to not be terribly deep.
+func (l *loop) nearestOuterLoop(sdom SparseTree, b *Block) *loop {
+ var o *loop
+ for o = l.outer; o != nil && !sdom.IsAncestorEq(o.header, b); o = o.outer {
+ }
+ return o
+}
+
+func loopnestfor(f *Func) *loopnest {
+ po := f.postorder()
+ sdom := f.Sdom()
+ b2l := make([]*loop, f.NumBlocks())
+ loops := make([]*loop, 0)
+ visited := f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(visited)
+ sawIrred := false
+
+ if f.pass.debug > 2 {
+ fmt.Printf("loop finding in %s\n", f.Name)
+ }
+
+ // Reducible-loop-nest-finding.
+ for _, b := range po {
+ if f.pass != nil && f.pass.debug > 3 {
+ fmt.Printf("loop finding at %s\n", b)
+ }
+
+ var innermost *loop // innermost header reachable from this block
+
+ // IF any successor s of b is in a loop headed by h
+ // AND h dominates b
+ // THEN b is in the loop headed by h.
+ //
+ // Choose the first/innermost such h.
+ //
+ // IF s itself dominates b, then s is a loop header;
+ // and there may be more than one such s.
+ // Since there's at most 2 successors, the inner/outer ordering
+ // between them can be established with simple comparisons.
+ for _, e := range b.Succs {
+ bb := e.b
+ l := b2l[bb.ID]
+
+ if sdom.IsAncestorEq(bb, b) { // Found a loop header
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s is header\n", bb.String(), b.String())
+ }
+ if l == nil {
+ l = &loop{header: bb, isInner: true}
+ loops = append(loops, l)
+ b2l[bb.ID] = l
+ }
+ } else if !visited[bb.ID] { // Found an irreducible loop
+ sawIrred = true
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s is IRRED, in %s\n", bb.String(), b.String(), f.Name)
+ }
+ } else if l != nil {
+ // TODO handle case where l is irreducible.
+ // Perhaps a loop header is inherited.
+ // is there any loop containing our successor whose
+ // header dominates b?
+ if !sdom.IsAncestorEq(l.header, b) {
+ l = l.nearestOuterLoop(sdom, b)
+ }
+ if f.pass != nil && f.pass.debug > 4 {
+ if l == nil {
+ fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String())
+ } else {
+ fmt.Printf("loop finding succ %s of %s provides loop with header %s\n", bb.String(), b.String(), l.header.String())
+ }
+ }
+ } else { // No loop
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String())
+ }
+
+ }
+
+ if l == nil || innermost == l {
+ continue
+ }
+
+ if innermost == nil {
+ innermost = l
+ continue
+ }
+
+ if sdom.isAncestor(innermost.header, l.header) {
+ sdom.outerinner(innermost, l)
+ innermost = l
+ } else if sdom.isAncestor(l.header, innermost.header) {
+ sdom.outerinner(l, innermost)
+ }
+ }
+
+ if innermost != nil {
+ b2l[b.ID] = innermost
+ innermost.nBlocks++
+ }
+ visited[b.ID] = true
+ }
+
+ ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred}
+
+ // Calculate containsUnavoidableCall for regalloc
+ dominatedByCall := f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(dominatedByCall)
+ for _, b := range po {
+ if checkContainsCall(b) {
+ dominatedByCall[b.ID] = true
+ }
+ }
+ // Run dfs to find path through the loop that avoids all calls.
+ // Such path either escapes loop or return back to header.
+ // It isn't enough to have exit not dominated by any call, for example:
+ // ... some loop
+ // call1 call2
+ // \ /
+ // exit
+ // ...
+ // exit is not dominated by any call, but we don't have call-free path to it.
+ for _, l := range loops {
+ // Header contains call.
+ if dominatedByCall[l.header.ID] {
+ l.containsUnavoidableCall = true
+ continue
+ }
+ callfreepath := false
+ tovisit := make([]*Block, 0, len(l.header.Succs))
+ // Push all non-loop non-exit successors of header onto toVisit.
+ for _, s := range l.header.Succs {
+ nb := s.Block()
+ // This corresponds to loop with zero iterations.
+ if !l.iterationEnd(nb, b2l) {
+ tovisit = append(tovisit, nb)
+ }
+ }
+ for len(tovisit) > 0 {
+ cur := tovisit[len(tovisit)-1]
+ tovisit = tovisit[:len(tovisit)-1]
+ if dominatedByCall[cur.ID] {
+ continue
+ }
+ // Record visited in dominatedByCall.
+ dominatedByCall[cur.ID] = true
+ for _, s := range cur.Succs {
+ nb := s.Block()
+ if l.iterationEnd(nb, b2l) {
+ callfreepath = true
+ }
+ if !dominatedByCall[nb.ID] {
+ tovisit = append(tovisit, nb)
+ }
+
+ }
+ if callfreepath {
+ break
+ }
+ }
+ if !callfreepath {
+ l.containsUnavoidableCall = true
+ }
+ }
+
+ // Curious about the loopiness? "-d=ssa/likelyadjust/stats"
+ if f.pass != nil && f.pass.stats > 0 && len(loops) > 0 {
+ ln.assembleChildren()
+ ln.calculateDepths()
+ ln.findExits()
+
+ // Note stats for non-innermost loops are slightly flawed because
+ // they don't account for inner loop exits that span multiple levels.
+
+ for _, l := range loops {
+ x := len(l.exits)
+ cf := 0
+ if !l.containsUnavoidableCall {
+ cf = 1
+ }
+ inner := 0
+ if l.isInner {
+ inner++
+ }
+
+ f.LogStat("loopstats:",
+ l.depth, "depth", x, "exits",
+ inner, "is_inner", cf, "always_calls", l.nBlocks, "n_blocks")
+ }
+ }
+
+ if f.pass != nil && f.pass.debug > 1 && len(loops) > 0 {
+ fmt.Printf("Loops in %s:\n", f.Name)
+ for _, l := range loops {
+ fmt.Printf("%s, b=", l.LongString())
+ for _, b := range f.Blocks {
+ if b2l[b.ID] == l {
+ fmt.Printf(" %s", b)
+ }
+ }
+ fmt.Print("\n")
+ }
+ fmt.Printf("Nonloop blocks in %s:", f.Name)
+ for _, b := range f.Blocks {
+ if b2l[b.ID] == nil {
+ fmt.Printf(" %s", b)
+ }
+ }
+ fmt.Print("\n")
+ }
+ return ln
+}
+
+// assembleChildren initializes the children field of each
+// loop in the nest. Loop A is a child of loop B if A is
+// directly nested within B (based on the reducible-loops
+// detection above)
+func (ln *loopnest) assembleChildren() {
+ if ln.initializedChildren {
+ return
+ }
+ for _, l := range ln.loops {
+ if l.outer != nil {
+ l.outer.children = append(l.outer.children, l)
+ }
+ }
+ ln.initializedChildren = true
+}
+
+// calculateDepths uses the children field of loops
+// to determine the nesting depth (outer=1) of each
+// loop. This is helpful for finding exit edges.
+func (ln *loopnest) calculateDepths() {
+ if ln.initializedDepth {
+ return
+ }
+ ln.assembleChildren()
+ for _, l := range ln.loops {
+ if l.outer == nil {
+ l.setDepth(1)
+ }
+ }
+ ln.initializedDepth = true
+}
+
+// findExits uses loop depth information to find the
+// exits from a loop.
+func (ln *loopnest) findExits() {
+ if ln.initializedExits {
+ return
+ }
+ ln.calculateDepths()
+ b2l := ln.b2l
+ for _, b := range ln.po {
+ l := b2l[b.ID]
+ if l != nil && len(b.Succs) == 2 {
+ sl := b2l[b.Succs[0].b.ID]
+ if recordIfExit(l, sl, b.Succs[0].b) {
+ continue
+ }
+ sl = b2l[b.Succs[1].b.ID]
+ if recordIfExit(l, sl, b.Succs[1].b) {
+ continue
+ }
+ }
+ }
+ ln.initializedExits = true
+}
+
+// depth returns the loop nesting level of block b.
+func (ln *loopnest) depth(b ID) int16 {
+ if l := ln.b2l[b]; l != nil {
+ return l.depth
+ }
+ return 0
+}
+
+// recordIfExit checks sl (the loop containing b) to see if it
+// is outside of loop l, and if so, records b as an exit block
+// from l and returns true.
+func recordIfExit(l, sl *loop, b *Block) bool {
+ if sl != l {
+ if sl == nil || sl.depth <= l.depth {
+ l.exits = append(l.exits, b)
+ return true
+ }
+ // sl is not nil, and is deeper than l
+ // it's possible for this to be a goto into an irreducible loop made from gotos.
+ for sl.depth > l.depth {
+ sl = sl.outer
+ }
+ if sl != l {
+ l.exits = append(l.exits, b)
+ return true
+ }
+ }
+ return false
+}
+
+func (l *loop) setDepth(d int16) {
+ l.depth = d
+ for _, c := range l.children {
+ c.setDepth(d + 1)
+ }
+}
+
+// iterationEnd checks if block b ends iteration of loop l.
+// Ending iteration means either escaping to outer loop/code or
+// going back to header
+func (l *loop) iterationEnd(b *Block, b2l []*loop) bool {
+ return b == l.header || b2l[b.ID] == nil || (b2l[b.ID] != l && b2l[b.ID].depth <= l.depth)
+}
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
new file mode 100644
index 0000000..00aea87
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// A place that an ssa variable can reside.
+type Location interface {
+ String() string // name to use in assembly templates: AX, 16(SP), ...
+}
+
+// A Register is a machine register, like AX.
+// They are numbered densely from 0 (for each architecture).
+type Register struct {
+ num int32 // dense numbering
+ objNum int16 // register number from cmd/internal/obj/$ARCH
+ gcNum int16 // GC register map number (dense numbering of registers that can contain pointers)
+ name string
+}
+
+func (r *Register) String() string {
+ return r.name
+}
+
+// ObjNum returns the register number from cmd/internal/obj/$ARCH that
+// corresponds to this register.
+func (r *Register) ObjNum() int16 {
+ return r.objNum
+}
+
+// GCNum returns the runtime GC register index of r, or -1 if this
+// register can't contain pointers.
+func (r *Register) GCNum() int16 {
+ return r.gcNum
+}
+
+// A LocalSlot is a location in the stack frame, which identifies and stores
+// part or all of a PPARAM, PPARAMOUT, or PAUTO ONAME node.
+// It can represent a whole variable, part of a larger stack slot, or part of a
+// variable that has been decomposed into multiple stack slots.
+// As an example, a string could have the following configurations:
+//
+// stack layout LocalSlots
+//
+// Optimizations are disabled. s is on the stack and represented in its entirety.
+// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
+//
+// s was not decomposed, but the SSA operates on its parts individually, so
+// there is a LocalSlot for each of its fields that points into the single stack slot.
+// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
+//
+// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot.
+// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
+// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
+// parent = &{N: s, Type: string}
+type LocalSlot struct {
+ N *ir.Name // an ONAME *ir.Name representing a stack location.
+ Type *types.Type // type of slot
+ Off int64 // offset of slot in N
+
+ SplitOf *LocalSlot // slot is a decomposition of SplitOf
+ SplitOffset int64 // .. at this offset.
+}
+
+func (s LocalSlot) String() string {
+ if s.Off == 0 {
+ return fmt.Sprintf("%v[%v]", s.N, s.Type)
+ }
+ return fmt.Sprintf("%v+%d[%v]", s.N, s.Off, s.Type)
+}
+
+type LocPair [2]Location
+
+func (t LocPair) String() string {
+ n0, n1 := "nil", "nil"
+ if t[0] != nil {
+ n0 = t[0].String()
+ }
+ if t[1] != nil {
+ n1 = t[1].String()
+ }
+ return fmt.Sprintf("<%s,%s>", n0, n1)
+}
+
+type LocResults []Location
+
+func (t LocResults) String() string {
+ s := ""
+ a := "<"
+ for _, r := range t {
+ a += s
+ s = ","
+ a += r.String()
+ }
+ a += ">"
+ return a
+}
+
+type Spill struct {
+ Type *types.Type
+ Offset int64
+ Reg int16
+}
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
new file mode 100644
index 0000000..dd1f39d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -0,0 +1,437 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+type indVarFlags uint8
+
+const (
+ indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
+ indVarMaxInc // maximum value is inclusive (default: exclusive)
+ indVarCountDown // if set the iteration starts at max and count towards min (default: min towards max)
+)
+
+type indVar struct {
+ ind *Value // induction variable
+ nxt *Value // the incremented variable
+ min *Value // minimum value, inclusive/exclusive depends on flags
+ max *Value // maximum value, inclusive/exclusive depends on flags
+ entry *Block // entry block in the loop.
+ flags indVarFlags
+ // Invariant: for all blocks strictly dominated by entry:
+ // min <= ind < max [if flags == 0]
+ // min < ind < max [if flags == indVarMinExc]
+ // min <= ind <= max [if flags == indVarMaxInc]
+ // min < ind <= max [if flags == indVarMinExc|indVarMaxInc]
+}
+
+// parseIndVar checks whether the SSA value passed as argument is a valid induction
+// variable, and, if so, extracts:
+// - the minimum bound
+// - the increment value
+// - the "next" value (SSA value that is Phi'd into the induction variable every loop)
+//
+// Currently, we detect induction variables that match (Phi min nxt),
+// with nxt being (Add inc ind).
+// If it can't parse the induction variable correctly, it returns (nil, nil, nil).
+func parseIndVar(ind *Value) (min, inc, nxt *Value) {
+ if ind.Op != OpPhi {
+ return
+ }
+
+ if n := ind.Args[0]; (n.Op == OpAdd64 || n.Op == OpAdd32 || n.Op == OpAdd16 || n.Op == OpAdd8) && (n.Args[0] == ind || n.Args[1] == ind) {
+ min, nxt = ind.Args[1], n
+ } else if n := ind.Args[1]; (n.Op == OpAdd64 || n.Op == OpAdd32 || n.Op == OpAdd16 || n.Op == OpAdd8) && (n.Args[0] == ind || n.Args[1] == ind) {
+ min, nxt = ind.Args[0], n
+ } else {
+ // Not a recognized induction variable.
+ return
+ }
+
+ if nxt.Args[0] == ind { // nxt = ind + inc
+ inc = nxt.Args[1]
+ } else if nxt.Args[1] == ind { // nxt = inc + ind
+ inc = nxt.Args[0]
+ } else {
+ panic("unreachable") // one of the cases must be true from the above.
+ }
+
+ return
+}
+
+// findIndVar finds induction variables in a function.
+//
+// Look for variables and blocks that satisfy the following
+//
+// loop:
+// ind = (Phi min nxt),
+// if ind < max
+// then goto enter_loop
+// else goto exit_loop
+//
+// enter_loop:
+// do something
+// nxt = inc + ind
+// goto loop
+//
+// exit_loop:
+func findIndVar(f *Func) []indVar {
+ var iv []indVar
+ sdom := f.Sdom()
+
+ for _, b := range f.Blocks {
+ if b.Kind != BlockIf || len(b.Preds) != 2 {
+ continue
+ }
+
+ var ind *Value // induction variable
+ var init *Value // starting value
+ var limit *Value // ending value
+
+ // Check that the control if it either ind </<= limit or limit </<= ind.
+ // TODO: Handle unsigned comparisons?
+ c := b.Controls[0]
+ inclusive := false
+ switch c.Op {
+ case OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+ inclusive = true
+ fallthrough
+ case OpLess64, OpLess32, OpLess16, OpLess8:
+ ind, limit = c.Args[0], c.Args[1]
+ default:
+ continue
+ }
+
+ // See if this is really an induction variable
+ less := true
+ init, inc, nxt := parseIndVar(ind)
+ if init == nil {
+ // We failed to parse the induction variable. Before punting, we want to check
+ // whether the control op was written with the induction variable on the RHS
+ // instead of the LHS. This happens for the downwards case, like:
+ // for i := len(n)-1; i >= 0; i--
+ init, inc, nxt = parseIndVar(limit)
+ if init == nil {
+ // No recognized induction variable on either operand
+ continue
+ }
+
+ // Ok, the arguments were reversed. Swap them, and remember that we're
+ // looking at an ind >/>= loop (so the induction must be decrementing).
+ ind, limit = limit, ind
+ less = false
+ }
+
+ if ind.Block != b {
+ // TODO: Could be extended to include disjointed loop headers.
+ // I don't think this is causing missed optimizations in real world code often.
+ // See https://go.dev/issue/63955
+ continue
+ }
+
+ // Expect the increment to be a nonzero constant.
+ if !inc.isGenericIntConst() {
+ continue
+ }
+ step := inc.AuxInt
+ if step == 0 {
+ continue
+ }
+
+ // Increment sign must match comparison direction.
+ // When incrementing, the termination comparison must be ind </<= limit.
+ // When decrementing, the termination comparison must be ind >/>= limit.
+ // See issue 26116.
+ if step > 0 && !less {
+ continue
+ }
+ if step < 0 && less {
+ continue
+ }
+
+ // Up to now we extracted the induction variable (ind),
+ // the increment delta (inc), the temporary sum (nxt),
+ // the initial value (init) and the limiting value (limit).
+ //
+ // We also know that ind has the form (Phi init nxt) where
+ // nxt is (Add inc nxt) which means: 1) inc dominates nxt
+ // and 2) there is a loop starting at inc and containing nxt.
+ //
+ // We need to prove that the induction variable is incremented
+ // only when it's smaller than the limiting value.
+ // Two conditions must happen listed below to accept ind
+ // as an induction variable.
+
+ // First condition: loop entry has a single predecessor, which
+ // is the header block. This implies that b.Succs[0] is
+ // reached iff ind < limit.
+ if len(b.Succs[0].b.Preds) != 1 {
+ // b.Succs[1] must exit the loop.
+ continue
+ }
+
+ // Second condition: b.Succs[0] dominates nxt so that
+ // nxt is computed when inc < limit.
+ if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) {
+ // inc+ind can only be reached through the branch that enters the loop.
+ continue
+ }
+
+ // Check for overflow/underflow. We need to make sure that inc never causes
+ // the induction variable to wrap around.
+ // We use a function wrapper here for easy return true / return false / keep going logic.
+ // This function returns true if the increment will never overflow/underflow.
+ ok := func() bool {
+ if step > 0 {
+ if limit.isGenericIntConst() {
+ // Figure out the actual largest value.
+ v := limit.AuxInt
+ if !inclusive {
+ if v == minSignedValue(limit.Type) {
+ return false // < minint is never satisfiable.
+ }
+ v--
+ }
+ if init.isGenericIntConst() {
+ // Use stride to compute a better lower limit.
+ if init.AuxInt > v {
+ return false
+ }
+ v = addU(init.AuxInt, diff(v, init.AuxInt)/uint64(step)*uint64(step))
+ }
+ if addWillOverflow(v, step) {
+ return false
+ }
+ if inclusive && v != limit.AuxInt || !inclusive && v+1 != limit.AuxInt {
+ // We know a better limit than the programmer did. Use our limit instead.
+ limit = f.constVal(limit.Op, limit.Type, v, true)
+ inclusive = true
+ }
+ return true
+ }
+ if step == 1 && !inclusive {
+ // Can't overflow because maxint is never a possible value.
+ return true
+ }
+ // If the limit is not a constant, check to see if it is a
+ // negative offset from a known non-negative value.
+ knn, k := findKNN(limit)
+ if knn == nil || k < 0 {
+ return false
+ }
+ // limit == (something nonnegative) - k. That subtraction can't underflow, so
+ // we can trust it.
+ if inclusive {
+ // ind <= knn - k cannot overflow if step is at most k
+ return step <= k
+ }
+ // ind < knn - k cannot overflow if step is at most k+1
+ return step <= k+1 && k != maxSignedValue(limit.Type)
+ } else { // step < 0
+ if limit.Op == OpConst64 {
+ // Figure out the actual smallest value.
+ v := limit.AuxInt
+ if !inclusive {
+ if v == maxSignedValue(limit.Type) {
+ return false // > maxint is never satisfiable.
+ }
+ v++
+ }
+ if init.isGenericIntConst() {
+ // Use stride to compute a better lower limit.
+ if init.AuxInt < v {
+ return false
+ }
+ v = subU(init.AuxInt, diff(init.AuxInt, v)/uint64(-step)*uint64(-step))
+ }
+ if subWillUnderflow(v, -step) {
+ return false
+ }
+ if inclusive && v != limit.AuxInt || !inclusive && v-1 != limit.AuxInt {
+ // We know a better limit than the programmer did. Use our limit instead.
+ limit = f.constVal(limit.Op, limit.Type, v, true)
+ inclusive = true
+ }
+ return true
+ }
+ if step == -1 && !inclusive {
+ // Can't underflow because minint is never a possible value.
+ return true
+ }
+ }
+ return false
+
+ }
+
+ if ok() {
+ flags := indVarFlags(0)
+ var min, max *Value
+ if step > 0 {
+ min = init
+ max = limit
+ if inclusive {
+ flags |= indVarMaxInc
+ }
+ } else {
+ min = limit
+ max = init
+ flags |= indVarMaxInc
+ if !inclusive {
+ flags |= indVarMinExc
+ }
+ flags |= indVarCountDown
+ step = -step
+ }
+ if f.pass.debug >= 1 {
+ printIndVar(b, ind, min, max, step, flags)
+ }
+
+ iv = append(iv, indVar{
+ ind: ind,
+ nxt: nxt,
+ min: min,
+ max: max,
+ entry: b.Succs[0].b,
+ flags: flags,
+ })
+ b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
+ }
+
+ // TODO: other unrolling idioms
+ // for i := 0; i < KNN - KNN % k ; i += k
+ // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
+ // for i := 0; i < KNN&(-k) ; i += k // k a power of 2
+ }
+
+ return iv
+}
+
+// addWillOverflow reports whether x+y would result in a value more than maxint.
+func addWillOverflow(x, y int64) bool {
+ return x+y < x
+}
+
+// subWillUnderflow reports whether x-y would result in a value less than minint.
+func subWillUnderflow(x, y int64) bool {
+ return x-y > x
+}
+
+// diff returns x-y as a uint64. Requires x>=y.
+func diff(x, y int64) uint64 {
+ if x < y {
+ base.Fatalf("diff %d - %d underflowed", x, y)
+ }
+ return uint64(x - y)
+}
+
+// addU returns x+y. Requires that x+y does not overflow an int64.
+func addU(x int64, y uint64) int64 {
+ if y >= 1<<63 {
+ if x >= 0 {
+ base.Fatalf("addU overflowed %d + %d", x, y)
+ }
+ x += 1<<63 - 1
+ x += 1
+ y -= 1 << 63
+ }
+ if addWillOverflow(x, int64(y)) {
+ base.Fatalf("addU overflowed %d + %d", x, y)
+ }
+ return x + int64(y)
+}
+
+// subU returns x-y. Requires that x-y does not underflow an int64.
+func subU(x int64, y uint64) int64 {
+ if y >= 1<<63 {
+ if x < 0 {
+ base.Fatalf("subU underflowed %d - %d", x, y)
+ }
+ x -= 1<<63 - 1
+ x -= 1
+ y -= 1 << 63
+ }
+ if subWillUnderflow(x, int64(y)) {
+ base.Fatalf("subU underflowed %d - %d", x, y)
+ }
+ return x - int64(y)
+}
+
+// if v is known to be x - c, where x is known to be nonnegative and c is a
+// constant, return x, c. Otherwise return nil, 0.
+func findKNN(v *Value) (*Value, int64) {
+ var x, y *Value
+ x = v
+ switch v.Op {
+ case OpSub64, OpSub32, OpSub16, OpSub8:
+ x = v.Args[0]
+ y = v.Args[1]
+
+ case OpAdd64, OpAdd32, OpAdd16, OpAdd8:
+ x = v.Args[0]
+ y = v.Args[1]
+ if x.isGenericIntConst() {
+ x, y = y, x
+ }
+ }
+ switch x.Op {
+ case OpSliceLen, OpStringLen, OpSliceCap:
+ default:
+ return nil, 0
+ }
+ if y == nil {
+ return x, 0
+ }
+ if !y.isGenericIntConst() {
+ return nil, 0
+ }
+ if v.Op == OpAdd64 || v.Op == OpAdd32 || v.Op == OpAdd16 || v.Op == OpAdd8 {
+ return x, -y.AuxInt
+ }
+ return x, y.AuxInt
+}
+
+func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) {
+ mb1, mb2 := "[", "]"
+ if flags&indVarMinExc != 0 {
+ mb1 = "("
+ }
+ if flags&indVarMaxInc == 0 {
+ mb2 = ")"
+ }
+
+ mlim1, mlim2 := fmt.Sprint(min.AuxInt), fmt.Sprint(max.AuxInt)
+ if !min.isGenericIntConst() {
+ if b.Func.pass.debug >= 2 {
+ mlim1 = fmt.Sprint(min)
+ } else {
+ mlim1 = "?"
+ }
+ }
+ if !max.isGenericIntConst() {
+ if b.Func.pass.debug >= 2 {
+ mlim2 = fmt.Sprint(max)
+ } else {
+ mlim2 = "?"
+ }
+ }
+ extra := ""
+ if b.Func.pass.debug >= 2 {
+ extra = fmt.Sprintf(" (%s)", i)
+ }
+ b.Func.Warnl(b.Pos, "Induction variable: limits %v%v,%v%v, increment %d%s", mb1, mlim1, mlim2, mb2, inc, extra)
+}
+
+func minSignedValue(t *types.Type) int64 {
+ return -1 << (t.Size()*8 - 1)
+}
+
+func maxSignedValue(t *types.Type) int64 {
+ return 1<<((t.Size()*8)-1) - 1
+}
diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go
new file mode 100644
index 0000000..0ac473d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go
@@ -0,0 +1,512 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// an edgeMem records a backedge, together with the memory
+// phi functions at the target of the backedge that must
+// be updated when a rescheduling check replaces the backedge.
+type edgeMem struct {
+ e Edge
+ m *Value // phi for memory at dest of e
+}
+
+// a rewriteTarget is a value-argindex pair indicating
+// where a rewrite is applied. Note that this is for values,
+// not for block controls, because block controls are not targets
+// for the rewrites performed in inserting rescheduling checks.
+type rewriteTarget struct {
+ v *Value
+ i int
+}
+
+type rewrite struct {
+ before, after *Value // before is the expected value before rewrite, after is the new value installed.
+ rewrites []rewriteTarget // all the targets for this rewrite.
+}
+
+func (r *rewrite) String() string {
+ s := "\n\tbefore=" + r.before.String() + ", after=" + r.after.String()
+ for _, rw := range r.rewrites {
+ s += ", (i=" + fmt.Sprint(rw.i) + ", v=" + rw.v.LongString() + ")"
+ }
+ s += "\n"
+ return s
+}
+
+// insertLoopReschedChecks inserts rescheduling checks on loop backedges.
+func insertLoopReschedChecks(f *Func) {
+ // TODO: when split information is recorded in export data, insert checks only on backedges that can be reached on a split-call-free path.
+
+ // Loop reschedule checks compare the stack pointer with
+ // the per-g stack bound. If the pointer appears invalid,
+ // that means a reschedule check is needed.
+ //
+ // Steps:
+ // 1. locate backedges.
+ // 2. Record memory definitions at block end so that
+ // the SSA graph for mem can be properly modified.
+ // 3. Ensure that phi functions that will-be-needed for mem
+ // are present in the graph, initially with trivial inputs.
+ // 4. Record all to-be-modified uses of mem;
+ // apply modifications (split into two steps to simplify and
+ // avoided nagging order-dependencies).
+ // 5. Rewrite backedges to include reschedule check,
+ // and modify destination phi function appropriately with new
+ // definitions for mem.
+
+ if f.NoSplit { // nosplit functions don't reschedule.
+ return
+ }
+
+ backedges := backedges(f)
+ if len(backedges) == 0 { // no backedges means no rescheduling checks.
+ return
+ }
+
+ lastMems := findLastMems(f)
+
+ idom := f.Idom()
+ po := f.postorder()
+ // The ordering in the dominator tree matters; it's important that
+ // the walk of the dominator tree also be a preorder (i.e., a node is
+ // visited only after all its non-backedge predecessors have been visited).
+ sdom := newSparseOrderedTree(f, idom, po)
+
+ if f.pass.debug > 1 {
+ fmt.Printf("before %s = %s\n", f.Name, sdom.treestructure(f.Entry))
+ }
+
+ tofixBackedges := []edgeMem{}
+
+ for _, e := range backedges { // TODO: could filter here by calls in loops, if declared and inferred nosplit are recorded in export data.
+ tofixBackedges = append(tofixBackedges, edgeMem{e, nil})
+ }
+
+ // It's possible that there is no memory state (no global/pointer loads/stores or calls)
+ if lastMems[f.Entry.ID] == nil {
+ lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, types.TypeMem)
+ }
+
+ memDefsAtBlockEnds := f.Cache.allocValueSlice(f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block.
+ defer f.Cache.freeValueSlice(memDefsAtBlockEnds)
+
+ // Propagate last mem definitions forward through successor blocks.
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+ mem := lastMems[b.ID]
+ for j := 0; mem == nil; j++ { // if there's no def, then there's no phi, so the visible mem is identical in all predecessors.
+ // loop because there might be backedges that haven't been visited yet.
+ mem = memDefsAtBlockEnds[b.Preds[j].b.ID]
+ }
+ memDefsAtBlockEnds[b.ID] = mem
+ if f.pass.debug > 2 {
+ fmt.Printf("memDefsAtBlockEnds[%s] = %s\n", b, mem)
+ }
+ }
+
+ // Maps from block to newly-inserted phi function in block.
+ newmemphis := make(map[*Block]rewrite)
+
+ // Insert phi functions as necessary for future changes to flow graph.
+ for i, emc := range tofixBackedges {
+ e := emc.e
+ h := e.b
+
+ // find the phi function for the memory input at "h", if there is one.
+ var headerMemPhi *Value // look for header mem phi
+
+ for _, v := range h.Values {
+ if v.Op == OpPhi && v.Type.IsMemory() {
+ headerMemPhi = v
+ }
+ }
+
+ if headerMemPhi == nil {
+ // if the header is nil, make a trivial phi from the dominator
+ mem0 := memDefsAtBlockEnds[idom[h.ID].ID]
+ headerMemPhi = newPhiFor(h, mem0)
+ newmemphis[h] = rewrite{before: mem0, after: headerMemPhi}
+ addDFphis(mem0, h, h, f, memDefsAtBlockEnds, newmemphis, sdom)
+
+ }
+ tofixBackedges[i].m = headerMemPhi
+
+ }
+ if f.pass.debug > 0 {
+ for b, r := range newmemphis {
+ fmt.Printf("before b=%s, rewrite=%s\n", b, r.String())
+ }
+ }
+
+ // dfPhiTargets notes inputs to phis in dominance frontiers that should not
+ // be rewritten as part of the dominated children of some outer rewrite.
+ dfPhiTargets := make(map[rewriteTarget]bool)
+
+ rewriteNewPhis(f.Entry, f.Entry, f, memDefsAtBlockEnds, newmemphis, dfPhiTargets, sdom)
+
+ if f.pass.debug > 0 {
+ for b, r := range newmemphis {
+ fmt.Printf("after b=%s, rewrite=%s\n", b, r.String())
+ }
+ }
+
+ // Apply collected rewrites.
+ for _, r := range newmemphis {
+ for _, rw := range r.rewrites {
+ rw.v.SetArg(rw.i, r.after)
+ }
+ }
+
+ // Rewrite backedges to include reschedule checks.
+ for _, emc := range tofixBackedges {
+ e := emc.e
+ headerMemPhi := emc.m
+ h := e.b
+ i := e.i
+ p := h.Preds[i]
+ bb := p.b
+ mem0 := headerMemPhi.Args[i]
+ // bb e->p h,
+ // Because we're going to insert a rare-call, make sure the
+ // looping edge still looks likely.
+ likely := BranchLikely
+ if p.i != 0 {
+ likely = BranchUnlikely
+ }
+ if bb.Kind != BlockPlain { // backedges can be unconditional. e.g., if x { something; continue }
+ bb.Likely = likely
+ }
+
+ // rewrite edge to include reschedule check
+ // existing edges:
+ //
+ // bb.Succs[p.i] == Edge{h, i}
+ // h.Preds[i] == p == Edge{bb,p.i}
+ //
+ // new block(s):
+ // test:
+ // if sp < g.limit { goto sched }
+ // goto join
+ // sched:
+ // mem1 := call resched (mem0)
+ // goto join
+ // join:
+ // mem2 := phi(mem0, mem1)
+ // goto h
+ //
+ // and correct arg i of headerMemPhi and headerCtrPhi
+ //
+ // EXCEPT: join block containing only phi functions is bad
+ // for the register allocator. Therefore, there is no
+ // join, and branches targeting join must instead target
+ // the header, and the other phi functions within header are
+ // adjusted for the additional input.
+
+ test := f.NewBlock(BlockIf)
+ sched := f.NewBlock(BlockPlain)
+
+ test.Pos = bb.Pos
+ sched.Pos = bb.Pos
+
+ // if sp < g.limit { goto sched }
+ // goto header
+
+ cfgtypes := &f.Config.Types
+ pt := cfgtypes.Uintptr
+ g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
+ sp := test.NewValue0(bb.Pos, OpSP, pt)
+ cmpOp := OpLess64U
+ if pt.Size() == 4 {
+ cmpOp = OpLess32U
+ }
+ limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
+ lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
+ cmp := test.NewValue2(bb.Pos, cmpOp, cfgtypes.Bool, sp, lim)
+ test.SetControl(cmp)
+
+ // if true, goto sched
+ test.AddEdgeTo(sched)
+
+ // if false, rewrite edge to header.
+ // do NOT remove+add, because that will perturb all the other phi functions
+ // as well as messing up other edges to the header.
+ test.Succs = append(test.Succs, Edge{h, i})
+ h.Preds[i] = Edge{test, 1}
+ headerMemPhi.SetArg(i, mem0)
+
+ test.Likely = BranchUnlikely
+
+ // sched:
+ // mem1 := call resched (mem0)
+ // goto header
+ resched := f.fe.Syslook("goschedguarded")
+ call := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(resched, bb.Func.ABIDefault.ABIAnalyzeTypes(nil, nil)), mem0)
+ mem1 := sched.NewValue1I(bb.Pos, OpSelectN, types.TypeMem, 0, call)
+ sched.AddEdgeTo(h)
+ headerMemPhi.AddArg(mem1)
+
+ bb.Succs[p.i] = Edge{test, 0}
+ test.Preds = append(test.Preds, Edge{bb, p.i})
+
+ // Must correct all the other phi functions in the header for new incoming edge.
+ // Except for mem phis, it will be the same value seen on the original
+ // backedge at index i.
+ for _, v := range h.Values {
+ if v.Op == OpPhi && v != headerMemPhi {
+ v.AddArg(v.Args[i])
+ }
+ }
+ }
+
+ f.invalidateCFG()
+
+ if f.pass.debug > 1 {
+ sdom = newSparseTree(f, f.Idom())
+ fmt.Printf("after %s = %s\n", f.Name, sdom.treestructure(f.Entry))
+ }
+}
+
+// newPhiFor inserts a new Phi function into b,
+// with all inputs set to v.
+func newPhiFor(b *Block, v *Value) *Value {
+ phiV := b.NewValue0(b.Pos, OpPhi, v.Type)
+
+ for range b.Preds {
+ phiV.AddArg(v)
+ }
+ return phiV
+}
+
+// rewriteNewPhis updates newphis[h] to record all places where the new phi function inserted
+// in block h will replace a previous definition. Block b is the block currently being processed;
+// if b has its own phi definition then it takes the place of h.
+// defsForUses provides information about other definitions of the variable that are present
+// (and if nil, indicates that the variable is no longer live)
+// sdom must yield a preorder of the flow graph if recursively walked, root-to-children.
+// The result of newSparseOrderedTree with order supplied by a dfs-postorder satisfies this
+// requirement.
+func rewriteNewPhis(h, b *Block, f *Func, defsForUses []*Value, newphis map[*Block]rewrite, dfPhiTargets map[rewriteTarget]bool, sdom SparseTree) {
+ // If b is a block with a new phi, then a new rewrite applies below it in the dominator tree.
+ if _, ok := newphis[b]; ok {
+ h = b
+ }
+ change := newphis[h]
+ x := change.before
+ y := change.after
+
+ // Apply rewrites to this block
+ if x != nil { // don't waste time on the common case of no definition.
+ p := &change.rewrites
+ for _, v := range b.Values {
+ if v == y { // don't rewrite self -- phi inputs are handled below.
+ continue
+ }
+ for i, w := range v.Args {
+ if w != x {
+ continue
+ }
+ tgt := rewriteTarget{v, i}
+
+ // It's possible dominated control flow will rewrite this instead.
+ // Visiting in preorder (a property of how sdom was constructed)
+ // ensures that these are seen in the proper order.
+ if dfPhiTargets[tgt] {
+ continue
+ }
+ *p = append(*p, tgt)
+ if f.pass.debug > 1 {
+ fmt.Printf("added block target for h=%v, b=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n",
+ h, b, x, y, v, i)
+ }
+ }
+ }
+
+ // Rewrite appropriate inputs of phis reached in successors
+ // in dominance frontier, self, and dominated.
+ // If the variable def reaching uses in b is itself defined in b, then the new phi function
+ // does not reach the successors of b. (This assumes a bit about the structure of the
+ // phi use-def graph, but it's true for memory.)
+ if dfu := defsForUses[b.ID]; dfu != nil && dfu.Block != b {
+ for _, e := range b.Succs {
+ s := e.b
+
+ for _, v := range s.Values {
+ if v.Op == OpPhi && v.Args[e.i] == x {
+ tgt := rewriteTarget{v, e.i}
+ *p = append(*p, tgt)
+ dfPhiTargets[tgt] = true
+ if f.pass.debug > 1 {
+ fmt.Printf("added phi target for h=%v, b=%v, s=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n",
+ h, b, s, x, y, v.LongString(), e.i)
+ }
+ break
+ }
+ }
+ }
+ }
+ newphis[h] = change
+ }
+
+ for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
+ rewriteNewPhis(h, c, f, defsForUses, newphis, dfPhiTargets, sdom) // TODO: convert to explicit stack from recursion.
+ }
+}
+
+// addDFphis creates new trivial phis that are necessary to correctly reflect (within SSA)
+// a new definition for variable "x" inserted at h (usually but not necessarily a phi).
+// These new phis can only occur at the dominance frontier of h; block s is in the dominance
+// frontier of h if h does not strictly dominate s and if s is a successor of a block b where
+// either b = h or h strictly dominates b.
+// These newly created phis are themselves new definitions that may require addition of their
+// own trivial phi functions in their own dominance frontier, and this is handled recursively.
+func addDFphis(x *Value, h, b *Block, f *Func, defForUses []*Value, newphis map[*Block]rewrite, sdom SparseTree) {
+ oldv := defForUses[b.ID]
+ if oldv != x { // either a new definition replacing x, or nil if it is proven that there are no uses reachable from b
+ return
+ }
+ idom := f.Idom()
+outer:
+ for _, e := range b.Succs {
+ s := e.b
+ // check phi functions in the dominance frontier
+ if sdom.isAncestor(h, s) {
+ continue // h dominates s, successor of b, therefore s is not in the frontier.
+ }
+ if _, ok := newphis[s]; ok {
+ continue // successor s of b already has a new phi function, so there is no need to add another.
+ }
+ if x != nil {
+ for _, v := range s.Values {
+ if v.Op == OpPhi && v.Args[e.i] == x {
+ continue outer // successor s of b has an old phi function, so there is no need to add another.
+ }
+ }
+ }
+
+ old := defForUses[idom[s.ID].ID] // new phi function is correct-but-redundant, combining value "old" on all inputs.
+ headerPhi := newPhiFor(s, old)
+ // the new phi will replace "old" in block s and all blocks dominated by s.
+ newphis[s] = rewrite{before: old, after: headerPhi} // record new phi, to have inputs labeled "old" rewritten to "headerPhi"
+ addDFphis(old, s, s, f, defForUses, newphis, sdom) // the new definition may also create new phi functions.
+ }
+ for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
+ addDFphis(x, h, c, f, defForUses, newphis, sdom) // TODO: convert to explicit stack from recursion.
+ }
+}
+
+// findLastMems maps block ids to last memory-output op in a block, if any.
+func findLastMems(f *Func) []*Value {
+
+ var stores []*Value
+ lastMems := f.Cache.allocValueSlice(f.NumBlocks())
+ defer f.Cache.freeValueSlice(lastMems)
+ storeUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(storeUse)
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // storeUse contains stores which are used by a subsequent store.
+ storeUse.clear()
+ stores = stores[:0]
+ var memPhi *Value
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ memPhi = v
+ }
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ lastMems[b.ID] = memPhi
+ continue
+ }
+
+ // find last store in the block
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+
+ // If this is a tuple containing a mem, select just
+ // the mem. This will generate ops we don't need, but
+ // it's the easiest thing to do.
+ if last.Type.IsTuple() {
+ last = b.NewValue1(last.Pos, OpSelect1, types.TypeMem, last)
+ } else if last.Type.IsResults() {
+ last = b.NewValue1I(last.Pos, OpSelectN, types.TypeMem, int64(last.Type.NumFields()-1), last)
+ }
+
+ lastMems[b.ID] = last
+ }
+ return lastMems
+}
+
+// mark values
+type markKind uint8
+
+const (
+ notFound markKind = iota // block has not been discovered yet
+ notExplored // discovered and in queue, outedges not processed yet
+ explored // discovered and in queue, outedges processed
+ done // all done, in output ordering
+)
+
+type backedgesState struct {
+ b *Block
+ i int
+}
+
+// backedges returns a slice of successor edges that are back
+// edges. For reducible loops, edge.b is the header.
+func backedges(f *Func) []Edge {
+ edges := []Edge{}
+ mark := make([]markKind, f.NumBlocks())
+ stack := []backedgesState{}
+
+ mark[f.Entry.ID] = notExplored
+ stack = append(stack, backedgesState{f.Entry, 0})
+
+ for len(stack) > 0 {
+ l := len(stack)
+ x := stack[l-1]
+ if x.i < len(x.b.Succs) {
+ e := x.b.Succs[x.i]
+ stack[l-1].i++
+ s := e.b
+ if mark[s.ID] == notFound {
+ mark[s.ID] = notExplored
+ stack = append(stack, backedgesState{s, 0})
+ } else if mark[s.ID] == notExplored {
+ edges = append(edges, e)
+ }
+ } else {
+ mark[x.b.ID] = done
+ stack = stack[0 : l-1]
+ }
+ }
+ return edges
+}
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
new file mode 100644
index 0000000..844a8f7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -0,0 +1,113 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// loopRotate converts loops with a check-loop-condition-at-beginning
+// to loops with a check-loop-condition-at-end.
+// This helps loops avoid extra unnecessary jumps.
+//
+// loop:
+// CMPQ ...
+// JGE exit
+// ...
+// JMP loop
+// exit:
+//
+// JMP entry
+// loop:
+// ...
+// entry:
+// CMPQ ...
+// JLT loop
+func loopRotate(f *Func) {
+ loopnest := f.loopnest()
+ if loopnest.hasIrreducible {
+ return
+ }
+ if len(loopnest.loops) == 0 {
+ return
+ }
+
+ idToIdx := f.Cache.allocIntSlice(f.NumBlocks())
+ defer f.Cache.freeIntSlice(idToIdx)
+ for i, b := range f.Blocks {
+ idToIdx[b.ID] = i
+ }
+
+ // Set of blocks we're moving, by ID.
+ move := map[ID]struct{}{}
+
+ // Map from block ID to the moving blocks that should
+ // come right after it.
+ after := map[ID][]*Block{}
+
+ // Check each loop header and decide if we want to move it.
+ for _, loop := range loopnest.loops {
+ b := loop.header
+ var p *Block // b's in-loop predecessor
+ for _, e := range b.Preds {
+ if e.b.Kind != BlockPlain {
+ continue
+ }
+ if loopnest.b2l[e.b.ID] != loop {
+ continue
+ }
+ p = e.b
+ }
+ if p == nil || p == b {
+ continue
+ }
+ after[p.ID] = []*Block{b}
+ for {
+ nextIdx := idToIdx[b.ID] + 1
+ if nextIdx >= len(f.Blocks) { // reached end of function (maybe impossible?)
+ break
+ }
+ nextb := f.Blocks[nextIdx]
+ if nextb == p { // original loop predecessor is next
+ break
+ }
+ if loopnest.b2l[nextb.ID] == loop {
+ after[p.ID] = append(after[p.ID], nextb)
+ }
+ b = nextb
+ }
+ // Swap b and p so that we'll handle p before b when moving blocks.
+ f.Blocks[idToIdx[loop.header.ID]] = p
+ f.Blocks[idToIdx[p.ID]] = loop.header
+ idToIdx[loop.header.ID], idToIdx[p.ID] = idToIdx[p.ID], idToIdx[loop.header.ID]
+
+ // Place b after p.
+ for _, b := range after[p.ID] {
+ move[b.ID] = struct{}{}
+ }
+ }
+
+ // Move blocks to their destinations in a single pass.
+ // We rely here on the fact that loop headers must come
+ // before the rest of the loop. And that relies on the
+ // fact that we only identify reducible loops.
+ j := 0
+ // Some blocks that are not part of a loop may be placed
+ // between loop blocks. In order to avoid these blocks from
+ // being overwritten, use a temporary slice.
+ oldOrder := f.Cache.allocBlockSlice(len(f.Blocks))
+ defer f.Cache.freeBlockSlice(oldOrder)
+ copy(oldOrder, f.Blocks)
+ for _, b := range oldOrder {
+ if _, ok := move[b.ID]; ok {
+ continue
+ }
+ f.Blocks[j] = b
+ j++
+ for _, a := range after[b.ID] {
+ f.Blocks[j] = a
+ j++
+ }
+ }
+ if j != len(oldOrder) {
+ f.Fatalf("bad reordering in looprotate")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go
new file mode 100644
index 0000000..e4aac47
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lower.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// convert to machine-dependent ops.
+func lower(f *Func) {
+ // repeat rewrites until we find no more rewrites
+ applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues)
+}
+
+// lateLower applies those rules that need to be run after the general lower rules.
+func lateLower(f *Func) {
+ // repeat rewrites until we find no more rewrites
+ if f.Config.lateLowerValue != nil {
+ applyRewrite(f, f.Config.lateLowerBlock, f.Config.lateLowerValue, removeDeadValues)
+ }
+}
+
+// checkLower checks for unlowered opcodes and fails if we find one.
+func checkLower(f *Func) {
+ // Needs to be a separate phase because it must run after both
+ // lowering and a subsequent dead code elimination (because lowering
+ // rules may leave dead generic ops behind).
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !opcodeTable[v.Op].generic {
+ continue // lowered
+ }
+ switch v.Op {
+ case OpSP, OpSPanchored, OpSB, OpInitMem, OpArg, OpArgIntReg, OpArgFloatReg, OpPhi, OpVarDef, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpSelectN, OpConvert, OpInlMark, OpWBend:
+ continue // ok not to lower
+ case OpMakeResult:
+ if b.Controls[0] == v {
+ continue
+ }
+ case OpGetG:
+ if f.Config.hasGReg {
+ // has hardware g register, regalloc takes care of it
+ continue // ok not to lower
+ }
+ }
+ s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()
+
+ for _, a := range v.Args {
+ s += " " + a.Type.SimpleString()
+ }
+ f.Fatalf("%s", s)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
new file mode 100644
index 0000000..235b0e5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -0,0 +1,426 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/big"
+ "math/bits"
+)
+
+// So you want to compute x / c for some constant c?
+// Machine division instructions are slow, so we try to
+// compute this division with a multiplication + a few
+// other cheap instructions instead.
+// (We assume here that c != 0, +/- 1, or +/- 2^i. Those
+// cases are easy to handle in different ways).
+
+// Technique from https://gmplib.org/~tege/divcnst-pldi94.pdf
+
+// First consider unsigned division.
+// Our strategy is to precompute 1/c then do
+// ⎣x / c⎦ = ⎣x * (1/c)⎦.
+// 1/c is less than 1, so we can't compute it directly in
+// integer arithmetic. Let's instead compute 2^e/c
+// for a value of e TBD (^ = exponentiation). Then
+// ⎣x / c⎦ = ⎣x * (2^e/c) / 2^e⎦.
+// Dividing by 2^e is easy. 2^e/c isn't an integer, unfortunately.
+// So we must approximate it. Let's call its approximation m.
+// We'll then compute
+// ⎣x * m / 2^e⎦
+// Which we want to be equal to ⎣x / c⎦ for 0 <= x < 2^n-1
+// where n is the word size.
+// Setting x = c gives us c * m >= 2^e.
+// We'll chose m = ⎡2^e/c⎤ to satisfy that equation.
+// What remains is to choose e.
+// Let m = 2^e/c + delta, 0 <= delta < 1
+// ⎣x * (2^e/c + delta) / 2^e⎦
+// ⎣x / c + x * delta / 2^e⎦
+// We must have x * delta / 2^e < 1/c so that this
+// additional term never rounds differently than ⎣x / c⎦ does.
+// Rearranging,
+// 2^e > x * delta * c
+// x can be at most 2^n-1 and delta can be at most 1.
+// So it is sufficient to have 2^e >= 2^n*c.
+// So we'll choose e = n + s, with s = ⎡log2(c)⎤.
+//
+// An additional complication arises because m has n+1 bits in it.
+// Hardware restricts us to n bit by n bit multiplies.
+// We divide into 3 cases:
+//
+// Case 1: m is even.
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣x * (m/2) / 2^(n+s-1)⎦
+// ⎣x / c⎦ = ⎣x * (m/2) / 2^n / 2^(s-1)⎦
+// ⎣x / c⎦ = ⎣⎣x * (m/2) / 2^n⎦ / 2^(s-1)⎦
+// multiply + shift
+//
+// Case 2: c is even.
+// ⎣x / c⎦ = ⎣(x/2) / (c/2)⎦
+// ⎣x / c⎦ = ⎣⎣x/2⎦ / (c/2)⎦
+// This is just the original problem, with x' = ⎣x/2⎦, c' = c/2, n' = n-1.
+// s' = s-1
+// m' = ⎡2^(n'+s')/c'⎤
+// = ⎡2^(n+s-1)/c⎤
+// = ⎡m/2⎤
+// ⎣x / c⎦ = ⎣x' * m' / 2^(n'+s')⎦
+// ⎣x / c⎦ = ⎣⎣x/2⎦ * ⎡m/2⎤ / 2^(n+s-2)⎦
+// ⎣x / c⎦ = ⎣⎣⎣x/2⎦ * ⎡m/2⎤ / 2^n⎦ / 2^(s-2)⎦
+// shift + multiply + shift
+//
+// Case 3: everything else
+// let k = m - 2^n. k fits in n bits.
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣x * (2^n + k) / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣(x + x * k / 2^n) / 2^s⎦
+// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦
+// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦
+// ⎣x / c⎦ = ⎣⎣(x + ⎣x * k / 2^n⎦) / 2⎦ / 2^(s-1)⎦
+// multiply + avg + shift
+//
+// These can be implemented in hardware using:
+// ⎣a * b / 2^n⎦ - aka high n bits of an n-bit by n-bit multiply.
+// ⎣(a+b) / 2⎦ - aka "average" of two n-bit numbers.
+// (Not just a regular add & shift because the intermediate result
+// a+b has n+1 bits in it. Nevertheless, can be done
+// in 2 instructions on x86.)
+
+// umagicOK reports whether we should strength reduce a n-bit divide by c.
+func umagicOK(n uint, c int64) bool {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ // Doesn't work for 0.
+ // Don't use for powers of 2.
+ return d&(d-1) != 0
+}
+
+// umagicOKn reports whether we should strength reduce an unsigned n-bit divide by c.
+// We can strength reduce when c != 0 and c is not a power of two.
+func umagicOK8(c int8) bool { return c&(c-1) != 0 }
+func umagicOK16(c int16) bool { return c&(c-1) != 0 }
+func umagicOK32(c int32) bool { return c&(c-1) != 0 }
+func umagicOK64(c int64) bool { return c&(c-1) != 0 }
+
+type umagicData struct {
+ s int64 // ⎡log2(c)⎤
+ m uint64 // ⎡2^(n+s)/c⎤ - 2^n
+}
+
+// umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c).
+// The return values satisfy for all 0 <= x < 2^n
+//
+// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
+func umagic(n uint, c int64) umagicData {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ C := new(big.Int).SetUint64(d)
+ s := C.BitLen()
+ M := big.NewInt(1)
+ M.Lsh(M, n+uint(s)) // 2^(n+s)
+ M.Add(M, C) // 2^(n+s)+c
+ M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1
+ M.Div(M, C) // ⎡2^(n+s)/c⎤
+ if M.Bit(int(n)) != 1 {
+ panic("n+1st bit isn't set")
+ }
+ M.SetBit(M, int(n), 0)
+ m := M.Uint64()
+ return umagicData{s: int64(s), m: m}
+}
+
+func umagic8(c int8) umagicData { return umagic(8, int64(c)) }
+func umagic16(c int16) umagicData { return umagic(16, int64(c)) }
+func umagic32(c int32) umagicData { return umagic(32, int64(c)) }
+func umagic64(c int64) umagicData { return umagic(64, c) }
+
+// For signed division, we use a similar strategy.
+// First, we enforce a positive c.
+// x / c = -(x / (-c))
+// This will require an additional Neg op for c<0.
+//
+// If x is positive we're in a very similar state
+// to the unsigned case above. We define:
+// s = ⎡log2(c)⎤-1
+// m = ⎡2^(n+s)/c⎤
+// Then
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// If x is negative we have
+// ⎡x / c⎤ = ⎣x * m / 2^(n+s)⎦ + 1
+// (TODO: derivation?)
+//
+// The multiply is a bit odd, as it is a signed n-bit value
+// times an unsigned n-bit value. For n smaller than the
+// word size, we can extend x and m appropriately and use the
+// signed multiply instruction. For n == word size,
+// we must use the signed multiply high and correct
+// the result by adding x*2^n.
+//
+// Adding 1 if x<0 is done by subtracting x>>(n-1).
+
+func smagicOK(n uint, c int64) bool {
+ if c < 0 {
+ // Doesn't work for negative c.
+ return false
+ }
+ // Doesn't work for 0.
+ // Don't use it for powers of 2.
+ return c&(c-1) != 0
+}
+
+// smagicOKn reports whether we should strength reduce a signed n-bit divide by c.
+func smagicOK8(c int8) bool { return smagicOK(8, int64(c)) }
+func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) }
+func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) }
+func smagicOK64(c int64) bool { return smagicOK(64, c) }
+
+type smagicData struct {
+ s int64 // ⎡log2(c)⎤-1
+ m uint64 // ⎡2^(n+s)/c⎤
+}
+
+// smagic computes the constants needed to strength reduce signed n-bit divides by the constant c.
+// Must have c>0.
+// The return values satisfy for all -2^(n-1) <= x < 2^(n-1)
+//
+// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
+func smagic(n uint, c int64) smagicData {
+ C := new(big.Int).SetInt64(c)
+ s := C.BitLen() - 1
+ M := big.NewInt(1)
+ M.Lsh(M, n+uint(s)) // 2^(n+s)
+ M.Add(M, C) // 2^(n+s)+c
+ M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1
+ M.Div(M, C) // ⎡2^(n+s)/c⎤
+ if M.Bit(int(n)) != 0 {
+ panic("n+1st bit is set")
+ }
+ if M.Bit(int(n-1)) == 0 {
+ panic("nth bit is not set")
+ }
+ m := M.Uint64()
+ return smagicData{s: int64(s), m: m}
+}
+
+func smagic8(c int8) smagicData { return smagic(8, int64(c)) }
+func smagic16(c int16) smagicData { return smagic(16, int64(c)) }
+func smagic32(c int32) smagicData { return smagic(32, int64(c)) }
+func smagic64(c int64) smagicData { return smagic(64, c) }
+
+// Divisibility x%c == 0 can be checked more efficiently than directly computing
+// the modulus x%c and comparing against 0.
+//
+// The same "Division by invariant integers using multiplication" paper
+// by Granlund and Montgomery referenced above briefly mentions this method
+// and it is further elaborated in "Hacker's Delight" by Warren Section 10-17
+//
+// The first thing to note is that for odd integers, exact division can be computed
+// by using the modular inverse with respect to the word size 2^n.
+//
+// Given c, compute m such that (c * m) mod 2^n == 1
+// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n
+//
+// x can range from 0, c, 2c, 3c, ... ⎣(2^n - 1)/c⎦ * c the maximum multiple
+// Thus, x*m mod 2^n is 0, 1, 2, 3, ... ⎣(2^n - 1)/c⎦
+// i.e. the quotient takes all values from zero up to max = ⎣(2^n - 1)/c⎦
+//
+// If x is not divisible by c, then x*m mod 2^n must take some larger value than max.
+//
+// This gives x*m mod 2^n <= ⎣(2^n - 1)/c⎦ as a test for divisibility
+// involving one multiplication and compare.
+//
+// To extend this to even integers, consider c = d0 * 2^k where d0 is odd.
+// We can test whether x is divisible by both d0 and 2^k.
+// For d0, the test is the same as above. Let m be such that m*d0 mod 2^n == 1
+// Then x*m mod 2^n <= ⎣(2^n - 1)/d0⎦ is the first test.
+// The test for divisibility by 2^k is a check for k trailing zeroes.
+// Note that since d0 is odd, m is odd and thus x*m will have the same number of
+// trailing zeroes as x. So the two tests are,
+//
+// x*m mod 2^n <= ⎣(2^n - 1)/d0⎦
+// and x*m ends in k zero bits
+//
+// These can be combined into a single comparison by the following
+// (theorem ZRU in Hacker's Delight) for unsigned integers.
+//
+// x <= a and x ends in k zero bits if and only if RotRight(x ,k) <= ⎣a/(2^k)⎦
+// Where RotRight(x ,k) is right rotation of x by k bits.
+//
+// To prove the first direction, x <= a -> ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦
+// But since x ends in k zeroes all the rotated bits would be zero too.
+// So RotRight(x, k) == ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦
+//
+// If x does not end in k zero bits, then RotRight(x, k)
+// has some non-zero bits in the k highest bits.
+// ⎣x/(2^k)⎦ has all zeroes in the k highest bits,
+// so RotRight(x, k) > ⎣x/(2^k)⎦
+//
+// Finally, if x > a and has k trailing zero bits, then RotRight(x, k) == ⎣x/(2^k)⎦
+// and ⎣x/(2^k)⎦ must be greater than ⎣a/(2^k)⎦, that is the top n-k bits of x must
+// be greater than the top n-k bits of a because the rest of x bits are zero.
+//
+// So the two conditions about can be replaced with the single test
+//
+// RotRight(x*m mod 2^n, k) <= ⎣(2^n - 1)/c⎦
+//
+// Where d0*2^k was replaced by c on the right hand side.
+
+// udivisibleOK reports whether we should strength reduce an unsigned n-bit divisibilty check by c.
+func udivisibleOK(n uint, c int64) bool {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ // Doesn't work for 0.
+ // Don't use for powers of 2.
+ return d&(d-1) != 0
+}
+
+func udivisibleOK8(c int8) bool { return udivisibleOK(8, int64(c)) }
+func udivisibleOK16(c int16) bool { return udivisibleOK(16, int64(c)) }
+func udivisibleOK32(c int32) bool { return udivisibleOK(32, int64(c)) }
+func udivisibleOK64(c int64) bool { return udivisibleOK(64, c) }
+
+type udivisibleData struct {
+ k int64 // trailingZeros(c)
+ m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n
+ max uint64 // ⎣(2^n - 1)/ c⎦ max value to for divisibility
+}
+
+func udivisible(n uint, c int64) udivisibleData {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ k := bits.TrailingZeros64(d)
+ d0 := d >> uint(k) // the odd portion of the divisor
+
+ mask := ^uint64(0) >> (64 - n)
+
+ // Calculate the multiplicative inverse via Newton's method.
+ // Quadratic convergence doubles the number of correct bits per iteration.
+ m := d0 // initial guess correct to 3-bits d0*d0 mod 8 == 1
+ m = m * (2 - m*d0) // 6-bits
+ m = m * (2 - m*d0) // 12-bits
+ m = m * (2 - m*d0) // 24-bits
+ m = m * (2 - m*d0) // 48-bits
+ m = m * (2 - m*d0) // 96-bits >= 64-bits
+ m = m & mask
+
+ max := mask / d
+
+ return udivisibleData{
+ k: int64(k),
+ m: m,
+ max: max,
+ }
+}
+
+func udivisible8(c int8) udivisibleData { return udivisible(8, int64(c)) }
+func udivisible16(c int16) udivisibleData { return udivisible(16, int64(c)) }
+func udivisible32(c int32) udivisibleData { return udivisible(32, int64(c)) }
+func udivisible64(c int64) udivisibleData { return udivisible(64, c) }
+
+// For signed integers, a similar method follows.
+//
+// Given c > 1 and odd, compute m such that (c * m) mod 2^n == 1
+// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n
+//
+// x can range from ⎡-2^(n-1)/c⎤ * c, ... -c, 0, c, ... ⎣(2^(n-1) - 1)/c⎦ * c
+// Thus, x*m mod 2^n is ⎡-2^(n-1)/c⎤, ... -2, -1, 0, 1, 2, ... ⎣(2^(n-1) - 1)/c⎦
+//
+// So, x is a multiple of c if and only if:
+// ⎡-2^(n-1)/c⎤ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦
+//
+// Since c > 1 and odd, this can be simplified by
+// ⎡-2^(n-1)/c⎤ == ⎡(-2^(n-1) + 1)/c⎤ == -⎣(2^(n-1) - 1)/c⎦
+//
+// -⎣(2^(n-1) - 1)/c⎦ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦
+//
+// To extend this to even integers, consider c = d0 * 2^k where d0 is odd.
+// We can test whether x is divisible by both d0 and 2^k.
+//
+// Let m be such that (d0 * m) mod 2^n == 1.
+// Let q = x*m mod 2^n. Then c divides x if:
+//
+// -⎣(2^(n-1) - 1)/d0⎦ <= q <= ⎣(2^(n-1) - 1)/d0⎦ and q ends in at least k 0-bits
+//
+// To transform this to a single comparison, we use the following theorem (ZRS in Hacker's Delight).
+//
+// For a >= 0 the following conditions are equivalent:
+// 1) -a <= x <= a and x ends in at least k 0-bits
+// 2) RotRight(x+a', k) <= ⎣2a'/2^k⎦
+//
+// Where a' = a & -2^k (a with its right k bits set to zero)
+//
+// To see that 1 & 2 are equivalent, note that -a <= x <= a is equivalent to
+// -a' <= x <= a' if and only if x ends in at least k 0-bits. Adding -a' to each side gives,
+// 0 <= x + a' <= 2a' and x + a' ends in at least k 0-bits if and only if x does since a' has
+// k 0-bits by definition. We can use theorem ZRU above with x -> x + a' and a -> 2a' giving 1) == 2).
+//
+// Let m be such that (d0 * m) mod 2^n == 1.
+// Let q = x*m mod 2^n.
+// Let a' = ⎣(2^(n-1) - 1)/d0⎦ & -2^k
+//
+// Then the divisibility test is:
+//
+// RotRight(q+a', k) <= ⎣2a'/2^k⎦
+//
+// Note that the calculation is performed using unsigned integers.
+// Since a' can have n-1 bits, 2a' may have n bits and there is no risk of overflow.
+
+// sdivisibleOK reports whether we should strength reduce a signed n-bit divisibilty check by c.
+func sdivisibleOK(n uint, c int64) bool {
+ if c < 0 {
+ // Doesn't work for negative c.
+ return false
+ }
+ // Doesn't work for 0.
+ // Don't use it for powers of 2.
+ return c&(c-1) != 0
+}
+
+func sdivisibleOK8(c int8) bool { return sdivisibleOK(8, int64(c)) }
+func sdivisibleOK16(c int16) bool { return sdivisibleOK(16, int64(c)) }
+func sdivisibleOK32(c int32) bool { return sdivisibleOK(32, int64(c)) }
+func sdivisibleOK64(c int64) bool { return sdivisibleOK(64, c) }
+
+type sdivisibleData struct {
+ k int64 // trailingZeros(c)
+ m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n
+ a uint64 // ⎣(2^(n-1) - 1)/ (c>>k)⎦ & -(1<<k) additive constant
+ max uint64 // ⎣(2 a) / (1<<k)⎦ max value to for divisibility
+}
+
+func sdivisible(n uint, c int64) sdivisibleData {
+ d := uint64(c)
+ k := bits.TrailingZeros64(d)
+ d0 := d >> uint(k) // the odd portion of the divisor
+
+ mask := ^uint64(0) >> (64 - n)
+
+ // Calculate the multiplicative inverse via Newton's method.
+ // Quadratic convergence doubles the number of correct bits per iteration.
+ m := d0 // initial guess correct to 3-bits d0*d0 mod 8 == 1
+ m = m * (2 - m*d0) // 6-bits
+ m = m * (2 - m*d0) // 12-bits
+ m = m * (2 - m*d0) // 24-bits
+ m = m * (2 - m*d0) // 48-bits
+ m = m * (2 - m*d0) // 96-bits >= 64-bits
+ m = m & mask
+
+ a := ((mask >> 1) / d0) & -(1 << uint(k))
+ max := (2 * a) >> uint(k)
+
+ return sdivisibleData{
+ k: int64(k),
+ m: m,
+ a: a,
+ max: max,
+ }
+}
+
+func sdivisible8(c int8) sdivisibleData { return sdivisible(8, int64(c)) }
+func sdivisible16(c int16) sdivisibleData { return sdivisible(16, int64(c)) }
+func sdivisible32(c int32) sdivisibleData { return sdivisible(32, int64(c)) }
+func sdivisible64(c int64) sdivisibleData { return sdivisible(64, c) }
diff --git a/src/cmd/compile/internal/ssa/magic_test.go b/src/cmd/compile/internal/ssa/magic_test.go
new file mode 100644
index 0000000..7c6009d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/magic_test.go
@@ -0,0 +1,410 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/big"
+ "testing"
+)
+
+func TestMagicExhaustive8(t *testing.T) {
+ testMagicExhaustive(t, 8)
+}
+func TestMagicExhaustive8U(t *testing.T) {
+ testMagicExhaustiveU(t, 8)
+}
+func TestMagicExhaustive16(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testMagicExhaustive(t, 16)
+}
+func TestMagicExhaustive16U(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testMagicExhaustiveU(t, 16)
+}
+
+// exhaustive test of magic for n bits
+func testMagicExhaustive(t *testing.T, n uint) {
+ min := -int64(1) << (n - 1)
+ max := int64(1) << (n - 1)
+ for c := int64(1); c < max; c++ {
+ if !smagicOK(n, int64(c)) {
+ continue
+ }
+ m := int64(smagic(n, c).m)
+ s := smagic(n, c).s
+ for i := min; i < max; i++ {
+ want := i / c
+ got := (i * m) >> (n + uint(s))
+ if i < 0 {
+ got++
+ }
+ if want != got {
+ t.Errorf("signed magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s)
+ }
+ }
+ }
+}
+func testMagicExhaustiveU(t *testing.T, n uint) {
+ max := uint64(1) << n
+ for c := uint64(1); c < max; c++ {
+ if !umagicOK(n, int64(c)) {
+ continue
+ }
+ m := umagic(n, int64(c)).m
+ s := umagic(n, int64(c)).s
+ for i := uint64(0); i < max; i++ {
+ want := i / c
+ got := (i * (max + m)) >> (n + uint(s))
+ if want != got {
+ t.Errorf("unsigned magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s)
+ }
+ }
+ }
+}
+
+func TestMagicUnsigned(t *testing.T) {
+ One := new(big.Int).SetUint64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoN := new(big.Int).Lsh(One, n)
+ Max := new(big.Int).Sub(TwoN, One)
+ for _, c := range [...]uint64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<8 - 1,
+ 1<<8 + 1,
+ 1<<16 - 1,
+ 1<<16 + 1,
+ 1<<32 - 1,
+ 1<<32 + 1,
+ 1<<64 - 1,
+ } {
+ if c>>n != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !umagicOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ m := umagic(n, int64(c)).m
+ s := umagic(n, int64(c)).s
+
+ C := new(big.Int).SetUint64(c)
+ M := new(big.Int).SetUint64(m)
+ M.Add(M, TwoN)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Uint64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]uint64{0, 1,
+ c - 1, c, c + 1,
+ 2*c - 1, 2 * c, 2*c + 1,
+ mul - 1, mul, mul + 1,
+ uint64(1)<<n - 1,
+ } {
+ X := new(big.Int).SetUint64(x)
+ if X.Cmp(Max) > 0 {
+ continue
+ }
+ Want := new(big.Int).Quo(X, C)
+ Got := new(big.Int).Mul(X, M)
+ Got.Rsh(Got, n+uint(s))
+ if Want.Cmp(Got) != 0 {
+ t.Errorf("umagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want)
+ }
+ }
+ }
+ }
+}
+
+func TestMagicSigned(t *testing.T) {
+ One := new(big.Int).SetInt64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoNMinusOne := new(big.Int).Lsh(One, n-1)
+ Max := new(big.Int).Sub(TwoNMinusOne, One)
+ Min := new(big.Int).Neg(TwoNMinusOne)
+ for _, c := range [...]int64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<7 - 1,
+ 1<<7 + 1,
+ 1<<15 - 1,
+ 1<<15 + 1,
+ 1<<31 - 1,
+ 1<<31 + 1,
+ 1<<63 - 1,
+ } {
+ if c>>(n-1) != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !smagicOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ m := smagic(n, int64(c)).m
+ s := smagic(n, int64(c)).s
+
+ C := new(big.Int).SetInt64(c)
+ M := new(big.Int).SetUint64(m)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Int64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]int64{
+ -1, 1,
+ -c - 1, -c, -c + 1, c - 1, c, c + 1,
+ -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1,
+ int64(1)<<(n-1) - 1, -int64(1) << (n - 1),
+ } {
+ X := new(big.Int).SetInt64(x)
+ if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 {
+ continue
+ }
+ Want := new(big.Int).Quo(X, C)
+ Got := new(big.Int).Mul(X, M)
+ Got.Rsh(Got, n+uint(s))
+ if x < 0 {
+ Got.Add(Got, One)
+ }
+ if Want.Cmp(Got) != 0 {
+ t.Errorf("smagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want)
+ }
+ }
+ }
+ }
+}
+
+func testDivisibleExhaustiveU(t *testing.T, n uint) {
+ maxU := uint64(1) << n
+ for c := uint64(1); c < maxU; c++ {
+ if !udivisibleOK(n, int64(c)) {
+ continue
+ }
+ k := udivisible(n, int64(c)).k
+ m := udivisible(n, int64(c)).m
+ max := udivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+ for i := uint64(0); i < maxU; i++ {
+ want := i%c == 0
+ mul := (i * m) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", i, c, got, want, k, m, max)
+ }
+ }
+ }
+}
+
+func TestDivisibleExhaustive8U(t *testing.T) {
+ testDivisibleExhaustiveU(t, 8)
+}
+
+func TestDivisibleExhaustive16U(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testDivisibleExhaustiveU(t, 16)
+}
+
+func TestDivisibleUnsigned(t *testing.T) {
+ One := new(big.Int).SetUint64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoN := new(big.Int).Lsh(One, n)
+ Max := new(big.Int).Sub(TwoN, One)
+ for _, c := range [...]uint64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<8 - 1,
+ 1<<8 + 1,
+ 1<<16 - 1,
+ 1<<16 + 1,
+ 1<<32 - 1,
+ 1<<32 + 1,
+ 1<<64 - 1,
+ } {
+ if c>>n != 0 {
+ continue // c too large for the given n.
+ }
+ if !udivisibleOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ k := udivisible(n, int64(c)).k
+ m := udivisible(n, int64(c)).m
+ max := udivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+
+ C := new(big.Int).SetUint64(c)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Uint64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]uint64{0, 1,
+ c - 1, c, c + 1,
+ 2*c - 1, 2 * c, 2*c + 1,
+ mul - 1, mul, mul + 1,
+ uint64(1)<<n - 1,
+ } {
+ X := new(big.Int).SetUint64(x)
+ if X.Cmp(Max) > 0 {
+ continue
+ }
+ want := x%c == 0
+ mul := (x * m) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", x, c, got, want, k, m, max)
+ }
+ }
+ }
+ }
+}
+
+func testDivisibleExhaustive(t *testing.T, n uint) {
+ minI := -int64(1) << (n - 1)
+ maxI := int64(1) << (n - 1)
+ for c := int64(1); c < maxI; c++ {
+ if !sdivisibleOK(n, int64(c)) {
+ continue
+ }
+ k := sdivisible(n, int64(c)).k
+ m := sdivisible(n, int64(c)).m
+ a := sdivisible(n, int64(c)).a
+ max := sdivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+ for i := minI; i < maxI; i++ {
+ want := i%c == 0
+ mul := (uint64(i)*m + a) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", i, c, got, want, k, m, a, max)
+ }
+ }
+ }
+}
+
+func TestDivisibleExhaustive8(t *testing.T) {
+ testDivisibleExhaustive(t, 8)
+}
+
+func TestDivisibleExhaustive16(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testDivisibleExhaustive(t, 16)
+}
+
+func TestDivisibleSigned(t *testing.T) {
+ One := new(big.Int).SetInt64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoNMinusOne := new(big.Int).Lsh(One, n-1)
+ Max := new(big.Int).Sub(TwoNMinusOne, One)
+ Min := new(big.Int).Neg(TwoNMinusOne)
+ for _, c := range [...]int64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<7 - 1,
+ 1<<7 + 1,
+ 1<<15 - 1,
+ 1<<15 + 1,
+ 1<<31 - 1,
+ 1<<31 + 1,
+ 1<<63 - 1,
+ } {
+ if c>>(n-1) != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !sdivisibleOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ k := sdivisible(n, int64(c)).k
+ m := sdivisible(n, int64(c)).m
+ a := sdivisible(n, int64(c)).a
+ max := sdivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+
+ C := new(big.Int).SetInt64(c)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Int64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]int64{
+ -1, 1,
+ -c - 1, -c, -c + 1, c - 1, c, c + 1,
+ -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1,
+ int64(1)<<(n-1) - 1, -int64(1) << (n - 1),
+ } {
+ X := new(big.Int).SetInt64(x)
+ if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 {
+ continue
+ }
+ want := x%c == 0
+ mul := (uint64(x)*m + a) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", x, c, got, want, k, m, a, max)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/memcombine.go b/src/cmd/compile/internal/ssa/memcombine.go
new file mode 100644
index 0000000..b1a4751
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/memcombine.go
@@ -0,0 +1,806 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "sort"
+)
+
+// memcombine combines smaller loads and stores into larger ones.
+// We ensure this generates good code for encoding/binary operations.
+// It may help other cases also.
+func memcombine(f *Func) {
+ // This optimization requires that the architecture has
+ // unaligned loads and unaligned stores.
+ if !f.Config.unalignedOK {
+ return
+ }
+
+ memcombineLoads(f)
+ memcombineStores(f)
+}
+
+func memcombineLoads(f *Func) {
+ // Find "OR trees" to start with.
+ mark := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(mark)
+ var order []*Value
+
+ // Mark all values that are the argument of an OR.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpOr16 || v.Op == OpOr32 || v.Op == OpOr64 {
+ mark.add(v.Args[0].ID)
+ mark.add(v.Args[1].ID)
+ }
+ }
+ }
+ for _, b := range f.Blocks {
+ order = order[:0]
+ for _, v := range b.Values {
+ if v.Op != OpOr16 && v.Op != OpOr32 && v.Op != OpOr64 {
+ continue
+ }
+ if mark.contains(v.ID) {
+ // marked - means it is not the root of an OR tree
+ continue
+ }
+ // Add the OR tree rooted at v to the order.
+ // We use BFS here, but any walk that puts roots before leaves would work.
+ i := len(order)
+ order = append(order, v)
+ for ; i < len(order); i++ {
+ x := order[i]
+ for j := 0; j < 2; j++ {
+ a := x.Args[j]
+ if a.Op == OpOr16 || a.Op == OpOr32 || a.Op == OpOr64 {
+ order = append(order, a)
+ }
+ }
+ }
+ }
+ for _, v := range order {
+ max := f.Config.RegSize
+ switch v.Op {
+ case OpOr64:
+ case OpOr32:
+ max = 4
+ case OpOr16:
+ max = 2
+ default:
+ continue
+ }
+ for n := max; n > 1; n /= 2 {
+ if combineLoads(v, n) {
+ break
+ }
+ }
+ }
+ }
+}
+
+// A BaseAddress represents the address ptr+idx, where
+// ptr is a pointer type and idx is an integer type.
+// idx may be nil, in which case it is treated as 0.
+type BaseAddress struct {
+ ptr *Value
+ idx *Value
+}
+
+// splitPtr returns the base address of ptr and any
+// constant offset from that base.
+// BaseAddress{ptr,nil},0 is always a valid result, but splitPtr
+// tries to peel away as many constants into off as possible.
+func splitPtr(ptr *Value) (BaseAddress, int64) {
+ var idx *Value
+ var off int64
+ for {
+ if ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ } else if ptr.Op == OpAddPtr {
+ if idx != nil {
+ // We have two or more indexing values.
+ // Pick the first one we found.
+ return BaseAddress{ptr: ptr, idx: idx}, off
+ }
+ idx = ptr.Args[1]
+ if idx.Op == OpAdd32 || idx.Op == OpAdd64 {
+ if idx.Args[0].Op == OpConst32 || idx.Args[0].Op == OpConst64 {
+ off += idx.Args[0].AuxInt
+ idx = idx.Args[1]
+ } else if idx.Args[1].Op == OpConst32 || idx.Args[1].Op == OpConst64 {
+ off += idx.Args[1].AuxInt
+ idx = idx.Args[0]
+ }
+ }
+ ptr = ptr.Args[0]
+ } else {
+ return BaseAddress{ptr: ptr, idx: idx}, off
+ }
+ }
+}
+
+func combineLoads(root *Value, n int64) bool {
+ orOp := root.Op
+ var shiftOp Op
+ switch orOp {
+ case OpOr64:
+ shiftOp = OpLsh64x64
+ case OpOr32:
+ shiftOp = OpLsh32x64
+ case OpOr16:
+ shiftOp = OpLsh16x64
+ default:
+ return false
+ }
+
+ // Find n values that are ORed together with the above op.
+ a := make([]*Value, 0, 8)
+ a = append(a, root)
+ for i := 0; i < len(a) && int64(len(a)) < n; i++ {
+ v := a[i]
+ if v.Uses != 1 && v != root {
+ // Something in this subtree is used somewhere else.
+ return false
+ }
+ if v.Op == orOp {
+ a[i] = v.Args[0]
+ a = append(a, v.Args[1])
+ i--
+ }
+ }
+ if int64(len(a)) != n {
+ return false
+ }
+
+ // Check that the first entry to see what ops we're looking for.
+ // All the entries should be of the form shift(extend(load)), maybe with no shift.
+ v := a[0]
+ if v.Op == shiftOp {
+ v = v.Args[0]
+ }
+ var extOp Op
+ if orOp == OpOr64 && (v.Op == OpZeroExt8to64 || v.Op == OpZeroExt16to64 || v.Op == OpZeroExt32to64) ||
+ orOp == OpOr32 && (v.Op == OpZeroExt8to32 || v.Op == OpZeroExt16to32) ||
+ orOp == OpOr16 && v.Op == OpZeroExt8to16 {
+ extOp = v.Op
+ v = v.Args[0]
+ } else {
+ return false
+ }
+ if v.Op != OpLoad {
+ return false
+ }
+ base, _ := splitPtr(v.Args[0])
+ mem := v.Args[1]
+ size := v.Type.Size()
+
+ if root.Block.Func.Config.arch == "S390X" {
+ // s390x can't handle unaligned accesses to global variables.
+ if base.ptr.Op == OpAddr {
+ return false
+ }
+ }
+
+ // Check all the entries, extract useful info.
+ type LoadRecord struct {
+ load *Value
+ offset int64 // offset of load address from base
+ shift int64
+ }
+ r := make([]LoadRecord, n, 8)
+ for i := int64(0); i < n; i++ {
+ v := a[i]
+ if v.Uses != 1 {
+ return false
+ }
+ shift := int64(0)
+ if v.Op == shiftOp {
+ if v.Args[1].Op != OpConst64 {
+ return false
+ }
+ shift = v.Args[1].AuxInt
+ v = v.Args[0]
+ if v.Uses != 1 {
+ return false
+ }
+ }
+ if v.Op != extOp {
+ return false
+ }
+ load := v.Args[0]
+ if load.Op != OpLoad {
+ return false
+ }
+ if load.Uses != 1 {
+ return false
+ }
+ if load.Args[1] != mem {
+ return false
+ }
+ p, off := splitPtr(load.Args[0])
+ if p != base {
+ return false
+ }
+ r[i] = LoadRecord{load: load, offset: off, shift: shift}
+ }
+
+ // Sort in memory address order.
+ sort.Slice(r, func(i, j int) bool {
+ return r[i].offset < r[j].offset
+ })
+
+ // Check that we have contiguous offsets.
+ for i := int64(0); i < n; i++ {
+ if r[i].offset != r[0].offset+i*size {
+ return false
+ }
+ }
+
+ // Check for reads in little-endian or big-endian order.
+ shift0 := r[0].shift
+ isLittleEndian := true
+ for i := int64(0); i < n; i++ {
+ if r[i].shift != shift0+i*size*8 {
+ isLittleEndian = false
+ break
+ }
+ }
+ isBigEndian := true
+ for i := int64(0); i < n; i++ {
+ if r[i].shift != shift0-i*size*8 {
+ isBigEndian = false
+ break
+ }
+ }
+ if !isLittleEndian && !isBigEndian {
+ return false
+ }
+
+ // Find a place to put the new load.
+ // This is tricky, because it has to be at a point where
+ // its memory argument is live. We can't just put it in root.Block.
+ // We use the block of the latest load.
+ loads := make([]*Value, n, 8)
+ for i := int64(0); i < n; i++ {
+ loads[i] = r[i].load
+ }
+ loadBlock := mergePoint(root.Block, loads...)
+ if loadBlock == nil {
+ return false
+ }
+ // Find a source position to use.
+ pos := src.NoXPos
+ for _, load := range loads {
+ if load.Block == loadBlock {
+ pos = load.Pos
+ break
+ }
+ }
+ if pos == src.NoXPos {
+ return false
+ }
+
+ // Check to see if we need byte swap before storing.
+ needSwap := isLittleEndian && root.Block.Func.Config.BigEndian ||
+ isBigEndian && !root.Block.Func.Config.BigEndian
+ if needSwap && (size != 1 || !root.Block.Func.Config.haveByteSwap(n)) {
+ return false
+ }
+
+ // This is the commit point.
+
+ // First, issue load at lowest address.
+ v = loadBlock.NewValue2(pos, OpLoad, sizeType(n*size), r[0].load.Args[0], mem)
+
+ // Byte swap if needed,
+ if needSwap {
+ v = byteSwap(loadBlock, pos, v)
+ }
+
+ // Extend if needed.
+ if n*size < root.Type.Size() {
+ v = zeroExtend(loadBlock, pos, v, n*size, root.Type.Size())
+ }
+
+ // Shift if needed.
+ if isLittleEndian && shift0 != 0 {
+ v = leftShift(loadBlock, pos, v, shift0)
+ }
+ if isBigEndian && shift0-(n-1)*size*8 != 0 {
+ v = leftShift(loadBlock, pos, v, shift0-(n-1)*size*8)
+ }
+
+ // Install with (Copy v).
+ root.reset(OpCopy)
+ root.AddArg(v)
+
+ // Clobber the loads, just to prevent additional work being done on
+ // subtrees (which are now unreachable).
+ for i := int64(0); i < n; i++ {
+ clobber(r[i].load)
+ }
+ return true
+}
+
+func memcombineStores(f *Func) {
+ mark := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(mark)
+ var order []*Value
+
+ for _, b := range f.Blocks {
+ // Mark all stores which are not last in a store sequence.
+ mark.clear()
+ for _, v := range b.Values {
+ if v.Op == OpStore {
+ mark.add(v.MemoryArg().ID)
+ }
+ }
+
+ // pick an order for visiting stores such that
+ // later stores come earlier in the ordering.
+ order = order[:0]
+ for _, v := range b.Values {
+ if v.Op != OpStore {
+ continue
+ }
+ if mark.contains(v.ID) {
+ continue // not last in a chain of stores
+ }
+ for {
+ order = append(order, v)
+ v = v.Args[2]
+ if v.Block != b || v.Op != OpStore {
+ break
+ }
+ }
+ }
+
+ // Look for combining opportunities at each store in queue order.
+ for _, v := range order {
+ if v.Op != OpStore { // already rewritten
+ continue
+ }
+
+ size := v.Aux.(*types.Type).Size()
+ if size >= f.Config.RegSize || size == 0 {
+ continue
+ }
+
+ for n := f.Config.RegSize / size; n > 1; n /= 2 {
+ if combineStores(v, n) {
+ continue
+ }
+ }
+ }
+ }
+}
+
+// Try to combine the n stores ending in root.
+// Returns true if successful.
+func combineStores(root *Value, n int64) bool {
+ // Helper functions.
+ type StoreRecord struct {
+ store *Value
+ offset int64
+ }
+ getShiftBase := func(a []StoreRecord) *Value {
+ x := a[0].store.Args[1]
+ y := a[1].store.Args[1]
+ switch x.Op {
+ case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8:
+ x = x.Args[0]
+ default:
+ return nil
+ }
+ switch y.Op {
+ case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8:
+ y = y.Args[0]
+ default:
+ return nil
+ }
+ var x2 *Value
+ switch x.Op {
+ case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64:
+ x2 = x.Args[0]
+ default:
+ }
+ var y2 *Value
+ switch y.Op {
+ case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64:
+ y2 = y.Args[0]
+ default:
+ }
+ if y2 == x {
+ // a shift of x and x itself.
+ return x
+ }
+ if x2 == y {
+ // a shift of y and y itself.
+ return y
+ }
+ if x2 == y2 {
+ // 2 shifts both of the same argument.
+ return x2
+ }
+ return nil
+ }
+ isShiftBase := func(v, base *Value) bool {
+ val := v.Args[1]
+ switch val.Op {
+ case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8:
+ val = val.Args[0]
+ default:
+ return false
+ }
+ if val == base {
+ return true
+ }
+ switch val.Op {
+ case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64:
+ val = val.Args[0]
+ default:
+ return false
+ }
+ return val == base
+ }
+ shift := func(v, base *Value) int64 {
+ val := v.Args[1]
+ switch val.Op {
+ case OpTrunc64to8, OpTrunc64to16, OpTrunc64to32, OpTrunc32to8, OpTrunc32to16, OpTrunc16to8:
+ val = val.Args[0]
+ default:
+ return -1
+ }
+ if val == base {
+ return 0
+ }
+ switch val.Op {
+ case OpRsh64Ux64, OpRsh32Ux64, OpRsh16Ux64:
+ val = val.Args[1]
+ default:
+ return -1
+ }
+ if val.Op != OpConst64 {
+ return -1
+ }
+ return val.AuxInt
+ }
+
+ // Element size of the individual stores.
+ size := root.Aux.(*types.Type).Size()
+ if size*n > root.Block.Func.Config.RegSize {
+ return false
+ }
+
+ // Gather n stores to look at. Check easy conditions we require.
+ a := make([]StoreRecord, 0, 8)
+ rbase, roff := splitPtr(root.Args[0])
+ if root.Block.Func.Config.arch == "S390X" {
+ // s390x can't handle unaligned accesses to global variables.
+ if rbase.ptr.Op == OpAddr {
+ return false
+ }
+ }
+ a = append(a, StoreRecord{root, roff})
+ for i, x := int64(1), root.Args[2]; i < n; i, x = i+1, x.Args[2] {
+ if x.Op != OpStore {
+ return false
+ }
+ if x.Block != root.Block {
+ return false
+ }
+ if x.Uses != 1 { // Note: root can have more than one use.
+ return false
+ }
+ if x.Aux.(*types.Type).Size() != size {
+ // TODO: the constant source and consecutive load source cases
+ // do not need all the stores to be the same size.
+ return false
+ }
+ base, off := splitPtr(x.Args[0])
+ if base != rbase {
+ return false
+ }
+ a = append(a, StoreRecord{x, off})
+ }
+ // Before we sort, grab the memory arg the result should have.
+ mem := a[n-1].store.Args[2]
+ // Also grab position of first store (last in array = first in memory order).
+ pos := a[n-1].store.Pos
+
+ // Sort stores in increasing address order.
+ sort.Slice(a, func(i, j int) bool {
+ return a[i].offset < a[j].offset
+ })
+
+ // Check that everything is written to sequential locations.
+ for i := int64(0); i < n; i++ {
+ if a[i].offset != a[0].offset+i*size {
+ return false
+ }
+ }
+
+ // Memory location we're going to write at (the lowest one).
+ ptr := a[0].store.Args[0]
+
+ // Check for constant stores
+ isConst := true
+ for i := int64(0); i < n; i++ {
+ switch a[i].store.Args[1].Op {
+ case OpConst32, OpConst16, OpConst8:
+ default:
+ isConst = false
+ break
+ }
+ }
+ if isConst {
+ // Modify root to do all the stores.
+ var c int64
+ mask := int64(1)<<(8*size) - 1
+ for i := int64(0); i < n; i++ {
+ s := 8 * size * int64(i)
+ if root.Block.Func.Config.BigEndian {
+ s = 8*size*(n-1) - s
+ }
+ c |= (a[i].store.Args[1].AuxInt & mask) << s
+ }
+ var cv *Value
+ switch size * n {
+ case 2:
+ cv = root.Block.Func.ConstInt16(types.Types[types.TUINT16], int16(c))
+ case 4:
+ cv = root.Block.Func.ConstInt32(types.Types[types.TUINT32], int32(c))
+ case 8:
+ cv = root.Block.Func.ConstInt64(types.Types[types.TUINT64], c)
+ }
+
+ // Move all the stores to the root.
+ for i := int64(0); i < n; i++ {
+ v := a[i].store
+ if v == root {
+ v.Aux = cv.Type // widen store type
+ v.Pos = pos
+ v.SetArg(0, ptr)
+ v.SetArg(1, cv)
+ v.SetArg(2, mem)
+ } else {
+ clobber(v)
+ v.Type = types.Types[types.TBOOL] // erase memory type
+ }
+ }
+ return true
+ }
+
+ // Check for consecutive loads as the source of the stores.
+ var loadMem *Value
+ var loadBase BaseAddress
+ var loadIdx int64
+ for i := int64(0); i < n; i++ {
+ load := a[i].store.Args[1]
+ if load.Op != OpLoad {
+ loadMem = nil
+ break
+ }
+ if load.Uses != 1 {
+ loadMem = nil
+ break
+ }
+ if load.Type.IsPtr() {
+ // Don't combine stores containing a pointer, as we need
+ // a write barrier for those. This can't currently happen,
+ // but might in the future if we ever have another
+ // 8-byte-reg/4-byte-ptr architecture like amd64p32.
+ loadMem = nil
+ break
+ }
+ mem := load.Args[1]
+ base, idx := splitPtr(load.Args[0])
+ if loadMem == nil {
+ // First one we found
+ loadMem = mem
+ loadBase = base
+ loadIdx = idx
+ continue
+ }
+ if base != loadBase || mem != loadMem {
+ loadMem = nil
+ break
+ }
+ if idx != loadIdx+(a[i].offset-a[0].offset) {
+ loadMem = nil
+ break
+ }
+ }
+ if loadMem != nil {
+ // Modify the first load to do a larger load instead.
+ load := a[0].store.Args[1]
+ switch size * n {
+ case 2:
+ load.Type = types.Types[types.TUINT16]
+ case 4:
+ load.Type = types.Types[types.TUINT32]
+ case 8:
+ load.Type = types.Types[types.TUINT64]
+ }
+
+ // Modify root to do the store.
+ for i := int64(0); i < n; i++ {
+ v := a[i].store
+ if v == root {
+ v.Aux = load.Type // widen store type
+ v.Pos = pos
+ v.SetArg(0, ptr)
+ v.SetArg(1, load)
+ v.SetArg(2, mem)
+ } else {
+ clobber(v)
+ v.Type = types.Types[types.TBOOL] // erase memory type
+ }
+ }
+ return true
+ }
+
+ // Check that all the shift/trunc are of the same base value.
+ shiftBase := getShiftBase(a)
+ if shiftBase == nil {
+ return false
+ }
+ for i := int64(0); i < n; i++ {
+ if !isShiftBase(a[i].store, shiftBase) {
+ return false
+ }
+ }
+
+ // Check for writes in little-endian or big-endian order.
+ isLittleEndian := true
+ shift0 := shift(a[0].store, shiftBase)
+ for i := int64(1); i < n; i++ {
+ if shift(a[i].store, shiftBase) != shift0+i*size*8 {
+ isLittleEndian = false
+ break
+ }
+ }
+ isBigEndian := true
+ for i := int64(1); i < n; i++ {
+ if shift(a[i].store, shiftBase) != shift0-i*size*8 {
+ isBigEndian = false
+ break
+ }
+ }
+ if !isLittleEndian && !isBigEndian {
+ return false
+ }
+
+ // Check to see if we need byte swap before storing.
+ needSwap := isLittleEndian && root.Block.Func.Config.BigEndian ||
+ isBigEndian && !root.Block.Func.Config.BigEndian
+ if needSwap && (size != 1 || !root.Block.Func.Config.haveByteSwap(n)) {
+ return false
+ }
+
+ // This is the commit point.
+
+ // Modify root to do all the stores.
+ sv := shiftBase
+ if isLittleEndian && shift0 != 0 {
+ sv = rightShift(root.Block, root.Pos, sv, shift0)
+ }
+ if isBigEndian && shift0-(n-1)*size*8 != 0 {
+ sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*size*8)
+ }
+ if sv.Type.Size() > size*n {
+ sv = truncate(root.Block, root.Pos, sv, sv.Type.Size(), size*n)
+ }
+ if needSwap {
+ sv = byteSwap(root.Block, root.Pos, sv)
+ }
+
+ // Move all the stores to the root.
+ for i := int64(0); i < n; i++ {
+ v := a[i].store
+ if v == root {
+ v.Aux = sv.Type // widen store type
+ v.Pos = pos
+ v.SetArg(0, ptr)
+ v.SetArg(1, sv)
+ v.SetArg(2, mem)
+ } else {
+ clobber(v)
+ v.Type = types.Types[types.TBOOL] // erase memory type
+ }
+ }
+ return true
+}
+
+func sizeType(size int64) *types.Type {
+ switch size {
+ case 8:
+ return types.Types[types.TUINT64]
+ case 4:
+ return types.Types[types.TUINT32]
+ case 2:
+ return types.Types[types.TUINT16]
+ default:
+ base.Fatalf("bad size %d\n", size)
+ return nil
+ }
+}
+
+func truncate(b *Block, pos src.XPos, v *Value, from, to int64) *Value {
+ switch from*10 + to {
+ case 82:
+ return b.NewValue1(pos, OpTrunc64to16, types.Types[types.TUINT16], v)
+ case 84:
+ return b.NewValue1(pos, OpTrunc64to32, types.Types[types.TUINT32], v)
+ case 42:
+ return b.NewValue1(pos, OpTrunc32to16, types.Types[types.TUINT16], v)
+ default:
+ base.Fatalf("bad sizes %d %d\n", from, to)
+ return nil
+ }
+}
+func zeroExtend(b *Block, pos src.XPos, v *Value, from, to int64) *Value {
+ switch from*10 + to {
+ case 24:
+ return b.NewValue1(pos, OpZeroExt16to32, types.Types[types.TUINT32], v)
+ case 28:
+ return b.NewValue1(pos, OpZeroExt16to64, types.Types[types.TUINT64], v)
+ case 48:
+ return b.NewValue1(pos, OpZeroExt32to64, types.Types[types.TUINT64], v)
+ default:
+ base.Fatalf("bad sizes %d %d\n", from, to)
+ return nil
+ }
+}
+
+func leftShift(b *Block, pos src.XPos, v *Value, shift int64) *Value {
+ s := b.Func.ConstInt64(types.Types[types.TUINT64], shift)
+ size := v.Type.Size()
+ switch size {
+ case 8:
+ return b.NewValue2(pos, OpLsh64x64, v.Type, v, s)
+ case 4:
+ return b.NewValue2(pos, OpLsh32x64, v.Type, v, s)
+ case 2:
+ return b.NewValue2(pos, OpLsh16x64, v.Type, v, s)
+ default:
+ base.Fatalf("bad size %d\n", size)
+ return nil
+ }
+}
+func rightShift(b *Block, pos src.XPos, v *Value, shift int64) *Value {
+ s := b.Func.ConstInt64(types.Types[types.TUINT64], shift)
+ size := v.Type.Size()
+ switch size {
+ case 8:
+ return b.NewValue2(pos, OpRsh64Ux64, v.Type, v, s)
+ case 4:
+ return b.NewValue2(pos, OpRsh32Ux64, v.Type, v, s)
+ case 2:
+ return b.NewValue2(pos, OpRsh16Ux64, v.Type, v, s)
+ default:
+ base.Fatalf("bad size %d\n", size)
+ return nil
+ }
+}
+func byteSwap(b *Block, pos src.XPos, v *Value) *Value {
+ switch v.Type.Size() {
+ case 8:
+ return b.NewValue1(pos, OpBswap64, v.Type, v)
+ case 4:
+ return b.NewValue1(pos, OpBswap32, v.Type, v)
+ case 2:
+ return b.NewValue1(pos, OpBswap16, v.Type, v)
+
+ default:
+ v.Fatalf("bad size %d\n", v.Type.Size())
+ return nil
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
new file mode 100644
index 0000000..c69cd8c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -0,0 +1,337 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/internal/src"
+ "internal/buildcfg"
+)
+
+// nilcheckelim eliminates unnecessary nil checks.
+// runs on machine-independent code.
+func nilcheckelim(f *Func) {
+ // A nil check is redundant if the same nil check was successful in a
+ // dominating block. The efficacy of this pass depends heavily on the
+ // efficacy of the cse pass.
+ sdom := f.Sdom()
+
+ // TODO: Eliminate more nil checks.
+ // We can recursively remove any chain of fixed offset calculations,
+ // i.e. struct fields and array elements, even with non-constant
+ // indices: x is non-nil iff x.a.b[i].c is.
+
+ type walkState int
+ const (
+ Work walkState = iota // process nil checks and traverse to dominees
+ ClearPtr // forget the fact that ptr is nil
+ )
+
+ type bp struct {
+ block *Block // block, or nil in ClearPtr state
+ ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state
+ op walkState
+ }
+
+ work := make([]bp, 0, 256)
+ work = append(work, bp{block: f.Entry})
+
+ // map from value ID to known non-nil version of that value ID
+ // (in the current dominator path being walked). This slice is updated by
+ // walkStates to maintain the known non-nil values.
+ // If there is extrinsic information about non-nil-ness, this map
+ // points a value to itself. If a value is known non-nil because we
+ // already did a nil check on it, it points to the nil check operation.
+ nonNilValues := f.Cache.allocValueSlice(f.NumValues())
+ defer f.Cache.freeValueSlice(nonNilValues)
+
+ // make an initial pass identifying any non-nil values
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // a value resulting from taking the address of a
+ // value, or a value constructed from an offset of a
+ // non-nil ptr (OpAddPtr) implies it is non-nil
+ // We also assume unsafe pointer arithmetic generates non-nil pointers. See #27180.
+ // We assume that SlicePtr is non-nil because we do a bounds check
+ // before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
+ if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
+ nonNilValues[v.ID] = v
+ }
+ }
+ }
+
+ for changed := true; changed; {
+ changed = false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // phis whose arguments are all non-nil
+ // are non-nil
+ if v.Op == OpPhi {
+ argsNonNil := true
+ for _, a := range v.Args {
+ if nonNilValues[a.ID] == nil {
+ argsNonNil = false
+ break
+ }
+ }
+ if argsNonNil {
+ if nonNilValues[v.ID] == nil {
+ changed = true
+ }
+ nonNilValues[v.ID] = v
+ }
+ }
+ }
+ }
+ }
+
+ // allocate auxiliary date structures for computing store order
+ sset := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset)
+ storeNumber := f.Cache.allocInt32Slice(f.NumValues())
+ defer f.Cache.freeInt32Slice(storeNumber)
+
+ // perform a depth first walk of the dominee tree
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+
+ switch node.op {
+ case Work:
+ b := node.block
+
+ // First, see if we're dominated by an explicit nil check.
+ if len(b.Preds) == 1 {
+ p := b.Preds[0].b
+ if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
+ if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil {
+ nonNilValues[ptr.ID] = ptr
+ work = append(work, bp{op: ClearPtr, ptr: ptr})
+ }
+ }
+ }
+
+ // Next, order values in the current block w.r.t. stores.
+ b.Values = storeOrder(b.Values, sset, storeNumber)
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+
+ // Next, process values in the block.
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpIsNonNil:
+ ptr := v.Args[0]
+ if nonNilValues[ptr.ID] != nil {
+ if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
+ pendingLines.add(v.Pos)
+ v.Pos = v.Pos.WithNotStmt()
+ }
+ // This is a redundant explicit nil check.
+ v.reset(OpConstBool)
+ v.AuxInt = 1 // true
+ }
+ case OpNilCheck:
+ ptr := v.Args[0]
+ if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil {
+ // This is a redundant implicit nil check.
+ // Logging in the style of the former compiler -- and omit line 1,
+ // which is usually in generated code.
+ if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
+ f.Warnl(v.Pos, "removed nil check")
+ }
+ if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
+ pendingLines.add(v.Pos)
+ }
+ v.Op = OpCopy
+ v.SetArgs1(nilCheck)
+ continue
+ }
+ // Record the fact that we know ptr is non nil, and remember to
+ // undo that information when this dominator subtree is done.
+ nonNilValues[ptr.ID] = v
+ work = append(work, bp{op: ClearPtr, ptr: ptr})
+ fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
+ default:
+ if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ }
+ }
+ // This reduces the lost statement count in "go" by 5 (out of 500 total).
+ for j := range b.Values { // is this an ordering problem?
+ v := b.Values[j]
+ if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ }
+ if pendingLines.contains(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ pendingLines.remove(b.Pos)
+ }
+
+ // Add all dominated blocks to the work list.
+ for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
+ work = append(work, bp{op: Work, block: w})
+ }
+
+ case ClearPtr:
+ nonNilValues[node.ptr.ID] = nil
+ continue
+ }
+ }
+}
+
+// All platforms are guaranteed to fault if we load/store to anything smaller than this address.
+//
+// This should agree with minLegalPointer in the runtime.
+const minZeroPage = 4096
+
+// faultOnLoad is true if a load to an address below minZeroPage will trigger a SIGSEGV.
+var faultOnLoad = buildcfg.GOOS != "aix"
+
+// nilcheckelim2 eliminates unnecessary nil checks.
+// Runs after lowering and scheduling.
+func nilcheckelim2(f *Func) {
+ unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[]
+ defer f.retSparseMap(unnecessary)
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+
+ for _, b := range f.Blocks {
+ // Walk the block backwards. Find instructions that will fault if their
+ // input pointer is nil. Remove nil checks on those pointers, as the
+ // faulting instruction effectively does the nil check for free.
+ unnecessary.clear()
+ pendingLines.clear()
+ // Optimization: keep track of removed nilcheck with smallest index
+ firstToRemove := len(b.Values)
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
+ if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
+ f.Warnl(v.Pos, "removed nil check")
+ }
+ // For bug 33724, policy is that we might choose to bump an existing position
+ // off the faulting load/store in favor of the one from the nil check.
+
+ // Iteration order means that first nilcheck in the chain wins, others
+ // are bumped into the ordinary statement preservation algorithm.
+ u := b.Values[unnecessary.get(v.Args[0].ID)]
+ if !u.Pos.SameFileAndLine(v.Pos) {
+ if u.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.add(u.Pos)
+ }
+ u.Pos = v.Pos
+ } else if v.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.add(v.Pos)
+ }
+
+ v.reset(OpUnknown)
+ firstToRemove = i
+ continue
+ }
+ if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+ if v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) {
+ // These ops don't really change memory.
+ continue
+ // Note: OpVarDef requires that the defined variable not have pointers.
+ // We need to make sure that there's no possible faulting
+ // instruction between a VarDef and that variable being
+ // fully initialized. If there was, then anything scanning
+ // the stack during the handling of that fault will see
+ // a live but uninitialized pointer variable on the stack.
+ //
+ // If we have:
+ //
+ // NilCheck p
+ // VarDef x
+ // x = *p
+ //
+ // We can't rewrite that to
+ //
+ // VarDef x
+ // NilCheck p
+ // x = *p
+ //
+ // Particularly, even though *p faults on p==nil, we still
+ // have to do the explicit nil check before the VarDef.
+ // See issue #32288.
+ }
+ // This op changes memory. Any faulting instruction after v that
+ // we've recorded in the unnecessary map is now obsolete.
+ unnecessary.clear()
+ }
+
+ // Find any pointers that this op is guaranteed to fault on if nil.
+ var ptrstore [2]*Value
+ ptrs := ptrstore[:0]
+ if opcodeTable[v.Op].faultOnNilArg0 && (faultOnLoad || v.Type.IsMemory()) {
+ // On AIX, only writing will fault.
+ ptrs = append(ptrs, v.Args[0])
+ }
+ if opcodeTable[v.Op].faultOnNilArg1 && (faultOnLoad || (v.Type.IsMemory() && v.Op != OpPPC64LoweredMove)) {
+ // On AIX, only writing will fault.
+ // LoweredMove is a special case because it's considered as a "mem" as it stores on arg0 but arg1 is accessed as a load and should be checked.
+ ptrs = append(ptrs, v.Args[1])
+ }
+
+ for _, ptr := range ptrs {
+ // Check to make sure the offset is small.
+ switch opcodeTable[v.Op].auxType {
+ case auxSym:
+ if v.Aux != nil {
+ continue
+ }
+ case auxSymOff:
+ if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
+ continue
+ }
+ case auxSymValAndOff:
+ off := ValAndOff(v.AuxInt).Off()
+ if v.Aux != nil || off < 0 || off >= minZeroPage {
+ continue
+ }
+ case auxInt32:
+ // Mips uses this auxType for atomic add constant. It does not affect the effective address.
+ case auxInt64:
+ // ARM uses this auxType for duffcopy/duffzero/alignment info.
+ // It does not affect the effective address.
+ case auxNone:
+ // offset is zero.
+ default:
+ v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
+ }
+ // This instruction is guaranteed to fault if ptr is nil.
+ // Any previous nil check op is unnecessary.
+ unnecessary.set(ptr.ID, int32(i))
+ }
+ }
+ // Remove values we've clobbered with OpUnknown.
+ i := firstToRemove
+ for j := i; j < len(b.Values); j++ {
+ v := b.Values[j]
+ if v.Op != OpUnknown {
+ if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now.
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ b.Values[i] = v
+ i++
+ }
+ }
+
+ if pendingLines.contains(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ }
+
+ b.truncateValues(i)
+
+ // TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
+ // more unnecessary nil checks. Would fix test/nilptr3.go:159.
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
new file mode 100644
index 0000000..6c89b1e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -0,0 +1,438 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "strconv"
+ "testing"
+)
+
+func BenchmarkNilCheckDeep1(b *testing.B) { benchmarkNilCheckDeep(b, 1) }
+func BenchmarkNilCheckDeep10(b *testing.B) { benchmarkNilCheckDeep(b, 10) }
+func BenchmarkNilCheckDeep100(b *testing.B) { benchmarkNilCheckDeep(b, 100) }
+func BenchmarkNilCheckDeep1000(b *testing.B) { benchmarkNilCheckDeep(b, 1000) }
+func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000) }
+
+// benchmarkNilCheckDeep is a stress test of nilcheckelim.
+// It uses the worst possible input: A linear string of
+// nil checks, none of which can be eliminated.
+// Run with multiple depths to observe big-O behavior.
+func benchmarkNilCheckDeep(b *testing.B, depth int) {
+ c := testConfig(b)
+ ptrType := c.config.Types.BytePtr
+
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < depth; i++ {
+ blocs = append(blocs,
+ Bloc(blockn(i),
+ Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(booln(i), OpIsNonNil, c.config.Types.Bool, 0, nil, ptrn(i)),
+ If(booln(i), blockn(i+1), "exit"),
+ ),
+ )
+ }
+ blocs = append(blocs,
+ Bloc(blockn(depth), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ fun := c.Fun("entry", blocs...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(depth)) // helps for eyeballing linearity
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ nilcheckelim(fun.f)
+ }
+}
+
+func blockn(n int) string { return "b" + strconv.Itoa(n) }
+func ptrn(n int) string { return "p" + strconv.Itoa(n) }
+func booln(n int) string { return "c" + strconv.Itoa(n) }
+
+func isNilCheck(b *Block) bool {
+ return b.Kind == BlockIf && b.Controls[0].Op == OpIsNonNil
+}
+
+// TestNilcheckSimple verifies that a second repeated nilcheck is removed.
+func TestNilcheckSimple(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckDomOrder ensures that the nil check elimination isn't dependent
+// on the order of the dominees.
+func TestNilcheckDomOrder(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("exit",
+ Exit("mem")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckAddr verifies that nilchecks of OpAddr constructed values are removed.
+func TestNilcheckAddr(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckAddPtr verifies that nilchecks of OpAddPtr constructed values are removed.
+func TestNilcheckAddPtr(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("off", OpConst64, c.config.Types.Int64, 20, nil),
+ Valu("ptr1", OpAddPtr, ptrType, 0, nil, "sb", "off"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckPhi tests that nil checks of phis, for which all values are known to be
+// non-nil are removed.
+func TestNilcheckPhi(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, StringToAux("b"), "sp", "mem"),
+ Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"),
+ If("bool1", "b1", "b2")),
+ Bloc("b1",
+ Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("checkPtr")),
+ Bloc("b2",
+ Valu("ptr2", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("checkPtr")),
+ // both ptr1 and ptr2 are guaranteed non-nil here
+ Bloc("checkPtr",
+ Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr2"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "phi"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckKeepRemove verifies that duplicate checks of the same pointer
+// are removed, but checks of different pointers are not.
+func TestNilcheckKeepRemove(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "differentCheck", "exit")),
+ Bloc("differentCheck",
+ Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr2"),
+ If("bool2", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool3", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundDifferentCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ if b == fun.blocks["differentCheck"] && isNilCheck(b) {
+ foundDifferentCheck = true
+ }
+ }
+ if !foundDifferentCheck {
+ t.Errorf("removed differentCheck, but shouldn't have")
+ }
+}
+
+// TestNilcheckInFalseBranch tests that nil checks in the false branch of a nilcheck
+// block are *not* removed.
+func TestNilcheckInFalseBranch(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "secondCheck")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "thirdCheck")),
+ Bloc("thirdCheck",
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool3", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundSecondCheck := false
+ foundThirdCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ foundSecondCheck = true
+ }
+ if b == fun.blocks["thirdCheck"] && isNilCheck(b) {
+ foundThirdCheck = true
+ }
+ }
+ if !foundSecondCheck {
+ t.Errorf("removed secondCheck, but shouldn't have [false branch]")
+ }
+ if !foundThirdCheck {
+ t.Errorf("removed thirdCheck, but shouldn't have [false branch]")
+ }
+}
+
+// TestNilcheckUser verifies that a user nil check that dominates a generated nil check
+// wil remove the generated nil check.
+func TestNilcheckUser(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ // we need the opt here to rewrite the user nilcheck
+ opt(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckBug reproduces a bug in nilcheckelim found by compiling math/big
+func TestNilcheckBug(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "secondCheck", "couldBeNil")),
+ Bloc("couldBeNil",
+ Goto("secondCheck")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ // prevent fuse from eliminating this block
+ Valu("store", OpStore, types.TypeMem, 0, ptrType, "ptr1", "nilptr", "mem"),
+ Goto("exit")),
+ Bloc("exit",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "mem", "store"),
+ Exit("phi")))
+
+ CheckFunc(fun.f)
+ // we need the opt here to rewrite the user nilcheck
+ opt(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundSecondCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ foundSecondCheck = true
+ }
+ }
+ if !foundSecondCheck {
+ t.Errorf("secondCheck was eliminated, but shouldn't have")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
new file mode 100644
index 0000000..b4eca32
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -0,0 +1,262 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+func isPoorStatementOp(op Op) bool {
+ switch op {
+ // Note that Nilcheck often vanishes, but when it doesn't, you'd love to start the statement there
+ // so that a debugger-user sees the stop before the panic, and can examine the value.
+ case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi, OpITab, OpIData,
+ OpIMake, OpStringMake, OpSliceMake, OpStructMake0, OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4,
+ OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F, OpSB, OpSP,
+ OpArgIntReg, OpArgFloatReg:
+ return true
+ }
+ return false
+}
+
+// nextGoodStatementIndex returns an index at i or later that is believed
+// to be a good place to start the statement for b. This decision is
+// based on v's Op, the possibility of a better later operation, and
+// whether the values following i are the same line as v.
+// If a better statement index isn't found, then i is returned.
+func nextGoodStatementIndex(v *Value, i int, b *Block) int {
+ // If the value is the last one in the block, too bad, it will have to do
+ // (this assumes that the value ordering vaguely corresponds to the source
+ // program execution order, which tends to be true directly after ssa is
+ // first built).
+ if i >= len(b.Values)-1 {
+ return i
+ }
+ // Skip the likely-ephemeral/fragile opcodes expected to vanish in a rewrite.
+ if !isPoorStatementOp(v.Op) {
+ return i
+ }
+ // Look ahead to see what the line number is on the next thing that could be a boundary.
+ for j := i + 1; j < len(b.Values); j++ {
+ u := b.Values[j]
+ if u.Pos.IsStmt() == src.PosNotStmt { // ignore non-statements
+ continue
+ }
+ if u.Pos.SameFileAndLine(v.Pos) {
+ if isPoorStatementOp(u.Op) {
+ continue // Keep looking, this is also not a good statement op
+ }
+ return j
+ }
+ return i
+ }
+ return i
+}
+
+// notStmtBoundary reports whether a value with opcode op can never be a statement
+// boundary. Such values don't correspond to a user's understanding of a
+// statement boundary.
+func notStmtBoundary(op Op) bool {
+ switch op {
+ case OpCopy, OpPhi, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg, OpArgIntReg, OpArgFloatReg:
+ return true
+ }
+ return false
+}
+
+func (b *Block) FirstPossibleStmtValue() *Value {
+ for _, v := range b.Values {
+ if notStmtBoundary(v.Op) {
+ continue
+ }
+ return v
+ }
+ return nil
+}
+
+func flc(p src.XPos) string {
+ if p == src.NoXPos {
+ return "none"
+ }
+ return fmt.Sprintf("(%d):%d:%d", p.FileIndex(), p.Line(), p.Col())
+}
+
+type fileAndPair struct {
+ f int32
+ lp lineRange
+}
+
+type fileAndPairs []fileAndPair
+
+func (fap fileAndPairs) Len() int {
+ return len(fap)
+}
+func (fap fileAndPairs) Less(i, j int) bool {
+ return fap[i].f < fap[j].f
+}
+func (fap fileAndPairs) Swap(i, j int) {
+ fap[i], fap[j] = fap[j], fap[i]
+}
+
+// -d=ssa/number_lines/stats=1 (that bit) for line and file distribution statistics
+// -d=ssa/number_lines/debug for information about why particular values are marked as statements.
+func numberLines(f *Func) {
+ po := f.Postorder()
+ endlines := make(map[ID]src.XPos)
+ ranges := make(map[int]lineRange)
+ note := func(p src.XPos) {
+ line := uint32(p.Line())
+ i := int(p.FileIndex())
+ lp, found := ranges[i]
+ change := false
+ if line < lp.first || !found {
+ lp.first = line
+ change = true
+ }
+ if line > lp.last {
+ lp.last = line
+ change = true
+ }
+ if change {
+ ranges[i] = lp
+ }
+ }
+
+ // Visit in reverse post order so that all non-loop predecessors come first.
+ for j := len(po) - 1; j >= 0; j-- {
+ b := po[j]
+ // Find the first interesting position and check to see if it differs from any predecessor
+ firstPos := src.NoXPos
+ firstPosIndex := -1
+ if b.Pos.IsStmt() != src.PosNotStmt {
+ note(b.Pos)
+ }
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ note(v.Pos)
+ // skip ahead to better instruction for this line if possible
+ i = nextGoodStatementIndex(v, i, b)
+ v = b.Values[i]
+ firstPosIndex = i
+ firstPos = v.Pos
+ v.Pos = firstPos.WithDefaultStmt() // default to default
+ break
+ }
+ }
+
+ if firstPosIndex == -1 { // Effectively empty block, check block's own Pos, consider preds.
+ line := src.NoXPos
+ for _, p := range b.Preds {
+ pbi := p.Block().ID
+ if !endlines[pbi].SameFileAndLine(line) {
+ if line == src.NoXPos {
+ line = endlines[pbi]
+ continue
+ } else {
+ line = src.NoXPos
+ break
+ }
+
+ }
+ }
+ // If the block has no statement itself and is effectively empty, tag it w/ predecessor(s) but not as a statement
+ if b.Pos.IsStmt() == src.PosNotStmt {
+ b.Pos = line
+ endlines[b.ID] = line
+ continue
+ }
+ // If the block differs from its predecessors, mark it as a statement
+ if line == src.NoXPos || !line.SameFileAndLine(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt effectively-empty-block %s %s %s\n", f.Name, b, flc(b.Pos))
+ }
+ }
+ endlines[b.ID] = b.Pos
+ continue
+ }
+ // check predecessors for any difference; if firstPos differs, then it is a boundary.
+ if len(b.Preds) == 0 { // Don't forget the entry block
+ b.Values[firstPosIndex].Pos = firstPos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt entry-block %s %s %s %s\n", f.Name, b, b.Values[firstPosIndex], flc(firstPos))
+ }
+ } else { // differing pred
+ for _, p := range b.Preds {
+ pbi := p.Block().ID
+ if !endlines[pbi].SameFileAndLine(firstPos) {
+ b.Values[firstPosIndex].Pos = firstPos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt differing-pred %s %s %s %s, different=%s ending %s\n",
+ f.Name, b, b.Values[firstPosIndex], flc(firstPos), p.Block(), flc(endlines[pbi]))
+ }
+ break
+ }
+ }
+ }
+ // iterate forward setting each new (interesting) position as a statement boundary.
+ for i := firstPosIndex + 1; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if v.Pos.IsStmt() == src.PosNotStmt {
+ continue
+ }
+ note(v.Pos)
+ // skip ahead if possible
+ i = nextGoodStatementIndex(v, i, b)
+ v = b.Values[i]
+ if !v.Pos.SameFileAndLine(firstPos) {
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt new line %s %s %s %s prev pos = %s\n", f.Name, b, v, flc(v.Pos), flc(firstPos))
+ }
+ firstPos = v.Pos
+ v.Pos = v.Pos.WithIsStmt()
+ } else {
+ v.Pos = v.Pos.WithDefaultStmt()
+ }
+ }
+ if b.Pos.IsStmt() != src.PosNotStmt && !b.Pos.SameFileAndLine(firstPos) {
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt end of block differs %s %s %s prev pos = %s\n", f.Name, b, flc(b.Pos), flc(firstPos))
+ }
+ b.Pos = b.Pos.WithIsStmt()
+ firstPos = b.Pos
+ }
+ endlines[b.ID] = firstPos
+ }
+ if f.pass.stats&1 != 0 {
+ // Report summary statistics on the shape of the sparse map about to be constructed
+ // TODO use this information to make sparse maps faster.
+ var entries fileAndPairs
+ for k, v := range ranges {
+ entries = append(entries, fileAndPair{int32(k), v})
+ }
+ sort.Sort(entries)
+ total := uint64(0) // sum over files of maxline(file) - minline(file)
+ maxfile := int32(0) // max(file indices)
+ minline := uint32(0xffffffff) // min over files of minline(file)
+ maxline := uint32(0) // max over files of maxline(file)
+ for _, v := range entries {
+ if f.pass.stats > 1 {
+ f.LogStat("file", v.f, "low", v.lp.first, "high", v.lp.last)
+ }
+ total += uint64(v.lp.last - v.lp.first)
+ if maxfile < v.f {
+ maxfile = v.f
+ }
+ if minline > v.lp.first {
+ minline = v.lp.first
+ }
+ if maxline < v.lp.last {
+ maxline = v.lp.last
+ }
+ }
+ f.LogStat("SUM_LINE_RANGE", total, "MAXMIN_LINE_RANGE", maxline-minline, "MAXFILE", maxfile, "NFILES", len(entries))
+ }
+ // cachedLineStarts is an empty sparse map for values that are included within ranges.
+ f.cachedLineStarts = newXposmap(ranges)
+}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
new file mode 100644
index 0000000..cb151b2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -0,0 +1,529 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// An Op encodes the specific operation that a Value performs.
+// Opcodes' semantics can be modified by the type and aux fields of the Value.
+// For instance, OpAdd can be 32 or 64 bit, signed or unsigned, float or complex, depending on Value.Type.
+// Semantics of each op are described in the opcode files in _gen/*Ops.go.
+// There is one file for generic (architecture-independent) ops and one file
+// for each architecture.
+type Op int32
+
+type opInfo struct {
+ name string
+ reg regInfo
+ auxType auxType
+ argLen int32 // the number of arguments, -1 if variable length
+ asm obj.As
+ generic bool // this is a generic (arch-independent) opcode
+ rematerializeable bool // this op is rematerializeable
+ commutative bool // this operation is commutative (e.g. addition)
+ resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
+ resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
+ clobberFlags bool // this op clobbers flags register
+ needIntTemp bool // need a temporary free integer register
+ call bool // is a function call
+ tailCall bool // is a tail call
+ nilCheck bool // this op is a nil check on arg0
+ faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
+ faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
+ usesScratch bool // this op requires scratch memory space
+ hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182.
+ zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
+ unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption
+ symEffect SymEffect // effect this op has on symbol in aux
+ scale uint8 // amd64/386 indexed load scale
+}
+
+type inputInfo struct {
+ idx int // index in Args array
+ regs regMask // allowed input registers
+}
+
+type outputInfo struct {
+ idx int // index in output tuple
+ regs regMask // allowed output registers
+}
+
+type regInfo struct {
+ // inputs encodes the register restrictions for an instruction's inputs.
+ // Each entry specifies an allowed register set for a particular input.
+ // They are listed in the order in which regalloc should pick a register
+ // from the register set (most constrained first).
+ // Inputs which do not need registers are not listed.
+ inputs []inputInfo
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
+ clobbers regMask
+ // outputs is the same as inputs, but for the outputs of the instruction.
+ outputs []outputInfo
+}
+
+func (r *regInfo) String() string {
+ s := ""
+ s += "INS:\n"
+ for _, i := range r.inputs {
+ mask := fmt.Sprintf("%64b", i.regs)
+ mask = strings.Replace(mask, "0", ".", -1)
+ s += fmt.Sprintf("%2d |%s|\n", i.idx, mask)
+ }
+ s += "OUTS:\n"
+ for _, i := range r.outputs {
+ mask := fmt.Sprintf("%64b", i.regs)
+ mask = strings.Replace(mask, "0", ".", -1)
+ s += fmt.Sprintf("%2d |%s|\n", i.idx, mask)
+ }
+ s += "CLOBBERS:\n"
+ mask := fmt.Sprintf("%64b", r.clobbers)
+ mask = strings.Replace(mask, "0", ".", -1)
+ s += fmt.Sprintf(" |%s|\n", mask)
+ return s
+}
+
+type auxType int8
+
+type AuxNameOffset struct {
+ Name *ir.Name
+ Offset int64
+}
+
+func (a *AuxNameOffset) CanBeAnSSAAux() {}
+func (a *AuxNameOffset) String() string {
+ return fmt.Sprintf("%s+%d", a.Name.Sym().Name, a.Offset)
+}
+
+func (a *AuxNameOffset) FrameOffset() int64 {
+ return a.Name.FrameOffset() + a.Offset
+}
+
+type AuxCall struct {
+ Fn *obj.LSym
+ reg *regInfo // regInfo for this call
+ abiInfo *abi.ABIParamResultInfo
+}
+
+// Reg returns the regInfo for a given call, combining the derived in/out register masks
+// with the machine-specific register information in the input i. (The machine-specific
+// regInfo is much handier at the call site than it is when the AuxCall is being constructed,
+// therefore do this lazily).
+//
+// TODO: there is a Clever Hack that allows pre-generation of a small-ish number of the slices
+// of inputInfo and outputInfo used here, provided that we are willing to reorder the inputs
+// and outputs from calls, so that all integer registers come first, then all floating registers.
+// At this point (active development of register ABI) that is very premature,
+// but if this turns out to be a cost, we could do it.
+func (a *AuxCall) Reg(i *regInfo, c *Config) *regInfo {
+ if a.reg.clobbers != 0 {
+ // Already updated
+ return a.reg
+ }
+ if a.abiInfo.InRegistersUsed()+a.abiInfo.OutRegistersUsed() == 0 {
+ // Shortcut for zero case, also handles old ABI.
+ a.reg = i
+ return a.reg
+ }
+
+ k := len(i.inputs)
+ for _, p := range a.abiInfo.InParams() {
+ for _, r := range p.Registers {
+ m := archRegForAbiReg(r, c)
+ a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)})
+ k++
+ }
+ }
+ a.reg.inputs = append(a.reg.inputs, i.inputs...) // These are less constrained, thus should come last
+ k = len(i.outputs)
+ for _, p := range a.abiInfo.OutParams() {
+ for _, r := range p.Registers {
+ m := archRegForAbiReg(r, c)
+ a.reg.outputs = append(a.reg.outputs, outputInfo{idx: k, regs: (1 << m)})
+ k++
+ }
+ }
+ a.reg.outputs = append(a.reg.outputs, i.outputs...)
+ a.reg.clobbers = i.clobbers
+ return a.reg
+}
+func (a *AuxCall) ABI() *abi.ABIConfig {
+ return a.abiInfo.Config()
+}
+func (a *AuxCall) ABIInfo() *abi.ABIParamResultInfo {
+ return a.abiInfo
+}
+func (a *AuxCall) ResultReg(c *Config) *regInfo {
+ if a.abiInfo.OutRegistersUsed() == 0 {
+ return a.reg
+ }
+ if len(a.reg.inputs) > 0 {
+ return a.reg
+ }
+ k := 0
+ for _, p := range a.abiInfo.OutParams() {
+ for _, r := range p.Registers {
+ m := archRegForAbiReg(r, c)
+ a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)})
+ k++
+ }
+ }
+ return a.reg
+}
+
+// For ABI register index r, returns the (dense) register number used in
+// SSA backend.
+func archRegForAbiReg(r abi.RegIndex, c *Config) uint8 {
+ var m int8
+ if int(r) < len(c.intParamRegs) {
+ m = c.intParamRegs[r]
+ } else {
+ m = c.floatParamRegs[int(r)-len(c.intParamRegs)]
+ }
+ return uint8(m)
+}
+
+// For ABI register index r, returns the register number used in the obj
+// package (assembler).
+func ObjRegForAbiReg(r abi.RegIndex, c *Config) int16 {
+ m := archRegForAbiReg(r, c)
+ return c.registers[m].objNum
+}
+
+// ArgWidth returns the amount of stack needed for all the inputs
+// and outputs of a function or method, including ABI-defined parameter
+// slots and ABI-defined spill slots for register-resident parameters.
+//
+// The name is taken from the types package's ArgWidth(<function type>),
+// which predated changes to the ABI; this version handles those changes.
+func (a *AuxCall) ArgWidth() int64 {
+ return a.abiInfo.ArgWidth()
+}
+
+// ParamAssignmentForResult returns the ABI Parameter assignment for result which (indexed 0, 1, etc).
+func (a *AuxCall) ParamAssignmentForResult(which int64) *abi.ABIParamAssignment {
+ return a.abiInfo.OutParam(int(which))
+}
+
+// OffsetOfResult returns the SP offset of result which (indexed 0, 1, etc).
+func (a *AuxCall) OffsetOfResult(which int64) int64 {
+ n := int64(a.abiInfo.OutParam(int(which)).Offset())
+ return n
+}
+
+// OffsetOfArg returns the SP offset of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) OffsetOfArg(which int64) int64 {
+ n := int64(a.abiInfo.InParam(int(which)).Offset())
+ return n
+}
+
+// RegsOfResult returns the register(s) used for result which (indexed 0, 1, etc).
+func (a *AuxCall) RegsOfResult(which int64) []abi.RegIndex {
+ return a.abiInfo.OutParam(int(which)).Registers
+}
+
+// RegsOfArg returns the register(s) used for argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) RegsOfArg(which int64) []abi.RegIndex {
+ return a.abiInfo.InParam(int(which)).Registers
+}
+
+// NameOfResult returns the ir.Name of result which (indexed 0, 1, etc).
+func (a *AuxCall) NameOfResult(which int64) *ir.Name {
+ return a.abiInfo.OutParam(int(which)).Name
+}
+
+// TypeOfResult returns the type of result which (indexed 0, 1, etc).
+func (a *AuxCall) TypeOfResult(which int64) *types.Type {
+ return a.abiInfo.OutParam(int(which)).Type
+}
+
+// TypeOfArg returns the type of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) TypeOfArg(which int64) *types.Type {
+ return a.abiInfo.InParam(int(which)).Type
+}
+
+// SizeOfResult returns the size of result which (indexed 0, 1, etc).
+func (a *AuxCall) SizeOfResult(which int64) int64 {
+ return a.TypeOfResult(which).Size()
+}
+
+// SizeOfArg returns the size of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) SizeOfArg(which int64) int64 {
+ return a.TypeOfArg(which).Size()
+}
+
+// NResults returns the number of results.
+func (a *AuxCall) NResults() int64 {
+ return int64(len(a.abiInfo.OutParams()))
+}
+
+// LateExpansionResultType returns the result type (including trailing mem)
+// for a call that will be expanded later in the SSA phase.
+func (a *AuxCall) LateExpansionResultType() *types.Type {
+ var tys []*types.Type
+ for i := int64(0); i < a.NResults(); i++ {
+ tys = append(tys, a.TypeOfResult(i))
+ }
+ tys = append(tys, types.TypeMem)
+ return types.NewResults(tys)
+}
+
+// NArgs returns the number of arguments (including receiver, if there is one).
+func (a *AuxCall) NArgs() int64 {
+ return int64(len(a.abiInfo.InParams()))
+}
+
+// String returns "AuxCall{<fn>}"
+func (a *AuxCall) String() string {
+ var fn string
+ if a.Fn == nil {
+ fn = "AuxCall{nil" // could be interface/closure etc.
+ } else {
+ fn = fmt.Sprintf("AuxCall{%v", a.Fn)
+ }
+ // TODO how much of the ABI should be printed?
+
+ return fn + "}"
+}
+
+// StaticAuxCall returns an AuxCall for a static call.
+func StaticAuxCall(sym *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ if paramResultInfo == nil {
+ panic(fmt.Errorf("Nil paramResultInfo, sym=%v", sym))
+ }
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: sym, abiInfo: paramResultInfo, reg: reg}
+}
+
+// InterfaceAuxCall returns an AuxCall for an interface call.
+func InterfaceAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg}
+}
+
+// ClosureAuxCall returns an AuxCall for a closure call.
+func ClosureAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg}
+}
+
+func (*AuxCall) CanBeAnSSAAux() {}
+
+// OwnAuxCall returns a function's own AuxCall.
+func OwnAuxCall(fn *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate.
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: fn, abiInfo: paramResultInfo, reg: reg}
+}
+
+const (
+ auxNone auxType = iota
+ auxBool // auxInt is 0/1 for false/true
+ auxInt8 // auxInt is an 8-bit integer
+ auxInt16 // auxInt is a 16-bit integer
+ auxInt32 // auxInt is a 32-bit integer
+ auxInt64 // auxInt is a 64-bit integer
+ auxInt128 // auxInt represents a 128-bit integer. Always 0.
+ auxUInt8 // auxInt is an 8-bit unsigned integer
+ auxFloat32 // auxInt is a float32 (encoded with math.Float64bits)
+ auxFloat64 // auxInt is a float64 (encoded with math.Float64bits)
+ auxFlagConstant // auxInt is a flagConstant
+ auxNameOffsetInt8 // aux is a &struct{Name ir.Name, Offset int64}; auxInt is index in parameter registers array
+ auxString // aux is a string
+ auxSym // aux is a symbol (a *gc.Node for locals, an *obj.LSym for globals, or nil for none)
+ auxSymOff // aux is a symbol, auxInt is an offset
+ auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff
+ auxTyp // aux is a type
+ auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt
+ auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. LessThan)
+ auxCall // aux is a *ssa.AuxCall
+ auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size
+
+ // architecture specific aux types
+ auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt
+ auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
+ auxS390XCCMask // aux is a s390x 4-bit condition code mask
+ auxS390XCCMaskInt8 // aux is a s390x 4-bit condition code mask, auxInt is an int8 immediate
+ auxS390XCCMaskUint8 // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate
+)
+
+// A SymEffect describes the effect that an SSA Value has on the variable
+// identified by the symbol in its Aux field.
+type SymEffect int8
+
+const (
+ SymRead SymEffect = 1 << iota
+ SymWrite
+ SymAddr
+
+ SymRdWr = SymRead | SymWrite
+
+ SymNone SymEffect = 0
+)
+
+// A Sym represents a symbolic offset from a base register.
+// Currently a Sym can be one of 3 things:
+// - a *gc.Node, for an offset from SP (the stack pointer)
+// - a *obj.LSym, for an offset from SB (the global pointer)
+// - nil, for no offset
+type Sym interface {
+ CanBeAnSSASym()
+ CanBeAnSSAAux()
+}
+
+// A ValAndOff is used by the several opcodes. It holds
+// both a value and a pointer offset.
+// A ValAndOff is intended to be encoded into an AuxInt field.
+// The zero ValAndOff encodes a value of 0 and an offset of 0.
+// The high 32 bits hold a value.
+// The low 32 bits hold a pointer offset.
+type ValAndOff int64
+
+func (x ValAndOff) Val() int32 { return int32(int64(x) >> 32) }
+func (x ValAndOff) Val64() int64 { return int64(x) >> 32 }
+func (x ValAndOff) Val16() int16 { return int16(int64(x) >> 32) }
+func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) }
+
+func (x ValAndOff) Off64() int64 { return int64(int32(x)) }
+func (x ValAndOff) Off() int32 { return int32(x) }
+
+func (x ValAndOff) String() string {
+ return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off())
+}
+
+// validVal reports whether the value can be used
+// as an argument to makeValAndOff.
+func validVal(val int64) bool {
+ return val == int64(int32(val))
+}
+
+func makeValAndOff(val, off int32) ValAndOff {
+ return ValAndOff(int64(val)<<32 + int64(uint32(off)))
+}
+
+func (x ValAndOff) canAdd32(off int32) bool {
+ newoff := x.Off64() + int64(off)
+ return newoff == int64(int32(newoff))
+}
+func (x ValAndOff) canAdd64(off int64) bool {
+ newoff := x.Off64() + off
+ return newoff == int64(int32(newoff))
+}
+
+func (x ValAndOff) addOffset32(off int32) ValAndOff {
+ if !x.canAdd32(off) {
+ panic("invalid ValAndOff.addOffset32")
+ }
+ return makeValAndOff(x.Val(), x.Off()+off)
+}
+func (x ValAndOff) addOffset64(off int64) ValAndOff {
+ if !x.canAdd64(off) {
+ panic("invalid ValAndOff.addOffset64")
+ }
+ return makeValAndOff(x.Val(), x.Off()+int32(off))
+}
+
+// int128 is a type that stores a 128-bit constant.
+// The only allowed constant right now is 0, so we can cheat quite a bit.
+type int128 int64
+
+type BoundsKind uint8
+
+const (
+ BoundsIndex BoundsKind = iota // indexing operation, 0 <= idx < len failed
+ BoundsIndexU // ... with unsigned idx
+ BoundsSliceAlen // 2-arg slicing operation, 0 <= high <= len failed
+ BoundsSliceAlenU // ... with unsigned high
+ BoundsSliceAcap // 2-arg slicing operation, 0 <= high <= cap failed
+ BoundsSliceAcapU // ... with unsigned high
+ BoundsSliceB // 2-arg slicing operation, 0 <= low <= high failed
+ BoundsSliceBU // ... with unsigned low
+ BoundsSlice3Alen // 3-arg slicing operation, 0 <= max <= len failed
+ BoundsSlice3AlenU // ... with unsigned max
+ BoundsSlice3Acap // 3-arg slicing operation, 0 <= max <= cap failed
+ BoundsSlice3AcapU // ... with unsigned max
+ BoundsSlice3B // 3-arg slicing operation, 0 <= high <= max failed
+ BoundsSlice3BU // ... with unsigned high
+ BoundsSlice3C // 3-arg slicing operation, 0 <= low <= high failed
+ BoundsSlice3CU // ... with unsigned low
+ BoundsConvert // conversion to array pointer failed
+ BoundsKindCount
+)
+
+// boundsABI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
+//
+// CMPQ c, cap
+// JA fail1
+// CMPQ b, c
+// JA fail2
+// CMPQ a, b
+// JA fail3
+//
+// fail1: CALL panicSlice3Acap (c, cap)
+// fail2: CALL panicSlice3B (b, c)
+// fail3: CALL panicSlice3C (a, b)
+//
+// When we register allocate that code, we want the same register to be used for
+// the first arg of panicSlice3Acap and the second arg to panicSlice3B. That way,
+// initializing that register once will satisfy both calls.
+// That desire ends up dividing the set of bounds check calls into 3 sets. This function
+// determines which set to use for a given panic call.
+// The first arg for set 0 should be the second arg for set 1.
+// The first arg for set 1 should be the second arg for set 2.
+func boundsABI(b int64) int {
+ switch BoundsKind(b) {
+ case BoundsSlice3Alen,
+ BoundsSlice3AlenU,
+ BoundsSlice3Acap,
+ BoundsSlice3AcapU,
+ BoundsConvert:
+ return 0
+ case BoundsSliceAlen,
+ BoundsSliceAlenU,
+ BoundsSliceAcap,
+ BoundsSliceAcapU,
+ BoundsSlice3B,
+ BoundsSlice3BU:
+ return 1
+ case BoundsIndex,
+ BoundsIndexU,
+ BoundsSliceB,
+ BoundsSliceBU,
+ BoundsSlice3C,
+ BoundsSlice3CU:
+ return 2
+ default:
+ panic("bad BoundsKind")
+ }
+}
+
+// arm64BitField is the GO type of ARM64BitField auxInt.
+// if x is an ARM64BitField, then width=x&0xff, lsb=(x>>8)&0xff, and
+// width+lsb<64 for 64-bit variant, width+lsb<32 for 32-bit variant.
+// the meaning of width and lsb are instruction-dependent.
+type arm64BitField int16
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
new file mode 100644
index 0000000..c552832
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -0,0 +1,41139 @@
+// Code generated from _gen/*Ops.go using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/loong64"
+ "cmd/internal/obj/mips"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/riscv"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/wasm"
+ "cmd/internal/obj/x86"
+)
+
+const (
+ BlockInvalid BlockKind = iota
+
+ Block386EQ
+ Block386NE
+ Block386LT
+ Block386LE
+ Block386GT
+ Block386GE
+ Block386OS
+ Block386OC
+ Block386ULT
+ Block386ULE
+ Block386UGT
+ Block386UGE
+ Block386EQF
+ Block386NEF
+ Block386ORD
+ Block386NAN
+
+ BlockAMD64EQ
+ BlockAMD64NE
+ BlockAMD64LT
+ BlockAMD64LE
+ BlockAMD64GT
+ BlockAMD64GE
+ BlockAMD64OS
+ BlockAMD64OC
+ BlockAMD64ULT
+ BlockAMD64ULE
+ BlockAMD64UGT
+ BlockAMD64UGE
+ BlockAMD64EQF
+ BlockAMD64NEF
+ BlockAMD64ORD
+ BlockAMD64NAN
+ BlockAMD64JUMPTABLE
+
+ BlockARMEQ
+ BlockARMNE
+ BlockARMLT
+ BlockARMLE
+ BlockARMGT
+ BlockARMGE
+ BlockARMULT
+ BlockARMULE
+ BlockARMUGT
+ BlockARMUGE
+ BlockARMLTnoov
+ BlockARMLEnoov
+ BlockARMGTnoov
+ BlockARMGEnoov
+
+ BlockARM64EQ
+ BlockARM64NE
+ BlockARM64LT
+ BlockARM64LE
+ BlockARM64GT
+ BlockARM64GE
+ BlockARM64ULT
+ BlockARM64ULE
+ BlockARM64UGT
+ BlockARM64UGE
+ BlockARM64Z
+ BlockARM64NZ
+ BlockARM64ZW
+ BlockARM64NZW
+ BlockARM64TBZ
+ BlockARM64TBNZ
+ BlockARM64FLT
+ BlockARM64FLE
+ BlockARM64FGT
+ BlockARM64FGE
+ BlockARM64LTnoov
+ BlockARM64LEnoov
+ BlockARM64GTnoov
+ BlockARM64GEnoov
+ BlockARM64JUMPTABLE
+
+ BlockLOONG64EQ
+ BlockLOONG64NE
+ BlockLOONG64LTZ
+ BlockLOONG64LEZ
+ BlockLOONG64GTZ
+ BlockLOONG64GEZ
+ BlockLOONG64FPT
+ BlockLOONG64FPF
+
+ BlockMIPSEQ
+ BlockMIPSNE
+ BlockMIPSLTZ
+ BlockMIPSLEZ
+ BlockMIPSGTZ
+ BlockMIPSGEZ
+ BlockMIPSFPT
+ BlockMIPSFPF
+
+ BlockMIPS64EQ
+ BlockMIPS64NE
+ BlockMIPS64LTZ
+ BlockMIPS64LEZ
+ BlockMIPS64GTZ
+ BlockMIPS64GEZ
+ BlockMIPS64FPT
+ BlockMIPS64FPF
+
+ BlockPPC64EQ
+ BlockPPC64NE
+ BlockPPC64LT
+ BlockPPC64LE
+ BlockPPC64GT
+ BlockPPC64GE
+ BlockPPC64FLT
+ BlockPPC64FLE
+ BlockPPC64FGT
+ BlockPPC64FGE
+
+ BlockRISCV64BEQ
+ BlockRISCV64BNE
+ BlockRISCV64BLT
+ BlockRISCV64BGE
+ BlockRISCV64BLTU
+ BlockRISCV64BGEU
+ BlockRISCV64BEQZ
+ BlockRISCV64BNEZ
+ BlockRISCV64BLEZ
+ BlockRISCV64BGEZ
+ BlockRISCV64BLTZ
+ BlockRISCV64BGTZ
+
+ BlockS390XBRC
+ BlockS390XCRJ
+ BlockS390XCGRJ
+ BlockS390XCLRJ
+ BlockS390XCLGRJ
+ BlockS390XCIJ
+ BlockS390XCGIJ
+ BlockS390XCLIJ
+ BlockS390XCLGIJ
+
+ BlockPlain
+ BlockIf
+ BlockDefer
+ BlockRet
+ BlockRetJmp
+ BlockExit
+ BlockJumpTable
+ BlockFirst
+)
+
+var blockString = [...]string{
+ BlockInvalid: "BlockInvalid",
+
+ Block386EQ: "EQ",
+ Block386NE: "NE",
+ Block386LT: "LT",
+ Block386LE: "LE",
+ Block386GT: "GT",
+ Block386GE: "GE",
+ Block386OS: "OS",
+ Block386OC: "OC",
+ Block386ULT: "ULT",
+ Block386ULE: "ULE",
+ Block386UGT: "UGT",
+ Block386UGE: "UGE",
+ Block386EQF: "EQF",
+ Block386NEF: "NEF",
+ Block386ORD: "ORD",
+ Block386NAN: "NAN",
+
+ BlockAMD64EQ: "EQ",
+ BlockAMD64NE: "NE",
+ BlockAMD64LT: "LT",
+ BlockAMD64LE: "LE",
+ BlockAMD64GT: "GT",
+ BlockAMD64GE: "GE",
+ BlockAMD64OS: "OS",
+ BlockAMD64OC: "OC",
+ BlockAMD64ULT: "ULT",
+ BlockAMD64ULE: "ULE",
+ BlockAMD64UGT: "UGT",
+ BlockAMD64UGE: "UGE",
+ BlockAMD64EQF: "EQF",
+ BlockAMD64NEF: "NEF",
+ BlockAMD64ORD: "ORD",
+ BlockAMD64NAN: "NAN",
+ BlockAMD64JUMPTABLE: "JUMPTABLE",
+
+ BlockARMEQ: "EQ",
+ BlockARMNE: "NE",
+ BlockARMLT: "LT",
+ BlockARMLE: "LE",
+ BlockARMGT: "GT",
+ BlockARMGE: "GE",
+ BlockARMULT: "ULT",
+ BlockARMULE: "ULE",
+ BlockARMUGT: "UGT",
+ BlockARMUGE: "UGE",
+ BlockARMLTnoov: "LTnoov",
+ BlockARMLEnoov: "LEnoov",
+ BlockARMGTnoov: "GTnoov",
+ BlockARMGEnoov: "GEnoov",
+
+ BlockARM64EQ: "EQ",
+ BlockARM64NE: "NE",
+ BlockARM64LT: "LT",
+ BlockARM64LE: "LE",
+ BlockARM64GT: "GT",
+ BlockARM64GE: "GE",
+ BlockARM64ULT: "ULT",
+ BlockARM64ULE: "ULE",
+ BlockARM64UGT: "UGT",
+ BlockARM64UGE: "UGE",
+ BlockARM64Z: "Z",
+ BlockARM64NZ: "NZ",
+ BlockARM64ZW: "ZW",
+ BlockARM64NZW: "NZW",
+ BlockARM64TBZ: "TBZ",
+ BlockARM64TBNZ: "TBNZ",
+ BlockARM64FLT: "FLT",
+ BlockARM64FLE: "FLE",
+ BlockARM64FGT: "FGT",
+ BlockARM64FGE: "FGE",
+ BlockARM64LTnoov: "LTnoov",
+ BlockARM64LEnoov: "LEnoov",
+ BlockARM64GTnoov: "GTnoov",
+ BlockARM64GEnoov: "GEnoov",
+ BlockARM64JUMPTABLE: "JUMPTABLE",
+
+ BlockLOONG64EQ: "EQ",
+ BlockLOONG64NE: "NE",
+ BlockLOONG64LTZ: "LTZ",
+ BlockLOONG64LEZ: "LEZ",
+ BlockLOONG64GTZ: "GTZ",
+ BlockLOONG64GEZ: "GEZ",
+ BlockLOONG64FPT: "FPT",
+ BlockLOONG64FPF: "FPF",
+
+ BlockMIPSEQ: "EQ",
+ BlockMIPSNE: "NE",
+ BlockMIPSLTZ: "LTZ",
+ BlockMIPSLEZ: "LEZ",
+ BlockMIPSGTZ: "GTZ",
+ BlockMIPSGEZ: "GEZ",
+ BlockMIPSFPT: "FPT",
+ BlockMIPSFPF: "FPF",
+
+ BlockMIPS64EQ: "EQ",
+ BlockMIPS64NE: "NE",
+ BlockMIPS64LTZ: "LTZ",
+ BlockMIPS64LEZ: "LEZ",
+ BlockMIPS64GTZ: "GTZ",
+ BlockMIPS64GEZ: "GEZ",
+ BlockMIPS64FPT: "FPT",
+ BlockMIPS64FPF: "FPF",
+
+ BlockPPC64EQ: "EQ",
+ BlockPPC64NE: "NE",
+ BlockPPC64LT: "LT",
+ BlockPPC64LE: "LE",
+ BlockPPC64GT: "GT",
+ BlockPPC64GE: "GE",
+ BlockPPC64FLT: "FLT",
+ BlockPPC64FLE: "FLE",
+ BlockPPC64FGT: "FGT",
+ BlockPPC64FGE: "FGE",
+
+ BlockRISCV64BEQ: "BEQ",
+ BlockRISCV64BNE: "BNE",
+ BlockRISCV64BLT: "BLT",
+ BlockRISCV64BGE: "BGE",
+ BlockRISCV64BLTU: "BLTU",
+ BlockRISCV64BGEU: "BGEU",
+ BlockRISCV64BEQZ: "BEQZ",
+ BlockRISCV64BNEZ: "BNEZ",
+ BlockRISCV64BLEZ: "BLEZ",
+ BlockRISCV64BGEZ: "BGEZ",
+ BlockRISCV64BLTZ: "BLTZ",
+ BlockRISCV64BGTZ: "BGTZ",
+
+ BlockS390XBRC: "BRC",
+ BlockS390XCRJ: "CRJ",
+ BlockS390XCGRJ: "CGRJ",
+ BlockS390XCLRJ: "CLRJ",
+ BlockS390XCLGRJ: "CLGRJ",
+ BlockS390XCIJ: "CIJ",
+ BlockS390XCGIJ: "CGIJ",
+ BlockS390XCLIJ: "CLIJ",
+ BlockS390XCLGIJ: "CLGIJ",
+
+ BlockPlain: "Plain",
+ BlockIf: "If",
+ BlockDefer: "Defer",
+ BlockRet: "Ret",
+ BlockRetJmp: "RetJmp",
+ BlockExit: "Exit",
+ BlockJumpTable: "JumpTable",
+ BlockFirst: "First",
+}
+
+func (k BlockKind) String() string { return blockString[k] }
+func (k BlockKind) AuxIntType() string {
+ switch k {
+ case BlockARM64TBZ:
+ return "int64"
+ case BlockARM64TBNZ:
+ return "int64"
+ case BlockS390XCIJ:
+ return "int8"
+ case BlockS390XCGIJ:
+ return "int8"
+ case BlockS390XCLIJ:
+ return "uint8"
+ case BlockS390XCLGIJ:
+ return "uint8"
+ }
+ return ""
+}
+
+const (
+ OpInvalid Op = iota
+
+ Op386ADDSS
+ Op386ADDSD
+ Op386SUBSS
+ Op386SUBSD
+ Op386MULSS
+ Op386MULSD
+ Op386DIVSS
+ Op386DIVSD
+ Op386MOVSSload
+ Op386MOVSDload
+ Op386MOVSSconst
+ Op386MOVSDconst
+ Op386MOVSSloadidx1
+ Op386MOVSSloadidx4
+ Op386MOVSDloadidx1
+ Op386MOVSDloadidx8
+ Op386MOVSSstore
+ Op386MOVSDstore
+ Op386MOVSSstoreidx1
+ Op386MOVSSstoreidx4
+ Op386MOVSDstoreidx1
+ Op386MOVSDstoreidx8
+ Op386ADDSSload
+ Op386ADDSDload
+ Op386SUBSSload
+ Op386SUBSDload
+ Op386MULSSload
+ Op386MULSDload
+ Op386DIVSSload
+ Op386DIVSDload
+ Op386ADDL
+ Op386ADDLconst
+ Op386ADDLcarry
+ Op386ADDLconstcarry
+ Op386ADCL
+ Op386ADCLconst
+ Op386SUBL
+ Op386SUBLconst
+ Op386SUBLcarry
+ Op386SUBLconstcarry
+ Op386SBBL
+ Op386SBBLconst
+ Op386MULL
+ Op386MULLconst
+ Op386MULLU
+ Op386HMULL
+ Op386HMULLU
+ Op386MULLQU
+ Op386AVGLU
+ Op386DIVL
+ Op386DIVW
+ Op386DIVLU
+ Op386DIVWU
+ Op386MODL
+ Op386MODW
+ Op386MODLU
+ Op386MODWU
+ Op386ANDL
+ Op386ANDLconst
+ Op386ORL
+ Op386ORLconst
+ Op386XORL
+ Op386XORLconst
+ Op386CMPL
+ Op386CMPW
+ Op386CMPB
+ Op386CMPLconst
+ Op386CMPWconst
+ Op386CMPBconst
+ Op386CMPLload
+ Op386CMPWload
+ Op386CMPBload
+ Op386CMPLconstload
+ Op386CMPWconstload
+ Op386CMPBconstload
+ Op386UCOMISS
+ Op386UCOMISD
+ Op386TESTL
+ Op386TESTW
+ Op386TESTB
+ Op386TESTLconst
+ Op386TESTWconst
+ Op386TESTBconst
+ Op386SHLL
+ Op386SHLLconst
+ Op386SHRL
+ Op386SHRW
+ Op386SHRB
+ Op386SHRLconst
+ Op386SHRWconst
+ Op386SHRBconst
+ Op386SARL
+ Op386SARW
+ Op386SARB
+ Op386SARLconst
+ Op386SARWconst
+ Op386SARBconst
+ Op386ROLL
+ Op386ROLW
+ Op386ROLB
+ Op386ROLLconst
+ Op386ROLWconst
+ Op386ROLBconst
+ Op386ADDLload
+ Op386SUBLload
+ Op386MULLload
+ Op386ANDLload
+ Op386ORLload
+ Op386XORLload
+ Op386ADDLloadidx4
+ Op386SUBLloadidx4
+ Op386MULLloadidx4
+ Op386ANDLloadidx4
+ Op386ORLloadidx4
+ Op386XORLloadidx4
+ Op386NEGL
+ Op386NOTL
+ Op386BSFL
+ Op386BSFW
+ Op386LoweredCtz32
+ Op386BSRL
+ Op386BSRW
+ Op386BSWAPL
+ Op386SQRTSD
+ Op386SQRTSS
+ Op386SBBLcarrymask
+ Op386SETEQ
+ Op386SETNE
+ Op386SETL
+ Op386SETLE
+ Op386SETG
+ Op386SETGE
+ Op386SETB
+ Op386SETBE
+ Op386SETA
+ Op386SETAE
+ Op386SETO
+ Op386SETEQF
+ Op386SETNEF
+ Op386SETORD
+ Op386SETNAN
+ Op386SETGF
+ Op386SETGEF
+ Op386MOVBLSX
+ Op386MOVBLZX
+ Op386MOVWLSX
+ Op386MOVWLZX
+ Op386MOVLconst
+ Op386CVTTSD2SL
+ Op386CVTTSS2SL
+ Op386CVTSL2SS
+ Op386CVTSL2SD
+ Op386CVTSD2SS
+ Op386CVTSS2SD
+ Op386PXOR
+ Op386LEAL
+ Op386LEAL1
+ Op386LEAL2
+ Op386LEAL4
+ Op386LEAL8
+ Op386MOVBload
+ Op386MOVBLSXload
+ Op386MOVWload
+ Op386MOVWLSXload
+ Op386MOVLload
+ Op386MOVBstore
+ Op386MOVWstore
+ Op386MOVLstore
+ Op386ADDLmodify
+ Op386SUBLmodify
+ Op386ANDLmodify
+ Op386ORLmodify
+ Op386XORLmodify
+ Op386ADDLmodifyidx4
+ Op386SUBLmodifyidx4
+ Op386ANDLmodifyidx4
+ Op386ORLmodifyidx4
+ Op386XORLmodifyidx4
+ Op386ADDLconstmodify
+ Op386ANDLconstmodify
+ Op386ORLconstmodify
+ Op386XORLconstmodify
+ Op386ADDLconstmodifyidx4
+ Op386ANDLconstmodifyidx4
+ Op386ORLconstmodifyidx4
+ Op386XORLconstmodifyidx4
+ Op386MOVBloadidx1
+ Op386MOVWloadidx1
+ Op386MOVWloadidx2
+ Op386MOVLloadidx1
+ Op386MOVLloadidx4
+ Op386MOVBstoreidx1
+ Op386MOVWstoreidx1
+ Op386MOVWstoreidx2
+ Op386MOVLstoreidx1
+ Op386MOVLstoreidx4
+ Op386MOVBstoreconst
+ Op386MOVWstoreconst
+ Op386MOVLstoreconst
+ Op386MOVBstoreconstidx1
+ Op386MOVWstoreconstidx1
+ Op386MOVWstoreconstidx2
+ Op386MOVLstoreconstidx1
+ Op386MOVLstoreconstidx4
+ Op386DUFFZERO
+ Op386REPSTOSL
+ Op386CALLstatic
+ Op386CALLtail
+ Op386CALLclosure
+ Op386CALLinter
+ Op386DUFFCOPY
+ Op386REPMOVSL
+ Op386InvertFlags
+ Op386LoweredGetG
+ Op386LoweredGetClosurePtr
+ Op386LoweredGetCallerPC
+ Op386LoweredGetCallerSP
+ Op386LoweredNilCheck
+ Op386LoweredWB
+ Op386LoweredPanicBoundsA
+ Op386LoweredPanicBoundsB
+ Op386LoweredPanicBoundsC
+ Op386LoweredPanicExtendA
+ Op386LoweredPanicExtendB
+ Op386LoweredPanicExtendC
+ Op386FlagEQ
+ Op386FlagLT_ULT
+ Op386FlagLT_UGT
+ Op386FlagGT_UGT
+ Op386FlagGT_ULT
+ Op386MOVSSconst1
+ Op386MOVSDconst1
+ Op386MOVSSconst2
+ Op386MOVSDconst2
+
+ OpAMD64ADDSS
+ OpAMD64ADDSD
+ OpAMD64SUBSS
+ OpAMD64SUBSD
+ OpAMD64MULSS
+ OpAMD64MULSD
+ OpAMD64DIVSS
+ OpAMD64DIVSD
+ OpAMD64MOVSSload
+ OpAMD64MOVSDload
+ OpAMD64MOVSSconst
+ OpAMD64MOVSDconst
+ OpAMD64MOVSSloadidx1
+ OpAMD64MOVSSloadidx4
+ OpAMD64MOVSDloadidx1
+ OpAMD64MOVSDloadidx8
+ OpAMD64MOVSSstore
+ OpAMD64MOVSDstore
+ OpAMD64MOVSSstoreidx1
+ OpAMD64MOVSSstoreidx4
+ OpAMD64MOVSDstoreidx1
+ OpAMD64MOVSDstoreidx8
+ OpAMD64ADDSSload
+ OpAMD64ADDSDload
+ OpAMD64SUBSSload
+ OpAMD64SUBSDload
+ OpAMD64MULSSload
+ OpAMD64MULSDload
+ OpAMD64DIVSSload
+ OpAMD64DIVSDload
+ OpAMD64ADDSSloadidx1
+ OpAMD64ADDSSloadidx4
+ OpAMD64ADDSDloadidx1
+ OpAMD64ADDSDloadidx8
+ OpAMD64SUBSSloadidx1
+ OpAMD64SUBSSloadidx4
+ OpAMD64SUBSDloadidx1
+ OpAMD64SUBSDloadidx8
+ OpAMD64MULSSloadidx1
+ OpAMD64MULSSloadidx4
+ OpAMD64MULSDloadidx1
+ OpAMD64MULSDloadidx8
+ OpAMD64DIVSSloadidx1
+ OpAMD64DIVSSloadidx4
+ OpAMD64DIVSDloadidx1
+ OpAMD64DIVSDloadidx8
+ OpAMD64ADDQ
+ OpAMD64ADDL
+ OpAMD64ADDQconst
+ OpAMD64ADDLconst
+ OpAMD64ADDQconstmodify
+ OpAMD64ADDLconstmodify
+ OpAMD64SUBQ
+ OpAMD64SUBL
+ OpAMD64SUBQconst
+ OpAMD64SUBLconst
+ OpAMD64MULQ
+ OpAMD64MULL
+ OpAMD64MULQconst
+ OpAMD64MULLconst
+ OpAMD64MULLU
+ OpAMD64MULQU
+ OpAMD64HMULQ
+ OpAMD64HMULL
+ OpAMD64HMULQU
+ OpAMD64HMULLU
+ OpAMD64AVGQU
+ OpAMD64DIVQ
+ OpAMD64DIVL
+ OpAMD64DIVW
+ OpAMD64DIVQU
+ OpAMD64DIVLU
+ OpAMD64DIVWU
+ OpAMD64NEGLflags
+ OpAMD64ADDQcarry
+ OpAMD64ADCQ
+ OpAMD64ADDQconstcarry
+ OpAMD64ADCQconst
+ OpAMD64SUBQborrow
+ OpAMD64SBBQ
+ OpAMD64SUBQconstborrow
+ OpAMD64SBBQconst
+ OpAMD64MULQU2
+ OpAMD64DIVQU2
+ OpAMD64ANDQ
+ OpAMD64ANDL
+ OpAMD64ANDQconst
+ OpAMD64ANDLconst
+ OpAMD64ANDQconstmodify
+ OpAMD64ANDLconstmodify
+ OpAMD64ORQ
+ OpAMD64ORL
+ OpAMD64ORQconst
+ OpAMD64ORLconst
+ OpAMD64ORQconstmodify
+ OpAMD64ORLconstmodify
+ OpAMD64XORQ
+ OpAMD64XORL
+ OpAMD64XORQconst
+ OpAMD64XORLconst
+ OpAMD64XORQconstmodify
+ OpAMD64XORLconstmodify
+ OpAMD64CMPQ
+ OpAMD64CMPL
+ OpAMD64CMPW
+ OpAMD64CMPB
+ OpAMD64CMPQconst
+ OpAMD64CMPLconst
+ OpAMD64CMPWconst
+ OpAMD64CMPBconst
+ OpAMD64CMPQload
+ OpAMD64CMPLload
+ OpAMD64CMPWload
+ OpAMD64CMPBload
+ OpAMD64CMPQconstload
+ OpAMD64CMPLconstload
+ OpAMD64CMPWconstload
+ OpAMD64CMPBconstload
+ OpAMD64CMPQloadidx8
+ OpAMD64CMPQloadidx1
+ OpAMD64CMPLloadidx4
+ OpAMD64CMPLloadidx1
+ OpAMD64CMPWloadidx2
+ OpAMD64CMPWloadidx1
+ OpAMD64CMPBloadidx1
+ OpAMD64CMPQconstloadidx8
+ OpAMD64CMPQconstloadidx1
+ OpAMD64CMPLconstloadidx4
+ OpAMD64CMPLconstloadidx1
+ OpAMD64CMPWconstloadidx2
+ OpAMD64CMPWconstloadidx1
+ OpAMD64CMPBconstloadidx1
+ OpAMD64UCOMISS
+ OpAMD64UCOMISD
+ OpAMD64BTL
+ OpAMD64BTQ
+ OpAMD64BTCL
+ OpAMD64BTCQ
+ OpAMD64BTRL
+ OpAMD64BTRQ
+ OpAMD64BTSL
+ OpAMD64BTSQ
+ OpAMD64BTLconst
+ OpAMD64BTQconst
+ OpAMD64BTCQconst
+ OpAMD64BTRQconst
+ OpAMD64BTSQconst
+ OpAMD64BTSQconstmodify
+ OpAMD64BTRQconstmodify
+ OpAMD64BTCQconstmodify
+ OpAMD64TESTQ
+ OpAMD64TESTL
+ OpAMD64TESTW
+ OpAMD64TESTB
+ OpAMD64TESTQconst
+ OpAMD64TESTLconst
+ OpAMD64TESTWconst
+ OpAMD64TESTBconst
+ OpAMD64SHLQ
+ OpAMD64SHLL
+ OpAMD64SHLQconst
+ OpAMD64SHLLconst
+ OpAMD64SHRQ
+ OpAMD64SHRL
+ OpAMD64SHRW
+ OpAMD64SHRB
+ OpAMD64SHRQconst
+ OpAMD64SHRLconst
+ OpAMD64SHRWconst
+ OpAMD64SHRBconst
+ OpAMD64SARQ
+ OpAMD64SARL
+ OpAMD64SARW
+ OpAMD64SARB
+ OpAMD64SARQconst
+ OpAMD64SARLconst
+ OpAMD64SARWconst
+ OpAMD64SARBconst
+ OpAMD64SHRDQ
+ OpAMD64SHLDQ
+ OpAMD64ROLQ
+ OpAMD64ROLL
+ OpAMD64ROLW
+ OpAMD64ROLB
+ OpAMD64RORQ
+ OpAMD64RORL
+ OpAMD64RORW
+ OpAMD64RORB
+ OpAMD64ROLQconst
+ OpAMD64ROLLconst
+ OpAMD64ROLWconst
+ OpAMD64ROLBconst
+ OpAMD64ADDLload
+ OpAMD64ADDQload
+ OpAMD64SUBQload
+ OpAMD64SUBLload
+ OpAMD64ANDLload
+ OpAMD64ANDQload
+ OpAMD64ORQload
+ OpAMD64ORLload
+ OpAMD64XORQload
+ OpAMD64XORLload
+ OpAMD64ADDLloadidx1
+ OpAMD64ADDLloadidx4
+ OpAMD64ADDLloadidx8
+ OpAMD64ADDQloadidx1
+ OpAMD64ADDQloadidx8
+ OpAMD64SUBLloadidx1
+ OpAMD64SUBLloadidx4
+ OpAMD64SUBLloadidx8
+ OpAMD64SUBQloadidx1
+ OpAMD64SUBQloadidx8
+ OpAMD64ANDLloadidx1
+ OpAMD64ANDLloadidx4
+ OpAMD64ANDLloadidx8
+ OpAMD64ANDQloadidx1
+ OpAMD64ANDQloadidx8
+ OpAMD64ORLloadidx1
+ OpAMD64ORLloadidx4
+ OpAMD64ORLloadidx8
+ OpAMD64ORQloadidx1
+ OpAMD64ORQloadidx8
+ OpAMD64XORLloadidx1
+ OpAMD64XORLloadidx4
+ OpAMD64XORLloadidx8
+ OpAMD64XORQloadidx1
+ OpAMD64XORQloadidx8
+ OpAMD64ADDQmodify
+ OpAMD64SUBQmodify
+ OpAMD64ANDQmodify
+ OpAMD64ORQmodify
+ OpAMD64XORQmodify
+ OpAMD64ADDLmodify
+ OpAMD64SUBLmodify
+ OpAMD64ANDLmodify
+ OpAMD64ORLmodify
+ OpAMD64XORLmodify
+ OpAMD64ADDQmodifyidx1
+ OpAMD64ADDQmodifyidx8
+ OpAMD64SUBQmodifyidx1
+ OpAMD64SUBQmodifyidx8
+ OpAMD64ANDQmodifyidx1
+ OpAMD64ANDQmodifyidx8
+ OpAMD64ORQmodifyidx1
+ OpAMD64ORQmodifyidx8
+ OpAMD64XORQmodifyidx1
+ OpAMD64XORQmodifyidx8
+ OpAMD64ADDLmodifyidx1
+ OpAMD64ADDLmodifyidx4
+ OpAMD64ADDLmodifyidx8
+ OpAMD64SUBLmodifyidx1
+ OpAMD64SUBLmodifyidx4
+ OpAMD64SUBLmodifyidx8
+ OpAMD64ANDLmodifyidx1
+ OpAMD64ANDLmodifyidx4
+ OpAMD64ANDLmodifyidx8
+ OpAMD64ORLmodifyidx1
+ OpAMD64ORLmodifyidx4
+ OpAMD64ORLmodifyidx8
+ OpAMD64XORLmodifyidx1
+ OpAMD64XORLmodifyidx4
+ OpAMD64XORLmodifyidx8
+ OpAMD64ADDQconstmodifyidx1
+ OpAMD64ADDQconstmodifyidx8
+ OpAMD64ANDQconstmodifyidx1
+ OpAMD64ANDQconstmodifyidx8
+ OpAMD64ORQconstmodifyidx1
+ OpAMD64ORQconstmodifyidx8
+ OpAMD64XORQconstmodifyidx1
+ OpAMD64XORQconstmodifyidx8
+ OpAMD64ADDLconstmodifyidx1
+ OpAMD64ADDLconstmodifyidx4
+ OpAMD64ADDLconstmodifyidx8
+ OpAMD64ANDLconstmodifyidx1
+ OpAMD64ANDLconstmodifyidx4
+ OpAMD64ANDLconstmodifyidx8
+ OpAMD64ORLconstmodifyidx1
+ OpAMD64ORLconstmodifyidx4
+ OpAMD64ORLconstmodifyidx8
+ OpAMD64XORLconstmodifyidx1
+ OpAMD64XORLconstmodifyidx4
+ OpAMD64XORLconstmodifyidx8
+ OpAMD64NEGQ
+ OpAMD64NEGL
+ OpAMD64NOTQ
+ OpAMD64NOTL
+ OpAMD64BSFQ
+ OpAMD64BSFL
+ OpAMD64BSRQ
+ OpAMD64BSRL
+ OpAMD64CMOVQEQ
+ OpAMD64CMOVQNE
+ OpAMD64CMOVQLT
+ OpAMD64CMOVQGT
+ OpAMD64CMOVQLE
+ OpAMD64CMOVQGE
+ OpAMD64CMOVQLS
+ OpAMD64CMOVQHI
+ OpAMD64CMOVQCC
+ OpAMD64CMOVQCS
+ OpAMD64CMOVLEQ
+ OpAMD64CMOVLNE
+ OpAMD64CMOVLLT
+ OpAMD64CMOVLGT
+ OpAMD64CMOVLLE
+ OpAMD64CMOVLGE
+ OpAMD64CMOVLLS
+ OpAMD64CMOVLHI
+ OpAMD64CMOVLCC
+ OpAMD64CMOVLCS
+ OpAMD64CMOVWEQ
+ OpAMD64CMOVWNE
+ OpAMD64CMOVWLT
+ OpAMD64CMOVWGT
+ OpAMD64CMOVWLE
+ OpAMD64CMOVWGE
+ OpAMD64CMOVWLS
+ OpAMD64CMOVWHI
+ OpAMD64CMOVWCC
+ OpAMD64CMOVWCS
+ OpAMD64CMOVQEQF
+ OpAMD64CMOVQNEF
+ OpAMD64CMOVQGTF
+ OpAMD64CMOVQGEF
+ OpAMD64CMOVLEQF
+ OpAMD64CMOVLNEF
+ OpAMD64CMOVLGTF
+ OpAMD64CMOVLGEF
+ OpAMD64CMOVWEQF
+ OpAMD64CMOVWNEF
+ OpAMD64CMOVWGTF
+ OpAMD64CMOVWGEF
+ OpAMD64BSWAPQ
+ OpAMD64BSWAPL
+ OpAMD64POPCNTQ
+ OpAMD64POPCNTL
+ OpAMD64SQRTSD
+ OpAMD64SQRTSS
+ OpAMD64ROUNDSD
+ OpAMD64VFMADD231SD
+ OpAMD64MINSD
+ OpAMD64MINSS
+ OpAMD64SBBQcarrymask
+ OpAMD64SBBLcarrymask
+ OpAMD64SETEQ
+ OpAMD64SETNE
+ OpAMD64SETL
+ OpAMD64SETLE
+ OpAMD64SETG
+ OpAMD64SETGE
+ OpAMD64SETB
+ OpAMD64SETBE
+ OpAMD64SETA
+ OpAMD64SETAE
+ OpAMD64SETO
+ OpAMD64SETEQstore
+ OpAMD64SETNEstore
+ OpAMD64SETLstore
+ OpAMD64SETLEstore
+ OpAMD64SETGstore
+ OpAMD64SETGEstore
+ OpAMD64SETBstore
+ OpAMD64SETBEstore
+ OpAMD64SETAstore
+ OpAMD64SETAEstore
+ OpAMD64SETEQstoreidx1
+ OpAMD64SETNEstoreidx1
+ OpAMD64SETLstoreidx1
+ OpAMD64SETLEstoreidx1
+ OpAMD64SETGstoreidx1
+ OpAMD64SETGEstoreidx1
+ OpAMD64SETBstoreidx1
+ OpAMD64SETBEstoreidx1
+ OpAMD64SETAstoreidx1
+ OpAMD64SETAEstoreidx1
+ OpAMD64SETEQF
+ OpAMD64SETNEF
+ OpAMD64SETORD
+ OpAMD64SETNAN
+ OpAMD64SETGF
+ OpAMD64SETGEF
+ OpAMD64MOVBQSX
+ OpAMD64MOVBQZX
+ OpAMD64MOVWQSX
+ OpAMD64MOVWQZX
+ OpAMD64MOVLQSX
+ OpAMD64MOVLQZX
+ OpAMD64MOVLconst
+ OpAMD64MOVQconst
+ OpAMD64CVTTSD2SL
+ OpAMD64CVTTSD2SQ
+ OpAMD64CVTTSS2SL
+ OpAMD64CVTTSS2SQ
+ OpAMD64CVTSL2SS
+ OpAMD64CVTSL2SD
+ OpAMD64CVTSQ2SS
+ OpAMD64CVTSQ2SD
+ OpAMD64CVTSD2SS
+ OpAMD64CVTSS2SD
+ OpAMD64MOVQi2f
+ OpAMD64MOVQf2i
+ OpAMD64MOVLi2f
+ OpAMD64MOVLf2i
+ OpAMD64PXOR
+ OpAMD64POR
+ OpAMD64LEAQ
+ OpAMD64LEAL
+ OpAMD64LEAW
+ OpAMD64LEAQ1
+ OpAMD64LEAL1
+ OpAMD64LEAW1
+ OpAMD64LEAQ2
+ OpAMD64LEAL2
+ OpAMD64LEAW2
+ OpAMD64LEAQ4
+ OpAMD64LEAL4
+ OpAMD64LEAW4
+ OpAMD64LEAQ8
+ OpAMD64LEAL8
+ OpAMD64LEAW8
+ OpAMD64MOVBload
+ OpAMD64MOVBQSXload
+ OpAMD64MOVWload
+ OpAMD64MOVWQSXload
+ OpAMD64MOVLload
+ OpAMD64MOVLQSXload
+ OpAMD64MOVQload
+ OpAMD64MOVBstore
+ OpAMD64MOVWstore
+ OpAMD64MOVLstore
+ OpAMD64MOVQstore
+ OpAMD64MOVOload
+ OpAMD64MOVOstore
+ OpAMD64MOVBloadidx1
+ OpAMD64MOVWloadidx1
+ OpAMD64MOVWloadidx2
+ OpAMD64MOVLloadidx1
+ OpAMD64MOVLloadidx4
+ OpAMD64MOVLloadidx8
+ OpAMD64MOVQloadidx1
+ OpAMD64MOVQloadidx8
+ OpAMD64MOVBstoreidx1
+ OpAMD64MOVWstoreidx1
+ OpAMD64MOVWstoreidx2
+ OpAMD64MOVLstoreidx1
+ OpAMD64MOVLstoreidx4
+ OpAMD64MOVLstoreidx8
+ OpAMD64MOVQstoreidx1
+ OpAMD64MOVQstoreidx8
+ OpAMD64MOVBstoreconst
+ OpAMD64MOVWstoreconst
+ OpAMD64MOVLstoreconst
+ OpAMD64MOVQstoreconst
+ OpAMD64MOVOstoreconst
+ OpAMD64MOVBstoreconstidx1
+ OpAMD64MOVWstoreconstidx1
+ OpAMD64MOVWstoreconstidx2
+ OpAMD64MOVLstoreconstidx1
+ OpAMD64MOVLstoreconstidx4
+ OpAMD64MOVQstoreconstidx1
+ OpAMD64MOVQstoreconstidx8
+ OpAMD64DUFFZERO
+ OpAMD64REPSTOSQ
+ OpAMD64CALLstatic
+ OpAMD64CALLtail
+ OpAMD64CALLclosure
+ OpAMD64CALLinter
+ OpAMD64DUFFCOPY
+ OpAMD64REPMOVSQ
+ OpAMD64InvertFlags
+ OpAMD64LoweredGetG
+ OpAMD64LoweredGetClosurePtr
+ OpAMD64LoweredGetCallerPC
+ OpAMD64LoweredGetCallerSP
+ OpAMD64LoweredNilCheck
+ OpAMD64LoweredWB
+ OpAMD64LoweredHasCPUFeature
+ OpAMD64LoweredPanicBoundsA
+ OpAMD64LoweredPanicBoundsB
+ OpAMD64LoweredPanicBoundsC
+ OpAMD64FlagEQ
+ OpAMD64FlagLT_ULT
+ OpAMD64FlagLT_UGT
+ OpAMD64FlagGT_UGT
+ OpAMD64FlagGT_ULT
+ OpAMD64MOVBatomicload
+ OpAMD64MOVLatomicload
+ OpAMD64MOVQatomicload
+ OpAMD64XCHGB
+ OpAMD64XCHGL
+ OpAMD64XCHGQ
+ OpAMD64XADDLlock
+ OpAMD64XADDQlock
+ OpAMD64AddTupleFirst32
+ OpAMD64AddTupleFirst64
+ OpAMD64CMPXCHGLlock
+ OpAMD64CMPXCHGQlock
+ OpAMD64ANDBlock
+ OpAMD64ANDLlock
+ OpAMD64ORBlock
+ OpAMD64ORLlock
+ OpAMD64PrefetchT0
+ OpAMD64PrefetchNTA
+ OpAMD64ANDNQ
+ OpAMD64ANDNL
+ OpAMD64BLSIQ
+ OpAMD64BLSIL
+ OpAMD64BLSMSKQ
+ OpAMD64BLSMSKL
+ OpAMD64BLSRQ
+ OpAMD64BLSRL
+ OpAMD64TZCNTQ
+ OpAMD64TZCNTL
+ OpAMD64LZCNTQ
+ OpAMD64LZCNTL
+ OpAMD64MOVBEWstore
+ OpAMD64MOVBELload
+ OpAMD64MOVBELstore
+ OpAMD64MOVBEQload
+ OpAMD64MOVBEQstore
+ OpAMD64MOVBELloadidx1
+ OpAMD64MOVBELloadidx4
+ OpAMD64MOVBELloadidx8
+ OpAMD64MOVBEQloadidx1
+ OpAMD64MOVBEQloadidx8
+ OpAMD64MOVBEWstoreidx1
+ OpAMD64MOVBEWstoreidx2
+ OpAMD64MOVBELstoreidx1
+ OpAMD64MOVBELstoreidx4
+ OpAMD64MOVBELstoreidx8
+ OpAMD64MOVBEQstoreidx1
+ OpAMD64MOVBEQstoreidx8
+ OpAMD64SARXQ
+ OpAMD64SARXL
+ OpAMD64SHLXQ
+ OpAMD64SHLXL
+ OpAMD64SHRXQ
+ OpAMD64SHRXL
+ OpAMD64SARXLload
+ OpAMD64SARXQload
+ OpAMD64SHLXLload
+ OpAMD64SHLXQload
+ OpAMD64SHRXLload
+ OpAMD64SHRXQload
+ OpAMD64SARXLloadidx1
+ OpAMD64SARXLloadidx4
+ OpAMD64SARXLloadidx8
+ OpAMD64SARXQloadidx1
+ OpAMD64SARXQloadidx8
+ OpAMD64SHLXLloadidx1
+ OpAMD64SHLXLloadidx4
+ OpAMD64SHLXLloadidx8
+ OpAMD64SHLXQloadidx1
+ OpAMD64SHLXQloadidx8
+ OpAMD64SHRXLloadidx1
+ OpAMD64SHRXLloadidx4
+ OpAMD64SHRXLloadidx8
+ OpAMD64SHRXQloadidx1
+ OpAMD64SHRXQloadidx8
+
+ OpARMADD
+ OpARMADDconst
+ OpARMSUB
+ OpARMSUBconst
+ OpARMRSB
+ OpARMRSBconst
+ OpARMMUL
+ OpARMHMUL
+ OpARMHMULU
+ OpARMCALLudiv
+ OpARMADDS
+ OpARMADDSconst
+ OpARMADC
+ OpARMADCconst
+ OpARMSUBS
+ OpARMSUBSconst
+ OpARMRSBSconst
+ OpARMSBC
+ OpARMSBCconst
+ OpARMRSCconst
+ OpARMMULLU
+ OpARMMULA
+ OpARMMULS
+ OpARMADDF
+ OpARMADDD
+ OpARMSUBF
+ OpARMSUBD
+ OpARMMULF
+ OpARMMULD
+ OpARMNMULF
+ OpARMNMULD
+ OpARMDIVF
+ OpARMDIVD
+ OpARMMULAF
+ OpARMMULAD
+ OpARMMULSF
+ OpARMMULSD
+ OpARMFMULAD
+ OpARMAND
+ OpARMANDconst
+ OpARMOR
+ OpARMORconst
+ OpARMXOR
+ OpARMXORconst
+ OpARMBIC
+ OpARMBICconst
+ OpARMBFX
+ OpARMBFXU
+ OpARMMVN
+ OpARMNEGF
+ OpARMNEGD
+ OpARMSQRTD
+ OpARMSQRTF
+ OpARMABSD
+ OpARMCLZ
+ OpARMREV
+ OpARMREV16
+ OpARMRBIT
+ OpARMSLL
+ OpARMSLLconst
+ OpARMSRL
+ OpARMSRLconst
+ OpARMSRA
+ OpARMSRAconst
+ OpARMSRR
+ OpARMSRRconst
+ OpARMADDshiftLL
+ OpARMADDshiftRL
+ OpARMADDshiftRA
+ OpARMSUBshiftLL
+ OpARMSUBshiftRL
+ OpARMSUBshiftRA
+ OpARMRSBshiftLL
+ OpARMRSBshiftRL
+ OpARMRSBshiftRA
+ OpARMANDshiftLL
+ OpARMANDshiftRL
+ OpARMANDshiftRA
+ OpARMORshiftLL
+ OpARMORshiftRL
+ OpARMORshiftRA
+ OpARMXORshiftLL
+ OpARMXORshiftRL
+ OpARMXORshiftRA
+ OpARMXORshiftRR
+ OpARMBICshiftLL
+ OpARMBICshiftRL
+ OpARMBICshiftRA
+ OpARMMVNshiftLL
+ OpARMMVNshiftRL
+ OpARMMVNshiftRA
+ OpARMADCshiftLL
+ OpARMADCshiftRL
+ OpARMADCshiftRA
+ OpARMSBCshiftLL
+ OpARMSBCshiftRL
+ OpARMSBCshiftRA
+ OpARMRSCshiftLL
+ OpARMRSCshiftRL
+ OpARMRSCshiftRA
+ OpARMADDSshiftLL
+ OpARMADDSshiftRL
+ OpARMADDSshiftRA
+ OpARMSUBSshiftLL
+ OpARMSUBSshiftRL
+ OpARMSUBSshiftRA
+ OpARMRSBSshiftLL
+ OpARMRSBSshiftRL
+ OpARMRSBSshiftRA
+ OpARMADDshiftLLreg
+ OpARMADDshiftRLreg
+ OpARMADDshiftRAreg
+ OpARMSUBshiftLLreg
+ OpARMSUBshiftRLreg
+ OpARMSUBshiftRAreg
+ OpARMRSBshiftLLreg
+ OpARMRSBshiftRLreg
+ OpARMRSBshiftRAreg
+ OpARMANDshiftLLreg
+ OpARMANDshiftRLreg
+ OpARMANDshiftRAreg
+ OpARMORshiftLLreg
+ OpARMORshiftRLreg
+ OpARMORshiftRAreg
+ OpARMXORshiftLLreg
+ OpARMXORshiftRLreg
+ OpARMXORshiftRAreg
+ OpARMBICshiftLLreg
+ OpARMBICshiftRLreg
+ OpARMBICshiftRAreg
+ OpARMMVNshiftLLreg
+ OpARMMVNshiftRLreg
+ OpARMMVNshiftRAreg
+ OpARMADCshiftLLreg
+ OpARMADCshiftRLreg
+ OpARMADCshiftRAreg
+ OpARMSBCshiftLLreg
+ OpARMSBCshiftRLreg
+ OpARMSBCshiftRAreg
+ OpARMRSCshiftLLreg
+ OpARMRSCshiftRLreg
+ OpARMRSCshiftRAreg
+ OpARMADDSshiftLLreg
+ OpARMADDSshiftRLreg
+ OpARMADDSshiftRAreg
+ OpARMSUBSshiftLLreg
+ OpARMSUBSshiftRLreg
+ OpARMSUBSshiftRAreg
+ OpARMRSBSshiftLLreg
+ OpARMRSBSshiftRLreg
+ OpARMRSBSshiftRAreg
+ OpARMCMP
+ OpARMCMPconst
+ OpARMCMN
+ OpARMCMNconst
+ OpARMTST
+ OpARMTSTconst
+ OpARMTEQ
+ OpARMTEQconst
+ OpARMCMPF
+ OpARMCMPD
+ OpARMCMPshiftLL
+ OpARMCMPshiftRL
+ OpARMCMPshiftRA
+ OpARMCMNshiftLL
+ OpARMCMNshiftRL
+ OpARMCMNshiftRA
+ OpARMTSTshiftLL
+ OpARMTSTshiftRL
+ OpARMTSTshiftRA
+ OpARMTEQshiftLL
+ OpARMTEQshiftRL
+ OpARMTEQshiftRA
+ OpARMCMPshiftLLreg
+ OpARMCMPshiftRLreg
+ OpARMCMPshiftRAreg
+ OpARMCMNshiftLLreg
+ OpARMCMNshiftRLreg
+ OpARMCMNshiftRAreg
+ OpARMTSTshiftLLreg
+ OpARMTSTshiftRLreg
+ OpARMTSTshiftRAreg
+ OpARMTEQshiftLLreg
+ OpARMTEQshiftRLreg
+ OpARMTEQshiftRAreg
+ OpARMCMPF0
+ OpARMCMPD0
+ OpARMMOVWconst
+ OpARMMOVFconst
+ OpARMMOVDconst
+ OpARMMOVWaddr
+ OpARMMOVBload
+ OpARMMOVBUload
+ OpARMMOVHload
+ OpARMMOVHUload
+ OpARMMOVWload
+ OpARMMOVFload
+ OpARMMOVDload
+ OpARMMOVBstore
+ OpARMMOVHstore
+ OpARMMOVWstore
+ OpARMMOVFstore
+ OpARMMOVDstore
+ OpARMMOVWloadidx
+ OpARMMOVWloadshiftLL
+ OpARMMOVWloadshiftRL
+ OpARMMOVWloadshiftRA
+ OpARMMOVBUloadidx
+ OpARMMOVBloadidx
+ OpARMMOVHUloadidx
+ OpARMMOVHloadidx
+ OpARMMOVWstoreidx
+ OpARMMOVWstoreshiftLL
+ OpARMMOVWstoreshiftRL
+ OpARMMOVWstoreshiftRA
+ OpARMMOVBstoreidx
+ OpARMMOVHstoreidx
+ OpARMMOVBreg
+ OpARMMOVBUreg
+ OpARMMOVHreg
+ OpARMMOVHUreg
+ OpARMMOVWreg
+ OpARMMOVWnop
+ OpARMMOVWF
+ OpARMMOVWD
+ OpARMMOVWUF
+ OpARMMOVWUD
+ OpARMMOVFW
+ OpARMMOVDW
+ OpARMMOVFWU
+ OpARMMOVDWU
+ OpARMMOVFD
+ OpARMMOVDF
+ OpARMCMOVWHSconst
+ OpARMCMOVWLSconst
+ OpARMSRAcond
+ OpARMCALLstatic
+ OpARMCALLtail
+ OpARMCALLclosure
+ OpARMCALLinter
+ OpARMLoweredNilCheck
+ OpARMEqual
+ OpARMNotEqual
+ OpARMLessThan
+ OpARMLessEqual
+ OpARMGreaterThan
+ OpARMGreaterEqual
+ OpARMLessThanU
+ OpARMLessEqualU
+ OpARMGreaterThanU
+ OpARMGreaterEqualU
+ OpARMDUFFZERO
+ OpARMDUFFCOPY
+ OpARMLoweredZero
+ OpARMLoweredMove
+ OpARMLoweredGetClosurePtr
+ OpARMLoweredGetCallerSP
+ OpARMLoweredGetCallerPC
+ OpARMLoweredPanicBoundsA
+ OpARMLoweredPanicBoundsB
+ OpARMLoweredPanicBoundsC
+ OpARMLoweredPanicExtendA
+ OpARMLoweredPanicExtendB
+ OpARMLoweredPanicExtendC
+ OpARMFlagConstant
+ OpARMInvertFlags
+ OpARMLoweredWB
+
+ OpARM64ADCSflags
+ OpARM64ADCzerocarry
+ OpARM64ADD
+ OpARM64ADDconst
+ OpARM64ADDSconstflags
+ OpARM64ADDSflags
+ OpARM64SUB
+ OpARM64SUBconst
+ OpARM64SBCSflags
+ OpARM64SUBSflags
+ OpARM64MUL
+ OpARM64MULW
+ OpARM64MNEG
+ OpARM64MNEGW
+ OpARM64MULH
+ OpARM64UMULH
+ OpARM64MULL
+ OpARM64UMULL
+ OpARM64DIV
+ OpARM64UDIV
+ OpARM64DIVW
+ OpARM64UDIVW
+ OpARM64MOD
+ OpARM64UMOD
+ OpARM64MODW
+ OpARM64UMODW
+ OpARM64FADDS
+ OpARM64FADDD
+ OpARM64FSUBS
+ OpARM64FSUBD
+ OpARM64FMULS
+ OpARM64FMULD
+ OpARM64FNMULS
+ OpARM64FNMULD
+ OpARM64FDIVS
+ OpARM64FDIVD
+ OpARM64AND
+ OpARM64ANDconst
+ OpARM64OR
+ OpARM64ORconst
+ OpARM64XOR
+ OpARM64XORconst
+ OpARM64BIC
+ OpARM64EON
+ OpARM64ORN
+ OpARM64MVN
+ OpARM64NEG
+ OpARM64NEGSflags
+ OpARM64NGCzerocarry
+ OpARM64FABSD
+ OpARM64FNEGS
+ OpARM64FNEGD
+ OpARM64FSQRTD
+ OpARM64FSQRTS
+ OpARM64FMIND
+ OpARM64FMINS
+ OpARM64FMAXD
+ OpARM64FMAXS
+ OpARM64REV
+ OpARM64REVW
+ OpARM64REV16
+ OpARM64REV16W
+ OpARM64RBIT
+ OpARM64RBITW
+ OpARM64CLZ
+ OpARM64CLZW
+ OpARM64VCNT
+ OpARM64VUADDLV
+ OpARM64LoweredRound32F
+ OpARM64LoweredRound64F
+ OpARM64FMADDS
+ OpARM64FMADDD
+ OpARM64FNMADDS
+ OpARM64FNMADDD
+ OpARM64FMSUBS
+ OpARM64FMSUBD
+ OpARM64FNMSUBS
+ OpARM64FNMSUBD
+ OpARM64MADD
+ OpARM64MADDW
+ OpARM64MSUB
+ OpARM64MSUBW
+ OpARM64SLL
+ OpARM64SLLconst
+ OpARM64SRL
+ OpARM64SRLconst
+ OpARM64SRA
+ OpARM64SRAconst
+ OpARM64ROR
+ OpARM64RORW
+ OpARM64RORconst
+ OpARM64RORWconst
+ OpARM64EXTRconst
+ OpARM64EXTRWconst
+ OpARM64CMP
+ OpARM64CMPconst
+ OpARM64CMPW
+ OpARM64CMPWconst
+ OpARM64CMN
+ OpARM64CMNconst
+ OpARM64CMNW
+ OpARM64CMNWconst
+ OpARM64TST
+ OpARM64TSTconst
+ OpARM64TSTW
+ OpARM64TSTWconst
+ OpARM64FCMPS
+ OpARM64FCMPD
+ OpARM64FCMPS0
+ OpARM64FCMPD0
+ OpARM64MVNshiftLL
+ OpARM64MVNshiftRL
+ OpARM64MVNshiftRA
+ OpARM64MVNshiftRO
+ OpARM64NEGshiftLL
+ OpARM64NEGshiftRL
+ OpARM64NEGshiftRA
+ OpARM64ADDshiftLL
+ OpARM64ADDshiftRL
+ OpARM64ADDshiftRA
+ OpARM64SUBshiftLL
+ OpARM64SUBshiftRL
+ OpARM64SUBshiftRA
+ OpARM64ANDshiftLL
+ OpARM64ANDshiftRL
+ OpARM64ANDshiftRA
+ OpARM64ANDshiftRO
+ OpARM64ORshiftLL
+ OpARM64ORshiftRL
+ OpARM64ORshiftRA
+ OpARM64ORshiftRO
+ OpARM64XORshiftLL
+ OpARM64XORshiftRL
+ OpARM64XORshiftRA
+ OpARM64XORshiftRO
+ OpARM64BICshiftLL
+ OpARM64BICshiftRL
+ OpARM64BICshiftRA
+ OpARM64BICshiftRO
+ OpARM64EONshiftLL
+ OpARM64EONshiftRL
+ OpARM64EONshiftRA
+ OpARM64EONshiftRO
+ OpARM64ORNshiftLL
+ OpARM64ORNshiftRL
+ OpARM64ORNshiftRA
+ OpARM64ORNshiftRO
+ OpARM64CMPshiftLL
+ OpARM64CMPshiftRL
+ OpARM64CMPshiftRA
+ OpARM64CMNshiftLL
+ OpARM64CMNshiftRL
+ OpARM64CMNshiftRA
+ OpARM64TSTshiftLL
+ OpARM64TSTshiftRL
+ OpARM64TSTshiftRA
+ OpARM64TSTshiftRO
+ OpARM64BFI
+ OpARM64BFXIL
+ OpARM64SBFIZ
+ OpARM64SBFX
+ OpARM64UBFIZ
+ OpARM64UBFX
+ OpARM64MOVDconst
+ OpARM64FMOVSconst
+ OpARM64FMOVDconst
+ OpARM64MOVDaddr
+ OpARM64MOVBload
+ OpARM64MOVBUload
+ OpARM64MOVHload
+ OpARM64MOVHUload
+ OpARM64MOVWload
+ OpARM64MOVWUload
+ OpARM64MOVDload
+ OpARM64LDP
+ OpARM64FMOVSload
+ OpARM64FMOVDload
+ OpARM64MOVDloadidx
+ OpARM64MOVWloadidx
+ OpARM64MOVWUloadidx
+ OpARM64MOVHloadidx
+ OpARM64MOVHUloadidx
+ OpARM64MOVBloadidx
+ OpARM64MOVBUloadidx
+ OpARM64FMOVSloadidx
+ OpARM64FMOVDloadidx
+ OpARM64MOVHloadidx2
+ OpARM64MOVHUloadidx2
+ OpARM64MOVWloadidx4
+ OpARM64MOVWUloadidx4
+ OpARM64MOVDloadidx8
+ OpARM64FMOVSloadidx4
+ OpARM64FMOVDloadidx8
+ OpARM64MOVBstore
+ OpARM64MOVHstore
+ OpARM64MOVWstore
+ OpARM64MOVDstore
+ OpARM64STP
+ OpARM64FMOVSstore
+ OpARM64FMOVDstore
+ OpARM64MOVBstoreidx
+ OpARM64MOVHstoreidx
+ OpARM64MOVWstoreidx
+ OpARM64MOVDstoreidx
+ OpARM64FMOVSstoreidx
+ OpARM64FMOVDstoreidx
+ OpARM64MOVHstoreidx2
+ OpARM64MOVWstoreidx4
+ OpARM64MOVDstoreidx8
+ OpARM64FMOVSstoreidx4
+ OpARM64FMOVDstoreidx8
+ OpARM64MOVBstorezero
+ OpARM64MOVHstorezero
+ OpARM64MOVWstorezero
+ OpARM64MOVDstorezero
+ OpARM64MOVQstorezero
+ OpARM64MOVBstorezeroidx
+ OpARM64MOVHstorezeroidx
+ OpARM64MOVWstorezeroidx
+ OpARM64MOVDstorezeroidx
+ OpARM64MOVHstorezeroidx2
+ OpARM64MOVWstorezeroidx4
+ OpARM64MOVDstorezeroidx8
+ OpARM64FMOVDgpfp
+ OpARM64FMOVDfpgp
+ OpARM64FMOVSgpfp
+ OpARM64FMOVSfpgp
+ OpARM64MOVBreg
+ OpARM64MOVBUreg
+ OpARM64MOVHreg
+ OpARM64MOVHUreg
+ OpARM64MOVWreg
+ OpARM64MOVWUreg
+ OpARM64MOVDreg
+ OpARM64MOVDnop
+ OpARM64SCVTFWS
+ OpARM64SCVTFWD
+ OpARM64UCVTFWS
+ OpARM64UCVTFWD
+ OpARM64SCVTFS
+ OpARM64SCVTFD
+ OpARM64UCVTFS
+ OpARM64UCVTFD
+ OpARM64FCVTZSSW
+ OpARM64FCVTZSDW
+ OpARM64FCVTZUSW
+ OpARM64FCVTZUDW
+ OpARM64FCVTZSS
+ OpARM64FCVTZSD
+ OpARM64FCVTZUS
+ OpARM64FCVTZUD
+ OpARM64FCVTSD
+ OpARM64FCVTDS
+ OpARM64FRINTAD
+ OpARM64FRINTMD
+ OpARM64FRINTND
+ OpARM64FRINTPD
+ OpARM64FRINTZD
+ OpARM64CSEL
+ OpARM64CSEL0
+ OpARM64CSINC
+ OpARM64CSINV
+ OpARM64CSNEG
+ OpARM64CSETM
+ OpARM64CALLstatic
+ OpARM64CALLtail
+ OpARM64CALLclosure
+ OpARM64CALLinter
+ OpARM64LoweredNilCheck
+ OpARM64Equal
+ OpARM64NotEqual
+ OpARM64LessThan
+ OpARM64LessEqual
+ OpARM64GreaterThan
+ OpARM64GreaterEqual
+ OpARM64LessThanU
+ OpARM64LessEqualU
+ OpARM64GreaterThanU
+ OpARM64GreaterEqualU
+ OpARM64LessThanF
+ OpARM64LessEqualF
+ OpARM64GreaterThanF
+ OpARM64GreaterEqualF
+ OpARM64NotLessThanF
+ OpARM64NotLessEqualF
+ OpARM64NotGreaterThanF
+ OpARM64NotGreaterEqualF
+ OpARM64LessThanNoov
+ OpARM64GreaterEqualNoov
+ OpARM64DUFFZERO
+ OpARM64LoweredZero
+ OpARM64DUFFCOPY
+ OpARM64LoweredMove
+ OpARM64LoweredGetClosurePtr
+ OpARM64LoweredGetCallerSP
+ OpARM64LoweredGetCallerPC
+ OpARM64FlagConstant
+ OpARM64InvertFlags
+ OpARM64LDAR
+ OpARM64LDARB
+ OpARM64LDARW
+ OpARM64STLRB
+ OpARM64STLR
+ OpARM64STLRW
+ OpARM64LoweredAtomicExchange64
+ OpARM64LoweredAtomicExchange32
+ OpARM64LoweredAtomicExchange64Variant
+ OpARM64LoweredAtomicExchange32Variant
+ OpARM64LoweredAtomicAdd64
+ OpARM64LoweredAtomicAdd32
+ OpARM64LoweredAtomicAdd64Variant
+ OpARM64LoweredAtomicAdd32Variant
+ OpARM64LoweredAtomicCas64
+ OpARM64LoweredAtomicCas32
+ OpARM64LoweredAtomicCas64Variant
+ OpARM64LoweredAtomicCas32Variant
+ OpARM64LoweredAtomicAnd8
+ OpARM64LoweredAtomicAnd32
+ OpARM64LoweredAtomicOr8
+ OpARM64LoweredAtomicOr32
+ OpARM64LoweredAtomicAnd8Variant
+ OpARM64LoweredAtomicAnd32Variant
+ OpARM64LoweredAtomicOr8Variant
+ OpARM64LoweredAtomicOr32Variant
+ OpARM64LoweredWB
+ OpARM64LoweredPanicBoundsA
+ OpARM64LoweredPanicBoundsB
+ OpARM64LoweredPanicBoundsC
+ OpARM64PRFM
+ OpARM64DMB
+
+ OpLOONG64ADDV
+ OpLOONG64ADDVconst
+ OpLOONG64SUBV
+ OpLOONG64SUBVconst
+ OpLOONG64MULV
+ OpLOONG64MULHV
+ OpLOONG64MULHVU
+ OpLOONG64DIVV
+ OpLOONG64DIVVU
+ OpLOONG64REMV
+ OpLOONG64REMVU
+ OpLOONG64ADDF
+ OpLOONG64ADDD
+ OpLOONG64SUBF
+ OpLOONG64SUBD
+ OpLOONG64MULF
+ OpLOONG64MULD
+ OpLOONG64DIVF
+ OpLOONG64DIVD
+ OpLOONG64AND
+ OpLOONG64ANDconst
+ OpLOONG64OR
+ OpLOONG64ORconst
+ OpLOONG64XOR
+ OpLOONG64XORconst
+ OpLOONG64NOR
+ OpLOONG64NORconst
+ OpLOONG64NEGV
+ OpLOONG64NEGF
+ OpLOONG64NEGD
+ OpLOONG64SQRTD
+ OpLOONG64SQRTF
+ OpLOONG64MASKEQZ
+ OpLOONG64MASKNEZ
+ OpLOONG64SLLV
+ OpLOONG64SLLVconst
+ OpLOONG64SRLV
+ OpLOONG64SRLVconst
+ OpLOONG64SRAV
+ OpLOONG64SRAVconst
+ OpLOONG64ROTR
+ OpLOONG64ROTRV
+ OpLOONG64ROTRconst
+ OpLOONG64ROTRVconst
+ OpLOONG64SGT
+ OpLOONG64SGTconst
+ OpLOONG64SGTU
+ OpLOONG64SGTUconst
+ OpLOONG64CMPEQF
+ OpLOONG64CMPEQD
+ OpLOONG64CMPGEF
+ OpLOONG64CMPGED
+ OpLOONG64CMPGTF
+ OpLOONG64CMPGTD
+ OpLOONG64MOVVconst
+ OpLOONG64MOVFconst
+ OpLOONG64MOVDconst
+ OpLOONG64MOVVaddr
+ OpLOONG64MOVBload
+ OpLOONG64MOVBUload
+ OpLOONG64MOVHload
+ OpLOONG64MOVHUload
+ OpLOONG64MOVWload
+ OpLOONG64MOVWUload
+ OpLOONG64MOVVload
+ OpLOONG64MOVFload
+ OpLOONG64MOVDload
+ OpLOONG64MOVBstore
+ OpLOONG64MOVHstore
+ OpLOONG64MOVWstore
+ OpLOONG64MOVVstore
+ OpLOONG64MOVFstore
+ OpLOONG64MOVDstore
+ OpLOONG64MOVBstorezero
+ OpLOONG64MOVHstorezero
+ OpLOONG64MOVWstorezero
+ OpLOONG64MOVVstorezero
+ OpLOONG64MOVBreg
+ OpLOONG64MOVBUreg
+ OpLOONG64MOVHreg
+ OpLOONG64MOVHUreg
+ OpLOONG64MOVWreg
+ OpLOONG64MOVWUreg
+ OpLOONG64MOVVreg
+ OpLOONG64MOVVnop
+ OpLOONG64MOVWF
+ OpLOONG64MOVWD
+ OpLOONG64MOVVF
+ OpLOONG64MOVVD
+ OpLOONG64TRUNCFW
+ OpLOONG64TRUNCDW
+ OpLOONG64TRUNCFV
+ OpLOONG64TRUNCDV
+ OpLOONG64MOVFD
+ OpLOONG64MOVDF
+ OpLOONG64CALLstatic
+ OpLOONG64CALLtail
+ OpLOONG64CALLclosure
+ OpLOONG64CALLinter
+ OpLOONG64DUFFZERO
+ OpLOONG64DUFFCOPY
+ OpLOONG64LoweredZero
+ OpLOONG64LoweredMove
+ OpLOONG64LoweredAtomicLoad8
+ OpLOONG64LoweredAtomicLoad32
+ OpLOONG64LoweredAtomicLoad64
+ OpLOONG64LoweredAtomicStore8
+ OpLOONG64LoweredAtomicStore32
+ OpLOONG64LoweredAtomicStore64
+ OpLOONG64LoweredAtomicStorezero32
+ OpLOONG64LoweredAtomicStorezero64
+ OpLOONG64LoweredAtomicExchange32
+ OpLOONG64LoweredAtomicExchange64
+ OpLOONG64LoweredAtomicAdd32
+ OpLOONG64LoweredAtomicAdd64
+ OpLOONG64LoweredAtomicAddconst32
+ OpLOONG64LoweredAtomicAddconst64
+ OpLOONG64LoweredAtomicCas32
+ OpLOONG64LoweredAtomicCas64
+ OpLOONG64LoweredNilCheck
+ OpLOONG64FPFlagTrue
+ OpLOONG64FPFlagFalse
+ OpLOONG64LoweredGetClosurePtr
+ OpLOONG64LoweredGetCallerSP
+ OpLOONG64LoweredGetCallerPC
+ OpLOONG64LoweredWB
+ OpLOONG64LoweredPanicBoundsA
+ OpLOONG64LoweredPanicBoundsB
+ OpLOONG64LoweredPanicBoundsC
+
+ OpMIPSADD
+ OpMIPSADDconst
+ OpMIPSSUB
+ OpMIPSSUBconst
+ OpMIPSMUL
+ OpMIPSMULT
+ OpMIPSMULTU
+ OpMIPSDIV
+ OpMIPSDIVU
+ OpMIPSADDF
+ OpMIPSADDD
+ OpMIPSSUBF
+ OpMIPSSUBD
+ OpMIPSMULF
+ OpMIPSMULD
+ OpMIPSDIVF
+ OpMIPSDIVD
+ OpMIPSAND
+ OpMIPSANDconst
+ OpMIPSOR
+ OpMIPSORconst
+ OpMIPSXOR
+ OpMIPSXORconst
+ OpMIPSNOR
+ OpMIPSNORconst
+ OpMIPSNEG
+ OpMIPSNEGF
+ OpMIPSNEGD
+ OpMIPSABSD
+ OpMIPSSQRTD
+ OpMIPSSQRTF
+ OpMIPSSLL
+ OpMIPSSLLconst
+ OpMIPSSRL
+ OpMIPSSRLconst
+ OpMIPSSRA
+ OpMIPSSRAconst
+ OpMIPSCLZ
+ OpMIPSSGT
+ OpMIPSSGTconst
+ OpMIPSSGTzero
+ OpMIPSSGTU
+ OpMIPSSGTUconst
+ OpMIPSSGTUzero
+ OpMIPSCMPEQF
+ OpMIPSCMPEQD
+ OpMIPSCMPGEF
+ OpMIPSCMPGED
+ OpMIPSCMPGTF
+ OpMIPSCMPGTD
+ OpMIPSMOVWconst
+ OpMIPSMOVFconst
+ OpMIPSMOVDconst
+ OpMIPSMOVWaddr
+ OpMIPSMOVBload
+ OpMIPSMOVBUload
+ OpMIPSMOVHload
+ OpMIPSMOVHUload
+ OpMIPSMOVWload
+ OpMIPSMOVFload
+ OpMIPSMOVDload
+ OpMIPSMOVBstore
+ OpMIPSMOVHstore
+ OpMIPSMOVWstore
+ OpMIPSMOVFstore
+ OpMIPSMOVDstore
+ OpMIPSMOVBstorezero
+ OpMIPSMOVHstorezero
+ OpMIPSMOVWstorezero
+ OpMIPSMOVWfpgp
+ OpMIPSMOVWgpfp
+ OpMIPSMOVBreg
+ OpMIPSMOVBUreg
+ OpMIPSMOVHreg
+ OpMIPSMOVHUreg
+ OpMIPSMOVWreg
+ OpMIPSMOVWnop
+ OpMIPSCMOVZ
+ OpMIPSCMOVZzero
+ OpMIPSMOVWF
+ OpMIPSMOVWD
+ OpMIPSTRUNCFW
+ OpMIPSTRUNCDW
+ OpMIPSMOVFD
+ OpMIPSMOVDF
+ OpMIPSCALLstatic
+ OpMIPSCALLtail
+ OpMIPSCALLclosure
+ OpMIPSCALLinter
+ OpMIPSLoweredAtomicLoad8
+ OpMIPSLoweredAtomicLoad32
+ OpMIPSLoweredAtomicStore8
+ OpMIPSLoweredAtomicStore32
+ OpMIPSLoweredAtomicStorezero
+ OpMIPSLoweredAtomicExchange
+ OpMIPSLoweredAtomicAdd
+ OpMIPSLoweredAtomicAddconst
+ OpMIPSLoweredAtomicCas
+ OpMIPSLoweredAtomicAnd
+ OpMIPSLoweredAtomicOr
+ OpMIPSLoweredZero
+ OpMIPSLoweredMove
+ OpMIPSLoweredNilCheck
+ OpMIPSFPFlagTrue
+ OpMIPSFPFlagFalse
+ OpMIPSLoweredGetClosurePtr
+ OpMIPSLoweredGetCallerSP
+ OpMIPSLoweredGetCallerPC
+ OpMIPSLoweredWB
+ OpMIPSLoweredPanicBoundsA
+ OpMIPSLoweredPanicBoundsB
+ OpMIPSLoweredPanicBoundsC
+ OpMIPSLoweredPanicExtendA
+ OpMIPSLoweredPanicExtendB
+ OpMIPSLoweredPanicExtendC
+
+ OpMIPS64ADDV
+ OpMIPS64ADDVconst
+ OpMIPS64SUBV
+ OpMIPS64SUBVconst
+ OpMIPS64MULV
+ OpMIPS64MULVU
+ OpMIPS64DIVV
+ OpMIPS64DIVVU
+ OpMIPS64ADDF
+ OpMIPS64ADDD
+ OpMIPS64SUBF
+ OpMIPS64SUBD
+ OpMIPS64MULF
+ OpMIPS64MULD
+ OpMIPS64DIVF
+ OpMIPS64DIVD
+ OpMIPS64AND
+ OpMIPS64ANDconst
+ OpMIPS64OR
+ OpMIPS64ORconst
+ OpMIPS64XOR
+ OpMIPS64XORconst
+ OpMIPS64NOR
+ OpMIPS64NORconst
+ OpMIPS64NEGV
+ OpMIPS64NEGF
+ OpMIPS64NEGD
+ OpMIPS64ABSD
+ OpMIPS64SQRTD
+ OpMIPS64SQRTF
+ OpMIPS64SLLV
+ OpMIPS64SLLVconst
+ OpMIPS64SRLV
+ OpMIPS64SRLVconst
+ OpMIPS64SRAV
+ OpMIPS64SRAVconst
+ OpMIPS64SGT
+ OpMIPS64SGTconst
+ OpMIPS64SGTU
+ OpMIPS64SGTUconst
+ OpMIPS64CMPEQF
+ OpMIPS64CMPEQD
+ OpMIPS64CMPGEF
+ OpMIPS64CMPGED
+ OpMIPS64CMPGTF
+ OpMIPS64CMPGTD
+ OpMIPS64MOVVconst
+ OpMIPS64MOVFconst
+ OpMIPS64MOVDconst
+ OpMIPS64MOVVaddr
+ OpMIPS64MOVBload
+ OpMIPS64MOVBUload
+ OpMIPS64MOVHload
+ OpMIPS64MOVHUload
+ OpMIPS64MOVWload
+ OpMIPS64MOVWUload
+ OpMIPS64MOVVload
+ OpMIPS64MOVFload
+ OpMIPS64MOVDload
+ OpMIPS64MOVBstore
+ OpMIPS64MOVHstore
+ OpMIPS64MOVWstore
+ OpMIPS64MOVVstore
+ OpMIPS64MOVFstore
+ OpMIPS64MOVDstore
+ OpMIPS64MOVBstorezero
+ OpMIPS64MOVHstorezero
+ OpMIPS64MOVWstorezero
+ OpMIPS64MOVVstorezero
+ OpMIPS64MOVWfpgp
+ OpMIPS64MOVWgpfp
+ OpMIPS64MOVVfpgp
+ OpMIPS64MOVVgpfp
+ OpMIPS64MOVBreg
+ OpMIPS64MOVBUreg
+ OpMIPS64MOVHreg
+ OpMIPS64MOVHUreg
+ OpMIPS64MOVWreg
+ OpMIPS64MOVWUreg
+ OpMIPS64MOVVreg
+ OpMIPS64MOVVnop
+ OpMIPS64MOVWF
+ OpMIPS64MOVWD
+ OpMIPS64MOVVF
+ OpMIPS64MOVVD
+ OpMIPS64TRUNCFW
+ OpMIPS64TRUNCDW
+ OpMIPS64TRUNCFV
+ OpMIPS64TRUNCDV
+ OpMIPS64MOVFD
+ OpMIPS64MOVDF
+ OpMIPS64CALLstatic
+ OpMIPS64CALLtail
+ OpMIPS64CALLclosure
+ OpMIPS64CALLinter
+ OpMIPS64DUFFZERO
+ OpMIPS64DUFFCOPY
+ OpMIPS64LoweredZero
+ OpMIPS64LoweredMove
+ OpMIPS64LoweredAtomicAnd32
+ OpMIPS64LoweredAtomicOr32
+ OpMIPS64LoweredAtomicLoad8
+ OpMIPS64LoweredAtomicLoad32
+ OpMIPS64LoweredAtomicLoad64
+ OpMIPS64LoweredAtomicStore8
+ OpMIPS64LoweredAtomicStore32
+ OpMIPS64LoweredAtomicStore64
+ OpMIPS64LoweredAtomicStorezero32
+ OpMIPS64LoweredAtomicStorezero64
+ OpMIPS64LoweredAtomicExchange32
+ OpMIPS64LoweredAtomicExchange64
+ OpMIPS64LoweredAtomicAdd32
+ OpMIPS64LoweredAtomicAdd64
+ OpMIPS64LoweredAtomicAddconst32
+ OpMIPS64LoweredAtomicAddconst64
+ OpMIPS64LoweredAtomicCas32
+ OpMIPS64LoweredAtomicCas64
+ OpMIPS64LoweredNilCheck
+ OpMIPS64FPFlagTrue
+ OpMIPS64FPFlagFalse
+ OpMIPS64LoweredGetClosurePtr
+ OpMIPS64LoweredGetCallerSP
+ OpMIPS64LoweredGetCallerPC
+ OpMIPS64LoweredWB
+ OpMIPS64LoweredPanicBoundsA
+ OpMIPS64LoweredPanicBoundsB
+ OpMIPS64LoweredPanicBoundsC
+
+ OpPPC64ADD
+ OpPPC64ADDCC
+ OpPPC64ADDconst
+ OpPPC64ADDCCconst
+ OpPPC64FADD
+ OpPPC64FADDS
+ OpPPC64SUB
+ OpPPC64SUBCC
+ OpPPC64SUBFCconst
+ OpPPC64FSUB
+ OpPPC64FSUBS
+ OpPPC64MULLD
+ OpPPC64MULLW
+ OpPPC64MULLDconst
+ OpPPC64MULLWconst
+ OpPPC64MADDLD
+ OpPPC64MULHD
+ OpPPC64MULHW
+ OpPPC64MULHDU
+ OpPPC64MULHWU
+ OpPPC64FMUL
+ OpPPC64FMULS
+ OpPPC64FMADD
+ OpPPC64FMADDS
+ OpPPC64FMSUB
+ OpPPC64FMSUBS
+ OpPPC64SRAD
+ OpPPC64SRAW
+ OpPPC64SRD
+ OpPPC64SRW
+ OpPPC64SLD
+ OpPPC64SLW
+ OpPPC64ROTL
+ OpPPC64ROTLW
+ OpPPC64CLRLSLWI
+ OpPPC64CLRLSLDI
+ OpPPC64ADDC
+ OpPPC64SUBC
+ OpPPC64ADDCconst
+ OpPPC64SUBCconst
+ OpPPC64ADDE
+ OpPPC64SUBE
+ OpPPC64ADDZEzero
+ OpPPC64SUBZEzero
+ OpPPC64SRADconst
+ OpPPC64SRAWconst
+ OpPPC64SRDconst
+ OpPPC64SRWconst
+ OpPPC64SLDconst
+ OpPPC64SLWconst
+ OpPPC64ROTLconst
+ OpPPC64ROTLWconst
+ OpPPC64EXTSWSLconst
+ OpPPC64RLWINM
+ OpPPC64RLWNM
+ OpPPC64RLWMI
+ OpPPC64RLDICL
+ OpPPC64RLDICR
+ OpPPC64CNTLZD
+ OpPPC64CNTLZDCC
+ OpPPC64CNTLZW
+ OpPPC64CNTTZD
+ OpPPC64CNTTZW
+ OpPPC64POPCNTD
+ OpPPC64POPCNTW
+ OpPPC64POPCNTB
+ OpPPC64FDIV
+ OpPPC64FDIVS
+ OpPPC64DIVD
+ OpPPC64DIVW
+ OpPPC64DIVDU
+ OpPPC64DIVWU
+ OpPPC64MODUD
+ OpPPC64MODSD
+ OpPPC64MODUW
+ OpPPC64MODSW
+ OpPPC64FCTIDZ
+ OpPPC64FCTIWZ
+ OpPPC64FCFID
+ OpPPC64FCFIDS
+ OpPPC64FRSP
+ OpPPC64MFVSRD
+ OpPPC64MTVSRD
+ OpPPC64AND
+ OpPPC64ANDN
+ OpPPC64ANDNCC
+ OpPPC64ANDCC
+ OpPPC64OR
+ OpPPC64ORN
+ OpPPC64ORCC
+ OpPPC64NOR
+ OpPPC64NORCC
+ OpPPC64XOR
+ OpPPC64XORCC
+ OpPPC64EQV
+ OpPPC64NEG
+ OpPPC64NEGCC
+ OpPPC64BRD
+ OpPPC64BRW
+ OpPPC64BRH
+ OpPPC64FNEG
+ OpPPC64FSQRT
+ OpPPC64FSQRTS
+ OpPPC64FFLOOR
+ OpPPC64FCEIL
+ OpPPC64FTRUNC
+ OpPPC64FROUND
+ OpPPC64FABS
+ OpPPC64FNABS
+ OpPPC64FCPSGN
+ OpPPC64ORconst
+ OpPPC64XORconst
+ OpPPC64ANDCCconst
+ OpPPC64MOVBreg
+ OpPPC64MOVBZreg
+ OpPPC64MOVHreg
+ OpPPC64MOVHZreg
+ OpPPC64MOVWreg
+ OpPPC64MOVWZreg
+ OpPPC64MOVBZload
+ OpPPC64MOVHload
+ OpPPC64MOVHZload
+ OpPPC64MOVWload
+ OpPPC64MOVWZload
+ OpPPC64MOVDload
+ OpPPC64MOVDBRload
+ OpPPC64MOVWBRload
+ OpPPC64MOVHBRload
+ OpPPC64MOVBZloadidx
+ OpPPC64MOVHloadidx
+ OpPPC64MOVHZloadidx
+ OpPPC64MOVWloadidx
+ OpPPC64MOVWZloadidx
+ OpPPC64MOVDloadidx
+ OpPPC64MOVHBRloadidx
+ OpPPC64MOVWBRloadidx
+ OpPPC64MOVDBRloadidx
+ OpPPC64FMOVDloadidx
+ OpPPC64FMOVSloadidx
+ OpPPC64DCBT
+ OpPPC64MOVDBRstore
+ OpPPC64MOVWBRstore
+ OpPPC64MOVHBRstore
+ OpPPC64FMOVDload
+ OpPPC64FMOVSload
+ OpPPC64MOVBstore
+ OpPPC64MOVHstore
+ OpPPC64MOVWstore
+ OpPPC64MOVDstore
+ OpPPC64FMOVDstore
+ OpPPC64FMOVSstore
+ OpPPC64MOVBstoreidx
+ OpPPC64MOVHstoreidx
+ OpPPC64MOVWstoreidx
+ OpPPC64MOVDstoreidx
+ OpPPC64FMOVDstoreidx
+ OpPPC64FMOVSstoreidx
+ OpPPC64MOVHBRstoreidx
+ OpPPC64MOVWBRstoreidx
+ OpPPC64MOVDBRstoreidx
+ OpPPC64MOVBstorezero
+ OpPPC64MOVHstorezero
+ OpPPC64MOVWstorezero
+ OpPPC64MOVDstorezero
+ OpPPC64MOVDaddr
+ OpPPC64MOVDconst
+ OpPPC64FMOVDconst
+ OpPPC64FMOVSconst
+ OpPPC64FCMPU
+ OpPPC64CMP
+ OpPPC64CMPU
+ OpPPC64CMPW
+ OpPPC64CMPWU
+ OpPPC64CMPconst
+ OpPPC64CMPUconst
+ OpPPC64CMPWconst
+ OpPPC64CMPWUconst
+ OpPPC64ISEL
+ OpPPC64ISELZ
+ OpPPC64SETBC
+ OpPPC64SETBCR
+ OpPPC64Equal
+ OpPPC64NotEqual
+ OpPPC64LessThan
+ OpPPC64FLessThan
+ OpPPC64LessEqual
+ OpPPC64FLessEqual
+ OpPPC64GreaterThan
+ OpPPC64FGreaterThan
+ OpPPC64GreaterEqual
+ OpPPC64FGreaterEqual
+ OpPPC64LoweredGetClosurePtr
+ OpPPC64LoweredGetCallerSP
+ OpPPC64LoweredGetCallerPC
+ OpPPC64LoweredNilCheck
+ OpPPC64LoweredRound32F
+ OpPPC64LoweredRound64F
+ OpPPC64CALLstatic
+ OpPPC64CALLtail
+ OpPPC64CALLclosure
+ OpPPC64CALLinter
+ OpPPC64LoweredZero
+ OpPPC64LoweredZeroShort
+ OpPPC64LoweredQuadZeroShort
+ OpPPC64LoweredQuadZero
+ OpPPC64LoweredMove
+ OpPPC64LoweredMoveShort
+ OpPPC64LoweredQuadMove
+ OpPPC64LoweredQuadMoveShort
+ OpPPC64LoweredAtomicStore8
+ OpPPC64LoweredAtomicStore32
+ OpPPC64LoweredAtomicStore64
+ OpPPC64LoweredAtomicLoad8
+ OpPPC64LoweredAtomicLoad32
+ OpPPC64LoweredAtomicLoad64
+ OpPPC64LoweredAtomicLoadPtr
+ OpPPC64LoweredAtomicAdd32
+ OpPPC64LoweredAtomicAdd64
+ OpPPC64LoweredAtomicExchange32
+ OpPPC64LoweredAtomicExchange64
+ OpPPC64LoweredAtomicCas64
+ OpPPC64LoweredAtomicCas32
+ OpPPC64LoweredAtomicAnd8
+ OpPPC64LoweredAtomicAnd32
+ OpPPC64LoweredAtomicOr8
+ OpPPC64LoweredAtomicOr32
+ OpPPC64LoweredWB
+ OpPPC64LoweredPubBarrier
+ OpPPC64LoweredPanicBoundsA
+ OpPPC64LoweredPanicBoundsB
+ OpPPC64LoweredPanicBoundsC
+ OpPPC64InvertFlags
+ OpPPC64FlagEQ
+ OpPPC64FlagLT
+ OpPPC64FlagGT
+
+ OpRISCV64ADD
+ OpRISCV64ADDI
+ OpRISCV64ADDIW
+ OpRISCV64NEG
+ OpRISCV64NEGW
+ OpRISCV64SUB
+ OpRISCV64SUBW
+ OpRISCV64MUL
+ OpRISCV64MULW
+ OpRISCV64MULH
+ OpRISCV64MULHU
+ OpRISCV64LoweredMuluhilo
+ OpRISCV64LoweredMuluover
+ OpRISCV64DIV
+ OpRISCV64DIVU
+ OpRISCV64DIVW
+ OpRISCV64DIVUW
+ OpRISCV64REM
+ OpRISCV64REMU
+ OpRISCV64REMW
+ OpRISCV64REMUW
+ OpRISCV64MOVaddr
+ OpRISCV64MOVDconst
+ OpRISCV64MOVBload
+ OpRISCV64MOVHload
+ OpRISCV64MOVWload
+ OpRISCV64MOVDload
+ OpRISCV64MOVBUload
+ OpRISCV64MOVHUload
+ OpRISCV64MOVWUload
+ OpRISCV64MOVBstore
+ OpRISCV64MOVHstore
+ OpRISCV64MOVWstore
+ OpRISCV64MOVDstore
+ OpRISCV64MOVBstorezero
+ OpRISCV64MOVHstorezero
+ OpRISCV64MOVWstorezero
+ OpRISCV64MOVDstorezero
+ OpRISCV64MOVBreg
+ OpRISCV64MOVHreg
+ OpRISCV64MOVWreg
+ OpRISCV64MOVDreg
+ OpRISCV64MOVBUreg
+ OpRISCV64MOVHUreg
+ OpRISCV64MOVWUreg
+ OpRISCV64MOVDnop
+ OpRISCV64SLL
+ OpRISCV64SRA
+ OpRISCV64SRAW
+ OpRISCV64SRL
+ OpRISCV64SRLW
+ OpRISCV64SLLI
+ OpRISCV64SRAI
+ OpRISCV64SRAIW
+ OpRISCV64SRLI
+ OpRISCV64SRLIW
+ OpRISCV64XOR
+ OpRISCV64XORI
+ OpRISCV64OR
+ OpRISCV64ORI
+ OpRISCV64AND
+ OpRISCV64ANDI
+ OpRISCV64NOT
+ OpRISCV64SEQZ
+ OpRISCV64SNEZ
+ OpRISCV64SLT
+ OpRISCV64SLTI
+ OpRISCV64SLTU
+ OpRISCV64SLTIU
+ OpRISCV64LoweredRound32F
+ OpRISCV64LoweredRound64F
+ OpRISCV64CALLstatic
+ OpRISCV64CALLtail
+ OpRISCV64CALLclosure
+ OpRISCV64CALLinter
+ OpRISCV64DUFFZERO
+ OpRISCV64DUFFCOPY
+ OpRISCV64LoweredZero
+ OpRISCV64LoweredMove
+ OpRISCV64LoweredAtomicLoad8
+ OpRISCV64LoweredAtomicLoad32
+ OpRISCV64LoweredAtomicLoad64
+ OpRISCV64LoweredAtomicStore8
+ OpRISCV64LoweredAtomicStore32
+ OpRISCV64LoweredAtomicStore64
+ OpRISCV64LoweredAtomicExchange32
+ OpRISCV64LoweredAtomicExchange64
+ OpRISCV64LoweredAtomicAdd32
+ OpRISCV64LoweredAtomicAdd64
+ OpRISCV64LoweredAtomicCas32
+ OpRISCV64LoweredAtomicCas64
+ OpRISCV64LoweredAtomicAnd32
+ OpRISCV64LoweredAtomicOr32
+ OpRISCV64LoweredNilCheck
+ OpRISCV64LoweredGetClosurePtr
+ OpRISCV64LoweredGetCallerSP
+ OpRISCV64LoweredGetCallerPC
+ OpRISCV64LoweredWB
+ OpRISCV64LoweredPubBarrier
+ OpRISCV64LoweredPanicBoundsA
+ OpRISCV64LoweredPanicBoundsB
+ OpRISCV64LoweredPanicBoundsC
+ OpRISCV64FADDS
+ OpRISCV64FSUBS
+ OpRISCV64FMULS
+ OpRISCV64FDIVS
+ OpRISCV64FMADDS
+ OpRISCV64FMSUBS
+ OpRISCV64FNMADDS
+ OpRISCV64FNMSUBS
+ OpRISCV64FSQRTS
+ OpRISCV64FNEGS
+ OpRISCV64FMVSX
+ OpRISCV64FCVTSW
+ OpRISCV64FCVTSL
+ OpRISCV64FCVTWS
+ OpRISCV64FCVTLS
+ OpRISCV64FMOVWload
+ OpRISCV64FMOVWstore
+ OpRISCV64FEQS
+ OpRISCV64FNES
+ OpRISCV64FLTS
+ OpRISCV64FLES
+ OpRISCV64FADDD
+ OpRISCV64FSUBD
+ OpRISCV64FMULD
+ OpRISCV64FDIVD
+ OpRISCV64FMADDD
+ OpRISCV64FMSUBD
+ OpRISCV64FNMADDD
+ OpRISCV64FNMSUBD
+ OpRISCV64FSQRTD
+ OpRISCV64FNEGD
+ OpRISCV64FABSD
+ OpRISCV64FSGNJD
+ OpRISCV64FMVDX
+ OpRISCV64FCVTDW
+ OpRISCV64FCVTDL
+ OpRISCV64FCVTWD
+ OpRISCV64FCVTLD
+ OpRISCV64FCVTDS
+ OpRISCV64FCVTSD
+ OpRISCV64FMOVDload
+ OpRISCV64FMOVDstore
+ OpRISCV64FEQD
+ OpRISCV64FNED
+ OpRISCV64FLTD
+ OpRISCV64FLED
+
+ OpS390XFADDS
+ OpS390XFADD
+ OpS390XFSUBS
+ OpS390XFSUB
+ OpS390XFMULS
+ OpS390XFMUL
+ OpS390XFDIVS
+ OpS390XFDIV
+ OpS390XFNEGS
+ OpS390XFNEG
+ OpS390XFMADDS
+ OpS390XFMADD
+ OpS390XFMSUBS
+ OpS390XFMSUB
+ OpS390XLPDFR
+ OpS390XLNDFR
+ OpS390XCPSDR
+ OpS390XFIDBR
+ OpS390XFMOVSload
+ OpS390XFMOVDload
+ OpS390XFMOVSconst
+ OpS390XFMOVDconst
+ OpS390XFMOVSloadidx
+ OpS390XFMOVDloadidx
+ OpS390XFMOVSstore
+ OpS390XFMOVDstore
+ OpS390XFMOVSstoreidx
+ OpS390XFMOVDstoreidx
+ OpS390XADD
+ OpS390XADDW
+ OpS390XADDconst
+ OpS390XADDWconst
+ OpS390XADDload
+ OpS390XADDWload
+ OpS390XSUB
+ OpS390XSUBW
+ OpS390XSUBconst
+ OpS390XSUBWconst
+ OpS390XSUBload
+ OpS390XSUBWload
+ OpS390XMULLD
+ OpS390XMULLW
+ OpS390XMULLDconst
+ OpS390XMULLWconst
+ OpS390XMULLDload
+ OpS390XMULLWload
+ OpS390XMULHD
+ OpS390XMULHDU
+ OpS390XDIVD
+ OpS390XDIVW
+ OpS390XDIVDU
+ OpS390XDIVWU
+ OpS390XMODD
+ OpS390XMODW
+ OpS390XMODDU
+ OpS390XMODWU
+ OpS390XAND
+ OpS390XANDW
+ OpS390XANDconst
+ OpS390XANDWconst
+ OpS390XANDload
+ OpS390XANDWload
+ OpS390XOR
+ OpS390XORW
+ OpS390XORconst
+ OpS390XORWconst
+ OpS390XORload
+ OpS390XORWload
+ OpS390XXOR
+ OpS390XXORW
+ OpS390XXORconst
+ OpS390XXORWconst
+ OpS390XXORload
+ OpS390XXORWload
+ OpS390XADDC
+ OpS390XADDCconst
+ OpS390XADDE
+ OpS390XSUBC
+ OpS390XSUBE
+ OpS390XCMP
+ OpS390XCMPW
+ OpS390XCMPU
+ OpS390XCMPWU
+ OpS390XCMPconst
+ OpS390XCMPWconst
+ OpS390XCMPUconst
+ OpS390XCMPWUconst
+ OpS390XFCMPS
+ OpS390XFCMP
+ OpS390XLTDBR
+ OpS390XLTEBR
+ OpS390XSLD
+ OpS390XSLW
+ OpS390XSLDconst
+ OpS390XSLWconst
+ OpS390XSRD
+ OpS390XSRW
+ OpS390XSRDconst
+ OpS390XSRWconst
+ OpS390XSRAD
+ OpS390XSRAW
+ OpS390XSRADconst
+ OpS390XSRAWconst
+ OpS390XRLLG
+ OpS390XRLL
+ OpS390XRLLconst
+ OpS390XRXSBG
+ OpS390XRISBGZ
+ OpS390XNEG
+ OpS390XNEGW
+ OpS390XNOT
+ OpS390XNOTW
+ OpS390XFSQRT
+ OpS390XFSQRTS
+ OpS390XLOCGR
+ OpS390XMOVBreg
+ OpS390XMOVBZreg
+ OpS390XMOVHreg
+ OpS390XMOVHZreg
+ OpS390XMOVWreg
+ OpS390XMOVWZreg
+ OpS390XMOVDconst
+ OpS390XLDGR
+ OpS390XLGDR
+ OpS390XCFDBRA
+ OpS390XCGDBRA
+ OpS390XCFEBRA
+ OpS390XCGEBRA
+ OpS390XCEFBRA
+ OpS390XCDFBRA
+ OpS390XCEGBRA
+ OpS390XCDGBRA
+ OpS390XCLFEBR
+ OpS390XCLFDBR
+ OpS390XCLGEBR
+ OpS390XCLGDBR
+ OpS390XCELFBR
+ OpS390XCDLFBR
+ OpS390XCELGBR
+ OpS390XCDLGBR
+ OpS390XLEDBR
+ OpS390XLDEBR
+ OpS390XMOVDaddr
+ OpS390XMOVDaddridx
+ OpS390XMOVBZload
+ OpS390XMOVBload
+ OpS390XMOVHZload
+ OpS390XMOVHload
+ OpS390XMOVWZload
+ OpS390XMOVWload
+ OpS390XMOVDload
+ OpS390XMOVWBR
+ OpS390XMOVDBR
+ OpS390XMOVHBRload
+ OpS390XMOVWBRload
+ OpS390XMOVDBRload
+ OpS390XMOVBstore
+ OpS390XMOVHstore
+ OpS390XMOVWstore
+ OpS390XMOVDstore
+ OpS390XMOVHBRstore
+ OpS390XMOVWBRstore
+ OpS390XMOVDBRstore
+ OpS390XMVC
+ OpS390XMOVBZloadidx
+ OpS390XMOVBloadidx
+ OpS390XMOVHZloadidx
+ OpS390XMOVHloadidx
+ OpS390XMOVWZloadidx
+ OpS390XMOVWloadidx
+ OpS390XMOVDloadidx
+ OpS390XMOVHBRloadidx
+ OpS390XMOVWBRloadidx
+ OpS390XMOVDBRloadidx
+ OpS390XMOVBstoreidx
+ OpS390XMOVHstoreidx
+ OpS390XMOVWstoreidx
+ OpS390XMOVDstoreidx
+ OpS390XMOVHBRstoreidx
+ OpS390XMOVWBRstoreidx
+ OpS390XMOVDBRstoreidx
+ OpS390XMOVBstoreconst
+ OpS390XMOVHstoreconst
+ OpS390XMOVWstoreconst
+ OpS390XMOVDstoreconst
+ OpS390XCLEAR
+ OpS390XCALLstatic
+ OpS390XCALLtail
+ OpS390XCALLclosure
+ OpS390XCALLinter
+ OpS390XInvertFlags
+ OpS390XLoweredGetG
+ OpS390XLoweredGetClosurePtr
+ OpS390XLoweredGetCallerSP
+ OpS390XLoweredGetCallerPC
+ OpS390XLoweredNilCheck
+ OpS390XLoweredRound32F
+ OpS390XLoweredRound64F
+ OpS390XLoweredWB
+ OpS390XLoweredPanicBoundsA
+ OpS390XLoweredPanicBoundsB
+ OpS390XLoweredPanicBoundsC
+ OpS390XFlagEQ
+ OpS390XFlagLT
+ OpS390XFlagGT
+ OpS390XFlagOV
+ OpS390XSYNC
+ OpS390XMOVBZatomicload
+ OpS390XMOVWZatomicload
+ OpS390XMOVDatomicload
+ OpS390XMOVBatomicstore
+ OpS390XMOVWatomicstore
+ OpS390XMOVDatomicstore
+ OpS390XLAA
+ OpS390XLAAG
+ OpS390XAddTupleFirst32
+ OpS390XAddTupleFirst64
+ OpS390XLAN
+ OpS390XLANfloor
+ OpS390XLAO
+ OpS390XLAOfloor
+ OpS390XLoweredAtomicCas32
+ OpS390XLoweredAtomicCas64
+ OpS390XLoweredAtomicExchange32
+ OpS390XLoweredAtomicExchange64
+ OpS390XFLOGR
+ OpS390XPOPCNT
+ OpS390XMLGR
+ OpS390XSumBytes2
+ OpS390XSumBytes4
+ OpS390XSumBytes8
+ OpS390XSTMG2
+ OpS390XSTMG3
+ OpS390XSTMG4
+ OpS390XSTM2
+ OpS390XSTM3
+ OpS390XSTM4
+ OpS390XLoweredMove
+ OpS390XLoweredZero
+
+ OpWasmLoweredStaticCall
+ OpWasmLoweredTailCall
+ OpWasmLoweredClosureCall
+ OpWasmLoweredInterCall
+ OpWasmLoweredAddr
+ OpWasmLoweredMove
+ OpWasmLoweredZero
+ OpWasmLoweredGetClosurePtr
+ OpWasmLoweredGetCallerPC
+ OpWasmLoweredGetCallerSP
+ OpWasmLoweredNilCheck
+ OpWasmLoweredWB
+ OpWasmLoweredConvert
+ OpWasmSelect
+ OpWasmI64Load8U
+ OpWasmI64Load8S
+ OpWasmI64Load16U
+ OpWasmI64Load16S
+ OpWasmI64Load32U
+ OpWasmI64Load32S
+ OpWasmI64Load
+ OpWasmI64Store8
+ OpWasmI64Store16
+ OpWasmI64Store32
+ OpWasmI64Store
+ OpWasmF32Load
+ OpWasmF64Load
+ OpWasmF32Store
+ OpWasmF64Store
+ OpWasmI64Const
+ OpWasmF32Const
+ OpWasmF64Const
+ OpWasmI64Eqz
+ OpWasmI64Eq
+ OpWasmI64Ne
+ OpWasmI64LtS
+ OpWasmI64LtU
+ OpWasmI64GtS
+ OpWasmI64GtU
+ OpWasmI64LeS
+ OpWasmI64LeU
+ OpWasmI64GeS
+ OpWasmI64GeU
+ OpWasmF32Eq
+ OpWasmF32Ne
+ OpWasmF32Lt
+ OpWasmF32Gt
+ OpWasmF32Le
+ OpWasmF32Ge
+ OpWasmF64Eq
+ OpWasmF64Ne
+ OpWasmF64Lt
+ OpWasmF64Gt
+ OpWasmF64Le
+ OpWasmF64Ge
+ OpWasmI64Add
+ OpWasmI64AddConst
+ OpWasmI64Sub
+ OpWasmI64Mul
+ OpWasmI64DivS
+ OpWasmI64DivU
+ OpWasmI64RemS
+ OpWasmI64RemU
+ OpWasmI64And
+ OpWasmI64Or
+ OpWasmI64Xor
+ OpWasmI64Shl
+ OpWasmI64ShrS
+ OpWasmI64ShrU
+ OpWasmF32Neg
+ OpWasmF32Add
+ OpWasmF32Sub
+ OpWasmF32Mul
+ OpWasmF32Div
+ OpWasmF64Neg
+ OpWasmF64Add
+ OpWasmF64Sub
+ OpWasmF64Mul
+ OpWasmF64Div
+ OpWasmI64TruncSatF64S
+ OpWasmI64TruncSatF64U
+ OpWasmI64TruncSatF32S
+ OpWasmI64TruncSatF32U
+ OpWasmF32ConvertI64S
+ OpWasmF32ConvertI64U
+ OpWasmF64ConvertI64S
+ OpWasmF64ConvertI64U
+ OpWasmF32DemoteF64
+ OpWasmF64PromoteF32
+ OpWasmI64Extend8S
+ OpWasmI64Extend16S
+ OpWasmI64Extend32S
+ OpWasmF32Sqrt
+ OpWasmF32Trunc
+ OpWasmF32Ceil
+ OpWasmF32Floor
+ OpWasmF32Nearest
+ OpWasmF32Abs
+ OpWasmF32Copysign
+ OpWasmF64Sqrt
+ OpWasmF64Trunc
+ OpWasmF64Ceil
+ OpWasmF64Floor
+ OpWasmF64Nearest
+ OpWasmF64Abs
+ OpWasmF64Copysign
+ OpWasmI64Ctz
+ OpWasmI64Clz
+ OpWasmI32Rotl
+ OpWasmI64Rotl
+ OpWasmI64Popcnt
+
+ OpAdd8
+ OpAdd16
+ OpAdd32
+ OpAdd64
+ OpAddPtr
+ OpAdd32F
+ OpAdd64F
+ OpSub8
+ OpSub16
+ OpSub32
+ OpSub64
+ OpSubPtr
+ OpSub32F
+ OpSub64F
+ OpMul8
+ OpMul16
+ OpMul32
+ OpMul64
+ OpMul32F
+ OpMul64F
+ OpDiv32F
+ OpDiv64F
+ OpHmul32
+ OpHmul32u
+ OpHmul64
+ OpHmul64u
+ OpMul32uhilo
+ OpMul64uhilo
+ OpMul32uover
+ OpMul64uover
+ OpAvg32u
+ OpAvg64u
+ OpDiv8
+ OpDiv8u
+ OpDiv16
+ OpDiv16u
+ OpDiv32
+ OpDiv32u
+ OpDiv64
+ OpDiv64u
+ OpDiv128u
+ OpMod8
+ OpMod8u
+ OpMod16
+ OpMod16u
+ OpMod32
+ OpMod32u
+ OpMod64
+ OpMod64u
+ OpAnd8
+ OpAnd16
+ OpAnd32
+ OpAnd64
+ OpOr8
+ OpOr16
+ OpOr32
+ OpOr64
+ OpXor8
+ OpXor16
+ OpXor32
+ OpXor64
+ OpLsh8x8
+ OpLsh8x16
+ OpLsh8x32
+ OpLsh8x64
+ OpLsh16x8
+ OpLsh16x16
+ OpLsh16x32
+ OpLsh16x64
+ OpLsh32x8
+ OpLsh32x16
+ OpLsh32x32
+ OpLsh32x64
+ OpLsh64x8
+ OpLsh64x16
+ OpLsh64x32
+ OpLsh64x64
+ OpRsh8x8
+ OpRsh8x16
+ OpRsh8x32
+ OpRsh8x64
+ OpRsh16x8
+ OpRsh16x16
+ OpRsh16x32
+ OpRsh16x64
+ OpRsh32x8
+ OpRsh32x16
+ OpRsh32x32
+ OpRsh32x64
+ OpRsh64x8
+ OpRsh64x16
+ OpRsh64x32
+ OpRsh64x64
+ OpRsh8Ux8
+ OpRsh8Ux16
+ OpRsh8Ux32
+ OpRsh8Ux64
+ OpRsh16Ux8
+ OpRsh16Ux16
+ OpRsh16Ux32
+ OpRsh16Ux64
+ OpRsh32Ux8
+ OpRsh32Ux16
+ OpRsh32Ux32
+ OpRsh32Ux64
+ OpRsh64Ux8
+ OpRsh64Ux16
+ OpRsh64Ux32
+ OpRsh64Ux64
+ OpEq8
+ OpEq16
+ OpEq32
+ OpEq64
+ OpEqPtr
+ OpEqInter
+ OpEqSlice
+ OpEq32F
+ OpEq64F
+ OpNeq8
+ OpNeq16
+ OpNeq32
+ OpNeq64
+ OpNeqPtr
+ OpNeqInter
+ OpNeqSlice
+ OpNeq32F
+ OpNeq64F
+ OpLess8
+ OpLess8U
+ OpLess16
+ OpLess16U
+ OpLess32
+ OpLess32U
+ OpLess64
+ OpLess64U
+ OpLess32F
+ OpLess64F
+ OpLeq8
+ OpLeq8U
+ OpLeq16
+ OpLeq16U
+ OpLeq32
+ OpLeq32U
+ OpLeq64
+ OpLeq64U
+ OpLeq32F
+ OpLeq64F
+ OpCondSelect
+ OpAndB
+ OpOrB
+ OpEqB
+ OpNeqB
+ OpNot
+ OpNeg8
+ OpNeg16
+ OpNeg32
+ OpNeg64
+ OpNeg32F
+ OpNeg64F
+ OpCom8
+ OpCom16
+ OpCom32
+ OpCom64
+ OpCtz8
+ OpCtz16
+ OpCtz32
+ OpCtz64
+ OpCtz8NonZero
+ OpCtz16NonZero
+ OpCtz32NonZero
+ OpCtz64NonZero
+ OpBitLen8
+ OpBitLen16
+ OpBitLen32
+ OpBitLen64
+ OpBswap16
+ OpBswap32
+ OpBswap64
+ OpBitRev8
+ OpBitRev16
+ OpBitRev32
+ OpBitRev64
+ OpPopCount8
+ OpPopCount16
+ OpPopCount32
+ OpPopCount64
+ OpRotateLeft64
+ OpRotateLeft32
+ OpRotateLeft16
+ OpRotateLeft8
+ OpSqrt
+ OpSqrt32
+ OpFloor
+ OpCeil
+ OpTrunc
+ OpRound
+ OpRoundToEven
+ OpAbs
+ OpCopysign
+ OpMin64F
+ OpMin32F
+ OpMax64F
+ OpMax32F
+ OpFMA
+ OpPhi
+ OpCopy
+ OpConvert
+ OpConstBool
+ OpConstString
+ OpConstNil
+ OpConst8
+ OpConst16
+ OpConst32
+ OpConst64
+ OpConst32F
+ OpConst64F
+ OpConstInterface
+ OpConstSlice
+ OpInitMem
+ OpArg
+ OpArgIntReg
+ OpArgFloatReg
+ OpAddr
+ OpLocalAddr
+ OpSP
+ OpSB
+ OpSPanchored
+ OpLoad
+ OpDereference
+ OpStore
+ OpMove
+ OpZero
+ OpStoreWB
+ OpMoveWB
+ OpZeroWB
+ OpWBend
+ OpWB
+ OpHasCPUFeature
+ OpPanicBounds
+ OpPanicExtend
+ OpClosureCall
+ OpStaticCall
+ OpInterCall
+ OpTailCall
+ OpClosureLECall
+ OpStaticLECall
+ OpInterLECall
+ OpTailLECall
+ OpSignExt8to16
+ OpSignExt8to32
+ OpSignExt8to64
+ OpSignExt16to32
+ OpSignExt16to64
+ OpSignExt32to64
+ OpZeroExt8to16
+ OpZeroExt8to32
+ OpZeroExt8to64
+ OpZeroExt16to32
+ OpZeroExt16to64
+ OpZeroExt32to64
+ OpTrunc16to8
+ OpTrunc32to8
+ OpTrunc32to16
+ OpTrunc64to8
+ OpTrunc64to16
+ OpTrunc64to32
+ OpCvt32to32F
+ OpCvt32to64F
+ OpCvt64to32F
+ OpCvt64to64F
+ OpCvt32Fto32
+ OpCvt32Fto64
+ OpCvt64Fto32
+ OpCvt64Fto64
+ OpCvt32Fto64F
+ OpCvt64Fto32F
+ OpCvtBoolToUint8
+ OpRound32F
+ OpRound64F
+ OpIsNonNil
+ OpIsInBounds
+ OpIsSliceInBounds
+ OpNilCheck
+ OpGetG
+ OpGetClosurePtr
+ OpGetCallerPC
+ OpGetCallerSP
+ OpPtrIndex
+ OpOffPtr
+ OpSliceMake
+ OpSlicePtr
+ OpSliceLen
+ OpSliceCap
+ OpSlicePtrUnchecked
+ OpComplexMake
+ OpComplexReal
+ OpComplexImag
+ OpStringMake
+ OpStringPtr
+ OpStringLen
+ OpIMake
+ OpITab
+ OpIData
+ OpStructMake0
+ OpStructMake1
+ OpStructMake2
+ OpStructMake3
+ OpStructMake4
+ OpStructSelect
+ OpArrayMake0
+ OpArrayMake1
+ OpArraySelect
+ OpStoreReg
+ OpLoadReg
+ OpFwdRef
+ OpUnknown
+ OpVarDef
+ OpVarLive
+ OpKeepAlive
+ OpInlMark
+ OpInt64Make
+ OpInt64Hi
+ OpInt64Lo
+ OpAdd32carry
+ OpAdd32withcarry
+ OpSub32carry
+ OpSub32withcarry
+ OpAdd64carry
+ OpSub64borrow
+ OpSignmask
+ OpZeromask
+ OpSlicemask
+ OpSpectreIndex
+ OpSpectreSliceIndex
+ OpCvt32Uto32F
+ OpCvt32Uto64F
+ OpCvt32Fto32U
+ OpCvt64Fto32U
+ OpCvt64Uto32F
+ OpCvt64Uto64F
+ OpCvt32Fto64U
+ OpCvt64Fto64U
+ OpSelect0
+ OpSelect1
+ OpSelectN
+ OpSelectNAddr
+ OpMakeResult
+ OpAtomicLoad8
+ OpAtomicLoad32
+ OpAtomicLoad64
+ OpAtomicLoadPtr
+ OpAtomicLoadAcq32
+ OpAtomicLoadAcq64
+ OpAtomicStore8
+ OpAtomicStore32
+ OpAtomicStore64
+ OpAtomicStorePtrNoWB
+ OpAtomicStoreRel32
+ OpAtomicStoreRel64
+ OpAtomicExchange32
+ OpAtomicExchange64
+ OpAtomicAdd32
+ OpAtomicAdd64
+ OpAtomicCompareAndSwap32
+ OpAtomicCompareAndSwap64
+ OpAtomicCompareAndSwapRel32
+ OpAtomicAnd8
+ OpAtomicAnd32
+ OpAtomicOr8
+ OpAtomicOr32
+ OpAtomicAdd32Variant
+ OpAtomicAdd64Variant
+ OpAtomicExchange32Variant
+ OpAtomicExchange64Variant
+ OpAtomicCompareAndSwap32Variant
+ OpAtomicCompareAndSwap64Variant
+ OpAtomicAnd8Variant
+ OpAtomicAnd32Variant
+ OpAtomicOr8Variant
+ OpAtomicOr32Variant
+ OpPubBarrier
+ OpClobber
+ OpClobberReg
+ OpPrefetchCache
+ OpPrefetchCacheStreamed
+)
+
+var opcodeTable = [...]opInfo{
+ {name: "OpInvalid"},
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 239}, // AX CX DX BX BP SI DI
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCL",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLcarry",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBL",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3L,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MULLQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "AVGLU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MODL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredCtz32",
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SQRTSS",
+ argLen: 1,
+ asm: x86.ASQRTSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSX",
+ argLen: 1,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSX",
+ argLen: 1,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "REPSTOSL",
+ argLen: 4,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "REPMOVSL",
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 65280, // X0 X1 X2 X3 X4 X5 X6 X7
+ outputs: []outputInfo{
+ {0, 128}, // DI
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // CX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 4}, // DX
+ {2, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 2}, // CX
+ {2, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 1}, // AX
+ {2, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVSSconst1",
+ auxType: auxFloat32,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSDconst1",
+ auxType: auxFloat64,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSSconst2",
+ argLen: 1,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst2",
+ argLen: 1,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDQ",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3Q,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3L,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MULQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "HMULQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "AVGQU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "DIVQ",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "NEGLflags",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADCQ",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADCQconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AADCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQborrow",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SBBQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQconstborrow",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SBBQconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULQU2",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVQU2",
+ argLen: 3,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQ",
+ argLen: 2,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWloadidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstloadidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstloadidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstloadidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "BTL",
+ argLen: 2,
+ asm: x86.ABTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTQ",
+ argLen: 2,
+ asm: x86.ABTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ABTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ABTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "BTRQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "BTCQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "TESTQ",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRDQ",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLDQ",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "NEGQ",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "NOTQ",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANOTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSFQ",
+ argLen: 1,
+ asm: x86.ABSFQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSRQ",
+ argLen: 1,
+ asm: x86.ABSRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQF",
+ argLen: 3,
+ resultInArg0: true,
+ needIntTemp: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQF",
+ argLen: 3,
+ resultInArg0: true,
+ needIntTemp: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQF",
+ argLen: 3,
+ resultInArg0: true,
+ needIntTemp: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSWAPQ",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ABSWAPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "POPCNTQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.APOPCNTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "POPCNTL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.APOPCNTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SQRTSS",
+ argLen: 1,
+ asm: x86.ASQRTSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ROUNDSD",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.AROUNDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231SD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MINSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AMINSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MINSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AMINSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SBBQcarrymask",
+ argLen: 1,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETEQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETNEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETEQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETEQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETNEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETNE,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLT,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLE,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGT,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGE,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETHI,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAEstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCC,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ needIntTemp: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ needIntTemp: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSX",
+ argLen: 1,
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSX",
+ argLen: 1,
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSX",
+ argLen: 1,
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQZX",
+ argLen: 1,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SQ",
+ argLen: 1,
+ asm: x86.ACVTTSD2SQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SQ",
+ argLen: 1,
+ asm: x86.ACVTTSS2SQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSQ2SS",
+ argLen: 1,
+ asm: x86.ACVTSQ2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSQ2SD",
+ argLen: 1,
+ asm: x86.ACVTSQ2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVQi2f",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVQf2i",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLi2f",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVLf2i",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "POR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "LEAQ",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVOload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVOstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVOstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconstidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ },
+ clobbers: 128, // DI
+ },
+ },
+ {
+ name: "REPSTOSQ",
+ argLen: 4,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 65728, // SI DI X0
+ },
+ },
+ {
+ name: "REPMOVSQ",
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ outputs: []outputInfo{
+ {0, 2048}, // R11
+ },
+ },
+ },
+ {
+ name: "LoweredHasCPUFeature",
+ auxType: auxSym,
+ argLen: 0,
+ rematerializeable: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // CX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVBatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XCHGB",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XCHGL",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XCHGQ",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XADDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XADDQlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "AddTupleFirst32",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "AddTupleFirst64",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "CMPXCHGLlock",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.ACMPXCHGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // AX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPXCHGQlock",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.ACMPXCHGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // AX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDBlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORBlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "PrefetchT0",
+ argLen: 2,
+ hasSideEffects: true,
+ asm: x86.APREFETCHT0,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "PrefetchNTA",
+ argLen: 2,
+ hasSideEffects: true,
+ asm: x86.APREFETCHNTA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDNQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AANDNQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDNL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AANDNL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSIQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSIQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSIL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSIL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSMSKQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSMSKQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSMSKL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSMSKL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSRQ",
+ argLen: 1,
+ asm: x86.ABLSRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSRL",
+ argLen: 1,
+ asm: x86.ABLSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TZCNTQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ATZCNTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TZCNTL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ATZCNTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LZCNTQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ALZCNTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LZCNTL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ALZCNTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEQload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEQloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEQloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVBEQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBELstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEQstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SARXQ",
+ argLen: 2,
+ asm: x86.ASARXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXL",
+ argLen: 2,
+ asm: x86.ASARXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXQ",
+ argLen: 2,
+ asm: x86.ASHLXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXL",
+ argLen: 2,
+ asm: x86.ASHLXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXQ",
+ argLen: 2,
+ asm: x86.ASHRXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXL",
+ argLen: 2,
+ asm: x86.ASHRXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARXQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASARXQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLXQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHLXQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRXQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASHRXQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSB",
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "HMUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "HMULU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLudiv",
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 20492, // R2 R3 R12 R14
+ outputs: []outputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "ADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADC",
+ argLen: 3,
+ commutative: true,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBS",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBC",
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULA",
+ argLen: 3,
+ asm: arm.AMULA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULS",
+ argLen: 3,
+ asm: arm.AMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: arm.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: arm.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NMULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ANMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ANMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: arm.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: arm.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULAF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULAF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULAD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULSF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULSF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMULAD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AFMULAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BFX",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BFXU",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABFXU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: arm.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: arm.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: arm.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: arm.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ABSD",
+ argLen: 1,
+ asm: arm.AABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: arm.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "REV",
+ argLen: 1,
+ asm: arm.AREV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "REV16",
+ argLen: 1,
+ asm: arm.AREV16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RBIT",
+ argLen: 1,
+ asm: arm.ARBIT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRR",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRRconst",
+ auxType: auxInt32,
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRR",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftLLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRAreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftLLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRAreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftLLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRAreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftLLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRAreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftLLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRAreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftLLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRAreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftLLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRAreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftLLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRAreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQ",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPF",
+ argLen: 2,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD",
+ argLen: 2,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftLLreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRLreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRAreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftLLreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRLreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRAreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPF0",
+ argLen: 1,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD0",
+ argLen: 1,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294975488}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUloadidx",
+ argLen: 3,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ argLen: 3,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx",
+ argLen: 3,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftLL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRA",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm.AMOVBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm.AMOVHS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVFW",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDW",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFWU",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDWU",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: arm.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: arm.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMOVWHSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMOVWLSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAcond",
+ argLen: 3,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 128}, // R7
+ {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14
+ },
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 20482, // R1 R12 R14
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 20487, // R0 R1 R2 R12 R14
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 128}, // R7
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 4}, // R2
+ {2, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 2}, // R1
+ {2, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 1}, // R0
+ {2, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "FlagConstant",
+ auxType: auxFlagConstant,
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ outputs: []outputInfo{
+ {0, 256}, // R8
+ },
+ },
+ },
+
+ {
+ name: "ADCSflags",
+ argLen: 3,
+ commutative: true,
+ asm: arm64.AADCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADCzerocarry",
+ argLen: 1,
+ asm: arm64.AADC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1878786047}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDSconstflags",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDSflags",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBCSflags",
+ argLen: 3,
+ asm: arm64.ASBCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBSflags",
+ argLen: 2,
+ asm: arm64.ASUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MNEG",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MNEGW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMNEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: arm64.ASDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UDIV",
+ argLen: 2,
+ asm: arm64.AUDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: arm64.ASDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UDIVW",
+ argLen: 2,
+ asm: arm64.AUDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOD",
+ argLen: 2,
+ asm: arm64.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMOD",
+ argLen: 2,
+ asm: arm64.AUREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ asm: arm64.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMODW",
+ argLen: 2,
+ asm: arm64.AUREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: arm64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: arm64.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFNMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFNMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: arm64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: arm64.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EON",
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGSflags",
+ argLen: 1,
+ asm: arm64.ANEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NGCzerocarry",
+ argLen: 1,
+ asm: arm64.ANGC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FABSD",
+ argLen: 1,
+ asm: arm64.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: arm64.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: arm64.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: arm64.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: arm64.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMIND",
+ argLen: 2,
+ asm: arm64.AFMIND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMINS",
+ argLen: 2,
+ asm: arm64.AFMINS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMAXD",
+ argLen: 2,
+ asm: arm64.AFMAXD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMAXS",
+ argLen: 2,
+ asm: arm64.AFMAXS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "REV",
+ argLen: 1,
+ asm: arm64.AREV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REVW",
+ argLen: 1,
+ asm: arm64.AREVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REV16",
+ argLen: 1,
+ asm: arm64.AREV16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REV16W",
+ argLen: 1,
+ asm: arm64.AREV16W,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RBIT",
+ argLen: 1,
+ asm: arm64.ARBIT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RBITW",
+ argLen: 1,
+ asm: arm64.ARBITW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: arm64.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CLZW",
+ argLen: 1,
+ asm: arm64.ACLZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "VCNT",
+ argLen: 1,
+ asm: arm64.AVCNT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "VUADDLV",
+ argLen: 1,
+ asm: arm64.AVUADDLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ asm: arm64.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDD",
+ argLen: 3,
+ asm: arm64.AFMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDS",
+ argLen: 3,
+ asm: arm64.AFNMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDD",
+ argLen: 3,
+ asm: arm64.AFNMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ asm: arm64.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBD",
+ argLen: 3,
+ asm: arm64.AFMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBS",
+ argLen: 3,
+ asm: arm64.AFNMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBD",
+ argLen: 3,
+ asm: arm64.AFNMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MADD",
+ argLen: 3,
+ asm: arm64.AMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MADDW",
+ argLen: 3,
+ asm: arm64.AMADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MSUB",
+ argLen: 3,
+ asm: arm64.AMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MSUBW",
+ argLen: 3,
+ asm: arm64.AMSUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ROR",
+ argLen: 2,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EXTRconst",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEXTR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EXTRWconst",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEXTRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ATSTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ATSTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPD",
+ argLen: 2,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPS0",
+ argLen: 1,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPD0",
+ argLen: 1,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRO",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "BFI",
+ auxType: auxARM64BitField,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm64.ABFI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BFXIL",
+ auxType: auxARM64BitField,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm64.ABFXIL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBFIZ",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.ASBFIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBFX",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.ASBFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UBFIZ",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.AUBFIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UBFX",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.AUBFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517632}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDP",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.ALDP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx2",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx2",
+ argLen: 3,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx4",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUloadidx4",
+ argLen: 3,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx8",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx4",
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx8",
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STP",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.ASTP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ argLen: 4,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ argLen: 4,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx2",
+ argLen: 4,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx4",
+ argLen: 4,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx8",
+ argLen: 4,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx4",
+ argLen: 4,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx8",
+ argLen: 4,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVQstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.ASTP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezeroidx2",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezeroidx4",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezeroidx8",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVDgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVSfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SCVTFWS",
+ argLen: 1,
+ asm: arm64.ASCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFWD",
+ argLen: 1,
+ asm: arm64.ASCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFWS",
+ argLen: 1,
+ asm: arm64.AUCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFWD",
+ argLen: 1,
+ asm: arm64.AUCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFS",
+ argLen: 1,
+ asm: arm64.ASCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFD",
+ argLen: 1,
+ asm: arm64.ASCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFS",
+ argLen: 1,
+ asm: arm64.AUCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFD",
+ argLen: 1,
+ asm: arm64.AUCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTZSSW",
+ argLen: 1,
+ asm: arm64.AFCVTZSSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSDW",
+ argLen: 1,
+ asm: arm64.AFCVTZSDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUSW",
+ argLen: 1,
+ asm: arm64.AFCVTZUSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUDW",
+ argLen: 1,
+ asm: arm64.AFCVTZUDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSS",
+ argLen: 1,
+ asm: arm64.AFCVTZSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSD",
+ argLen: 1,
+ asm: arm64.AFCVTZSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUS",
+ argLen: 1,
+ asm: arm64.AFCVTZUS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUD",
+ argLen: 1,
+ asm: arm64.AFCVTZUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: arm64.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: arm64.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTAD",
+ argLen: 1,
+ asm: arm64.AFRINTAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTMD",
+ argLen: 1,
+ asm: arm64.AFRINTMD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTND",
+ argLen: 1,
+ asm: arm64.AFRINTND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTPD",
+ argLen: 1,
+ asm: arm64.AFRINTPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTZD",
+ argLen: 1,
+ asm: arm64.AFRINTZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CSEL",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSEL0",
+ auxType: auxCCop,
+ argLen: 2,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSINC",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSINC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSINV",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSINV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSNEG",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSETM",
+ auxType: auxCCop,
+ argLen: 1,
+ asm: arm64.ACSETM,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 67108864}, // R26
+ {0, 1744568319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP
+ },
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanNoov",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualNoov",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 538116096, // R16 R17 R20 R30
+ },
+ },
+ {
+ name: "LoweredZero",
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65536}, // R16
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 65536, // R16
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2097152}, // R21
+ {1, 1048576}, // R20
+ },
+ clobbers: 607322112, // R16 R17 R20 R21 R26 R30
+ },
+ },
+ {
+ name: "LoweredMove",
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 131072}, // R17
+ {1, 65536}, // R16
+ {2, 637272063}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R26 R30
+ },
+ clobbers: 33751040, // R16 R17 R25
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 67108864}, // R26
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FlagConstant",
+ auxType: auxFlagConstant,
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LDAR",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDAR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDARB",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDARW",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "STLRB",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STLR",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STLRW",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64Variant",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32Variant",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 9223372035244359680, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ outputs: []outputInfo{
+ {0, 33554432}, // R25
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "PRFM",
+ auxType: auxInt64,
+ argLen: 2,
+ hasSideEffects: true,
+ asm: arm64.APRFM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "DMB",
+ auxType: auxInt64,
+ argLen: 1,
+ hasSideEffects: true,
+ asm: arm64.ADMB,
+ reg: regInfo{},
+ },
+
+ {
+ name: "ADDV",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ADDVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SUBV",
+ argLen: 2,
+ asm: loong64.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SUBVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MULV",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AMULV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MULHV",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AMULHV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MULHVU",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AMULHVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "DIVV",
+ argLen: 2,
+ asm: loong64.ADIVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "DIVVU",
+ argLen: 2,
+ asm: loong64.ADIVVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "REMV",
+ argLen: 2,
+ asm: loong64.AREMV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "REMVU",
+ argLen: 2,
+ asm: loong64.AREMVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: loong64.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: loong64.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: loong64.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: loong64.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: loong64.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "NEGV",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: loong64.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: loong64.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: loong64.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: loong64.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MASKEQZ",
+ argLen: 2,
+ asm: loong64.AMASKEQZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MASKNEZ",
+ argLen: 2,
+ asm: loong64.AMASKNEZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SLLV",
+ argLen: 2,
+ asm: loong64.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SLLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SRLV",
+ argLen: 2,
+ asm: loong64.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SRLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SRAV",
+ argLen: 2,
+ asm: loong64.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SRAVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ROTR",
+ argLen: 2,
+ asm: loong64.AROTR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ROTRV",
+ argLen: 2,
+ asm: loong64.AROTRV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ROTRconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.AROTR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "ROTRVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.AROTRV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: loong64.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: loong64.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: loong64.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: loong64.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: loong64.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: loong64.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: loong64.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: loong64.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: loong64.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: loong64.AMOVV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: loong64.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: loong64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: loong64.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018427387908}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVVload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: loong64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: loong64.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: loong64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: loong64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: loong64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: loong64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: loong64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: loong64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVVreg",
+ argLen: 1,
+ asm: loong64.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVVnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: loong64.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: loong64.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVF",
+ argLen: 1,
+ asm: loong64.AMOVVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVD",
+ argLen: 1,
+ asm: loong64.AMOVVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: loong64.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: loong64.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFV",
+ argLen: 1,
+ asm: loong64.ATRUNCFV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDV",
+ argLen: 1,
+ asm: loong64.ATRUNCDV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: loong64.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: loong64.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 268435456}, // R29
+ {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 524288}, // R20
+ },
+ clobbers: 524290, // R1 R20
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R21
+ {1, 524288}, // R20
+ },
+ clobbers: 1572866, // R1 R20 R21
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 524288}, // R20
+ {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ clobbers: 524288, // R20
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R21
+ {1, 524288}, // R20
+ {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ clobbers: 1572864, // R20 R21
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst32",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst64",
+ auxType: auxInt64,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 268435456}, // R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ outputs: []outputInfo{
+ {0, 268435456}, // R29
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4194304}, // R23
+ {1, 8388608}, // R24
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R21
+ {1, 4194304}, // R23
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 524288}, // R20
+ {1, 1048576}, // R21
+ },
+ },
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AADDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: mips.ASUBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASUBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ clobbers: 105553116266496, // HI LO
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MULT",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "MULTU",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: mips.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: mips.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: mips.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: mips.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: mips.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: mips.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: mips.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: mips.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "ABSD",
+ argLen: 1,
+ asm: mips.AABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: mips.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: mips.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: mips.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: mips.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: mips.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: mips.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTzero",
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTUzero",
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: mips.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: mips.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: mips.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: mips.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: mips.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: mips.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140737555464192}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWfpgp",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWgpfp",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMOVZ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: mips.ACMOVZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMOVZzero",
+ argLen: 2,
+ resultInArg0: true,
+ asm: mips.ACMOVZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: mips.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: mips.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: mips.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: mips.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: mips.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: mips.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4194304}, // R22
+ {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31
+ },
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt32,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt32,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4194304}, // R22
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ outputs: []outputInfo{
+ {0, 16777216}, // R25
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 8}, // R3
+ {2, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 4}, // R2
+ {2, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 2}, // R1
+ {2, 4}, // R2
+ },
+ },
+ },
+
+ {
+ name: "ADDV",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ADDVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SUBV",
+ argLen: 2,
+ asm: mips.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SUBVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MULV",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "MULVU",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVV",
+ argLen: 2,
+ asm: mips.ADIVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVVU",
+ argLen: 2,
+ asm: mips.ADIVVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: mips.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: mips.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: mips.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: mips.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NEGV",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: mips.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: mips.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "ABSD",
+ argLen: 1,
+ asm: mips.AABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: mips.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: mips.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SLLV",
+ argLen: 2,
+ asm: mips.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SLLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRLV",
+ argLen: 2,
+ asm: mips.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRAV",
+ argLen: 2,
+ asm: mips.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRAVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: mips.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: mips.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: mips.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: mips.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: mips.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: mips.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018460942336}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWfpgp",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWgpfp",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVfpgp",
+ argLen: 1,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVgpfp",
+ argLen: 1,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: mips.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVreg",
+ argLen: 1,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: mips.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: mips.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVF",
+ argLen: 1,
+ asm: mips.AMOVVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVD",
+ argLen: 1,
+ asm: mips.AMOVVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: mips.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: mips.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFV",
+ argLen: 1,
+ asm: mips.ATRUNCFV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDV",
+ argLen: 1,
+ asm: mips.ATRUNCDV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: mips.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: mips.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4194304}, // R22
+ {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31
+ },
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 134217730, // R1 R31
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 134217734, // R1 R2 R31
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst32",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst64",
+ auxType: auxInt64,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4194304}, // R22
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ outputs: []outputInfo{
+ {0, 16777216}, // R25
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDCCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AADDCCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: ppc64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBCC",
+ argLen: 2,
+ asm: ppc64.ASUBCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBFCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ asm: ppc64.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: ppc64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MADDLD",
+ argLen: 3,
+ asm: ppc64.AMADDLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHWU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMADD",
+ argLen: 3,
+ asm: ppc64.AFMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ asm: ppc64.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMSUB",
+ argLen: 3,
+ asm: ppc64.AFMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ asm: ppc64.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTL",
+ argLen: 2,
+ asm: ppc64.AROTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLW",
+ argLen: 2,
+ asm: ppc64.AROTLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CLRLSLWI",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACLRLSLWI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CLRLSLDI",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACLRLSLDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {1, 9223372036854775808}, // XER
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBC",
+ argLen: 2,
+ asm: ppc64.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {1, 9223372036854775808}, // XER
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {1, 9223372036854775808}, // XER
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {1, 9223372036854775808}, // XER
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDE",
+ argLen: 3,
+ commutative: true,
+ asm: ppc64.AADDE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 9223372036854775808}, // XER
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {1, 9223372036854775808}, // XER
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBE",
+ argLen: 3,
+ asm: ppc64.ASUBE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 9223372036854775808}, // XER
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {1, 9223372036854775808}, // XER
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDZEzero",
+ argLen: 1,
+ asm: ppc64.AADDZE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372036854775808}, // XER
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBZEzero",
+ argLen: 1,
+ asm: ppc64.ASUBZE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372036854775808}, // XER
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 9223372036854775808, // XER
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AROTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AROTLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EXTSWSLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AEXTSWSLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWINM",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWNM",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWMI",
+ auxType: auxInt64,
+ argLen: 2,
+ resultInArg0: true,
+ asm: ppc64.ARLWMI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLDICL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLDICL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLDICR",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLDICR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZD",
+ argLen: 1,
+ asm: ppc64.ACNTLZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZDCC",
+ argLen: 1,
+ asm: ppc64.ACNTLZDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZW",
+ argLen: 1,
+ asm: ppc64.ACNTLZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTTZD",
+ argLen: 1,
+ asm: ppc64.ACNTTZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTTZW",
+ argLen: 1,
+ asm: ppc64.ACNTTZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTD",
+ argLen: 1,
+ asm: ppc64.APOPCNTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTW",
+ argLen: 1,
+ asm: ppc64.APOPCNTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTB",
+ argLen: 1,
+ asm: ppc64.APOPCNTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ asm: ppc64.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: ppc64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: ppc64.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: ppc64.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ asm: ppc64.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ asm: ppc64.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODUD",
+ argLen: 2,
+ asm: ppc64.AMODUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODSD",
+ argLen: 2,
+ asm: ppc64.AMODSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODUW",
+ argLen: 2,
+ asm: ppc64.AMODUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODSW",
+ argLen: 2,
+ asm: ppc64.AMODSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FCTIDZ",
+ argLen: 1,
+ asm: ppc64.AFCTIDZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FCTIWZ",
+ argLen: 1,
+ asm: ppc64.AFCTIWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FCFID",
+ argLen: 1,
+ asm: ppc64.AFCFID,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FCFIDS",
+ argLen: 1,
+ asm: ppc64.AFCFIDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FRSP",
+ argLen: 1,
+ asm: ppc64.AFRSP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "MFVSRD",
+ argLen: 1,
+ asm: ppc64.AMFVSRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MTVSRD",
+ argLen: 1,
+ asm: ppc64.AMTVSRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDN",
+ argLen: 2,
+ asm: ppc64.AANDN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDNCC",
+ argLen: 2,
+ asm: ppc64.AANDNCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: ppc64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.ANORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EQV",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AEQV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: ppc64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NEGCC",
+ argLen: 1,
+ asm: ppc64.ANEGCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "BRD",
+ argLen: 1,
+ asm: ppc64.ABRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "BRW",
+ argLen: 1,
+ asm: ppc64.ABRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "BRH",
+ argLen: 1,
+ asm: ppc64.ABRH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ asm: ppc64.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: ppc64.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: ppc64.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FFLOOR",
+ argLen: 1,
+ asm: ppc64.AFRIM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FCEIL",
+ argLen: 1,
+ asm: ppc64.AFRIP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FTRUNC",
+ argLen: 1,
+ asm: ppc64.AFRIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FROUND",
+ argLen: 1,
+ asm: ppc64.AFRIN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FABS",
+ argLen: 1,
+ asm: ppc64.AFABS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FNABS",
+ argLen: 1,
+ asm: ppc64.AFNABS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FCPSGN",
+ argLen: 2,
+ asm: ppc64.AFCPSGN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRload",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRload",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRload",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ argLen: 3,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ argLen: 3,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "DCBT",
+ auxType: auxInt64,
+ argLen: 2,
+ hasSideEffects: true,
+ asm: ppc64.ADCBT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRstore",
+ argLen: 3,
+ faultOnNilArg0: true,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstore",
+ argLen: 3,
+ faultOnNilArg0: true,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRstore",
+ argLen: 3,
+ faultOnNilArg0: true,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ argLen: 4,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ argLen: 4,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "FCMPU",
+ argLen: 2,
+ asm: ppc64.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ {1, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ISEL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: ppc64.AISEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ISELZ",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: ppc64.AISEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SETBC",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ASETBC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SETBCR",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ASETBCR,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2048}, // R11
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 2147483648, // R31
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ outputs: []outputInfo{
+ {0, 9223372032559808512}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4096}, // R12
+ {1, 2048}, // R11
+ },
+ clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4096}, // R12
+ },
+ clobbers: 18446744071562059768, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 1048576, // R20
+ },
+ },
+ {
+ name: "LoweredZeroShort",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadZeroShort",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadZero",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 1048576, // R20
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 3145728, // R20 R21
+ },
+ },
+ {
+ name: "LoweredMoveShort",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadMove",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 3145728, // R20 R21
+ },
+ },
+ {
+ name: "LoweredQuadMoveShort",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoadPtr",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ auxType: auxInt64,
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ auxType: auxInt64,
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 18446744072632408064, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 XER
+ outputs: []outputInfo{
+ {0, 536870912}, // R29
+ },
+ },
+ },
+ {
+ name: "LoweredPubBarrier",
+ argLen: 1,
+ hasSideEffects: true,
+ asm: ppc64.ALWSYNC,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 64}, // R6
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 32}, // R5
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: riscv.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ argLen: 1,
+ asm: riscv.ANEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: riscv.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ argLen: 2,
+ asm: riscv.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULHU",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredMuluover",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: riscv.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: riscv.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: riscv.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVUW",
+ argLen: 2,
+ asm: riscv.ADIVUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REM",
+ argLen: 2,
+ asm: riscv.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMU",
+ argLen: 2,
+ asm: riscv.AREMU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMW",
+ argLen: 2,
+ asm: riscv.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMUW",
+ argLen: 2,
+ asm: riscv.AREMUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: riscv.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: riscv.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ asm: riscv.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: riscv.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRLW",
+ argLen: 2,
+ asm: riscv.ASRLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRAI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRAI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRAIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRAIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRLIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRLIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AXORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ANDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AANDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NOT",
+ argLen: 1,
+ asm: riscv.ANOT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SEQZ",
+ argLen: 1,
+ asm: riscv.ASEQZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SNEZ",
+ argLen: 1,
+ asm: riscv.ASNEZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLT",
+ argLen: 2,
+ asm: riscv.ASLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTU",
+ argLen: 2,
+ asm: riscv.ASLTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTIU",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTIU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 33554432}, // X26
+ {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16777216}, // X25
+ },
+ clobbers: 16777216, // X25
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16777216}, // X25
+ {1, 8388608}, // X24
+ },
+ clobbers: 25165824, // X24 X25
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 16, // X5
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 112, // X5 X6 X7
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: riscv.AAMOANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: riscv.AAMOORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 33554432}, // X26
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ outputs: []outputInfo{
+ {0, 8388608}, // X24
+ },
+ },
+ },
+ {
+ name: "LoweredPubBarrier",
+ argLen: 1,
+ hasSideEffects: true,
+ asm: riscv.AFENCE,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // X7
+ {1, 134217728}, // X28
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // X6
+ {1, 64}, // X7
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: riscv.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: riscv.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBS",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: riscv.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: riscv.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVSX",
+ argLen: 1,
+ asm: riscv.AFMVSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSW",
+ argLen: 1,
+ asm: riscv.AFCVTSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSL",
+ argLen: 1,
+ asm: riscv.AFCVTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWS",
+ argLen: 1,
+ asm: riscv.AFCVTWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLS",
+ argLen: 1,
+ asm: riscv.AFCVTLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FMOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNES",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTS",
+ argLen: 2,
+ asm: riscv.AFLTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLES",
+ argLen: 2,
+ asm: riscv.AFLES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: riscv.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: riscv.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: riscv.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: riscv.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FABSD",
+ argLen: 1,
+ asm: riscv.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSGNJD",
+ argLen: 2,
+ asm: riscv.AFSGNJD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVDX",
+ argLen: 1,
+ asm: riscv.AFMVDX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDW",
+ argLen: 1,
+ asm: riscv.AFCVTDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDL",
+ argLen: 1,
+ asm: riscv.AFCVTDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWD",
+ argLen: 1,
+ asm: riscv.AFCVTWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLD",
+ argLen: 1,
+ asm: riscv.AFCVTLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: riscv.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: riscv.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNED",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTD",
+ argLen: 2,
+ asm: riscv.AFLTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLED",
+ argLen: 2,
+ asm: riscv.AFLED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMADD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMSUB",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LPDFR",
+ argLen: 1,
+ asm: s390x.ALPDFR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LNDFR",
+ argLen: 1,
+ asm: s390x.ALNDFR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CPSDR",
+ argLen: 2,
+ asm: s390x.ACPSDR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FIDBR",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: s390x.AFIDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODD",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODDU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDC",
+ argLen: 2,
+ commutative: true,
+ asm: s390x.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDCconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: s390x.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDE",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AADDE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBC",
+ argLen: 2,
+ asm: s390x.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.ASUBE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: s390x.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: s390x.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: s390x.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: s390x.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: s390x.ACEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FCMP",
+ argLen: 2,
+ asm: s390x.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LTDBR",
+ argLen: 1,
+ asm: s390x.ALTDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LTEBR",
+ argLen: 1,
+ asm: s390x.ALTEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: s390x.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: s390x.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: s390x.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: s390x.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLLG",
+ argLen: 2,
+ asm: s390x.ARLLG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLL",
+ argLen: 2,
+ asm: s390x.ARLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLLconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ARLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RXSBG",
+ auxType: auxS390XRotateParams,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ARXSBG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RISBGZ",
+ auxType: auxS390XRotateParams,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ARISBGZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ANEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NOT",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NOTW",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: s390x.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: s390x.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LOCGR",
+ auxType: auxS390XCCMask,
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.ALOCGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LDGR",
+ argLen: 1,
+ asm: s390x.ALDGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LGDR",
+ argLen: 1,
+ asm: s390x.ALGDR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CFDBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACFDBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CGDBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACGDBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CFEBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACFEBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CGEBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACGEBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CEFBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACEFBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDFBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDFBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CEGBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACEGBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDGBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDGBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CLFEBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLFEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLFDBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLFDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGEBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLGEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGDBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLGDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CELFBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACELFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLFBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDLFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CELGBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACELGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLGBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDLGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LEDBR",
+ argLen: 1,
+ asm: s390x.ALEDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LDEBR",
+ argLen: 1,
+ asm: s390x.ALDEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295000064}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDaddridx",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295000064}, // SP SB
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBR",
+ argLen: 1,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBR",
+ argLen: 1,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MVC",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymNone,
+ asm: s390x.AMVC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVBZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "CLEAR",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ACLEAR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4096}, // R12
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4096}, // R12
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ reg: regInfo{
+ clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ outputs: []outputInfo{
+ {0, 512}, // R9
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagOV",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "SYNC",
+ argLen: 1,
+ asm: s390x.ASYNC,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVBZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAA",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ALAA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LAAG",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ALAAG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "AddTupleFirst32",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "AddTupleFirst64",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "LAN",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LANfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LAO",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAOfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // R0
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 1, // R0
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACSG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // R0
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 1, // R0
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACSG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "FLOGR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFLOGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ clobbers: 2, // R1
+ outputs: []outputInfo{
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "POPCNT",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.APOPCNT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MLGR",
+ argLen: 2,
+ asm: s390x.AMLGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 8}, // R3
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "SumBytes2",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "SumBytes4",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "SumBytes8",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "STMG2",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STMG3",
+ auxType: auxSymOff,
+ argLen: 5,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STMG4",
+ auxType: auxSymOff,
+ argLen: 6,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {4, 16}, // R4
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM2",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM3",
+ auxType: auxSymOff,
+ argLen: 5,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM4",
+ auxType: auxSymOff,
+ argLen: 6,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {4, 16}, // R4
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+
+ {
+ name: "LoweredStaticCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredTailCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredClosureCall",
+ auxType: auxCallOff,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredInterCall",
+ auxType: auxCallOff,
+ argLen: 2,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredAddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 1,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxInt64,
+ argLen: 1,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredConvert",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "Select",
+ argLen: 3,
+ asm: wasm.ASelect,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load8U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load8U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load8S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load8S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load16U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load16U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load16S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load16S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load32U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load32U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load32S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Store8",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store16",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store32",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store32,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "F32Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AF32Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AF64Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AF32Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "F64Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AF64Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Const",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Const",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Const",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Eqz",
+ argLen: 1,
+ asm: wasm.AI64Eqz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Eq",
+ argLen: 2,
+ asm: wasm.AI64Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Ne",
+ argLen: 2,
+ asm: wasm.AI64Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LtS",
+ argLen: 2,
+ asm: wasm.AI64LtS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LtU",
+ argLen: 2,
+ asm: wasm.AI64LtU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GtS",
+ argLen: 2,
+ asm: wasm.AI64GtS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GtU",
+ argLen: 2,
+ asm: wasm.AI64GtU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LeS",
+ argLen: 2,
+ asm: wasm.AI64LeS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LeU",
+ argLen: 2,
+ asm: wasm.AI64LeU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GeS",
+ argLen: 2,
+ asm: wasm.AI64GeS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GeU",
+ argLen: 2,
+ asm: wasm.AI64GeU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Eq",
+ argLen: 2,
+ asm: wasm.AF32Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Ne",
+ argLen: 2,
+ asm: wasm.AF32Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Lt",
+ argLen: 2,
+ asm: wasm.AF32Lt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Gt",
+ argLen: 2,
+ asm: wasm.AF32Gt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Le",
+ argLen: 2,
+ asm: wasm.AF32Le,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Ge",
+ argLen: 2,
+ asm: wasm.AF32Ge,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Eq",
+ argLen: 2,
+ asm: wasm.AF64Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Ne",
+ argLen: 2,
+ asm: wasm.AF64Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Lt",
+ argLen: 2,
+ asm: wasm.AF64Lt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Gt",
+ argLen: 2,
+ asm: wasm.AF64Gt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Le",
+ argLen: 2,
+ asm: wasm.AF64Le,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Ge",
+ argLen: 2,
+ asm: wasm.AF64Ge,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Add",
+ argLen: 2,
+ asm: wasm.AI64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64AddConst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: wasm.AI64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Sub",
+ argLen: 2,
+ asm: wasm.AI64Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Mul",
+ argLen: 2,
+ asm: wasm.AI64Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64DivS",
+ argLen: 2,
+ asm: wasm.AI64DivS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64DivU",
+ argLen: 2,
+ asm: wasm.AI64DivU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64RemS",
+ argLen: 2,
+ asm: wasm.AI64RemS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64RemU",
+ argLen: 2,
+ asm: wasm.AI64RemU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64And",
+ argLen: 2,
+ asm: wasm.AI64And,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Or",
+ argLen: 2,
+ asm: wasm.AI64Or,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Xor",
+ argLen: 2,
+ asm: wasm.AI64Xor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Shl",
+ argLen: 2,
+ asm: wasm.AI64Shl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64ShrS",
+ argLen: 2,
+ asm: wasm.AI64ShrS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64ShrU",
+ argLen: 2,
+ asm: wasm.AI64ShrU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Neg",
+ argLen: 1,
+ asm: wasm.AF32Neg,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Add",
+ argLen: 2,
+ asm: wasm.AF32Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Sub",
+ argLen: 2,
+ asm: wasm.AF32Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Mul",
+ argLen: 2,
+ asm: wasm.AF32Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Div",
+ argLen: 2,
+ asm: wasm.AF32Div,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Neg",
+ argLen: 1,
+ asm: wasm.AF64Neg,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Add",
+ argLen: 2,
+ asm: wasm.AF64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Sub",
+ argLen: 2,
+ asm: wasm.AF64Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Mul",
+ argLen: 2,
+ asm: wasm.AF64Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Div",
+ argLen: 2,
+ asm: wasm.AF64Div,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF64S",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF64U",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF32S",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF32U",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF32U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32ConvertI64S",
+ argLen: 1,
+ asm: wasm.AF32ConvertI64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32ConvertI64U",
+ argLen: 1,
+ asm: wasm.AF32ConvertI64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64ConvertI64S",
+ argLen: 1,
+ asm: wasm.AF64ConvertI64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64ConvertI64U",
+ argLen: 1,
+ asm: wasm.AF64ConvertI64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32DemoteF64",
+ argLen: 1,
+ asm: wasm.AF32DemoteF64,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64PromoteF32",
+ argLen: 1,
+ asm: wasm.AF64PromoteF32,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Extend8S",
+ argLen: 1,
+ asm: wasm.AI64Extend8S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Extend16S",
+ argLen: 1,
+ asm: wasm.AI64Extend16S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Extend32S",
+ argLen: 1,
+ asm: wasm.AI64Extend32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Sqrt",
+ argLen: 1,
+ asm: wasm.AF32Sqrt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Trunc",
+ argLen: 1,
+ asm: wasm.AF32Trunc,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Ceil",
+ argLen: 1,
+ asm: wasm.AF32Ceil,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Floor",
+ argLen: 1,
+ asm: wasm.AF32Floor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Nearest",
+ argLen: 1,
+ asm: wasm.AF32Nearest,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Abs",
+ argLen: 1,
+ asm: wasm.AF32Abs,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Copysign",
+ argLen: 2,
+ asm: wasm.AF32Copysign,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Sqrt",
+ argLen: 1,
+ asm: wasm.AF64Sqrt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Trunc",
+ argLen: 1,
+ asm: wasm.AF64Trunc,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Ceil",
+ argLen: 1,
+ asm: wasm.AF64Ceil,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Floor",
+ argLen: 1,
+ asm: wasm.AF64Floor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Nearest",
+ argLen: 1,
+ asm: wasm.AF64Nearest,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Abs",
+ argLen: 1,
+ asm: wasm.AF64Abs,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Copysign",
+ argLen: 2,
+ asm: wasm.AF64Copysign,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Ctz",
+ argLen: 1,
+ asm: wasm.AI64Ctz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Clz",
+ argLen: 1,
+ asm: wasm.AI64Clz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I32Rotl",
+ argLen: 2,
+ asm: wasm.AI32Rotl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Rotl",
+ argLen: 2,
+ asm: wasm.AI64Rotl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Popcnt",
+ argLen: 1,
+ asm: wasm.AI64Popcnt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+
+ {
+ name: "Add8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "AddPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Add32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "SubPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mul8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Div32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Hmul32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul32u",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul64u",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32uhilo",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uhilo",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Avg32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Avg64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div8u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div16u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div128u",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Mod8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod8u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod16u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "And8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Lsh8x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Eq8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqPtr",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqInter",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "EqSlice",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Eq32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqPtr",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqInter",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "NeqSlice",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Neq32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Less8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less8U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less16U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq8U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq16U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "CondSelect",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "AndB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "OrB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Not",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz8NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz16NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz32NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz64NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "RotateLeft64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sqrt",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Sqrt32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Floor",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ceil",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "RoundToEven",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Abs",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Copysign",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Min64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Min32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Max64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Max32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "FMA",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Phi",
+ argLen: -1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Copy",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Convert",
+ argLen: 2,
+ resultInArg0: true,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "ConstBool",
+ auxType: auxBool,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstString",
+ auxType: auxString,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstNil",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const8",
+ auxType: auxInt8,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const16",
+ auxType: auxInt16,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const32",
+ auxType: auxInt32,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const64",
+ auxType: auxInt64,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const32F",
+ auxType: auxFloat32,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const64F",
+ auxType: auxFloat64,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstInterface",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstSlice",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "InitMem",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Arg",
+ auxType: auxSymOff,
+ argLen: 0,
+ zeroWidth: true,
+ symEffect: SymRead,
+ generic: true,
+ },
+ {
+ name: "ArgIntReg",
+ auxType: auxNameOffsetInt8,
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "ArgFloatReg",
+ auxType: auxNameOffsetInt8,
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Addr",
+ auxType: auxSym,
+ argLen: 1,
+ symEffect: SymAddr,
+ generic: true,
+ },
+ {
+ name: "LocalAddr",
+ auxType: auxSym,
+ argLen: 2,
+ symEffect: SymAddr,
+ generic: true,
+ },
+ {
+ name: "SP",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SB",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SPanchored",
+ argLen: 2,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Load",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Dereference",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Store",
+ auxType: auxTyp,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Move",
+ auxType: auxTypSize,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Zero",
+ auxType: auxTypSize,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StoreWB",
+ auxType: auxTyp,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "MoveWB",
+ auxType: auxTypSize,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "ZeroWB",
+ auxType: auxTypSize,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "WBend",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "WB",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "HasCPUFeature",
+ auxType: auxSym,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "PanicBounds",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "PanicExtend",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "ClosureCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "StaticCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "InterCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "TailCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "ClosureLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "StaticLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "InterLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "TailLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "SignExt8to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt8to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt8to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt16to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt16to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt32to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt32to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc16to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc32to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc32to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32to32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32to64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64to32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64to64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "CvtBoolToUint8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IsNonNil",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IsInBounds",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "IsSliceInBounds",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "NilCheck",
+ argLen: 2,
+ nilCheck: true,
+ generic: true,
+ },
+ {
+ name: "GetG",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "GetClosurePtr",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "GetCallerPC",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "GetCallerSP",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PtrIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "OffPtr",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceMake",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "SlicePtr",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceLen",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceCap",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SlicePtrUnchecked",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ComplexMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "ComplexReal",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ComplexImag",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StringMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StringPtr",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StringLen",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "ITab",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IData",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StructMake0",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "StructMake1",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StructMake2",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StructMake3",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "StructMake4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "StructSelect",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ArrayMake0",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ArrayMake1",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ArraySelect",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StoreReg",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "LoadReg",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "FwdRef",
+ auxType: auxSym,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "Unknown",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "VarDef",
+ auxType: auxSym,
+ argLen: 1,
+ zeroWidth: true,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "VarLive",
+ auxType: auxSym,
+ argLen: 1,
+ zeroWidth: true,
+ symEffect: SymRead,
+ generic: true,
+ },
+ {
+ name: "KeepAlive",
+ argLen: 2,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "InlMark",
+ auxType: auxInt32,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Make",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Int64Hi",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Lo",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Add32carry",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32withcarry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub32carry",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32withcarry",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Add64carry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub64borrow",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Signmask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Zeromask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Slicemask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SpectreIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "SpectreSliceIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto64U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Select0",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Select1",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SelectN",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SelectNAddr",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "MakeResult",
+ argLen: -1,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadAcq32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadAcq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicStore8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStore32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStore64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStorePtrNoWB",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStoreRel32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStoreRel64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap64",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwapRel32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap32Variant",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap64Variant",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd8Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr8Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "PubBarrier",
+ argLen: 1,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "Clobber",
+ auxType: auxSymOff,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "ClobberReg",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "PrefetchCache",
+ argLen: 2,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "PrefetchCacheStreamed",
+ argLen: 2,
+ hasSideEffects: true,
+ generic: true,
+ },
+}
+
+func (o Op) Asm() obj.As { return opcodeTable[o].asm }
+func (o Op) Scale() int16 { return int16(opcodeTable[o].scale) }
+func (o Op) String() string { return opcodeTable[o].name }
+func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }
+func (o Op) IsCall() bool { return opcodeTable[o].call }
+func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall }
+func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }
+func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }
+func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }
+
+var registers386 = [...]Register{
+ {0, x86.REG_AX, 0, "AX"},
+ {1, x86.REG_CX, 1, "CX"},
+ {2, x86.REG_DX, 2, "DX"},
+ {3, x86.REG_BX, 3, "BX"},
+ {4, x86.REGSP, -1, "SP"},
+ {5, x86.REG_BP, 4, "BP"},
+ {6, x86.REG_SI, 5, "SI"},
+ {7, x86.REG_DI, 6, "DI"},
+ {8, x86.REG_X0, -1, "X0"},
+ {9, x86.REG_X1, -1, "X1"},
+ {10, x86.REG_X2, -1, "X2"},
+ {11, x86.REG_X3, -1, "X3"},
+ {12, x86.REG_X4, -1, "X4"},
+ {13, x86.REG_X5, -1, "X5"},
+ {14, x86.REG_X6, -1, "X6"},
+ {15, x86.REG_X7, -1, "X7"},
+ {16, 0, -1, "SB"},
+}
+var paramIntReg386 = []int8(nil)
+var paramFloatReg386 = []int8(nil)
+var gpRegMask386 = regMask(239)
+var fpRegMask386 = regMask(65280)
+var specialRegMask386 = regMask(0)
+var framepointerReg386 = int8(5)
+var linkReg386 = int8(-1)
+var registersAMD64 = [...]Register{
+ {0, x86.REG_AX, 0, "AX"},
+ {1, x86.REG_CX, 1, "CX"},
+ {2, x86.REG_DX, 2, "DX"},
+ {3, x86.REG_BX, 3, "BX"},
+ {4, x86.REGSP, -1, "SP"},
+ {5, x86.REG_BP, 4, "BP"},
+ {6, x86.REG_SI, 5, "SI"},
+ {7, x86.REG_DI, 6, "DI"},
+ {8, x86.REG_R8, 7, "R8"},
+ {9, x86.REG_R9, 8, "R9"},
+ {10, x86.REG_R10, 9, "R10"},
+ {11, x86.REG_R11, 10, "R11"},
+ {12, x86.REG_R12, 11, "R12"},
+ {13, x86.REG_R13, 12, "R13"},
+ {14, x86.REGG, -1, "g"},
+ {15, x86.REG_R15, 13, "R15"},
+ {16, x86.REG_X0, -1, "X0"},
+ {17, x86.REG_X1, -1, "X1"},
+ {18, x86.REG_X2, -1, "X2"},
+ {19, x86.REG_X3, -1, "X3"},
+ {20, x86.REG_X4, -1, "X4"},
+ {21, x86.REG_X5, -1, "X5"},
+ {22, x86.REG_X6, -1, "X6"},
+ {23, x86.REG_X7, -1, "X7"},
+ {24, x86.REG_X8, -1, "X8"},
+ {25, x86.REG_X9, -1, "X9"},
+ {26, x86.REG_X10, -1, "X10"},
+ {27, x86.REG_X11, -1, "X11"},
+ {28, x86.REG_X12, -1, "X12"},
+ {29, x86.REG_X13, -1, "X13"},
+ {30, x86.REG_X14, -1, "X14"},
+ {31, x86.REG_X15, -1, "X15"},
+ {32, 0, -1, "SB"},
+}
+var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11}
+var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}
+var gpRegMaskAMD64 = regMask(49135)
+var fpRegMaskAMD64 = regMask(2147418112)
+var specialRegMaskAMD64 = regMask(2147483648)
+var framepointerRegAMD64 = int8(5)
+var linkRegAMD64 = int8(-1)
+var registersARM = [...]Register{
+ {0, arm.REG_R0, 0, "R0"},
+ {1, arm.REG_R1, 1, "R1"},
+ {2, arm.REG_R2, 2, "R2"},
+ {3, arm.REG_R3, 3, "R3"},
+ {4, arm.REG_R4, 4, "R4"},
+ {5, arm.REG_R5, 5, "R5"},
+ {6, arm.REG_R6, 6, "R6"},
+ {7, arm.REG_R7, 7, "R7"},
+ {8, arm.REG_R8, 8, "R8"},
+ {9, arm.REG_R9, 9, "R9"},
+ {10, arm.REGG, -1, "g"},
+ {11, arm.REG_R11, -1, "R11"},
+ {12, arm.REG_R12, 10, "R12"},
+ {13, arm.REGSP, -1, "SP"},
+ {14, arm.REG_R14, 11, "R14"},
+ {15, arm.REG_R15, -1, "R15"},
+ {16, arm.REG_F0, -1, "F0"},
+ {17, arm.REG_F1, -1, "F1"},
+ {18, arm.REG_F2, -1, "F2"},
+ {19, arm.REG_F3, -1, "F3"},
+ {20, arm.REG_F4, -1, "F4"},
+ {21, arm.REG_F5, -1, "F5"},
+ {22, arm.REG_F6, -1, "F6"},
+ {23, arm.REG_F7, -1, "F7"},
+ {24, arm.REG_F8, -1, "F8"},
+ {25, arm.REG_F9, -1, "F9"},
+ {26, arm.REG_F10, -1, "F10"},
+ {27, arm.REG_F11, -1, "F11"},
+ {28, arm.REG_F12, -1, "F12"},
+ {29, arm.REG_F13, -1, "F13"},
+ {30, arm.REG_F14, -1, "F14"},
+ {31, arm.REG_F15, -1, "F15"},
+ {32, 0, -1, "SB"},
+}
+var paramIntRegARM = []int8(nil)
+var paramFloatRegARM = []int8(nil)
+var gpRegMaskARM = regMask(21503)
+var fpRegMaskARM = regMask(4294901760)
+var specialRegMaskARM = regMask(0)
+var framepointerRegARM = int8(-1)
+var linkRegARM = int8(14)
+var registersARM64 = [...]Register{
+ {0, arm64.REG_R0, 0, "R0"},
+ {1, arm64.REG_R1, 1, "R1"},
+ {2, arm64.REG_R2, 2, "R2"},
+ {3, arm64.REG_R3, 3, "R3"},
+ {4, arm64.REG_R4, 4, "R4"},
+ {5, arm64.REG_R5, 5, "R5"},
+ {6, arm64.REG_R6, 6, "R6"},
+ {7, arm64.REG_R7, 7, "R7"},
+ {8, arm64.REG_R8, 8, "R8"},
+ {9, arm64.REG_R9, 9, "R9"},
+ {10, arm64.REG_R10, 10, "R10"},
+ {11, arm64.REG_R11, 11, "R11"},
+ {12, arm64.REG_R12, 12, "R12"},
+ {13, arm64.REG_R13, 13, "R13"},
+ {14, arm64.REG_R14, 14, "R14"},
+ {15, arm64.REG_R15, 15, "R15"},
+ {16, arm64.REG_R16, 16, "R16"},
+ {17, arm64.REG_R17, 17, "R17"},
+ {18, arm64.REG_R18, -1, "R18"},
+ {19, arm64.REG_R19, 18, "R19"},
+ {20, arm64.REG_R20, 19, "R20"},
+ {21, arm64.REG_R21, 20, "R21"},
+ {22, arm64.REG_R22, 21, "R22"},
+ {23, arm64.REG_R23, 22, "R23"},
+ {24, arm64.REG_R24, 23, "R24"},
+ {25, arm64.REG_R25, 24, "R25"},
+ {26, arm64.REG_R26, 25, "R26"},
+ {27, arm64.REGG, -1, "g"},
+ {28, arm64.REG_R29, -1, "R29"},
+ {29, arm64.REG_R30, 26, "R30"},
+ {30, arm64.REGSP, -1, "SP"},
+ {31, arm64.REG_F0, -1, "F0"},
+ {32, arm64.REG_F1, -1, "F1"},
+ {33, arm64.REG_F2, -1, "F2"},
+ {34, arm64.REG_F3, -1, "F3"},
+ {35, arm64.REG_F4, -1, "F4"},
+ {36, arm64.REG_F5, -1, "F5"},
+ {37, arm64.REG_F6, -1, "F6"},
+ {38, arm64.REG_F7, -1, "F7"},
+ {39, arm64.REG_F8, -1, "F8"},
+ {40, arm64.REG_F9, -1, "F9"},
+ {41, arm64.REG_F10, -1, "F10"},
+ {42, arm64.REG_F11, -1, "F11"},
+ {43, arm64.REG_F12, -1, "F12"},
+ {44, arm64.REG_F13, -1, "F13"},
+ {45, arm64.REG_F14, -1, "F14"},
+ {46, arm64.REG_F15, -1, "F15"},
+ {47, arm64.REG_F16, -1, "F16"},
+ {48, arm64.REG_F17, -1, "F17"},
+ {49, arm64.REG_F18, -1, "F18"},
+ {50, arm64.REG_F19, -1, "F19"},
+ {51, arm64.REG_F20, -1, "F20"},
+ {52, arm64.REG_F21, -1, "F21"},
+ {53, arm64.REG_F22, -1, "F22"},
+ {54, arm64.REG_F23, -1, "F23"},
+ {55, arm64.REG_F24, -1, "F24"},
+ {56, arm64.REG_F25, -1, "F25"},
+ {57, arm64.REG_F26, -1, "F26"},
+ {58, arm64.REG_F27, -1, "F27"},
+ {59, arm64.REG_F28, -1, "F28"},
+ {60, arm64.REG_F29, -1, "F29"},
+ {61, arm64.REG_F30, -1, "F30"},
+ {62, arm64.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46}
+var gpRegMaskARM64 = regMask(670826495)
+var fpRegMaskARM64 = regMask(9223372034707292160)
+var specialRegMaskARM64 = regMask(0)
+var framepointerRegARM64 = int8(-1)
+var linkRegARM64 = int8(29)
+var registersLOONG64 = [...]Register{
+ {0, loong64.REG_R0, -1, "R0"},
+ {1, loong64.REG_R1, -1, "R1"},
+ {2, loong64.REGSP, -1, "SP"},
+ {3, loong64.REG_R4, 0, "R4"},
+ {4, loong64.REG_R5, 1, "R5"},
+ {5, loong64.REG_R6, 2, "R6"},
+ {6, loong64.REG_R7, 3, "R7"},
+ {7, loong64.REG_R8, 4, "R8"},
+ {8, loong64.REG_R9, 5, "R9"},
+ {9, loong64.REG_R10, 6, "R10"},
+ {10, loong64.REG_R11, 7, "R11"},
+ {11, loong64.REG_R12, 8, "R12"},
+ {12, loong64.REG_R13, 9, "R13"},
+ {13, loong64.REG_R14, 10, "R14"},
+ {14, loong64.REG_R15, 11, "R15"},
+ {15, loong64.REG_R16, 12, "R16"},
+ {16, loong64.REG_R17, 13, "R17"},
+ {17, loong64.REG_R18, 14, "R18"},
+ {18, loong64.REG_R19, 15, "R19"},
+ {19, loong64.REG_R20, 16, "R20"},
+ {20, loong64.REG_R21, 17, "R21"},
+ {21, loong64.REGG, -1, "g"},
+ {22, loong64.REG_R23, 18, "R23"},
+ {23, loong64.REG_R24, 19, "R24"},
+ {24, loong64.REG_R25, 20, "R25"},
+ {25, loong64.REG_R26, 21, "R26"},
+ {26, loong64.REG_R27, 22, "R27"},
+ {27, loong64.REG_R28, 23, "R28"},
+ {28, loong64.REG_R29, 24, "R29"},
+ {29, loong64.REG_R31, 25, "R31"},
+ {30, loong64.REG_F0, -1, "F0"},
+ {31, loong64.REG_F1, -1, "F1"},
+ {32, loong64.REG_F2, -1, "F2"},
+ {33, loong64.REG_F3, -1, "F3"},
+ {34, loong64.REG_F4, -1, "F4"},
+ {35, loong64.REG_F5, -1, "F5"},
+ {36, loong64.REG_F6, -1, "F6"},
+ {37, loong64.REG_F7, -1, "F7"},
+ {38, loong64.REG_F8, -1, "F8"},
+ {39, loong64.REG_F9, -1, "F9"},
+ {40, loong64.REG_F10, -1, "F10"},
+ {41, loong64.REG_F11, -1, "F11"},
+ {42, loong64.REG_F12, -1, "F12"},
+ {43, loong64.REG_F13, -1, "F13"},
+ {44, loong64.REG_F14, -1, "F14"},
+ {45, loong64.REG_F15, -1, "F15"},
+ {46, loong64.REG_F16, -1, "F16"},
+ {47, loong64.REG_F17, -1, "F17"},
+ {48, loong64.REG_F18, -1, "F18"},
+ {49, loong64.REG_F19, -1, "F19"},
+ {50, loong64.REG_F20, -1, "F20"},
+ {51, loong64.REG_F21, -1, "F21"},
+ {52, loong64.REG_F22, -1, "F22"},
+ {53, loong64.REG_F23, -1, "F23"},
+ {54, loong64.REG_F24, -1, "F24"},
+ {55, loong64.REG_F25, -1, "F25"},
+ {56, loong64.REG_F26, -1, "F26"},
+ {57, loong64.REG_F27, -1, "F27"},
+ {58, loong64.REG_F28, -1, "F28"},
+ {59, loong64.REG_F29, -1, "F29"},
+ {60, loong64.REG_F30, -1, "F30"},
+ {61, loong64.REG_F31, -1, "F31"},
+ {62, 0, -1, "SB"},
+}
+var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}
+var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45}
+var gpRegMaskLOONG64 = regMask(1071644664)
+var fpRegMaskLOONG64 = regMask(4611686017353646080)
+var specialRegMaskLOONG64 = regMask(0)
+var framepointerRegLOONG64 = int8(-1)
+var linkRegLOONG64 = int8(1)
+var registersMIPS = [...]Register{
+ {0, mips.REG_R0, -1, "R0"},
+ {1, mips.REG_R1, 0, "R1"},
+ {2, mips.REG_R2, 1, "R2"},
+ {3, mips.REG_R3, 2, "R3"},
+ {4, mips.REG_R4, 3, "R4"},
+ {5, mips.REG_R5, 4, "R5"},
+ {6, mips.REG_R6, 5, "R6"},
+ {7, mips.REG_R7, 6, "R7"},
+ {8, mips.REG_R8, 7, "R8"},
+ {9, mips.REG_R9, 8, "R9"},
+ {10, mips.REG_R10, 9, "R10"},
+ {11, mips.REG_R11, 10, "R11"},
+ {12, mips.REG_R12, 11, "R12"},
+ {13, mips.REG_R13, 12, "R13"},
+ {14, mips.REG_R14, 13, "R14"},
+ {15, mips.REG_R15, 14, "R15"},
+ {16, mips.REG_R16, 15, "R16"},
+ {17, mips.REG_R17, 16, "R17"},
+ {18, mips.REG_R18, 17, "R18"},
+ {19, mips.REG_R19, 18, "R19"},
+ {20, mips.REG_R20, 19, "R20"},
+ {21, mips.REG_R21, 20, "R21"},
+ {22, mips.REG_R22, 21, "R22"},
+ {23, mips.REG_R24, 22, "R24"},
+ {24, mips.REG_R25, 23, "R25"},
+ {25, mips.REG_R28, 24, "R28"},
+ {26, mips.REGSP, -1, "SP"},
+ {27, mips.REGG, -1, "g"},
+ {28, mips.REG_R31, 25, "R31"},
+ {29, mips.REG_F0, -1, "F0"},
+ {30, mips.REG_F2, -1, "F2"},
+ {31, mips.REG_F4, -1, "F4"},
+ {32, mips.REG_F6, -1, "F6"},
+ {33, mips.REG_F8, -1, "F8"},
+ {34, mips.REG_F10, -1, "F10"},
+ {35, mips.REG_F12, -1, "F12"},
+ {36, mips.REG_F14, -1, "F14"},
+ {37, mips.REG_F16, -1, "F16"},
+ {38, mips.REG_F18, -1, "F18"},
+ {39, mips.REG_F20, -1, "F20"},
+ {40, mips.REG_F22, -1, "F22"},
+ {41, mips.REG_F24, -1, "F24"},
+ {42, mips.REG_F26, -1, "F26"},
+ {43, mips.REG_F28, -1, "F28"},
+ {44, mips.REG_F30, -1, "F30"},
+ {45, mips.REG_HI, -1, "HI"},
+ {46, mips.REG_LO, -1, "LO"},
+ {47, 0, -1, "SB"},
+}
+var paramIntRegMIPS = []int8(nil)
+var paramFloatRegMIPS = []int8(nil)
+var gpRegMaskMIPS = regMask(335544318)
+var fpRegMaskMIPS = regMask(35183835217920)
+var specialRegMaskMIPS = regMask(105553116266496)
+var framepointerRegMIPS = int8(-1)
+var linkRegMIPS = int8(28)
+var registersMIPS64 = [...]Register{
+ {0, mips.REG_R0, -1, "R0"},
+ {1, mips.REG_R1, 0, "R1"},
+ {2, mips.REG_R2, 1, "R2"},
+ {3, mips.REG_R3, 2, "R3"},
+ {4, mips.REG_R4, 3, "R4"},
+ {5, mips.REG_R5, 4, "R5"},
+ {6, mips.REG_R6, 5, "R6"},
+ {7, mips.REG_R7, 6, "R7"},
+ {8, mips.REG_R8, 7, "R8"},
+ {9, mips.REG_R9, 8, "R9"},
+ {10, mips.REG_R10, 9, "R10"},
+ {11, mips.REG_R11, 10, "R11"},
+ {12, mips.REG_R12, 11, "R12"},
+ {13, mips.REG_R13, 12, "R13"},
+ {14, mips.REG_R14, 13, "R14"},
+ {15, mips.REG_R15, 14, "R15"},
+ {16, mips.REG_R16, 15, "R16"},
+ {17, mips.REG_R17, 16, "R17"},
+ {18, mips.REG_R18, 17, "R18"},
+ {19, mips.REG_R19, 18, "R19"},
+ {20, mips.REG_R20, 19, "R20"},
+ {21, mips.REG_R21, 20, "R21"},
+ {22, mips.REG_R22, 21, "R22"},
+ {23, mips.REG_R24, 22, "R24"},
+ {24, mips.REG_R25, 23, "R25"},
+ {25, mips.REGSP, -1, "SP"},
+ {26, mips.REGG, -1, "g"},
+ {27, mips.REG_R31, 24, "R31"},
+ {28, mips.REG_F0, -1, "F0"},
+ {29, mips.REG_F1, -1, "F1"},
+ {30, mips.REG_F2, -1, "F2"},
+ {31, mips.REG_F3, -1, "F3"},
+ {32, mips.REG_F4, -1, "F4"},
+ {33, mips.REG_F5, -1, "F5"},
+ {34, mips.REG_F6, -1, "F6"},
+ {35, mips.REG_F7, -1, "F7"},
+ {36, mips.REG_F8, -1, "F8"},
+ {37, mips.REG_F9, -1, "F9"},
+ {38, mips.REG_F10, -1, "F10"},
+ {39, mips.REG_F11, -1, "F11"},
+ {40, mips.REG_F12, -1, "F12"},
+ {41, mips.REG_F13, -1, "F13"},
+ {42, mips.REG_F14, -1, "F14"},
+ {43, mips.REG_F15, -1, "F15"},
+ {44, mips.REG_F16, -1, "F16"},
+ {45, mips.REG_F17, -1, "F17"},
+ {46, mips.REG_F18, -1, "F18"},
+ {47, mips.REG_F19, -1, "F19"},
+ {48, mips.REG_F20, -1, "F20"},
+ {49, mips.REG_F21, -1, "F21"},
+ {50, mips.REG_F22, -1, "F22"},
+ {51, mips.REG_F23, -1, "F23"},
+ {52, mips.REG_F24, -1, "F24"},
+ {53, mips.REG_F25, -1, "F25"},
+ {54, mips.REG_F26, -1, "F26"},
+ {55, mips.REG_F27, -1, "F27"},
+ {56, mips.REG_F28, -1, "F28"},
+ {57, mips.REG_F29, -1, "F29"},
+ {58, mips.REG_F30, -1, "F30"},
+ {59, mips.REG_F31, -1, "F31"},
+ {60, mips.REG_HI, -1, "HI"},
+ {61, mips.REG_LO, -1, "LO"},
+ {62, 0, -1, "SB"},
+}
+var paramIntRegMIPS64 = []int8(nil)
+var paramFloatRegMIPS64 = []int8(nil)
+var gpRegMaskMIPS64 = regMask(167772158)
+var fpRegMaskMIPS64 = regMask(1152921504338411520)
+var specialRegMaskMIPS64 = regMask(3458764513820540928)
+var framepointerRegMIPS64 = int8(-1)
+var linkRegMIPS64 = int8(27)
+var registersPPC64 = [...]Register{
+ {0, ppc64.REG_R0, -1, "R0"},
+ {1, ppc64.REGSP, -1, "SP"},
+ {2, 0, -1, "SB"},
+ {3, ppc64.REG_R3, 0, "R3"},
+ {4, ppc64.REG_R4, 1, "R4"},
+ {5, ppc64.REG_R5, 2, "R5"},
+ {6, ppc64.REG_R6, 3, "R6"},
+ {7, ppc64.REG_R7, 4, "R7"},
+ {8, ppc64.REG_R8, 5, "R8"},
+ {9, ppc64.REG_R9, 6, "R9"},
+ {10, ppc64.REG_R10, 7, "R10"},
+ {11, ppc64.REG_R11, 8, "R11"},
+ {12, ppc64.REG_R12, 9, "R12"},
+ {13, ppc64.REG_R13, -1, "R13"},
+ {14, ppc64.REG_R14, 10, "R14"},
+ {15, ppc64.REG_R15, 11, "R15"},
+ {16, ppc64.REG_R16, 12, "R16"},
+ {17, ppc64.REG_R17, 13, "R17"},
+ {18, ppc64.REG_R18, 14, "R18"},
+ {19, ppc64.REG_R19, 15, "R19"},
+ {20, ppc64.REG_R20, 16, "R20"},
+ {21, ppc64.REG_R21, 17, "R21"},
+ {22, ppc64.REG_R22, 18, "R22"},
+ {23, ppc64.REG_R23, 19, "R23"},
+ {24, ppc64.REG_R24, 20, "R24"},
+ {25, ppc64.REG_R25, 21, "R25"},
+ {26, ppc64.REG_R26, 22, "R26"},
+ {27, ppc64.REG_R27, 23, "R27"},
+ {28, ppc64.REG_R28, 24, "R28"},
+ {29, ppc64.REG_R29, 25, "R29"},
+ {30, ppc64.REGG, -1, "g"},
+ {31, ppc64.REG_R31, -1, "R31"},
+ {32, ppc64.REG_F0, -1, "F0"},
+ {33, ppc64.REG_F1, -1, "F1"},
+ {34, ppc64.REG_F2, -1, "F2"},
+ {35, ppc64.REG_F3, -1, "F3"},
+ {36, ppc64.REG_F4, -1, "F4"},
+ {37, ppc64.REG_F5, -1, "F5"},
+ {38, ppc64.REG_F6, -1, "F6"},
+ {39, ppc64.REG_F7, -1, "F7"},
+ {40, ppc64.REG_F8, -1, "F8"},
+ {41, ppc64.REG_F9, -1, "F9"},
+ {42, ppc64.REG_F10, -1, "F10"},
+ {43, ppc64.REG_F11, -1, "F11"},
+ {44, ppc64.REG_F12, -1, "F12"},
+ {45, ppc64.REG_F13, -1, "F13"},
+ {46, ppc64.REG_F14, -1, "F14"},
+ {47, ppc64.REG_F15, -1, "F15"},
+ {48, ppc64.REG_F16, -1, "F16"},
+ {49, ppc64.REG_F17, -1, "F17"},
+ {50, ppc64.REG_F18, -1, "F18"},
+ {51, ppc64.REG_F19, -1, "F19"},
+ {52, ppc64.REG_F20, -1, "F20"},
+ {53, ppc64.REG_F21, -1, "F21"},
+ {54, ppc64.REG_F22, -1, "F22"},
+ {55, ppc64.REG_F23, -1, "F23"},
+ {56, ppc64.REG_F24, -1, "F24"},
+ {57, ppc64.REG_F25, -1, "F25"},
+ {58, ppc64.REG_F26, -1, "F26"},
+ {59, ppc64.REG_F27, -1, "F27"},
+ {60, ppc64.REG_F28, -1, "F28"},
+ {61, ppc64.REG_F29, -1, "F29"},
+ {62, ppc64.REG_F30, -1, "F30"},
+ {63, ppc64.REG_XER, -1, "XER"},
+}
+var paramIntRegPPC64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 14, 15, 16, 17}
+var paramFloatRegPPC64 = []int8{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44}
+var gpRegMaskPPC64 = regMask(1073733624)
+var fpRegMaskPPC64 = regMask(9223372032559808512)
+var specialRegMaskPPC64 = regMask(9223372036854775808)
+var framepointerRegPPC64 = int8(-1)
+var linkRegPPC64 = int8(-1)
+var registersRISCV64 = [...]Register{
+ {0, riscv.REG_X0, -1, "X0"},
+ {1, riscv.REGSP, -1, "SP"},
+ {2, riscv.REG_X3, -1, "X3"},
+ {3, riscv.REG_X4, -1, "X4"},
+ {4, riscv.REG_X5, 0, "X5"},
+ {5, riscv.REG_X6, 1, "X6"},
+ {6, riscv.REG_X7, 2, "X7"},
+ {7, riscv.REG_X8, 3, "X8"},
+ {8, riscv.REG_X9, 4, "X9"},
+ {9, riscv.REG_X10, 5, "X10"},
+ {10, riscv.REG_X11, 6, "X11"},
+ {11, riscv.REG_X12, 7, "X12"},
+ {12, riscv.REG_X13, 8, "X13"},
+ {13, riscv.REG_X14, 9, "X14"},
+ {14, riscv.REG_X15, 10, "X15"},
+ {15, riscv.REG_X16, 11, "X16"},
+ {16, riscv.REG_X17, 12, "X17"},
+ {17, riscv.REG_X18, 13, "X18"},
+ {18, riscv.REG_X19, 14, "X19"},
+ {19, riscv.REG_X20, 15, "X20"},
+ {20, riscv.REG_X21, 16, "X21"},
+ {21, riscv.REG_X22, 17, "X22"},
+ {22, riscv.REG_X23, 18, "X23"},
+ {23, riscv.REG_X24, 19, "X24"},
+ {24, riscv.REG_X25, 20, "X25"},
+ {25, riscv.REG_X26, 21, "X26"},
+ {26, riscv.REGG, -1, "g"},
+ {27, riscv.REG_X28, 22, "X28"},
+ {28, riscv.REG_X29, 23, "X29"},
+ {29, riscv.REG_X30, 24, "X30"},
+ {30, riscv.REG_X31, -1, "X31"},
+ {31, riscv.REG_F0, -1, "F0"},
+ {32, riscv.REG_F1, -1, "F1"},
+ {33, riscv.REG_F2, -1, "F2"},
+ {34, riscv.REG_F3, -1, "F3"},
+ {35, riscv.REG_F4, -1, "F4"},
+ {36, riscv.REG_F5, -1, "F5"},
+ {37, riscv.REG_F6, -1, "F6"},
+ {38, riscv.REG_F7, -1, "F7"},
+ {39, riscv.REG_F8, -1, "F8"},
+ {40, riscv.REG_F9, -1, "F9"},
+ {41, riscv.REG_F10, -1, "F10"},
+ {42, riscv.REG_F11, -1, "F11"},
+ {43, riscv.REG_F12, -1, "F12"},
+ {44, riscv.REG_F13, -1, "F13"},
+ {45, riscv.REG_F14, -1, "F14"},
+ {46, riscv.REG_F15, -1, "F15"},
+ {47, riscv.REG_F16, -1, "F16"},
+ {48, riscv.REG_F17, -1, "F17"},
+ {49, riscv.REG_F18, -1, "F18"},
+ {50, riscv.REG_F19, -1, "F19"},
+ {51, riscv.REG_F20, -1, "F20"},
+ {52, riscv.REG_F21, -1, "F21"},
+ {53, riscv.REG_F22, -1, "F22"},
+ {54, riscv.REG_F23, -1, "F23"},
+ {55, riscv.REG_F24, -1, "F24"},
+ {56, riscv.REG_F25, -1, "F25"},
+ {57, riscv.REG_F26, -1, "F26"},
+ {58, riscv.REG_F27, -1, "F27"},
+ {59, riscv.REG_F28, -1, "F28"},
+ {60, riscv.REG_F29, -1, "F29"},
+ {61, riscv.REG_F30, -1, "F30"},
+ {62, riscv.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var paramIntRegRISCV64 = []int8{9, 10, 11, 12, 13, 14, 15, 16, 7, 8, 17, 18, 19, 20, 21, 22}
+var paramFloatRegRISCV64 = []int8{41, 42, 43, 44, 45, 46, 47, 48, 39, 40, 49, 50, 51, 52, 53, 54}
+var gpRegMaskRISCV64 = regMask(1006632944)
+var fpRegMaskRISCV64 = regMask(9223372034707292160)
+var specialRegMaskRISCV64 = regMask(0)
+var framepointerRegRISCV64 = int8(-1)
+var linkRegRISCV64 = int8(0)
+var registersS390X = [...]Register{
+ {0, s390x.REG_R0, 0, "R0"},
+ {1, s390x.REG_R1, 1, "R1"},
+ {2, s390x.REG_R2, 2, "R2"},
+ {3, s390x.REG_R3, 3, "R3"},
+ {4, s390x.REG_R4, 4, "R4"},
+ {5, s390x.REG_R5, 5, "R5"},
+ {6, s390x.REG_R6, 6, "R6"},
+ {7, s390x.REG_R7, 7, "R7"},
+ {8, s390x.REG_R8, 8, "R8"},
+ {9, s390x.REG_R9, 9, "R9"},
+ {10, s390x.REG_R10, -1, "R10"},
+ {11, s390x.REG_R11, 10, "R11"},
+ {12, s390x.REG_R12, 11, "R12"},
+ {13, s390x.REGG, -1, "g"},
+ {14, s390x.REG_R14, 12, "R14"},
+ {15, s390x.REGSP, -1, "SP"},
+ {16, s390x.REG_F0, -1, "F0"},
+ {17, s390x.REG_F1, -1, "F1"},
+ {18, s390x.REG_F2, -1, "F2"},
+ {19, s390x.REG_F3, -1, "F3"},
+ {20, s390x.REG_F4, -1, "F4"},
+ {21, s390x.REG_F5, -1, "F5"},
+ {22, s390x.REG_F6, -1, "F6"},
+ {23, s390x.REG_F7, -1, "F7"},
+ {24, s390x.REG_F8, -1, "F8"},
+ {25, s390x.REG_F9, -1, "F9"},
+ {26, s390x.REG_F10, -1, "F10"},
+ {27, s390x.REG_F11, -1, "F11"},
+ {28, s390x.REG_F12, -1, "F12"},
+ {29, s390x.REG_F13, -1, "F13"},
+ {30, s390x.REG_F14, -1, "F14"},
+ {31, s390x.REG_F15, -1, "F15"},
+ {32, 0, -1, "SB"},
+}
+var paramIntRegS390X = []int8(nil)
+var paramFloatRegS390X = []int8(nil)
+var gpRegMaskS390X = regMask(23551)
+var fpRegMaskS390X = regMask(4294901760)
+var specialRegMaskS390X = regMask(0)
+var framepointerRegS390X = int8(-1)
+var linkRegS390X = int8(14)
+var registersWasm = [...]Register{
+ {0, wasm.REG_R0, 0, "R0"},
+ {1, wasm.REG_R1, 1, "R1"},
+ {2, wasm.REG_R2, 2, "R2"},
+ {3, wasm.REG_R3, 3, "R3"},
+ {4, wasm.REG_R4, 4, "R4"},
+ {5, wasm.REG_R5, 5, "R5"},
+ {6, wasm.REG_R6, 6, "R6"},
+ {7, wasm.REG_R7, 7, "R7"},
+ {8, wasm.REG_R8, 8, "R8"},
+ {9, wasm.REG_R9, 9, "R9"},
+ {10, wasm.REG_R10, 10, "R10"},
+ {11, wasm.REG_R11, 11, "R11"},
+ {12, wasm.REG_R12, 12, "R12"},
+ {13, wasm.REG_R13, 13, "R13"},
+ {14, wasm.REG_R14, 14, "R14"},
+ {15, wasm.REG_R15, 15, "R15"},
+ {16, wasm.REG_F0, -1, "F0"},
+ {17, wasm.REG_F1, -1, "F1"},
+ {18, wasm.REG_F2, -1, "F2"},
+ {19, wasm.REG_F3, -1, "F3"},
+ {20, wasm.REG_F4, -1, "F4"},
+ {21, wasm.REG_F5, -1, "F5"},
+ {22, wasm.REG_F6, -1, "F6"},
+ {23, wasm.REG_F7, -1, "F7"},
+ {24, wasm.REG_F8, -1, "F8"},
+ {25, wasm.REG_F9, -1, "F9"},
+ {26, wasm.REG_F10, -1, "F10"},
+ {27, wasm.REG_F11, -1, "F11"},
+ {28, wasm.REG_F12, -1, "F12"},
+ {29, wasm.REG_F13, -1, "F13"},
+ {30, wasm.REG_F14, -1, "F14"},
+ {31, wasm.REG_F15, -1, "F15"},
+ {32, wasm.REG_F16, -1, "F16"},
+ {33, wasm.REG_F17, -1, "F17"},
+ {34, wasm.REG_F18, -1, "F18"},
+ {35, wasm.REG_F19, -1, "F19"},
+ {36, wasm.REG_F20, -1, "F20"},
+ {37, wasm.REG_F21, -1, "F21"},
+ {38, wasm.REG_F22, -1, "F22"},
+ {39, wasm.REG_F23, -1, "F23"},
+ {40, wasm.REG_F24, -1, "F24"},
+ {41, wasm.REG_F25, -1, "F25"},
+ {42, wasm.REG_F26, -1, "F26"},
+ {43, wasm.REG_F27, -1, "F27"},
+ {44, wasm.REG_F28, -1, "F28"},
+ {45, wasm.REG_F29, -1, "F29"},
+ {46, wasm.REG_F30, -1, "F30"},
+ {47, wasm.REG_F31, -1, "F31"},
+ {48, wasm.REGSP, -1, "SP"},
+ {49, wasm.REGG, -1, "g"},
+ {50, 0, -1, "SB"},
+}
+var paramIntRegWasm = []int8(nil)
+var paramFloatRegWasm = []int8(nil)
+var gpRegMaskWasm = regMask(65535)
+var fpRegMaskWasm = regMask(281474976645120)
+var fp32RegMaskWasm = regMask(4294901760)
+var fp64RegMaskWasm = regMask(281470681743360)
+var specialRegMaskWasm = regMask(0)
+var framepointerRegWasm = int8(-1)
+var linkRegWasm = int8(-1)
diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go
new file mode 100644
index 0000000..0f15c3d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/opt.go
@@ -0,0 +1,10 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// machine-independent optimization.
+func opt(f *Func) {
+ applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues)
+}
diff --git a/src/cmd/compile/internal/ssa/passbm_test.go b/src/cmd/compile/internal/ssa/passbm_test.go
new file mode 100644
index 0000000..3fd3eb5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/passbm_test.go
@@ -0,0 +1,101 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "testing"
+)
+
+const (
+ blockCount = 1000
+ passCount = 15000
+)
+
+type passFunc func(*Func)
+
+func BenchmarkDSEPass(b *testing.B) { benchFnPass(b, dse, blockCount, genFunction) }
+func BenchmarkDSEPassBlock(b *testing.B) { benchFnBlock(b, dse, genFunction) }
+func BenchmarkCSEPass(b *testing.B) { benchFnPass(b, cse, blockCount, genFunction) }
+func BenchmarkCSEPassBlock(b *testing.B) { benchFnBlock(b, cse, genFunction) }
+func BenchmarkDeadcodePass(b *testing.B) { benchFnPass(b, deadcode, blockCount, genFunction) }
+func BenchmarkDeadcodePassBlock(b *testing.B) { benchFnBlock(b, deadcode, genFunction) }
+
+func multi(f *Func) {
+ cse(f)
+ dse(f)
+ deadcode(f)
+}
+func BenchmarkMultiPass(b *testing.B) { benchFnPass(b, multi, blockCount, genFunction) }
+func BenchmarkMultiPassBlock(b *testing.B) { benchFnBlock(b, multi, genFunction) }
+
+// benchFnPass runs passFunc b.N times across a single function.
+func benchFnPass(b *testing.B, fn passFunc, size int, bg blockGen) {
+ b.ReportAllocs()
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(size)...)
+ CheckFunc(fun.f)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fn(fun.f)
+ b.StopTimer()
+ CheckFunc(fun.f)
+ b.StartTimer()
+ }
+}
+
+// benchFnPass runs passFunc across a function with b.N blocks.
+func benchFnBlock(b *testing.B, fn passFunc, bg blockGen) {
+ b.ReportAllocs()
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(b.N)...)
+ CheckFunc(fun.f)
+ b.ResetTimer()
+ for i := 0; i < passCount; i++ {
+ fn(fun.f)
+ }
+ b.StopTimer()
+}
+
+func genFunction(size int) []bloc {
+ var blocs []bloc
+ elemType := types.Types[types.TINT64]
+ ptrType := elemType.PtrTo()
+
+ valn := func(s string, m, n int) string { return fmt.Sprintf("%s%d-%d", s, m, n) }
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu(valn("store", 0, 4), OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.Types[types.TUINTPTR], 0, nil),
+ Goto(blockn(1)),
+ ),
+ )
+ for i := 1; i < size+1; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu(valn("v", i, 0), OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Valu(valn("addr", i, 1), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("addr", i, 2), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("addr", i, 3), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("zero", i, 1), OpZero, types.TypeMem, 8, elemType, valn("addr", i, 3),
+ valn("store", i-1, 4)),
+ Valu(valn("store", i, 1), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
+ valn("v", i, 0), valn("zero", i, 1)),
+ Valu(valn("store", i, 2), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 2),
+ valn("v", i, 0), valn("store", i, 1)),
+ Valu(valn("store", i, 3), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
+ valn("v", i, 0), valn("store", i, 2)),
+ Valu(valn("store", i, 4), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 3),
+ valn("v", i, 0), valn("store", i, 3)),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size+1), Goto("exit")),
+ Bloc("exit", Exit("store0-4")),
+ )
+
+ return blocs
+}
diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go
new file mode 100644
index 0000000..4fc9423
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/phielim.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phielim eliminates redundant phi values from f.
+// A phi is redundant if its arguments are all equal. For
+// purposes of counting, ignore the phi itself. Both of
+// these phis are redundant:
+//
+// v = phi(x,x,x)
+// v = phi(x,v,x,v)
+//
+// We repeat this process to also catch situations like:
+//
+// v = phi(x, phi(x, x), phi(x, v))
+//
+// TODO: Can we also simplify cases like:
+//
+// v = phi(v, w, x)
+// w = phi(v, w, x)
+//
+// and would that be useful?
+func phielim(f *Func) {
+ for {
+ change := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ copyelimValue(v)
+ change = phielimValue(v) || change
+ }
+ }
+ if !change {
+ break
+ }
+ }
+}
+
+// phielimValue tries to convert the phi v to a copy.
+func phielimValue(v *Value) bool {
+ if v.Op != OpPhi {
+ return false
+ }
+
+ // If there are two distinct args of v which
+ // are not v itself, then the phi must remain.
+ // Otherwise, we can replace it with a copy.
+ var w *Value
+ for _, x := range v.Args {
+ if x == v {
+ continue
+ }
+ if x == w {
+ continue
+ }
+ if w != nil {
+ return false
+ }
+ w = x
+ }
+
+ if w == nil {
+ // v references only itself. It must be in
+ // a dead code loop. Don't bother modifying it.
+ return false
+ }
+ v.Op = OpCopy
+ v.SetArgs1(w)
+ f := v.Block.Func
+ if f.pass.debug > 0 {
+ f.Warnl(v.Pos, "eliminated phi")
+ }
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go
new file mode 100644
index 0000000..037845e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/phiopt.go
@@ -0,0 +1,325 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phiopt eliminates boolean Phis based on the previous if.
+//
+// Main use case is to transform:
+//
+// x := false
+// if b {
+// x = true
+// }
+//
+// into x = b.
+//
+// In SSA code this appears as
+//
+// b0
+// If b -> b1 b2
+// b1
+// Plain -> b2
+// b2
+// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+//
+// In this case we can replace x with a copy of b.
+func phiopt(f *Func) {
+ sdom := f.Sdom()
+ for _, b := range f.Blocks {
+ if len(b.Preds) != 2 || len(b.Values) == 0 {
+ // TODO: handle more than 2 predecessors, e.g. a || b || c.
+ continue
+ }
+
+ pb0, b0 := b, b.Preds[0].b
+ for len(b0.Succs) == 1 && len(b0.Preds) == 1 {
+ pb0, b0 = b0, b0.Preds[0].b
+ }
+ if b0.Kind != BlockIf {
+ continue
+ }
+ pb1, b1 := b, b.Preds[1].b
+ for len(b1.Succs) == 1 && len(b1.Preds) == 1 {
+ pb1, b1 = b1, b1.Preds[0].b
+ }
+ if b1 != b0 {
+ continue
+ }
+ // b0 is the if block giving the boolean value.
+ // reverse is the predecessor from which the truth value comes.
+ var reverse int
+ if b0.Succs[0].b == pb0 && b0.Succs[1].b == pb1 {
+ reverse = 0
+ } else if b0.Succs[0].b == pb1 && b0.Succs[1].b == pb0 {
+ reverse = 1
+ } else {
+ b.Fatalf("invalid predecessors\n")
+ }
+
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+
+ // Look for conversions from bool to 0/1.
+ if v.Type.IsInteger() {
+ phioptint(v, b0, reverse)
+ }
+
+ if !v.Type.IsBoolean() {
+ continue
+ }
+
+ // Replaces
+ // if a { x = true } else { x = false } with x = a
+ // and
+ // if a { x = false } else { x = true } with x = !a
+ if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool {
+ if v.Args[reverse].AuxInt != v.Args[1-reverse].AuxInt {
+ ops := [2]Op{OpNot, OpCopy}
+ v.reset(ops[v.Args[reverse].AuxInt])
+ v.AddArg(b0.Controls[0])
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+
+ // Replaces
+ // if a { x = true } else { x = value } with x = a || value.
+ // Requires that value dominates x, meaning that regardless of a,
+ // value is always computed. This guarantees that the side effects
+ // of value are not seen if a is false.
+ if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 {
+ if tmp := v.Args[1-reverse]; sdom.IsAncestorEq(tmp.Block, b) {
+ v.reset(OpOrB)
+ v.SetArgs2(b0.Controls[0], tmp)
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+
+ // Replaces
+ // if a { x = value } else { x = false } with x = a && value.
+ // Requires that value dominates x, meaning that regardless of a,
+ // value is always computed. This guarantees that the side effects
+ // of value are not seen if a is false.
+ if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 {
+ if tmp := v.Args[reverse]; sdom.IsAncestorEq(tmp.Block, b) {
+ v.reset(OpAndB)
+ v.SetArgs2(b0.Controls[0], tmp)
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+ }
+ }
+ // strengthen phi optimization.
+ // Main use case is to transform:
+ // x := false
+ // if c {
+ // x = true
+ // ...
+ // }
+ // into
+ // x := c
+ // if x { ... }
+ //
+ // For example, in SSA code a case appears as
+ // b0
+ // If c -> b, sb0
+ // sb0
+ // If d -> sd0, sd1
+ // sd1
+ // ...
+ // sd0
+ // Plain -> b
+ // b
+ // x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+ //
+ // In this case we can also replace x with a copy of c.
+ //
+ // The optimization idea:
+ // 1. block b has a phi value x, x = OpPhi (ConstBool [true]) (ConstBool [false]),
+ // and len(b.Preds) is equal to 2.
+ // 2. find the common dominator(b0) of the predecessors(pb0, pb1) of block b, and the
+ // dominator(b0) is a If block.
+ // Special case: one of the predecessors(pb0 or pb1) is the dominator(b0).
+ // 3. the successors(sb0, sb1) of the dominator need to dominate the predecessors(pb0, pb1)
+ // of block b respectively.
+ // 4. replace this boolean Phi based on dominator block.
+ //
+ // b0(pb0) b0(pb1) b0
+ // | \ / | / \
+ // | sb1 sb0 | sb0 sb1
+ // | ... ... | ... ...
+ // | pb1 pb0 | pb0 pb1
+ // | / \ | \ /
+ // b b b
+ //
+ var lca *lcaRange
+ for _, b := range f.Blocks {
+ if len(b.Preds) != 2 || len(b.Values) == 0 {
+ // TODO: handle more than 2 predecessors, e.g. a || b || c.
+ continue
+ }
+
+ for _, v := range b.Values {
+ // find a phi value v = OpPhi (ConstBool [true]) (ConstBool [false]).
+ // TODO: v = OpPhi (ConstBool [true]) (Arg <bool> {value})
+ if v.Op != OpPhi {
+ continue
+ }
+ if v.Args[0].Op != OpConstBool || v.Args[1].Op != OpConstBool {
+ continue
+ }
+ if v.Args[0].AuxInt == v.Args[1].AuxInt {
+ continue
+ }
+
+ pb0 := b.Preds[0].b
+ pb1 := b.Preds[1].b
+ if pb0.Kind == BlockIf && pb0 == sdom.Parent(b) {
+ // special case: pb0 is the dominator block b0.
+ // b0(pb0)
+ // | \
+ // | sb1
+ // | ...
+ // | pb1
+ // | /
+ // b
+ // if another successor sb1 of b0(pb0) dominates pb1, do replace.
+ ei := b.Preds[0].i
+ sb1 := pb0.Succs[1-ei].b
+ if sdom.IsAncestorEq(sb1, pb1) {
+ convertPhi(pb0, v, ei)
+ break
+ }
+ } else if pb1.Kind == BlockIf && pb1 == sdom.Parent(b) {
+ // special case: pb1 is the dominator block b0.
+ // b0(pb1)
+ // / |
+ // sb0 |
+ // ... |
+ // pb0 |
+ // \ |
+ // b
+ // if another successor sb0 of b0(pb0) dominates pb0, do replace.
+ ei := b.Preds[1].i
+ sb0 := pb1.Succs[1-ei].b
+ if sdom.IsAncestorEq(sb0, pb0) {
+ convertPhi(pb1, v, 1-ei)
+ break
+ }
+ } else {
+ // b0
+ // / \
+ // sb0 sb1
+ // ... ...
+ // pb0 pb1
+ // \ /
+ // b
+ //
+ // Build data structure for fast least-common-ancestor queries.
+ if lca == nil {
+ lca = makeLCArange(f)
+ }
+ b0 := lca.find(pb0, pb1)
+ if b0.Kind != BlockIf {
+ break
+ }
+ sb0 := b0.Succs[0].b
+ sb1 := b0.Succs[1].b
+ var reverse int
+ if sdom.IsAncestorEq(sb0, pb0) && sdom.IsAncestorEq(sb1, pb1) {
+ reverse = 0
+ } else if sdom.IsAncestorEq(sb1, pb0) && sdom.IsAncestorEq(sb0, pb1) {
+ reverse = 1
+ } else {
+ break
+ }
+ if len(sb0.Preds) != 1 || len(sb1.Preds) != 1 {
+ // we can not replace phi value x in the following case.
+ // if gp == nil || sp < lo { x = true}
+ // if a || b { x = true }
+ // so the if statement can only have one condition.
+ break
+ }
+ convertPhi(b0, v, reverse)
+ }
+ }
+ }
+}
+
+func phioptint(v *Value, b0 *Block, reverse int) {
+ a0 := v.Args[0]
+ a1 := v.Args[1]
+ if a0.Op != a1.Op {
+ return
+ }
+
+ switch a0.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ default:
+ return
+ }
+
+ negate := false
+ switch {
+ case a0.AuxInt == 0 && a1.AuxInt == 1:
+ negate = true
+ case a0.AuxInt == 1 && a1.AuxInt == 0:
+ default:
+ return
+ }
+
+ if reverse == 1 {
+ negate = !negate
+ }
+
+ a := b0.Controls[0]
+ if negate {
+ a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
+ }
+ v.AddArg(a)
+
+ cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, v.Block.Func.Config.Types.UInt8, a)
+ switch v.Type.Size() {
+ case 1:
+ v.reset(OpCopy)
+ case 2:
+ v.reset(OpZeroExt8to16)
+ case 4:
+ v.reset(OpZeroExt8to32)
+ case 8:
+ v.reset(OpZeroExt8to64)
+ default:
+ v.Fatalf("bad int size %d", v.Type.Size())
+ }
+ v.AddArg(cvt)
+
+ f := b0.Func
+ if f.pass.debug > 0 {
+ f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.Size()*8)
+ }
+}
+
+// b is the If block giving the boolean value.
+// v is the phi value v = (OpPhi (ConstBool [true]) (ConstBool [false])).
+// reverse is the predecessor from which the truth value comes.
+func convertPhi(b *Block, v *Value, reverse int) {
+ f := b.Func
+ ops := [2]Op{OpNot, OpCopy}
+ v.reset(ops[v.Args[reverse].AuxInt])
+ v.AddArg(b.Controls[0])
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
new file mode 100644
index 0000000..7b64843
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -0,0 +1,1358 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "os"
+)
+
+// If true, check poset integrity after every mutation
+var debugPoset = false
+
+const uintSize = 32 << (^uint(0) >> 63) // 32 or 64
+
+// bitset is a bit array for dense indexes.
+type bitset []uint
+
+func newBitset(n int) bitset {
+ return make(bitset, (n+uintSize-1)/uintSize)
+}
+
+func (bs bitset) Reset() {
+ for i := range bs {
+ bs[i] = 0
+ }
+}
+
+func (bs bitset) Set(idx uint32) {
+ bs[idx/uintSize] |= 1 << (idx % uintSize)
+}
+
+func (bs bitset) Clear(idx uint32) {
+ bs[idx/uintSize] &^= 1 << (idx % uintSize)
+}
+
+func (bs bitset) Test(idx uint32) bool {
+ return bs[idx/uintSize]&(1<<(idx%uintSize)) != 0
+}
+
+type undoType uint8
+
+const (
+ undoInvalid undoType = iota
+ undoCheckpoint // a checkpoint to group undo passes
+ undoSetChl // change back left child of undo.idx to undo.edge
+ undoSetChr // change back right child of undo.idx to undo.edge
+ undoNonEqual // forget that SSA value undo.ID is non-equal to undo.idx (another ID)
+ undoNewNode // remove new node created for SSA value undo.ID
+ undoNewConstant // remove the constant node idx from the constants map
+ undoAliasNode // unalias SSA value undo.ID so that it points back to node index undo.idx
+ undoNewRoot // remove node undo.idx from root list
+ undoChangeRoot // remove node undo.idx from root list, and put back undo.edge.Target instead
+ undoMergeRoot // remove node undo.idx from root list, and put back its children instead
+)
+
+// posetUndo represents an undo pass to be performed.
+// It's a union of fields that can be used to store information,
+// and typ is the discriminant, that specifies which kind
+// of operation must be performed. Not all fields are always used.
+type posetUndo struct {
+ typ undoType
+ idx uint32
+ ID ID
+ edge posetEdge
+}
+
+const (
+ // Make poset handle constants as unsigned numbers.
+ posetFlagUnsigned = 1 << iota
+)
+
+// A poset edge. The zero value is the null/empty edge.
+// Packs target node index (31 bits) and strict flag (1 bit).
+type posetEdge uint32
+
+func newedge(t uint32, strict bool) posetEdge {
+ s := uint32(0)
+ if strict {
+ s = 1
+ }
+ return posetEdge(t<<1 | s)
+}
+func (e posetEdge) Target() uint32 { return uint32(e) >> 1 }
+func (e posetEdge) Strict() bool { return uint32(e)&1 != 0 }
+func (e posetEdge) String() string {
+ s := fmt.Sprint(e.Target())
+ if e.Strict() {
+ s += "*"
+ }
+ return s
+}
+
+// posetNode is a node of a DAG within the poset.
+type posetNode struct {
+ l, r posetEdge
+}
+
+// poset is a union-find data structure that can represent a partially ordered set
+// of SSA values. Given a binary relation that creates a partial order (eg: '<'),
+// clients can record relations between SSA values using SetOrder, and later
+// check relations (in the transitive closure) with Ordered. For instance,
+// if SetOrder is called to record that A<B and B<C, Ordered will later confirm
+// that A<C.
+//
+// It is possible to record equality relations between SSA values with SetEqual and check
+// equality with Equal. Equality propagates into the transitive closure for the partial
+// order so that if we know that A<B<C and later learn that A==D, Ordered will return
+// true for D<C.
+//
+// It is also possible to record inequality relations between nodes with SetNonEqual;
+// non-equality relations are not transitive, but they can still be useful: for instance
+// if we know that A<=B and later we learn that A!=B, we can deduce that A<B.
+// NonEqual can be used to check whether it is known that the nodes are different, either
+// because SetNonEqual was called before, or because we know that they are strictly ordered.
+//
+// poset will refuse to record new relations that contradict existing relations:
+// for instance if A<B<C, calling SetOrder for C<A will fail returning false; also
+// calling SetEqual for C==A will fail.
+//
+// poset is implemented as a forest of DAGs; in each DAG, if there is a path (directed)
+// from node A to B, it means that A<B (or A<=B). Equality is represented by mapping
+// two SSA values to the same DAG node; when a new equality relation is recorded
+// between two existing nodes, the nodes are merged, adjusting incoming and outgoing edges.
+//
+// Constants are specially treated. When a constant is added to the poset, it is
+// immediately linked to other constants already present; so for instance if the
+// poset knows that x<=3, and then x is tested against 5, 5 is first added and linked
+// 3 (using 3<5), so that the poset knows that x<=3<5; at that point, it is able
+// to answer x<5 correctly. This means that all constants are always within the same
+// DAG; as an implementation detail, we enfoce that the DAG containtining the constants
+// is always the first in the forest.
+//
+// poset is designed to be memory efficient and do little allocations during normal usage.
+// Most internal data structures are pre-allocated and flat, so for instance adding a
+// new relation does not cause any allocation. For performance reasons,
+// each node has only up to two outgoing edges (like a binary tree), so intermediate
+// "extra" nodes are required to represent more than two relations. For instance,
+// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
+// following DAG:
+//
+// A
+// / \
+// I extra
+// / \
+// J K
+type poset struct {
+ lastidx uint32 // last generated dense index
+ flags uint8 // internal flags
+ values map[ID]uint32 // map SSA values to dense indexes
+ constants map[int64]uint32 // record SSA constants together with their value
+ nodes []posetNode // nodes (in all DAGs)
+ roots []uint32 // list of root nodes (forest)
+ noneq map[uint32]bitset // non-equal relations
+ undo []posetUndo // undo chain
+}
+
+func newPoset() *poset {
+ return &poset{
+ values: make(map[ID]uint32),
+ constants: make(map[int64]uint32, 8),
+ nodes: make([]posetNode, 1, 16),
+ roots: make([]uint32, 0, 4),
+ noneq: make(map[uint32]bitset),
+ undo: make([]posetUndo, 0, 4),
+ }
+}
+
+func (po *poset) SetUnsigned(uns bool) {
+ if uns {
+ po.flags |= posetFlagUnsigned
+ } else {
+ po.flags &^= posetFlagUnsigned
+ }
+}
+
+// Handle children
+func (po *poset) setchl(i uint32, l posetEdge) { po.nodes[i].l = l }
+func (po *poset) setchr(i uint32, r posetEdge) { po.nodes[i].r = r }
+func (po *poset) chl(i uint32) uint32 { return po.nodes[i].l.Target() }
+func (po *poset) chr(i uint32) uint32 { return po.nodes[i].r.Target() }
+func (po *poset) children(i uint32) (posetEdge, posetEdge) {
+ return po.nodes[i].l, po.nodes[i].r
+}
+
+// upush records a new undo step. It can be used for simple
+// undo passes that record up to one index and one edge.
+func (po *poset) upush(typ undoType, p uint32, e posetEdge) {
+ po.undo = append(po.undo, posetUndo{typ: typ, idx: p, edge: e})
+}
+
+// upushnew pushes an undo pass for a new node
+func (po *poset) upushnew(id ID, idx uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNewNode, ID: id, idx: idx})
+}
+
+// upushneq pushes a new undo pass for a nonequal relation
+func (po *poset) upushneq(idx1 uint32, idx2 uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNonEqual, ID: ID(idx1), idx: idx2})
+}
+
+// upushalias pushes a new undo pass for aliasing two nodes
+func (po *poset) upushalias(id ID, i2 uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoAliasNode, ID: id, idx: i2})
+}
+
+// upushconst pushes a new undo pass for a new constant
+func (po *poset) upushconst(idx uint32, old uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNewConstant, idx: idx, ID: ID(old)})
+}
+
+// addchild adds i2 as direct child of i1.
+func (po *poset) addchild(i1, i2 uint32, strict bool) {
+ i1l, i1r := po.children(i1)
+ e2 := newedge(i2, strict)
+
+ if i1l == 0 {
+ po.setchl(i1, e2)
+ po.upush(undoSetChl, i1, 0)
+ } else if i1r == 0 {
+ po.setchr(i1, e2)
+ po.upush(undoSetChr, i1, 0)
+ } else {
+ // If n1 already has two children, add an intermediate extra
+ // node to record the relation correctly (without relating
+ // n2 to other existing nodes). Use a non-deterministic value
+ // to decide whether to append on the left or the right, to avoid
+ // creating degenerated chains.
+ //
+ // n1
+ // / \
+ // i1l extra
+ // / \
+ // i1r n2
+ //
+ extra := po.newnode(nil)
+ if (i1^i2)&1 != 0 { // non-deterministic
+ po.setchl(extra, i1r)
+ po.setchr(extra, e2)
+ po.setchr(i1, newedge(extra, false))
+ po.upush(undoSetChr, i1, i1r)
+ } else {
+ po.setchl(extra, i1l)
+ po.setchr(extra, e2)
+ po.setchl(i1, newedge(extra, false))
+ po.upush(undoSetChl, i1, i1l)
+ }
+ }
+}
+
+// newnode allocates a new node bound to SSA value n.
+// If n is nil, this is an extra node (= only used internally).
+func (po *poset) newnode(n *Value) uint32 {
+ i := po.lastidx + 1
+ po.lastidx++
+ po.nodes = append(po.nodes, posetNode{})
+ if n != nil {
+ if po.values[n.ID] != 0 {
+ panic("newnode for Value already inserted")
+ }
+ po.values[n.ID] = i
+ po.upushnew(n.ID, i)
+ } else {
+ po.upushnew(0, i)
+ }
+ return i
+}
+
+// lookup searches for a SSA value into the forest of DAGS, and return its node.
+// Constants are materialized on the fly during lookup.
+func (po *poset) lookup(n *Value) (uint32, bool) {
+ i, f := po.values[n.ID]
+ if !f && n.isGenericIntConst() {
+ po.newconst(n)
+ i, f = po.values[n.ID]
+ }
+ return i, f
+}
+
+// newconst creates a node for a constant. It links it to other constants, so
+// that n<=5 is detected true when n<=3 is known to be true.
+// TODO: this is O(N), fix it.
+func (po *poset) newconst(n *Value) {
+ if !n.isGenericIntConst() {
+ panic("newconst on non-constant")
+ }
+
+ // If the same constant is already present in the poset through a different
+ // Value, just alias to it without allocating a new node.
+ val := n.AuxInt
+ if po.flags&posetFlagUnsigned != 0 {
+ val = int64(n.AuxUnsigned())
+ }
+ if c, found := po.constants[val]; found {
+ po.values[n.ID] = c
+ po.upushalias(n.ID, 0)
+ return
+ }
+
+ // Create the new node for this constant
+ i := po.newnode(n)
+
+ // If this is the first constant, put it as a new root, as
+ // we can't record an existing connection so we don't have
+ // a specific DAG to add it to. Notice that we want all
+ // constants to be in root #0, so make sure the new root
+ // goes there.
+ if len(po.constants) == 0 {
+ idx := len(po.roots)
+ po.roots = append(po.roots, i)
+ po.roots[0], po.roots[idx] = po.roots[idx], po.roots[0]
+ po.upush(undoNewRoot, i, 0)
+ po.constants[val] = i
+ po.upushconst(i, 0)
+ return
+ }
+
+ // Find the lower and upper bound among existing constants. That is,
+ // find the higher constant that is lower than the one that we're adding,
+ // and the lower constant that is higher.
+ // The loop is duplicated to handle signed and unsigned comparison,
+ // depending on how the poset was configured.
+ var lowerptr, higherptr uint32
+
+ if po.flags&posetFlagUnsigned != 0 {
+ var lower, higher uint64
+ val1 := n.AuxUnsigned()
+ for val2, ptr := range po.constants {
+ val2 := uint64(val2)
+ if val1 == val2 {
+ panic("unreachable")
+ }
+ if val2 < val1 && (lowerptr == 0 || val2 > lower) {
+ lower = val2
+ lowerptr = ptr
+ } else if val2 > val1 && (higherptr == 0 || val2 < higher) {
+ higher = val2
+ higherptr = ptr
+ }
+ }
+ } else {
+ var lower, higher int64
+ val1 := n.AuxInt
+ for val2, ptr := range po.constants {
+ if val1 == val2 {
+ panic("unreachable")
+ }
+ if val2 < val1 && (lowerptr == 0 || val2 > lower) {
+ lower = val2
+ lowerptr = ptr
+ } else if val2 > val1 && (higherptr == 0 || val2 < higher) {
+ higher = val2
+ higherptr = ptr
+ }
+ }
+ }
+
+ if lowerptr == 0 && higherptr == 0 {
+ // This should not happen, as at least one
+ // other constant must exist if we get here.
+ panic("no constant found")
+ }
+
+ // Create the new node and connect it to the bounds, so that
+ // lower < n < higher. We could have found both bounds or only one
+ // of them, depending on what other constants are present in the poset.
+ // Notice that we always link constants together, so they
+ // are always part of the same DAG.
+ switch {
+ case lowerptr != 0 && higherptr != 0:
+ // Both bounds are present, record lower < n < higher.
+ po.addchild(lowerptr, i, true)
+ po.addchild(i, higherptr, true)
+
+ case lowerptr != 0:
+ // Lower bound only, record lower < n.
+ po.addchild(lowerptr, i, true)
+
+ case higherptr != 0:
+ // Higher bound only. To record n < higher, we need
+ // an extra root:
+ //
+ // extra
+ // / \
+ // root \
+ // / n
+ // .... /
+ // \ /
+ // higher
+ //
+ i2 := higherptr
+ r2 := po.findroot(i2)
+ if r2 != po.roots[0] { // all constants should be in root #0
+ panic("constant not in root #0")
+ }
+ extra := po.newnode(nil)
+ po.changeroot(r2, extra)
+ po.upush(undoChangeRoot, extra, newedge(r2, false))
+ po.addchild(extra, r2, false)
+ po.addchild(extra, i, false)
+ po.addchild(i, i2, true)
+ }
+
+ po.constants[val] = i
+ po.upushconst(i, 0)
+}
+
+// aliasnewnode records that a single node n2 (not in the poset yet) is an alias
+// of the master node n1.
+func (po *poset) aliasnewnode(n1, n2 *Value) {
+ i1, i2 := po.values[n1.ID], po.values[n2.ID]
+ if i1 == 0 || i2 != 0 {
+ panic("aliasnewnode invalid arguments")
+ }
+
+ po.values[n2.ID] = i1
+ po.upushalias(n2.ID, 0)
+}
+
+// aliasnodes records that all the nodes i2s are aliases of a single master node n1.
+// aliasnodes takes care of rearranging the DAG, changing references of parent/children
+// of nodes in i2s, so that they point to n1 instead.
+// Complexity is O(n) (with n being the total number of nodes in the poset, not just
+// the number of nodes being aliased).
+func (po *poset) aliasnodes(n1 *Value, i2s bitset) {
+ i1 := po.values[n1.ID]
+ if i1 == 0 {
+ panic("aliasnode for non-existing node")
+ }
+ if i2s.Test(i1) {
+ panic("aliasnode i2s contains n1 node")
+ }
+
+ // Go through all the nodes to adjust parent/chidlren of nodes in i2s
+ for idx, n := range po.nodes {
+ // Do not touch i1 itself, otherwise we can create useless self-loops
+ if uint32(idx) == i1 {
+ continue
+ }
+ l, r := n.l, n.r
+
+ // Rename all references to i2s into i1
+ if i2s.Test(l.Target()) {
+ po.setchl(uint32(idx), newedge(i1, l.Strict()))
+ po.upush(undoSetChl, uint32(idx), l)
+ }
+ if i2s.Test(r.Target()) {
+ po.setchr(uint32(idx), newedge(i1, r.Strict()))
+ po.upush(undoSetChr, uint32(idx), r)
+ }
+
+ // Connect all children of i2s to i1 (unless those children
+ // are in i2s as well, in which case it would be useless)
+ if i2s.Test(uint32(idx)) {
+ if l != 0 && !i2s.Test(l.Target()) {
+ po.addchild(i1, l.Target(), l.Strict())
+ }
+ if r != 0 && !i2s.Test(r.Target()) {
+ po.addchild(i1, r.Target(), r.Strict())
+ }
+ po.setchl(uint32(idx), 0)
+ po.setchr(uint32(idx), 0)
+ po.upush(undoSetChl, uint32(idx), l)
+ po.upush(undoSetChr, uint32(idx), r)
+ }
+ }
+
+ // Reassign all existing IDs that point to i2 to i1.
+ // This includes n2.ID.
+ for k, v := range po.values {
+ if i2s.Test(v) {
+ po.values[k] = i1
+ po.upushalias(k, v)
+ }
+ }
+
+ // If one of the aliased nodes is a constant, then make sure
+ // po.constants is updated to point to the master node.
+ for val, idx := range po.constants {
+ if i2s.Test(idx) {
+ po.constants[val] = i1
+ po.upushconst(i1, idx)
+ }
+ }
+}
+
+func (po *poset) isroot(r uint32) bool {
+ for i := range po.roots {
+ if po.roots[i] == r {
+ return true
+ }
+ }
+ return false
+}
+
+func (po *poset) changeroot(oldr, newr uint32) {
+ for i := range po.roots {
+ if po.roots[i] == oldr {
+ po.roots[i] = newr
+ return
+ }
+ }
+ panic("changeroot on non-root")
+}
+
+func (po *poset) removeroot(r uint32) {
+ for i := range po.roots {
+ if po.roots[i] == r {
+ po.roots = append(po.roots[:i], po.roots[i+1:]...)
+ return
+ }
+ }
+ panic("removeroot on non-root")
+}
+
+// dfs performs a depth-first search within the DAG whose root is r.
+// f is the visit function called for each node; if it returns true,
+// the search is aborted and true is returned. The root node is
+// visited too.
+// If strict, ignore edges across a path until at least one
+// strict edge is found. For instance, for a chain A<=B<=C<D<=E<F,
+// a strict walk visits D,E,F.
+// If the visit ends, false is returned.
+func (po *poset) dfs(r uint32, strict bool, f func(i uint32) bool) bool {
+ closed := newBitset(int(po.lastidx + 1))
+ open := make([]uint32, 1, 64)
+ open[0] = r
+
+ if strict {
+ // Do a first DFS; walk all paths and stop when we find a strict
+ // edge, building a "next" list of nodes reachable through strict
+ // edges. This will be the bootstrap open list for the real DFS.
+ next := make([]uint32, 0, 64)
+
+ for len(open) > 0 {
+ i := open[len(open)-1]
+ open = open[:len(open)-1]
+
+ // Don't visit the same node twice. Notice that all nodes
+ // across non-strict paths are still visited at least once, so
+ // a non-strict path can never obscure a strict path to the
+ // same node.
+ if !closed.Test(i) {
+ closed.Set(i)
+
+ l, r := po.children(i)
+ if l != 0 {
+ if l.Strict() {
+ next = append(next, l.Target())
+ } else {
+ open = append(open, l.Target())
+ }
+ }
+ if r != 0 {
+ if r.Strict() {
+ next = append(next, r.Target())
+ } else {
+ open = append(open, r.Target())
+ }
+ }
+ }
+ }
+ open = next
+ closed.Reset()
+ }
+
+ for len(open) > 0 {
+ i := open[len(open)-1]
+ open = open[:len(open)-1]
+
+ if !closed.Test(i) {
+ if f(i) {
+ return true
+ }
+ closed.Set(i)
+ l, r := po.children(i)
+ if l != 0 {
+ open = append(open, l.Target())
+ }
+ if r != 0 {
+ open = append(open, r.Target())
+ }
+ }
+ }
+ return false
+}
+
+// Returns true if there is a path from i1 to i2.
+// If strict == true: if the function returns true, then i1 < i2.
+// If strict == false: if the function returns true, then i1 <= i2.
+// If the function returns false, no relation is known.
+func (po *poset) reaches(i1, i2 uint32, strict bool) bool {
+ return po.dfs(i1, strict, func(n uint32) bool {
+ return n == i2
+ })
+}
+
+// findroot finds i's root, that is which DAG contains i.
+// Returns the root; if i is itself a root, it is returned.
+// Panic if i is not in any DAG.
+func (po *poset) findroot(i uint32) uint32 {
+ // TODO(rasky): if needed, a way to speed up this search is
+ // storing a bitset for each root using it as a mini bloom filter
+ // of nodes present under that root.
+ for _, r := range po.roots {
+ if po.reaches(r, i, false) {
+ return r
+ }
+ }
+ panic("findroot didn't find any root")
+}
+
+// mergeroot merges two DAGs into one DAG by creating a new extra root
+func (po *poset) mergeroot(r1, r2 uint32) uint32 {
+ // Root #0 is special as it contains all constants. Since mergeroot
+ // discards r2 as root and keeps r1, make sure that r2 is not root #0,
+ // otherwise constants would move to a different root.
+ if r2 == po.roots[0] {
+ r1, r2 = r2, r1
+ }
+ r := po.newnode(nil)
+ po.setchl(r, newedge(r1, false))
+ po.setchr(r, newedge(r2, false))
+ po.changeroot(r1, r)
+ po.removeroot(r2)
+ po.upush(undoMergeRoot, r, 0)
+ return r
+}
+
+// collapsepath marks n1 and n2 as equal and collapses as equal all
+// nodes across all paths between n1 and n2. If a strict edge is
+// found, the function does not modify the DAG and returns false.
+// Complexity is O(n).
+func (po *poset) collapsepath(n1, n2 *Value) bool {
+ i1, i2 := po.values[n1.ID], po.values[n2.ID]
+ if po.reaches(i1, i2, true) {
+ return false
+ }
+
+ // Find all the paths from i1 to i2
+ paths := po.findpaths(i1, i2)
+ // Mark all nodes in all the paths as aliases of n1
+ // (excluding n1 itself)
+ paths.Clear(i1)
+ po.aliasnodes(n1, paths)
+ return true
+}
+
+// findpaths is a recursive function that calculates all paths from cur to dst
+// and return them as a bitset (the index of a node is set in the bitset if
+// that node is on at least one path from cur to dst).
+// We do a DFS from cur (stopping going deep any time we reach dst, if ever),
+// and mark as part of the paths any node that has a children which is already
+// part of the path (or is dst itself).
+func (po *poset) findpaths(cur, dst uint32) bitset {
+ seen := newBitset(int(po.lastidx + 1))
+ path := newBitset(int(po.lastidx + 1))
+ path.Set(dst)
+ po.findpaths1(cur, dst, seen, path)
+ return path
+}
+
+func (po *poset) findpaths1(cur, dst uint32, seen bitset, path bitset) {
+ if cur == dst {
+ return
+ }
+ seen.Set(cur)
+ l, r := po.chl(cur), po.chr(cur)
+ if !seen.Test(l) {
+ po.findpaths1(l, dst, seen, path)
+ }
+ if !seen.Test(r) {
+ po.findpaths1(r, dst, seen, path)
+ }
+ if path.Test(l) || path.Test(r) {
+ path.Set(cur)
+ }
+}
+
+// Check whether it is recorded that i1!=i2
+func (po *poset) isnoneq(i1, i2 uint32) bool {
+ if i1 == i2 {
+ return false
+ }
+ if i1 < i2 {
+ i1, i2 = i2, i1
+ }
+
+ // Check if we recorded a non-equal relation before
+ if bs, ok := po.noneq[i1]; ok && bs.Test(i2) {
+ return true
+ }
+ return false
+}
+
+// Record that i1!=i2
+func (po *poset) setnoneq(n1, n2 *Value) {
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ // If any of the nodes do not exist in the poset, allocate them. Since
+ // we don't know any relation (in the partial order) about them, they must
+ // become independent roots.
+ if !f1 {
+ i1 = po.newnode(n1)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ }
+ if !f2 {
+ i2 = po.newnode(n2)
+ po.roots = append(po.roots, i2)
+ po.upush(undoNewRoot, i2, 0)
+ }
+
+ if i1 == i2 {
+ panic("setnoneq on same node")
+ }
+ if i1 < i2 {
+ i1, i2 = i2, i1
+ }
+ bs := po.noneq[i1]
+ if bs == nil {
+ // Given that we record non-equality relations using the
+ // higher index as a key, the bitsize will never change size.
+ // TODO(rasky): if memory is a problem, consider allocating
+ // a small bitset and lazily grow it when higher indices arrive.
+ bs = newBitset(int(i1))
+ po.noneq[i1] = bs
+ } else if bs.Test(i2) {
+ // Already recorded
+ return
+ }
+ bs.Set(i2)
+ po.upushneq(i1, i2)
+}
+
+// CheckIntegrity verifies internal integrity of a poset. It is intended
+// for debugging purposes.
+func (po *poset) CheckIntegrity() {
+ // Record which index is a constant
+ constants := newBitset(int(po.lastidx + 1))
+ for _, c := range po.constants {
+ constants.Set(c)
+ }
+
+ // Verify that each node appears in a single DAG, and that
+ // all constants are within the first DAG
+ seen := newBitset(int(po.lastidx + 1))
+ for ridx, r := range po.roots {
+ if r == 0 {
+ panic("empty root")
+ }
+
+ po.dfs(r, false, func(i uint32) bool {
+ if seen.Test(i) {
+ panic("duplicate node")
+ }
+ seen.Set(i)
+ if constants.Test(i) {
+ if ridx != 0 {
+ panic("constants not in the first DAG")
+ }
+ }
+ return false
+ })
+ }
+
+ // Verify that values contain the minimum set
+ for id, idx := range po.values {
+ if !seen.Test(idx) {
+ panic(fmt.Errorf("spurious value [%d]=%d", id, idx))
+ }
+ }
+
+ // Verify that only existing nodes have non-zero children
+ for i, n := range po.nodes {
+ if n.l|n.r != 0 {
+ if !seen.Test(uint32(i)) {
+ panic(fmt.Errorf("children of unknown node %d->%v", i, n))
+ }
+ if n.l.Target() == uint32(i) || n.r.Target() == uint32(i) {
+ panic(fmt.Errorf("self-loop on node %d", i))
+ }
+ }
+ }
+}
+
+// CheckEmpty checks that a poset is completely empty.
+// It can be used for debugging purposes, as a poset is supposed to
+// be empty after it's fully rolled back through Undo.
+func (po *poset) CheckEmpty() error {
+ if len(po.nodes) != 1 {
+ return fmt.Errorf("non-empty nodes list: %v", po.nodes)
+ }
+ if len(po.values) != 0 {
+ return fmt.Errorf("non-empty value map: %v", po.values)
+ }
+ if len(po.roots) != 0 {
+ return fmt.Errorf("non-empty root list: %v", po.roots)
+ }
+ if len(po.constants) != 0 {
+ return fmt.Errorf("non-empty constants: %v", po.constants)
+ }
+ if len(po.undo) != 0 {
+ return fmt.Errorf("non-empty undo list: %v", po.undo)
+ }
+ if po.lastidx != 0 {
+ return fmt.Errorf("lastidx index is not zero: %v", po.lastidx)
+ }
+ for _, bs := range po.noneq {
+ for _, x := range bs {
+ if x != 0 {
+ return fmt.Errorf("non-empty noneq map")
+ }
+ }
+ }
+ return nil
+}
+
+// DotDump dumps the poset in graphviz format to file fn, with the specified title.
+func (po *poset) DotDump(fn string, title string) error {
+ f, err := os.Create(fn)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Create reverse index mapping (taking aliases into account)
+ names := make(map[uint32]string)
+ for id, i := range po.values {
+ s := names[i]
+ if s == "" {
+ s = fmt.Sprintf("v%d", id)
+ } else {
+ s += fmt.Sprintf(", v%d", id)
+ }
+ names[i] = s
+ }
+
+ // Create reverse constant mapping
+ consts := make(map[uint32]int64)
+ for val, idx := range po.constants {
+ consts[idx] = val
+ }
+
+ fmt.Fprintf(f, "digraph poset {\n")
+ fmt.Fprintf(f, "\tedge [ fontsize=10 ]\n")
+ for ridx, r := range po.roots {
+ fmt.Fprintf(f, "\tsubgraph root%d {\n", ridx)
+ po.dfs(r, false, func(i uint32) bool {
+ if val, ok := consts[i]; ok {
+ // Constant
+ var vals string
+ if po.flags&posetFlagUnsigned != 0 {
+ vals = fmt.Sprint(uint64(val))
+ } else {
+ vals = fmt.Sprint(int64(val))
+ }
+ fmt.Fprintf(f, "\t\tnode%d [shape=box style=filled fillcolor=cadetblue1 label=<%s <font point-size=\"6\">%s [%d]</font>>]\n",
+ i, vals, names[i], i)
+ } else {
+ // Normal SSA value
+ fmt.Fprintf(f, "\t\tnode%d [label=<%s <font point-size=\"6\">[%d]</font>>]\n", i, names[i], i)
+ }
+ chl, chr := po.children(i)
+ for _, ch := range []posetEdge{chl, chr} {
+ if ch != 0 {
+ if ch.Strict() {
+ fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <\" color=\"red\"]\n", i, ch.Target())
+ } else {
+ fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <=\" color=\"green\"]\n", i, ch.Target())
+ }
+ }
+ }
+ return false
+ })
+ fmt.Fprintf(f, "\t}\n")
+ }
+ fmt.Fprintf(f, "\tlabelloc=\"t\"\n")
+ fmt.Fprintf(f, "\tlabeldistance=\"3.0\"\n")
+ fmt.Fprintf(f, "\tlabel=%q\n", title)
+ fmt.Fprintf(f, "}\n")
+ return nil
+}
+
+// Ordered reports whether n1<n2. It returns false either when it is
+// certain that n1<n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n).
+func (po *poset) Ordered(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Ordered with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ return i1 != i2 && po.reaches(i1, i2, true)
+}
+
+// OrderedOrEqual reports whether n1<=n2. It returns false either when it is
+// certain that n1<=n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n).
+func (po *poset) OrderedOrEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Ordered with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ return i1 == i2 || po.reaches(i1, i2, false)
+}
+
+// Equal reports whether n1==n2. It returns false either when it is
+// certain that n1==n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(1).
+func (po *poset) Equal(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Equal with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ return f1 && f2 && i1 == i2
+}
+
+// NonEqual reports whether n1!=n2. It returns false either when it is
+// certain that n1!=n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n) (because it internally calls Ordered to see if we
+// can infer n1!=n2 from n1<n2 or n2<n1).
+func (po *poset) NonEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call NonEqual with n1==n2")
+ }
+
+ // If we never saw the nodes before, we don't
+ // have a recorded non-equality.
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ // Check if we recorded inequality
+ if po.isnoneq(i1, i2) {
+ return true
+ }
+
+ // Check if n1<n2 or n2<n1, in which case we can infer that n1!=n2
+ if po.Ordered(n1, n2) || po.Ordered(n2, n1) {
+ return true
+ }
+
+ return false
+}
+
+// setOrder records that n1<n2 or n1<=n2 (depending on strict). Returns false
+// if this is a contradiction.
+// Implements SetOrder() and SetOrderOrEqual()
+func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ switch {
+ case !f1 && !f2:
+ // Neither n1 nor n2 are in the poset, so they are not related
+ // in any way to existing nodes.
+ // Create a new DAG to record the relation.
+ i1, i2 = po.newnode(n1), po.newnode(n2)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ po.addchild(i1, i2, strict)
+
+ case f1 && !f2:
+ // n1 is in one of the DAGs, while n2 is not. Add n2 as children
+ // of n1.
+ i2 = po.newnode(n2)
+ po.addchild(i1, i2, strict)
+
+ case !f1 && f2:
+ // n1 is not in any DAG but n2 is. If n2 is a root, we can put
+ // n1 in its place as a root; otherwise, we need to create a new
+ // extra root to record the relation.
+ i1 = po.newnode(n1)
+
+ if po.isroot(i2) {
+ po.changeroot(i2, i1)
+ po.upush(undoChangeRoot, i1, newedge(i2, strict))
+ po.addchild(i1, i2, strict)
+ return true
+ }
+
+ // Search for i2's root; this requires a O(n) search on all
+ // DAGs
+ r := po.findroot(i2)
+
+ // Re-parent as follows:
+ //
+ // extra
+ // r / \
+ // \ ===> r i1
+ // i2 \ /
+ // i2
+ //
+ extra := po.newnode(nil)
+ po.changeroot(r, extra)
+ po.upush(undoChangeRoot, extra, newedge(r, false))
+ po.addchild(extra, r, false)
+ po.addchild(extra, i1, false)
+ po.addchild(i1, i2, strict)
+
+ case f1 && f2:
+ // If the nodes are aliased, fail only if we're setting a strict order
+ // (that is, we cannot set n1<n2 if n1==n2).
+ if i1 == i2 {
+ return !strict
+ }
+
+ // If we are trying to record n1<=n2 but we learned that n1!=n2,
+ // record n1<n2, as it provides more information.
+ if !strict && po.isnoneq(i1, i2) {
+ strict = true
+ }
+
+ // Both n1 and n2 are in the poset. This is the complex part of the algorithm
+ // as we need to find many different cases and DAG shapes.
+
+ // Check if n1 somehow reaches n2
+ if po.reaches(i1, i2, false) {
+ // This is the table of all cases we need to handle:
+ //
+ // DAG New Action
+ // ---------------------------------------------------
+ // #1: N1<=X<=N2 | N1<=N2 | do nothing
+ // #2: N1<=X<=N2 | N1<N2 | add strict edge (N1<N2)
+ // #3: N1<X<N2 | N1<=N2 | do nothing (we already know more)
+ // #4: N1<X<N2 | N1<N2 | do nothing
+
+ // Check if we're in case #2
+ if strict && !po.reaches(i1, i2, true) {
+ po.addchild(i1, i2, true)
+ return true
+ }
+
+ // Case #1, #3, or #4: nothing to do
+ return true
+ }
+
+ // Check if n2 somehow reaches n1
+ if po.reaches(i2, i1, false) {
+ // This is the table of all cases we need to handle:
+ //
+ // DAG New Action
+ // ---------------------------------------------------
+ // #5: N2<=X<=N1 | N1<=N2 | collapse path (learn that N1=X=N2)
+ // #6: N2<=X<=N1 | N1<N2 | contradiction
+ // #7: N2<X<N1 | N1<=N2 | contradiction in the path
+ // #8: N2<X<N1 | N1<N2 | contradiction
+
+ if strict {
+ // Cases #6 and #8: contradiction
+ return false
+ }
+
+ // We're in case #5 or #7. Try to collapse path, and that will
+ // fail if it realizes that we are in case #7.
+ return po.collapsepath(n2, n1)
+ }
+
+ // We don't know of any existing relation between n1 and n2. They could
+ // be part of the same DAG or not.
+ // Find their roots to check whether they are in the same DAG.
+ r1, r2 := po.findroot(i1), po.findroot(i2)
+ if r1 != r2 {
+ // We need to merge the two DAGs to record a relation between the nodes
+ po.mergeroot(r1, r2)
+ }
+
+ // Connect n1 and n2
+ po.addchild(i1, i2, strict)
+ }
+
+ return true
+}
+
+// SetOrder records that n1<n2. Returns false if this is a contradiction
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetOrder(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetOrder with n1==n2")
+ }
+ return po.setOrder(n1, n2, true)
+}
+
+// SetOrderOrEqual records that n1<=n2. Returns false if this is a contradiction
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetOrderOrEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetOrder with n1==n2")
+ }
+ return po.setOrder(n1, n2, false)
+}
+
+// SetEqual records that n1==n2. Returns false if this is a contradiction
+// (that is, if it is already recorded that n1<n2 or n2<n1).
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Add with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ switch {
+ case !f1 && !f2:
+ i1 = po.newnode(n1)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ po.aliasnewnode(n1, n2)
+ case f1 && !f2:
+ po.aliasnewnode(n1, n2)
+ case !f1 && f2:
+ po.aliasnewnode(n2, n1)
+ case f1 && f2:
+ if i1 == i2 {
+ // Already aliased, ignore
+ return true
+ }
+
+ // If we recorded that n1!=n2, this is a contradiction.
+ if po.isnoneq(i1, i2) {
+ return false
+ }
+
+ // If we already knew that n1<=n2, we can collapse the path to
+ // record n1==n2 (and vice versa).
+ if po.reaches(i1, i2, false) {
+ return po.collapsepath(n1, n2)
+ }
+ if po.reaches(i2, i1, false) {
+ return po.collapsepath(n2, n1)
+ }
+
+ r1 := po.findroot(i1)
+ r2 := po.findroot(i2)
+ if r1 != r2 {
+ // Merge the two DAGs so we can record relations between the nodes
+ po.mergeroot(r1, r2)
+ }
+
+ // Set n2 as alias of n1. This will also update all the references
+ // to n2 to become references to n1
+ i2s := newBitset(int(po.lastidx) + 1)
+ i2s.Set(i2)
+ po.aliasnodes(n1, i2s)
+ }
+ return true
+}
+
+// SetNonEqual records that n1!=n2. Returns false if this is a contradiction
+// (that is, if it is already recorded that n1==n2).
+// Complexity is O(n).
+func (po *poset) SetNonEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetNonEqual with n1==n2")
+ }
+
+ // Check whether the nodes are already in the poset
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ // If either node wasn't present, we just record the new relation
+ // and exit.
+ if !f1 || !f2 {
+ po.setnoneq(n1, n2)
+ return true
+ }
+
+ // See if we already know this, in which case there's nothing to do.
+ if po.isnoneq(i1, i2) {
+ return true
+ }
+
+ // Check if we're contradicting an existing equality relation
+ if po.Equal(n1, n2) {
+ return false
+ }
+
+ // Record non-equality
+ po.setnoneq(n1, n2)
+
+ // If we know that i1<=i2 but not i1<i2, learn that as we
+ // now know that they are not equal. Do the same for i2<=i1.
+ // Do this check only if both nodes were already in the DAG,
+ // otherwise there cannot be an existing relation.
+ if po.reaches(i1, i2, false) && !po.reaches(i1, i2, true) {
+ po.addchild(i1, i2, true)
+ }
+ if po.reaches(i2, i1, false) && !po.reaches(i2, i1, true) {
+ po.addchild(i2, i1, true)
+ }
+
+ return true
+}
+
+// Checkpoint saves the current state of the DAG so that it's possible
+// to later undo this state.
+// Complexity is O(1).
+func (po *poset) Checkpoint() {
+ po.undo = append(po.undo, posetUndo{typ: undoCheckpoint})
+}
+
+// Undo restores the state of the poset to the previous checkpoint.
+// Complexity depends on the type of operations that were performed
+// since the last checkpoint; each Set* operation creates an undo
+// pass which Undo has to revert with a worst-case complexity of O(n).
+func (po *poset) Undo() {
+ if len(po.undo) == 0 {
+ panic("empty undo stack")
+ }
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+
+ for len(po.undo) > 0 {
+ pass := po.undo[len(po.undo)-1]
+ po.undo = po.undo[:len(po.undo)-1]
+
+ switch pass.typ {
+ case undoCheckpoint:
+ return
+
+ case undoSetChl:
+ po.setchl(pass.idx, pass.edge)
+
+ case undoSetChr:
+ po.setchr(pass.idx, pass.edge)
+
+ case undoNonEqual:
+ po.noneq[uint32(pass.ID)].Clear(pass.idx)
+
+ case undoNewNode:
+ if pass.idx != po.lastidx {
+ panic("invalid newnode index")
+ }
+ if pass.ID != 0 {
+ if po.values[pass.ID] != pass.idx {
+ panic("invalid newnode undo pass")
+ }
+ delete(po.values, pass.ID)
+ }
+ po.setchl(pass.idx, 0)
+ po.setchr(pass.idx, 0)
+ po.nodes = po.nodes[:pass.idx]
+ po.lastidx--
+
+ case undoNewConstant:
+ // FIXME: remove this O(n) loop
+ var val int64
+ var i uint32
+ for val, i = range po.constants {
+ if i == pass.idx {
+ break
+ }
+ }
+ if i != pass.idx {
+ panic("constant not found in undo pass")
+ }
+ if pass.ID == 0 {
+ delete(po.constants, val)
+ } else {
+ // Restore previous index as constant node
+ // (also restoring the invariant on correct bounds)
+ oldidx := uint32(pass.ID)
+ po.constants[val] = oldidx
+ }
+
+ case undoAliasNode:
+ ID, prev := pass.ID, pass.idx
+ cur := po.values[ID]
+ if prev == 0 {
+ // Born as an alias, die as an alias
+ delete(po.values, ID)
+ } else {
+ if cur == prev {
+ panic("invalid aliasnode undo pass")
+ }
+ // Give it back previous value
+ po.values[ID] = prev
+ }
+
+ case undoNewRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ if l|r != 0 {
+ panic("non-empty root in undo newroot")
+ }
+ po.removeroot(i)
+
+ case undoChangeRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ if l|r != 0 {
+ panic("non-empty root in undo changeroot")
+ }
+ po.changeroot(i, pass.edge.Target())
+
+ case undoMergeRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ po.changeroot(i, l.Target())
+ po.roots = append(po.roots, r.Target())
+
+ default:
+ panic(pass.typ)
+ }
+ }
+
+ if debugPoset && po.CheckEmpty() != nil {
+ panic("poset not empty at the end of undo")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/poset_test.go b/src/cmd/compile/internal/ssa/poset_test.go
new file mode 100644
index 0000000..a6db1d1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/poset_test.go
@@ -0,0 +1,800 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "testing"
+)
+
+const (
+ SetOrder = "SetOrder"
+ SetOrder_Fail = "SetOrder_Fail"
+ SetOrderOrEqual = "SetOrderOrEqual"
+ SetOrderOrEqual_Fail = "SetOrderOrEqual_Fail"
+ Ordered = "Ordered"
+ Ordered_Fail = "Ordered_Fail"
+ OrderedOrEqual = "OrderedOrEqual"
+ OrderedOrEqual_Fail = "OrderedOrEqual_Fail"
+ SetEqual = "SetEqual"
+ SetEqual_Fail = "SetEqual_Fail"
+ Equal = "Equal"
+ Equal_Fail = "Equal_Fail"
+ SetNonEqual = "SetNonEqual"
+ SetNonEqual_Fail = "SetNonEqual_Fail"
+ NonEqual = "NonEqual"
+ NonEqual_Fail = "NonEqual_Fail"
+ Checkpoint = "Checkpoint"
+ Undo = "Undo"
+)
+
+type posetTestOp struct {
+ typ string
+ a, b int
+}
+
+func vconst(i int) int {
+ if i < -128 || i >= 128 {
+ panic("invalid const")
+ }
+ return 1000 + 128 + i
+}
+
+func vconst2(i int) int {
+ if i < -128 || i >= 128 {
+ panic("invalid const")
+ }
+ return 1000 + 256 + i
+}
+
+func testPosetOps(t *testing.T, unsigned bool, ops []posetTestOp) {
+ var v [1512]*Value
+ for i := range v {
+ v[i] = new(Value)
+ v[i].ID = ID(i)
+ if i >= 1000 && i < 1256 {
+ v[i].Op = OpConst64
+ v[i].AuxInt = int64(i - 1000 - 128)
+ }
+ if i >= 1256 && i < 1512 {
+ v[i].Op = OpConst64
+ v[i].AuxInt = int64(i - 1000 - 256)
+ }
+ }
+
+ po := newPoset()
+ po.SetUnsigned(unsigned)
+ for idx, op := range ops {
+ t.Logf("op%d%v", idx, op)
+ switch op.typ {
+ case SetOrder:
+ if !po.SetOrder(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetOrder_Fail:
+ if po.SetOrder(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetOrderOrEqual:
+ if !po.SetOrderOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetOrderOrEqual_Fail:
+ if po.SetOrderOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Ordered:
+ if !po.Ordered(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case Ordered_Fail:
+ if po.Ordered(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case OrderedOrEqual:
+ if !po.OrderedOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case OrderedOrEqual_Fail:
+ if po.OrderedOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetEqual:
+ if !po.SetEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetEqual_Fail:
+ if po.SetEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Equal:
+ if !po.Equal(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case Equal_Fail:
+ if po.Equal(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetNonEqual:
+ if !po.SetNonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetNonEqual_Fail:
+ if po.SetNonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case NonEqual:
+ if !po.NonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case NonEqual_Fail:
+ if po.NonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Checkpoint:
+ po.Checkpoint()
+ case Undo:
+ t.Log("Undo stack", po.undo)
+ po.Undo()
+ default:
+ panic("unimplemented")
+ }
+
+ if false {
+ po.DotDump(fmt.Sprintf("op%d.dot", idx), fmt.Sprintf("Last op: %v", op))
+ }
+
+ po.CheckIntegrity()
+ }
+
+ // Check that the poset is completely empty
+ if err := po.CheckEmpty(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPoset(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Ordered_Fail, 123, 124},
+
+ // Dag #0: 100<101
+ {Checkpoint, 0, 0},
+ {SetOrder, 100, 101},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 101, 100},
+ {SetOrder_Fail, 101, 100},
+ {SetOrder, 100, 101}, // repeat
+ {NonEqual, 100, 101},
+ {NonEqual, 101, 100},
+ {SetEqual_Fail, 100, 101},
+
+ // Dag #1: 4<=7<12
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 4, 7},
+ {OrderedOrEqual, 4, 7},
+ {SetOrder, 7, 12},
+ {Ordered, 7, 12},
+ {Ordered, 4, 12},
+ {Ordered_Fail, 12, 4},
+ {NonEqual, 4, 12},
+ {NonEqual, 12, 4},
+ {NonEqual_Fail, 4, 100},
+ {OrderedOrEqual, 4, 12},
+ {OrderedOrEqual_Fail, 12, 4},
+ {OrderedOrEqual, 4, 7},
+ {OrderedOrEqual_Fail, 7, 4},
+
+ // Dag #1: 1<4<=7<12
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, 4},
+ {Ordered, 1, 4},
+ {Ordered, 1, 12},
+ {Ordered_Fail, 12, 1},
+
+ // Dag #1: 1<4<=7<12, 6<7
+ {Checkpoint, 0, 0},
+ {SetOrder, 6, 7},
+ {Ordered, 6, 7},
+ {Ordered, 6, 12},
+ {SetOrder_Fail, 7, 4},
+ {SetOrder_Fail, 7, 6},
+ {SetOrder_Fail, 7, 1},
+
+ // Dag #1: 1<4<=7<12, 1<6<7
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 1, 6},
+ {SetOrder, 1, 6},
+ {Ordered, 1, 6},
+ {SetOrder_Fail, 6, 1},
+
+ // Dag #1: 1<4<=7<12, 1<4<6<7
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 4, 6},
+ {Ordered_Fail, 4, 7},
+ {SetOrder, 4, 6},
+ {Ordered, 4, 6},
+ {OrderedOrEqual, 4, 6},
+ {Ordered, 4, 7},
+ {OrderedOrEqual, 4, 7},
+ {SetOrder_Fail, 6, 4},
+ {Ordered_Fail, 7, 6},
+ {Ordered_Fail, 7, 4},
+ {OrderedOrEqual_Fail, 7, 6},
+ {OrderedOrEqual_Fail, 7, 4},
+
+ // Merge: 1<4<6, 4<=7<12, 6<101
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 6, 101},
+ {SetOrder, 6, 101},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Merge: 1<4<6, 4<=7<12, 6<100<101
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 6, 100},
+ {SetOrder, 6, 100},
+ {Ordered, 1, 100},
+
+ // Undo: 1<4<6<7<12, 6<101
+ {Ordered, 100, 101},
+ {Undo, 0, 0},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 6, 100},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Undo: 1<4<6<7<12, 100<101
+ {Undo, 0, 0},
+ {Ordered_Fail, 1, 100},
+ {Ordered_Fail, 1, 101},
+ {Ordered_Fail, 6, 100},
+ {Ordered_Fail, 6, 101},
+
+ // Merge: 1<4<6<7<12, 6<100<101
+ {Checkpoint, 0, 0},
+ {Ordered, 100, 101},
+ {SetOrder, 6, 100},
+ {Ordered, 6, 100},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Undo 2 times: 1<4<7<12, 1<6<7
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Ordered, 1, 6},
+ {Ordered, 4, 12},
+ {Ordered_Fail, 4, 6},
+ {SetOrder_Fail, 6, 1},
+
+ // Undo 2 times: 1<4<7<12
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Ordered, 1, 12},
+ {Ordered, 7, 12},
+ {Ordered_Fail, 1, 6},
+ {Ordered_Fail, 6, 7},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 1, 101},
+
+ // Undo: 4<7<12
+ {Undo, 0, 0},
+ {Ordered_Fail, 1, 12},
+ {Ordered_Fail, 1, 4},
+ {Ordered, 4, 12},
+ {Ordered, 100, 101},
+
+ // Undo: 100<101
+ {Undo, 0, 0},
+ {Ordered_Fail, 4, 7},
+ {Ordered_Fail, 7, 12},
+ {Ordered, 100, 101},
+
+ // Recreated DAG #1 from scratch, reusing same nodes.
+ // This also stresses that Undo has done its job correctly.
+ // DAG: 1<2<(5|6), 101<102<(105|106<107)
+ {Checkpoint, 0, 0},
+ {SetOrder, 101, 102},
+ {SetOrder, 102, 105},
+ {SetOrder, 102, 106},
+ {SetOrder, 106, 107},
+ {SetOrder, 1, 2},
+ {SetOrder, 2, 5},
+ {SetOrder, 2, 6},
+ {SetEqual_Fail, 1, 6},
+ {SetEqual_Fail, 107, 102},
+
+ // Now Set 2 == 102
+ // New DAG: (1|101)<2==102<(5|6|105|106<107)
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 102},
+ {Equal, 2, 102},
+ {SetEqual, 2, 102}, // trivially pass
+ {SetNonEqual_Fail, 2, 102}, // trivially fail
+ {Ordered, 1, 107},
+ {Ordered, 101, 6},
+ {Ordered, 101, 105},
+ {Ordered, 2, 106},
+ {Ordered, 102, 6},
+
+ // Undo SetEqual
+ {Undo, 0, 0},
+ {Equal_Fail, 2, 102},
+ {Ordered_Fail, 2, 102},
+ {Ordered_Fail, 1, 107},
+ {Ordered_Fail, 101, 6},
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 100},
+ {Ordered, 1, 107},
+ {Ordered, 100, 6},
+
+ // SetEqual with new node
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 400},
+ {SetEqual, 401, 2},
+ {Equal, 400, 401},
+ {Ordered, 1, 400},
+ {Ordered, 400, 6},
+ {Ordered, 1, 401},
+ {Ordered, 401, 6},
+ {Ordered_Fail, 2, 401},
+
+ // SetEqual unseen nodes and then connect
+ {Checkpoint, 0, 0},
+ {SetEqual, 500, 501},
+ {SetEqual, 102, 501},
+ {Equal, 500, 102},
+ {Ordered, 501, 106},
+ {Ordered, 100, 500},
+ {SetEqual, 500, 501},
+ {Ordered_Fail, 500, 501},
+ {Ordered_Fail, 102, 501},
+
+ // SetNonEqual relations
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 600, 601},
+ {NonEqual, 600, 601},
+ {SetNonEqual, 601, 602},
+ {NonEqual, 601, 602},
+ {NonEqual_Fail, 600, 602}, // non-transitive
+ {SetEqual_Fail, 601, 602},
+
+ // Undo back to beginning, leave the poset empty
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetStrict(t *testing.T) {
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ // Build: 20!=30, 10<20<=30<40. The 20<=30 will become 20<30.
+ {SetNonEqual, 20, 30},
+ {SetOrder, 10, 20},
+ {SetOrderOrEqual, 20, 30}, // this is affected by 20!=30
+ {SetOrder, 30, 40},
+
+ {Ordered, 10, 30},
+ {Ordered, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+
+ {Undo, 0, 0},
+
+ // Now do the opposite: first build the DAG and then learn non-equality
+ {Checkpoint, 0, 0},
+ {SetOrder, 10, 20},
+ {SetOrderOrEqual, 20, 30}, // this is affected by 20!=30
+ {SetOrder, 30, 40},
+
+ {Ordered, 10, 30},
+ {Ordered_Fail, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 20, 30},
+ {Ordered, 10, 30},
+ {Ordered, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 30, 35},
+ {OrderedOrEqual, 20, 35},
+ {Ordered_Fail, 20, 35},
+ {SetNonEqual, 20, 35},
+ {Ordered, 20, 35},
+ {Undo, 0, 0},
+
+ // Learn <= and >=
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 50, 60},
+ {SetOrderOrEqual, 60, 50},
+ {OrderedOrEqual, 50, 60},
+ {OrderedOrEqual, 60, 50},
+ {Ordered_Fail, 50, 60},
+ {Ordered_Fail, 60, 50},
+ {Equal, 50, 60},
+ {Equal, 60, 50},
+ {NonEqual_Fail, 50, 60},
+ {NonEqual_Fail, 60, 50},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetCollapse(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ // Create a complex graph of <= relations among nodes between 10 and 25.
+ {SetOrderOrEqual, 10, 15},
+ {SetOrderOrEqual, 15, 20},
+ {SetOrderOrEqual, 20, vconst(20)},
+ {SetOrderOrEqual, vconst(20), 25},
+ {SetOrderOrEqual, 10, 12},
+ {SetOrderOrEqual, 12, 16},
+ {SetOrderOrEqual, 16, vconst(20)},
+ {SetOrderOrEqual, 10, 17},
+ {SetOrderOrEqual, 17, 25},
+ {SetOrderOrEqual, 15, 18},
+ {SetOrderOrEqual, 18, vconst(20)},
+ {SetOrderOrEqual, 15, 19},
+ {SetOrderOrEqual, 19, 25},
+
+ // These are other paths not part of the main collapsing path
+ {SetOrderOrEqual, 10, 11},
+ {SetOrderOrEqual, 11, 26},
+ {SetOrderOrEqual, 13, 25},
+ {SetOrderOrEqual, 100, 25},
+ {SetOrderOrEqual, 101, 15},
+ {SetOrderOrEqual, 102, 10},
+ {SetOrderOrEqual, 25, 103},
+ {SetOrderOrEqual, 20, 104},
+
+ {Checkpoint, 0, 0},
+ // Collapse everything by setting 10 >= 25: this should make everything equal
+ {SetOrderOrEqual, 25, 10},
+
+ // Check that all nodes are pairwise equal now
+ {Equal, 10, 12},
+ {Equal, 10, 15},
+ {Equal, 10, 16},
+ {Equal, 10, 17},
+ {Equal, 10, 18},
+ {Equal, 10, 19},
+ {Equal, 10, vconst(20)},
+ {Equal, 10, vconst2(20)},
+ {Equal, 10, 25},
+
+ {Equal, 12, 15},
+ {Equal, 12, 16},
+ {Equal, 12, 17},
+ {Equal, 12, 18},
+ {Equal, 12, 19},
+ {Equal, 12, vconst(20)},
+ {Equal, 12, vconst2(20)},
+ {Equal, 12, 25},
+
+ {Equal, 15, 16},
+ {Equal, 15, 17},
+ {Equal, 15, 18},
+ {Equal, 15, 19},
+ {Equal, 15, vconst(20)},
+ {Equal, 15, vconst2(20)},
+ {Equal, 15, 25},
+
+ {Equal, 16, 17},
+ {Equal, 16, 18},
+ {Equal, 16, 19},
+ {Equal, 16, vconst(20)},
+ {Equal, 16, vconst2(20)},
+ {Equal, 16, 25},
+
+ {Equal, 17, 18},
+ {Equal, 17, 19},
+ {Equal, 17, vconst(20)},
+ {Equal, 17, vconst2(20)},
+ {Equal, 17, 25},
+
+ {Equal, 18, 19},
+ {Equal, 18, vconst(20)},
+ {Equal, 18, vconst2(20)},
+ {Equal, 18, 25},
+
+ {Equal, 19, vconst(20)},
+ {Equal, 19, vconst2(20)},
+ {Equal, 19, 25},
+
+ {Equal, vconst(20), vconst2(20)},
+ {Equal, vconst(20), 25},
+
+ {Equal, vconst2(20), 25},
+
+ // ... but not 11/26/100/101/102, which were on a different path
+ {Equal_Fail, 10, 11},
+ {Equal_Fail, 10, 26},
+ {Equal_Fail, 10, 100},
+ {Equal_Fail, 10, 101},
+ {Equal_Fail, 10, 102},
+ {OrderedOrEqual, 10, 26},
+ {OrderedOrEqual, 25, 26},
+ {OrderedOrEqual, 13, 25},
+ {OrderedOrEqual, 13, 10},
+
+ {Undo, 0, 0},
+ {OrderedOrEqual, 10, 25},
+ {Equal_Fail, 10, 12},
+ {Equal_Fail, 10, 15},
+ {Equal_Fail, 10, 25},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 10, 15},
+ {SetOrderOrEqual, 15, 20},
+ {SetOrderOrEqual, 20, 25},
+ {SetOrder, 10, 16},
+ {SetOrderOrEqual, 16, 20},
+ // Check that we cannot collapse here because of the strict relation 10<16
+ {SetOrderOrEqual_Fail, 20, 10},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetSetEqual(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ // 10<=20<=30<40, 20<=100<110
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 10, 20},
+ {SetOrderOrEqual, 20, 30},
+ {SetOrder, 30, 40},
+ {SetOrderOrEqual, 20, 100},
+ {SetOrder, 100, 110},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual_Fail, 30, 10},
+ {Ordered_Fail, 10, 30},
+ {Ordered_Fail, 30, 10},
+ {Ordered, 10, 40},
+ {Ordered_Fail, 40, 10},
+
+ // Try learning 10==20.
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {OrderedOrEqual, 10, 20},
+ {Ordered_Fail, 10, 20},
+ {Equal, 10, 20},
+ {SetOrderOrEqual, 10, 20},
+ {SetOrderOrEqual, 20, 10},
+ {SetOrder_Fail, 10, 20},
+ {SetOrder_Fail, 20, 10},
+ {Undo, 0, 0},
+
+ // Try learning 20==10.
+ {Checkpoint, 0, 0},
+ {SetEqual, 20, 10},
+ {OrderedOrEqual, 10, 20},
+ {Ordered_Fail, 10, 20},
+ {Equal, 10, 20},
+ {Undo, 0, 0},
+
+ // Try learning 10==40 or 30==40 or 10==110.
+ {Checkpoint, 0, 0},
+ {SetEqual_Fail, 10, 40},
+ {SetEqual_Fail, 40, 10},
+ {SetEqual_Fail, 30, 40},
+ {SetEqual_Fail, 40, 30},
+ {SetEqual_Fail, 10, 110},
+ {SetEqual_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 40==110, and then 10==40 or 10=110
+ {Checkpoint, 0, 0},
+ {SetEqual, 40, 110},
+ {SetEqual_Fail, 10, 40},
+ {SetEqual_Fail, 40, 10},
+ {SetEqual_Fail, 10, 110},
+ {SetEqual_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 40<20 or 30<20 or 110<10
+ {Checkpoint, 0, 0},
+ {SetOrder_Fail, 40, 20},
+ {SetOrder_Fail, 30, 20},
+ {SetOrder_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 30<=20
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 30, 20},
+ {Equal, 30, 20},
+ {OrderedOrEqual, 30, 100},
+ {Ordered, 30, 110},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetConst(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, vconst(15)},
+ {SetOrderOrEqual, 100, vconst(120)},
+ {Ordered, 1, vconst(15)},
+ {Ordered, 1, vconst(120)},
+ {OrderedOrEqual, 1, vconst(120)},
+ {OrderedOrEqual, 100, vconst(120)},
+ {Ordered_Fail, 100, vconst(15)},
+ {Ordered_Fail, vconst(15), 100},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, 5},
+ {SetOrderOrEqual, 5, 25},
+ {SetEqual, 20, vconst(20)},
+ {SetEqual, 25, vconst(25)},
+ {Ordered, 1, 20},
+ {Ordered, 1, vconst(30)},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, 5},
+ {SetOrderOrEqual, 5, 25},
+ {SetEqual, vconst(-20), 5},
+ {SetEqual, vconst(-25), 1},
+ {Ordered, 1, 5},
+ {Ordered, vconst(-30), 1},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 1, vconst(4)},
+ {SetNonEqual, 1, vconst(6)},
+ {NonEqual, 1, vconst(4)},
+ {NonEqual_Fail, 1, vconst(5)},
+ {NonEqual, 1, vconst(6)},
+ {Equal_Fail, 1, vconst(4)},
+ {Equal_Fail, 1, vconst(5)},
+ {Equal_Fail, 1, vconst(6)},
+ {Equal_Fail, 1, vconst(7)},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, true, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, vconst(15)},
+ {SetOrderOrEqual, 100, vconst(-5)}, // -5 is a very big number in unsigned
+ {Ordered, 1, vconst(15)},
+ {Ordered, 1, vconst(-5)},
+ {OrderedOrEqual, 1, vconst(-5)},
+ {OrderedOrEqual, 100, vconst(-5)},
+ {Ordered_Fail, 100, vconst(15)},
+ {Ordered_Fail, vconst(15), 100},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, vconst(3)},
+ {SetNonEqual, 1, vconst(0)},
+ {Ordered_Fail, 1, vconst(0)},
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ // Check relations of a constant with itself
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual_Fail, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetOrder_Fail, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+
+ // Check relations of two constants among them, using
+ // different instances of the same constant
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, vconst(3), vconst(4)},
+ {OrderedOrEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetOrder, vconst(3), vconst(4)},
+ {Ordered, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual_Fail, vconst(3), vconst(4)},
+ {SetEqual_Fail, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {NonEqual, vconst(3), vconst(4)},
+ {NonEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {Equal_Fail, vconst(3), vconst(4)},
+ {Equal_Fail, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual, vconst(3), vconst(4)},
+ {SetNonEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetNonEqual(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+
+ // Learn 10!=20
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 10, 20},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+ {SetEqual_Fail, 10, 20},
+
+ // Learn again 10!=20
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 10, 20},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+
+ // Undo. We still know 10!=20
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+ {SetEqual_Fail, 10, 20},
+
+ // Undo again. Now we know nothing
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+
+ // Learn 10==20
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Learn again 10==20
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Undo. We still know 10==20
+ {Undo, 0, 0},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Undo. We know nothing
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/print.go b/src/cmd/compile/internal/ssa/print.go
new file mode 100644
index 0000000..0d3b5d9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/print.go
@@ -0,0 +1,192 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "cmd/internal/notsha256"
+ "cmd/internal/src"
+)
+
+func printFunc(f *Func) {
+ f.Logf("%s", f)
+}
+
+func hashFunc(f *Func) []byte {
+ h := notsha256.New()
+ p := stringFuncPrinter{w: h, printDead: true}
+ fprintFunc(p, f)
+ return h.Sum(nil)
+}
+
+func (f *Func) String() string {
+ var buf strings.Builder
+ p := stringFuncPrinter{w: &buf, printDead: true}
+ fprintFunc(p, f)
+ return buf.String()
+}
+
+// rewriteHash returns a hash of f suitable for detecting rewrite cycles.
+func (f *Func) rewriteHash() string {
+ h := notsha256.New()
+ p := stringFuncPrinter{w: h, printDead: false}
+ fprintFunc(p, f)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+type funcPrinter interface {
+ header(f *Func)
+ startBlock(b *Block, reachable bool)
+ endBlock(b *Block, reachable bool)
+ value(v *Value, live bool)
+ startDepCycle()
+ endDepCycle()
+ named(n LocalSlot, vals []*Value)
+}
+
+type stringFuncPrinter struct {
+ w io.Writer
+ printDead bool
+}
+
+func (p stringFuncPrinter) header(f *Func) {
+ fmt.Fprint(p.w, f.Name)
+ fmt.Fprint(p.w, " ")
+ fmt.Fprintln(p.w, f.Type)
+}
+
+func (p stringFuncPrinter) startBlock(b *Block, reachable bool) {
+ if !p.printDead && !reachable {
+ return
+ }
+ fmt.Fprintf(p.w, " b%d:", b.ID)
+ if len(b.Preds) > 0 {
+ io.WriteString(p.w, " <-")
+ for _, e := range b.Preds {
+ pred := e.b
+ fmt.Fprintf(p.w, " b%d", pred.ID)
+ }
+ }
+ if !reachable {
+ fmt.Fprint(p.w, " DEAD")
+ }
+ io.WriteString(p.w, "\n")
+}
+
+func (p stringFuncPrinter) endBlock(b *Block, reachable bool) {
+ if !p.printDead && !reachable {
+ return
+ }
+ fmt.Fprintln(p.w, " "+b.LongString())
+}
+
+func StmtString(p src.XPos) string {
+ linenumber := "(?) "
+ if p.IsKnown() {
+ pfx := ""
+ if p.IsStmt() == src.PosIsStmt {
+ pfx = "+"
+ }
+ if p.IsStmt() == src.PosNotStmt {
+ pfx = "-"
+ }
+ linenumber = fmt.Sprintf("(%s%d) ", pfx, p.Line())
+ }
+ return linenumber
+}
+
+func (p stringFuncPrinter) value(v *Value, live bool) {
+ if !p.printDead && !live {
+ return
+ }
+ fmt.Fprintf(p.w, " %s", StmtString(v.Pos))
+ fmt.Fprint(p.w, v.LongString())
+ if !live {
+ fmt.Fprint(p.w, " DEAD")
+ }
+ fmt.Fprintln(p.w)
+}
+
+func (p stringFuncPrinter) startDepCycle() {
+ fmt.Fprintln(p.w, "dependency cycle!")
+}
+
+func (p stringFuncPrinter) endDepCycle() {}
+
+func (p stringFuncPrinter) named(n LocalSlot, vals []*Value) {
+ fmt.Fprintf(p.w, "name %s: %v\n", n, vals)
+}
+
+func fprintFunc(p funcPrinter, f *Func) {
+ reachable, live := findlive(f)
+ defer f.Cache.freeBoolSlice(live)
+ p.header(f)
+ printed := make([]bool, f.NumValues())
+ for _, b := range f.Blocks {
+ p.startBlock(b, reachable[b.ID])
+
+ if f.scheduled {
+ // Order of Values has been decided - print in that order.
+ for _, v := range b.Values {
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ }
+ p.endBlock(b, reachable[b.ID])
+ continue
+ }
+
+ // print phis first since all value cycles contain a phi
+ n := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+
+ // print rest of values in dependency order
+ for n < len(b.Values) {
+ m := n
+ outer:
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ for _, w := range v.Args {
+ // w == nil shouldn't happen, but if it does,
+ // don't panic; we'll get a better diagnosis later.
+ if w != nil && w.Block == b && !printed[w.ID] {
+ continue outer
+ }
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+ if m == n {
+ p.startDepCycle()
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+ p.endDepCycle()
+ }
+ }
+
+ p.endBlock(b, reachable[b.ID])
+ }
+ for _, name := range f.Names {
+ p.named(*name, f.NamedValues[*name])
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
new file mode 100644
index 0000000..842719f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -0,0 +1,1813 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+ "math"
+)
+
+type branch int
+
+const (
+ unknown branch = iota
+ positive
+ negative
+ // The outedges from a jump table are jumpTable0,
+ // jumpTable0+1, jumpTable0+2, etc. There could be an
+ // arbitrary number so we can't list them all here.
+ jumpTable0
+)
+
+// relation represents the set of possible relations between
+// pairs of variables (v, w). Without a priori knowledge the
+// mask is lt | eq | gt meaning v can be less than, equal to or
+// greater than w. When the execution path branches on the condition
+// `v op w` the set of relations is updated to exclude any
+// relation not possible due to `v op w` being true (or false).
+//
+// E.g.
+//
+// r := relation(...)
+//
+// if v < w {
+// newR := r & lt
+// }
+// if v >= w {
+// newR := r & (eq|gt)
+// }
+// if v != w {
+// newR := r & (lt|gt)
+// }
+type relation uint
+
+const (
+ lt relation = 1 << iota
+ eq
+ gt
+)
+
+var relationStrings = [...]string{
+ 0: "none", lt: "<", eq: "==", lt | eq: "<=",
+ gt: ">", gt | lt: "!=", gt | eq: ">=", gt | eq | lt: "any",
+}
+
+func (r relation) String() string {
+ if r < relation(len(relationStrings)) {
+ return relationStrings[r]
+ }
+ return fmt.Sprintf("relation(%d)", uint(r))
+}
+
+// domain represents the domain of a variable pair in which a set
+// of relations is known. For example, relations learned for unsigned
+// pairs cannot be transferred to signed pairs because the same bit
+// representation can mean something else.
+type domain uint
+
+const (
+ signed domain = 1 << iota
+ unsigned
+ pointer
+ boolean
+)
+
+var domainStrings = [...]string{
+ "signed", "unsigned", "pointer", "boolean",
+}
+
+func (d domain) String() string {
+ s := ""
+ for i, ds := range domainStrings {
+ if d&(1<<uint(i)) != 0 {
+ if len(s) != 0 {
+ s += "|"
+ }
+ s += ds
+ d &^= 1 << uint(i)
+ }
+ }
+ if d != 0 {
+ if len(s) != 0 {
+ s += "|"
+ }
+ s += fmt.Sprintf("0x%x", uint(d))
+ }
+ return s
+}
+
+type pair struct {
+ // a pair of values, ordered by ID.
+ // v can be nil, to mean the zero value.
+ // for booleans the zero value (v == nil) is false.
+ v, w *Value
+ d domain
+}
+
+// fact is a pair plus a relation for that pair.
+type fact struct {
+ p pair
+ r relation
+}
+
+// a limit records known upper and lower bounds for a value.
+type limit struct {
+ min, max int64 // min <= value <= max, signed
+ umin, umax uint64 // umin <= value <= umax, unsigned
+}
+
+func (l limit) String() string {
+ return fmt.Sprintf("sm,SM,um,UM=%d,%d,%d,%d", l.min, l.max, l.umin, l.umax)
+}
+
+func (l limit) intersect(l2 limit) limit {
+ if l.min < l2.min {
+ l.min = l2.min
+ }
+ if l.umin < l2.umin {
+ l.umin = l2.umin
+ }
+ if l.max > l2.max {
+ l.max = l2.max
+ }
+ if l.umax > l2.umax {
+ l.umax = l2.umax
+ }
+ return l
+}
+
+var noLimit = limit{math.MinInt64, math.MaxInt64, 0, math.MaxUint64}
+
+// a limitFact is a limit known for a particular value.
+type limitFact struct {
+ vid ID
+ limit limit
+}
+
+// factsTable keeps track of relations between pairs of values.
+//
+// The fact table logic is sound, but incomplete. Outside of a few
+// special cases, it performs no deduction or arithmetic. While there
+// are known decision procedures for this, the ad hoc approach taken
+// by the facts table is effective for real code while remaining very
+// efficient.
+type factsTable struct {
+ // unsat is true if facts contains a contradiction.
+ //
+ // Note that the factsTable logic is incomplete, so if unsat
+ // is false, the assertions in factsTable could be satisfiable
+ // *or* unsatisfiable.
+ unsat bool // true if facts contains a contradiction
+ unsatDepth int // number of unsat checkpoints
+
+ facts map[pair]relation // current known set of relation
+ stack []fact // previous sets of relations
+
+ // order* is a couple of partial order sets that record information
+ // about relations between SSA values in the signed and unsigned
+ // domain.
+ orderS *poset
+ orderU *poset
+
+ // known lower and upper bounds on individual values.
+ limits map[ID]limit
+ limitStack []limitFact // previous entries
+
+ // For each slice s, a map from s to a len(s)/cap(s) value (if any)
+ // TODO: check if there are cases that matter where we have
+ // more than one len(s) for a slice. We could keep a list if necessary.
+ lens map[ID]*Value
+ caps map[ID]*Value
+
+ // zero is a zero-valued constant
+ zero *Value
+}
+
+// checkpointFact is an invalid value used for checkpointing
+// and restoring factsTable.
+var checkpointFact = fact{}
+var checkpointBound = limitFact{}
+
+func newFactsTable(f *Func) *factsTable {
+ ft := &factsTable{}
+ ft.orderS = f.newPoset()
+ ft.orderU = f.newPoset()
+ ft.orderS.SetUnsigned(false)
+ ft.orderU.SetUnsigned(true)
+ ft.facts = make(map[pair]relation)
+ ft.stack = make([]fact, 4)
+ ft.limits = make(map[ID]limit)
+ ft.limitStack = make([]limitFact, 4)
+ ft.zero = f.ConstInt64(f.Config.Types.Int64, 0)
+ return ft
+}
+
+// update updates the set of relations between v and w in domain d
+// restricting it to r.
+func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s", parent, v, w, r)
+ }
+ // No need to do anything else if we already found unsat.
+ if ft.unsat {
+ return
+ }
+
+ // Self-fact. It's wasteful to register it into the facts
+ // table, so just note whether it's satisfiable
+ if v == w {
+ if r&eq == 0 {
+ ft.unsat = true
+ }
+ return
+ }
+
+ if d == signed || d == unsigned {
+ var ok bool
+ order := ft.orderS
+ if d == unsigned {
+ order = ft.orderU
+ }
+ switch r {
+ case lt:
+ ok = order.SetOrder(v, w)
+ case gt:
+ ok = order.SetOrder(w, v)
+ case lt | eq:
+ ok = order.SetOrderOrEqual(v, w)
+ case gt | eq:
+ ok = order.SetOrderOrEqual(w, v)
+ case eq:
+ ok = order.SetEqual(v, w)
+ case lt | gt:
+ ok = order.SetNonEqual(v, w)
+ default:
+ panic("unknown relation")
+ }
+ if !ok {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r)
+ }
+ ft.unsat = true
+ return
+ }
+ } else {
+ if lessByID(w, v) {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+
+ p := pair{v, w, d}
+ oldR, ok := ft.facts[p]
+ if !ok {
+ if v == w {
+ oldR = eq
+ } else {
+ oldR = lt | eq | gt
+ }
+ }
+ // No changes compared to information already in facts table.
+ if oldR == r {
+ return
+ }
+ ft.stack = append(ft.stack, fact{p, oldR})
+ ft.facts[p] = oldR & r
+ // If this relation is not satisfiable, mark it and exit right away
+ if oldR&r == 0 {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r)
+ }
+ ft.unsat = true
+ return
+ }
+ }
+
+ // Extract bounds when comparing against constants
+ if v.isGenericIntConst() {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+ if v != nil && w.isGenericIntConst() {
+ // Note: all the +1/-1 below could overflow/underflow. Either will
+ // still generate correct results, it will just lead to imprecision.
+ // In fact if there is overflow/underflow, the corresponding
+ // code is unreachable because the known range is outside the range
+ // of the value's type.
+ old, ok := ft.limits[v.ID]
+ if !ok {
+ old = noLimit
+ if v.isGenericIntConst() {
+ switch d {
+ case signed:
+ old.min, old.max = v.AuxInt, v.AuxInt
+ if v.AuxInt >= 0 {
+ old.umin, old.umax = uint64(v.AuxInt), uint64(v.AuxInt)
+ }
+ case unsigned:
+ old.umin = v.AuxUnsigned()
+ old.umax = old.umin
+ if int64(old.umin) >= 0 {
+ old.min, old.max = int64(old.umin), int64(old.umin)
+ }
+ }
+ }
+ }
+ lim := noLimit
+ switch d {
+ case signed:
+ c := w.AuxInt
+ switch r {
+ case lt:
+ lim.max = c - 1
+ case lt | eq:
+ lim.max = c
+ case gt | eq:
+ lim.min = c
+ case gt:
+ lim.min = c + 1
+ case lt | gt:
+ lim = old
+ if c == lim.min {
+ lim.min++
+ }
+ if c == lim.max {
+ lim.max--
+ }
+ case eq:
+ lim.min = c
+ lim.max = c
+ }
+ if lim.min >= 0 {
+ // int(x) >= 0 && int(x) >= N ⇒ uint(x) >= N
+ lim.umin = uint64(lim.min)
+ }
+ if lim.max != noLimit.max && old.min >= 0 && lim.max >= 0 {
+ // 0 <= int(x) <= N ⇒ 0 <= uint(x) <= N
+ // This is for a max update, so the lower bound
+ // comes from what we already know (old).
+ lim.umax = uint64(lim.max)
+ }
+ case unsigned:
+ uc := w.AuxUnsigned()
+ switch r {
+ case lt:
+ lim.umax = uc - 1
+ case lt | eq:
+ lim.umax = uc
+ case gt | eq:
+ lim.umin = uc
+ case gt:
+ lim.umin = uc + 1
+ case lt | gt:
+ lim = old
+ if uc == lim.umin {
+ lim.umin++
+ }
+ if uc == lim.umax {
+ lim.umax--
+ }
+ case eq:
+ lim.umin = uc
+ lim.umax = uc
+ }
+ // We could use the contrapositives of the
+ // signed implications to derive signed facts,
+ // but it turns out not to matter.
+ }
+ ft.limitStack = append(ft.limitStack, limitFact{v.ID, old})
+ lim = old.intersect(lim)
+ ft.limits[v.ID] = lim
+ if v.Block.Func.pass.debug > 2 {
+ v.Block.Func.Warnl(parent.Pos, "parent=%s, new limits %s %s %s %s", parent, v, w, r, lim.String())
+ }
+ if lim.min > lim.max || lim.umin > lim.umax {
+ ft.unsat = true
+ return
+ }
+ }
+
+ // Derived facts below here are only about numbers.
+ if d != signed && d != unsigned {
+ return
+ }
+
+ // Additional facts we know given the relationship between len and cap.
+ //
+ // TODO: Since prove now derives transitive relations, it
+ // should be sufficient to learn that len(w) <= cap(w) at the
+ // beginning of prove where we look for all len/cap ops.
+ if v.Op == OpSliceLen && r&lt == 0 && ft.caps[v.Args[0].ID] != nil {
+ // len(s) > w implies cap(s) > w
+ // len(s) >= w implies cap(s) >= w
+ // len(s) == w implies cap(s) >= w
+ ft.update(parent, ft.caps[v.Args[0].ID], w, d, r|gt)
+ }
+ if w.Op == OpSliceLen && r&gt == 0 && ft.caps[w.Args[0].ID] != nil {
+ // same, length on the RHS.
+ ft.update(parent, v, ft.caps[w.Args[0].ID], d, r|lt)
+ }
+ if v.Op == OpSliceCap && r&gt == 0 && ft.lens[v.Args[0].ID] != nil {
+ // cap(s) < w implies len(s) < w
+ // cap(s) <= w implies len(s) <= w
+ // cap(s) == w implies len(s) <= w
+ ft.update(parent, ft.lens[v.Args[0].ID], w, d, r|lt)
+ }
+ if w.Op == OpSliceCap && r&lt == 0 && ft.lens[w.Args[0].ID] != nil {
+ // same, capacity on the RHS.
+ ft.update(parent, v, ft.lens[w.Args[0].ID], d, r|gt)
+ }
+
+ // Process fence-post implications.
+ //
+ // First, make the condition > or >=.
+ if r == lt || r == lt|eq {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+ switch r {
+ case gt:
+ if x, delta := isConstDelta(v); x != nil && delta == 1 {
+ // x+1 > w ⇒ x >= w
+ //
+ // This is useful for eliminating the
+ // growslice branch of append.
+ ft.update(parent, x, w, d, gt|eq)
+ } else if x, delta := isConstDelta(w); x != nil && delta == -1 {
+ // v > x-1 ⇒ v >= x
+ ft.update(parent, v, x, d, gt|eq)
+ }
+ case gt | eq:
+ if x, delta := isConstDelta(v); x != nil && delta == -1 {
+ // x-1 >= w && x > min ⇒ x > w
+ //
+ // Useful for i > 0; s[i-1].
+ lim, ok := ft.limits[x.ID]
+ if ok && ((d == signed && lim.min > opMin[v.Op]) || (d == unsigned && lim.umin > 0)) {
+ ft.update(parent, x, w, d, gt)
+ }
+ } else if x, delta := isConstDelta(w); x != nil && delta == 1 {
+ // v >= x+1 && x < max ⇒ v > x
+ lim, ok := ft.limits[x.ID]
+ if ok && ((d == signed && lim.max < opMax[w.Op]) || (d == unsigned && lim.umax < opUMax[w.Op])) {
+ ft.update(parent, v, x, d, gt)
+ }
+ }
+ }
+
+ // Process: x+delta > w (with delta constant)
+ // Only signed domain for now (useful for accesses to slices in loops).
+ if r == gt || r == gt|eq {
+ if x, delta := isConstDelta(v); x != nil && d == signed {
+ if parent.Func.pass.debug > 1 {
+ parent.Func.Warnl(parent.Pos, "x+d %s w; x:%v %v delta:%v w:%v d:%v", r, x, parent.String(), delta, w.AuxInt, d)
+ }
+ underflow := true
+ if l, has := ft.limits[x.ID]; has && delta < 0 {
+ if (x.Type.Size() == 8 && l.min >= math.MinInt64-delta) ||
+ (x.Type.Size() == 4 && l.min >= math.MinInt32-delta) {
+ underflow = false
+ }
+ }
+ if delta < 0 && !underflow {
+ // If delta < 0 and x+delta cannot underflow then x > x+delta (that is, x > v)
+ ft.update(parent, x, v, signed, gt)
+ }
+ if !w.isGenericIntConst() {
+ // If we know that x+delta > w but w is not constant, we can derive:
+ // if delta < 0 and x+delta cannot underflow, then x > w
+ // This is useful for loops with bounds "len(slice)-K" (delta = -K)
+ if delta < 0 && !underflow {
+ ft.update(parent, x, w, signed, r)
+ }
+ } else {
+ // With w,delta constants, we want to derive: x+delta > w ⇒ x > w-delta
+ //
+ // We compute (using integers of the correct size):
+ // min = w - delta
+ // max = MaxInt - delta
+ //
+ // And we prove that:
+ // if min<max: min < x AND x <= max
+ // if min>max: min < x OR x <= max
+ //
+ // This is always correct, even in case of overflow.
+ //
+ // If the initial fact is x+delta >= w instead, the derived conditions are:
+ // if min<max: min <= x AND x <= max
+ // if min>max: min <= x OR x <= max
+ //
+ // Notice the conditions for max are still <=, as they handle overflows.
+ var min, max int64
+ var vmin, vmax *Value
+ switch x.Type.Size() {
+ case 8:
+ min = w.AuxInt - delta
+ max = int64(^uint64(0)>>1) - delta
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, max)
+
+ case 4:
+ min = int64(int32(w.AuxInt) - int32(delta))
+ max = int64(int32(^uint32(0)>>1) - int32(delta))
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, max)
+
+ case 2:
+ min = int64(int16(w.AuxInt) - int16(delta))
+ max = int64(int16(^uint16(0)>>1) - int16(delta))
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst16, parent.Func.Config.Types.Int16, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst16, parent.Func.Config.Types.Int16, max)
+
+ case 1:
+ min = int64(int8(w.AuxInt) - int8(delta))
+ max = int64(int8(^uint8(0)>>1) - int8(delta))
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst8, parent.Func.Config.Types.Int8, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst8, parent.Func.Config.Types.Int8, max)
+
+ default:
+ panic("unimplemented")
+ }
+
+ if min < max {
+ // Record that x > min and max >= x
+ ft.update(parent, x, vmin, d, r)
+ ft.update(parent, vmax, x, d, r|eq)
+ } else {
+ // We know that either x>min OR x<=max. factsTable cannot record OR conditions,
+ // so let's see if we can already prove that one of them is false, in which case
+ // the other must be true
+ if l, has := ft.limits[x.ID]; has {
+ if l.max <= min {
+ if r&eq == 0 || l.max < min {
+ // x>min (x>=min) is impossible, so it must be x<=max
+ ft.update(parent, vmax, x, d, r|eq)
+ }
+ } else if l.min > max {
+ // x<=max is impossible, so it must be x>min
+ ft.update(parent, x, vmin, d, r)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Look through value-preserving extensions.
+ // If the domain is appropriate for the pre-extension Type,
+ // repeat the update with the pre-extension Value.
+ if isCleanExt(v) {
+ switch {
+ case d == signed && v.Args[0].Type.IsSigned():
+ fallthrough
+ case d == unsigned && !v.Args[0].Type.IsSigned():
+ ft.update(parent, v.Args[0], w, d, r)
+ }
+ }
+ if isCleanExt(w) {
+ switch {
+ case d == signed && w.Args[0].Type.IsSigned():
+ fallthrough
+ case d == unsigned && !w.Args[0].Type.IsSigned():
+ ft.update(parent, v, w.Args[0], d, r)
+ }
+ }
+}
+
+var opMin = map[Op]int64{
+ OpAdd64: math.MinInt64, OpSub64: math.MinInt64,
+ OpAdd32: math.MinInt32, OpSub32: math.MinInt32,
+}
+
+var opMax = map[Op]int64{
+ OpAdd64: math.MaxInt64, OpSub64: math.MaxInt64,
+ OpAdd32: math.MaxInt32, OpSub32: math.MaxInt32,
+}
+
+var opUMax = map[Op]uint64{
+ OpAdd64: math.MaxUint64, OpSub64: math.MaxUint64,
+ OpAdd32: math.MaxUint32, OpSub32: math.MaxUint32,
+}
+
+// isNonNegative reports whether v is known to be non-negative.
+func (ft *factsTable) isNonNegative(v *Value) bool {
+ if isNonNegative(v) {
+ return true
+ }
+
+ var max int64
+ switch v.Type.Size() {
+ case 1:
+ max = math.MaxInt8
+ case 2:
+ max = math.MaxInt16
+ case 4:
+ max = math.MaxInt32
+ case 8:
+ max = math.MaxInt64
+ default:
+ panic("unexpected integer size")
+ }
+
+ // Check if the recorded limits can prove that the value is positive
+
+ if l, has := ft.limits[v.ID]; has && (l.min >= 0 || l.umax <= uint64(max)) {
+ return true
+ }
+
+ // Check if v = x+delta, and we can use x's limits to prove that it's positive
+ if x, delta := isConstDelta(v); x != nil {
+ if l, has := ft.limits[x.ID]; has {
+ if delta > 0 && l.min >= -delta && l.max <= max-delta {
+ return true
+ }
+ if delta < 0 && l.min >= -delta {
+ return true
+ }
+ }
+ }
+
+ // Check if v is a value-preserving extension of a non-negative value.
+ if isCleanExt(v) && ft.isNonNegative(v.Args[0]) {
+ return true
+ }
+
+ // Check if the signed poset can prove that the value is >= 0
+ return ft.orderS.OrderedOrEqual(ft.zero, v)
+}
+
+// checkpoint saves the current state of known relations.
+// Called when descending on a branch.
+func (ft *factsTable) checkpoint() {
+ if ft.unsat {
+ ft.unsatDepth++
+ }
+ ft.stack = append(ft.stack, checkpointFact)
+ ft.limitStack = append(ft.limitStack, checkpointBound)
+ ft.orderS.Checkpoint()
+ ft.orderU.Checkpoint()
+}
+
+// restore restores known relation to the state just
+// before the previous checkpoint.
+// Called when backing up on a branch.
+func (ft *factsTable) restore() {
+ if ft.unsatDepth > 0 {
+ ft.unsatDepth--
+ } else {
+ ft.unsat = false
+ }
+ for {
+ old := ft.stack[len(ft.stack)-1]
+ ft.stack = ft.stack[:len(ft.stack)-1]
+ if old == checkpointFact {
+ break
+ }
+ if old.r == lt|eq|gt {
+ delete(ft.facts, old.p)
+ } else {
+ ft.facts[old.p] = old.r
+ }
+ }
+ for {
+ old := ft.limitStack[len(ft.limitStack)-1]
+ ft.limitStack = ft.limitStack[:len(ft.limitStack)-1]
+ if old.vid == 0 { // checkpointBound
+ break
+ }
+ if old.limit == noLimit {
+ delete(ft.limits, old.vid)
+ } else {
+ ft.limits[old.vid] = old.limit
+ }
+ }
+ ft.orderS.Undo()
+ ft.orderU.Undo()
+}
+
+func lessByID(v, w *Value) bool {
+ if v == nil && w == nil {
+ // Should not happen, but just in case.
+ return false
+ }
+ if v == nil {
+ return true
+ }
+ return w != nil && v.ID < w.ID
+}
+
+var (
+ reverseBits = [...]relation{0, 4, 2, 6, 1, 5, 3, 7}
+
+ // maps what we learn when the positive branch is taken.
+ // For example:
+ // OpLess8: {signed, lt},
+ // v1 = (OpLess8 v2 v3).
+ // If v1 branch is taken then we learn that the rangeMask
+ // can be at most lt.
+ domainRelationTable = map[Op]struct {
+ d domain
+ r relation
+ }{
+ OpEq8: {signed | unsigned, eq},
+ OpEq16: {signed | unsigned, eq},
+ OpEq32: {signed | unsigned, eq},
+ OpEq64: {signed | unsigned, eq},
+ OpEqPtr: {pointer, eq},
+
+ OpNeq8: {signed | unsigned, lt | gt},
+ OpNeq16: {signed | unsigned, lt | gt},
+ OpNeq32: {signed | unsigned, lt | gt},
+ OpNeq64: {signed | unsigned, lt | gt},
+ OpNeqPtr: {pointer, lt | gt},
+
+ OpLess8: {signed, lt},
+ OpLess8U: {unsigned, lt},
+ OpLess16: {signed, lt},
+ OpLess16U: {unsigned, lt},
+ OpLess32: {signed, lt},
+ OpLess32U: {unsigned, lt},
+ OpLess64: {signed, lt},
+ OpLess64U: {unsigned, lt},
+
+ OpLeq8: {signed, lt | eq},
+ OpLeq8U: {unsigned, lt | eq},
+ OpLeq16: {signed, lt | eq},
+ OpLeq16U: {unsigned, lt | eq},
+ OpLeq32: {signed, lt | eq},
+ OpLeq32U: {unsigned, lt | eq},
+ OpLeq64: {signed, lt | eq},
+ OpLeq64U: {unsigned, lt | eq},
+
+ // For these ops, the negative branch is different: we can only
+ // prove signed/GE (signed/GT) if we can prove that arg0 is non-negative.
+ // See the special case in addBranchRestrictions.
+ OpIsInBounds: {signed | unsigned, lt}, // 0 <= arg0 < arg1
+ OpIsSliceInBounds: {signed | unsigned, lt | eq}, // 0 <= arg0 <= arg1
+ }
+)
+
+// cleanup returns the posets to the free list
+func (ft *factsTable) cleanup(f *Func) {
+ for _, po := range []*poset{ft.orderS, ft.orderU} {
+ // Make sure it's empty as it should be. A non-empty poset
+ // might cause errors and miscompilations if reused.
+ if checkEnabled {
+ if err := po.CheckEmpty(); err != nil {
+ f.Fatalf("poset not empty after function %s: %v", f.Name, err)
+ }
+ }
+ f.retPoset(po)
+ }
+}
+
+// prove removes redundant BlockIf branches that can be inferred
+// from previous dominating comparisons.
+//
+// By far, the most common redundant pair are generated by bounds checking.
+// For example for the code:
+//
+// a[i] = 4
+// foo(a[i])
+//
+// The compiler will generate the following code:
+//
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// a[i] = 4
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// foo(a[i])
+//
+// The second comparison i >= len(a) is clearly redundant because if the
+// else branch of the first comparison is executed, we already know that i < len(a).
+// The code for the second panic can be removed.
+//
+// prove works by finding contradictions and trimming branches whose
+// conditions are unsatisfiable given the branches leading up to them.
+// It tracks a "fact table" of branch conditions. For each branching
+// block, it asserts the branch conditions that uniquely dominate that
+// block, and then separately asserts the block's branch condition and
+// its negation. If either leads to a contradiction, it can trim that
+// successor.
+func prove(f *Func) {
+ // Find induction variables. Currently, findIndVars
+ // is limited to one induction variable per block.
+ var indVars map[*Block]indVar
+ for _, v := range findIndVar(f) {
+ ind := v.ind
+ if len(ind.Args) != 2 {
+ // the rewrite code assumes there is only ever two parents to loops
+ panic("unexpected induction with too many parents")
+ }
+
+ nxt := v.nxt
+ if !(ind.Uses == 2 && // 2 used by comparison and next
+ nxt.Uses == 1) { // 1 used by induction
+ // ind or nxt is used inside the loop, add it for the facts table
+ if indVars == nil {
+ indVars = make(map[*Block]indVar)
+ }
+ indVars[v.entry] = v
+ continue
+ } else {
+ // Since this induction variable is not used for anything but counting the iterations,
+ // no point in putting it into the facts table.
+ }
+
+ // try to rewrite to a downward counting loop checking against start if the
+ // loop body does not depends on ind or nxt and end is known before the loop.
+ // This reduce pressure on the register allocator because this do not need
+ // to use end on each iteration anymore. We compare against the start constant instead.
+ // That means this code:
+ //
+ // loop:
+ // ind = (Phi (Const [x]) nxt),
+ // if ind < end
+ // then goto enter_loop
+ // else goto exit_loop
+ //
+ // enter_loop:
+ // do something without using ind nor nxt
+ // nxt = inc + ind
+ // goto loop
+ //
+ // exit_loop:
+ //
+ // is rewritten to:
+ //
+ // loop:
+ // ind = (Phi end nxt)
+ // if (Const [x]) < ind
+ // then goto enter_loop
+ // else goto exit_loop
+ //
+ // enter_loop:
+ // do something without using ind nor nxt
+ // nxt = ind - inc
+ // goto loop
+ //
+ // exit_loop:
+ //
+ // this is better because it only require to keep ind then nxt alive while looping,
+ // while the original form keeps ind then nxt and end alive
+ start, end := v.min, v.max
+ if v.flags&indVarCountDown != 0 {
+ start, end = end, start
+ }
+
+ if !(start.Op == OpConst8 || start.Op == OpConst16 || start.Op == OpConst32 || start.Op == OpConst64) {
+ // if start is not a constant we would be winning nothing from inverting the loop
+ continue
+ }
+ if end.Op == OpConst8 || end.Op == OpConst16 || end.Op == OpConst32 || end.Op == OpConst64 {
+ // TODO: if both start and end are constants we should rewrite such that the comparison
+ // is against zero and nxt is ++ or -- operation
+ // That means:
+ // for i := 2; i < 11; i += 2 {
+ // should be rewritten to:
+ // for i := 5; 0 < i; i-- {
+ continue
+ }
+
+ header := ind.Block
+ check := header.Controls[0]
+ if check == nil {
+ // we don't know how to rewrite a loop that not simple comparison
+ continue
+ }
+ switch check.Op {
+ case OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+ OpLess64, OpLess32, OpLess16, OpLess8:
+ default:
+ // we don't know how to rewrite a loop that not simple comparison
+ continue
+ }
+ if !((check.Args[0] == ind && check.Args[1] == end) ||
+ (check.Args[1] == ind && check.Args[0] == end)) {
+ // we don't know how to rewrite a loop that not simple comparison
+ continue
+ }
+ if end.Block == ind.Block {
+ // we can't rewrite loops where the condition depends on the loop body
+ // this simple check is forced to work because if this is true a Phi in ind.Block must exists
+ continue
+ }
+
+ // invert the check
+ check.Args[0], check.Args[1] = check.Args[1], check.Args[0]
+
+ // invert start and end in the loop
+ for i, v := range check.Args {
+ if v != end {
+ continue
+ }
+
+ check.SetArg(i, start)
+ goto replacedEnd
+ }
+ panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end))
+ replacedEnd:
+
+ for i, v := range ind.Args {
+ if v != start {
+ continue
+ }
+
+ ind.SetArg(i, end)
+ goto replacedStart
+ }
+ panic(fmt.Sprintf("unreachable, ind: %v, start: %v, end: %v", ind, start, end))
+ replacedStart:
+
+ if nxt.Args[0] != ind {
+ // unlike additions subtractions are not commutative so be sure we get it right
+ nxt.Args[0], nxt.Args[1] = nxt.Args[1], nxt.Args[0]
+ }
+
+ switch nxt.Op {
+ case OpAdd8:
+ nxt.Op = OpSub8
+ case OpAdd16:
+ nxt.Op = OpSub16
+ case OpAdd32:
+ nxt.Op = OpSub32
+ case OpAdd64:
+ nxt.Op = OpSub64
+ case OpSub8:
+ nxt.Op = OpAdd8
+ case OpSub16:
+ nxt.Op = OpAdd16
+ case OpSub32:
+ nxt.Op = OpAdd32
+ case OpSub64:
+ nxt.Op = OpAdd64
+ default:
+ panic("unreachable")
+ }
+
+ if f.pass.debug > 0 {
+ f.Warnl(ind.Pos, "Inverted loop iteration")
+ }
+ }
+
+ ft := newFactsTable(f)
+ ft.checkpoint()
+
+ var lensVars map[*Block][]*Value
+ var logicVars map[*Block][]*Value
+
+ // Find length and capacity ops.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ // We don't care about dead values.
+ // (There can be some that are CSEd but not removed yet.)
+ continue
+ }
+ switch v.Op {
+ case OpStringLen:
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ case OpSliceLen:
+ if ft.lens == nil {
+ ft.lens = map[ID]*Value{}
+ }
+ // Set all len Values for the same slice as equal in the poset.
+ // The poset handles transitive relations, so Values related to
+ // any OpSliceLen for this slice will be correctly related to others.
+ if l, ok := ft.lens[v.Args[0].ID]; ok {
+ ft.update(b, v, l, signed, eq)
+ } else {
+ ft.lens[v.Args[0].ID] = v
+ }
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ if v.Args[0].Op == OpSliceMake {
+ if lensVars == nil {
+ lensVars = make(map[*Block][]*Value)
+ }
+ lensVars[b] = append(lensVars[b], v)
+ }
+ case OpSliceCap:
+ if ft.caps == nil {
+ ft.caps = map[ID]*Value{}
+ }
+ // Same as case OpSliceLen above, but for slice cap.
+ if c, ok := ft.caps[v.Args[0].ID]; ok {
+ ft.update(b, v, c, signed, eq)
+ } else {
+ ft.caps[v.Args[0].ID] = v
+ }
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ if v.Args[0].Op == OpSliceMake {
+ if lensVars == nil {
+ lensVars = make(map[*Block][]*Value)
+ }
+ lensVars[b] = append(lensVars[b], v)
+ }
+ case OpCtz64, OpCtz32, OpCtz16, OpCtz8, OpBitLen64, OpBitLen32, OpBitLen16, OpBitLen8:
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ // TODO: we could also do <= 64/32/16/8, if that helped.
+ case OpAnd64, OpAnd32, OpAnd16, OpAnd8:
+ ft.update(b, v, v.Args[1], unsigned, lt|eq)
+ ft.update(b, v, v.Args[0], unsigned, lt|eq)
+ for i := 0; i < 2; i++ {
+ if isNonNegative(v.Args[i]) {
+ ft.update(b, v, v.Args[i], signed, lt|eq)
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ }
+ }
+ if logicVars == nil {
+ logicVars = make(map[*Block][]*Value)
+ }
+ logicVars[b] = append(logicVars[b], v)
+ case OpOr64, OpOr32, OpOr16, OpOr8:
+ // TODO: investigate how to always add facts without much slowdown, see issue #57959.
+ if v.Args[0].isGenericIntConst() {
+ ft.update(b, v, v.Args[0], unsigned, gt|eq)
+ }
+ if v.Args[1].isGenericIntConst() {
+ ft.update(b, v, v.Args[1], unsigned, gt|eq)
+ }
+ case OpDiv64u, OpDiv32u, OpDiv16u, OpDiv8u,
+ OpRsh8Ux64, OpRsh8Ux32, OpRsh8Ux16, OpRsh8Ux8,
+ OpRsh16Ux64, OpRsh16Ux32, OpRsh16Ux16, OpRsh16Ux8,
+ OpRsh32Ux64, OpRsh32Ux32, OpRsh32Ux16, OpRsh32Ux8,
+ OpRsh64Ux64, OpRsh64Ux32, OpRsh64Ux16, OpRsh64Ux8:
+ ft.update(b, v, v.Args[0], unsigned, lt|eq)
+ case OpMod64u, OpMod32u, OpMod16u, OpMod8u:
+ ft.update(b, v, v.Args[0], unsigned, lt|eq)
+ ft.update(b, v, v.Args[1], unsigned, lt)
+ case OpPhi:
+ // Determine the min and max value of OpPhi composed entirely of integer constants.
+ //
+ // For example, for an OpPhi:
+ //
+ // v1 = OpConst64 [13]
+ // v2 = OpConst64 [7]
+ // v3 = OpConst64 [42]
+ //
+ // v4 = OpPhi(v1, v2, v3)
+ //
+ // We can prove:
+ //
+ // v4 >= 7 && v4 <= 42
+ //
+ // TODO(jake-ciolek): Handle nested constant OpPhi's
+ sameConstOp := true
+ min := 0
+ max := 0
+
+ if !v.Args[min].isGenericIntConst() {
+ break
+ }
+
+ for k := range v.Args {
+ if v.Args[k].Op != v.Args[min].Op {
+ sameConstOp = false
+ break
+ }
+ if v.Args[k].AuxInt < v.Args[min].AuxInt {
+ min = k
+ }
+ if v.Args[k].AuxInt > v.Args[max].AuxInt {
+ max = k
+ }
+ }
+
+ if sameConstOp {
+ ft.update(b, v, v.Args[min], signed, gt|eq)
+ ft.update(b, v, v.Args[max], signed, lt|eq)
+ }
+ // One might be tempted to create a v >= ft.zero relation for
+ // all OpPhi's composed of only provably-positive values
+ // but that bloats up the facts table for a very negligible gain.
+ // In Go itself, very few functions get improved (< 5) at a cost of 5-7% total increase
+ // of compile time.
+ }
+ }
+ }
+
+ // current node state
+ type walkState int
+ const (
+ descend walkState = iota
+ simplify
+ )
+ // work maintains the DFS stack.
+ type bp struct {
+ block *Block // current handled block
+ state walkState // what's to do
+ }
+ work := make([]bp, 0, 256)
+ work = append(work, bp{
+ block: f.Entry,
+ state: descend,
+ })
+
+ idom := f.Idom()
+ sdom := f.Sdom()
+
+ // DFS on the dominator tree.
+ //
+ // For efficiency, we consider only the dominator tree rather
+ // than the entire flow graph. On the way down, we consider
+ // incoming branches and accumulate conditions that uniquely
+ // dominate the current block. If we discover a contradiction,
+ // we can eliminate the entire block and all of its children.
+ // On the way back up, we consider outgoing branches that
+ // haven't already been considered. This way we consider each
+ // branch condition only once.
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+ parent := idom[node.block.ID]
+ branch := getBranch(sdom, parent, node.block)
+
+ switch node.state {
+ case descend:
+ ft.checkpoint()
+
+ // Entering the block, add the block-depending facts that we collected
+ // at the beginning: induction variables and lens/caps of slices.
+ if iv, ok := indVars[node.block]; ok {
+ addIndVarRestrictions(ft, parent, iv)
+ }
+ if lens, ok := lensVars[node.block]; ok {
+ for _, v := range lens {
+ switch v.Op {
+ case OpSliceLen:
+ ft.update(node.block, v, v.Args[0].Args[1], signed, eq)
+ case OpSliceCap:
+ ft.update(node.block, v, v.Args[0].Args[2], signed, eq)
+ }
+ }
+ }
+
+ if branch != unknown {
+ addBranchRestrictions(ft, parent, branch)
+ // After we add the branch restriction, re-check the logic operations in the parent block,
+ // it may give us more info to omit some branches
+ if logic, ok := logicVars[parent]; ok {
+ for _, v := range logic {
+ // we only have OpAnd for now
+ ft.update(parent, v, v.Args[1], unsigned, lt|eq)
+ ft.update(parent, v, v.Args[0], unsigned, lt|eq)
+ for i := 0; i < 2; i++ {
+ if isNonNegative(v.Args[i]) {
+ ft.update(parent, v, v.Args[i], signed, lt|eq)
+ ft.update(parent, v, ft.zero, signed, gt|eq)
+ }
+ }
+ }
+ }
+ if ft.unsat {
+ // node.block is unreachable.
+ // Remove it and don't visit
+ // its children.
+ removeBranch(parent, branch)
+ ft.restore()
+ break
+ }
+ // Otherwise, we can now commit to
+ // taking this branch. We'll restore
+ // ft when we unwind.
+ }
+
+ // Add inductive facts for phis in this block.
+ addLocalInductiveFacts(ft, node.block)
+
+ work = append(work, bp{
+ block: node.block,
+ state: simplify,
+ })
+ for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) {
+ work = append(work, bp{
+ block: s,
+ state: descend,
+ })
+ }
+
+ case simplify:
+ simplifyBlock(sdom, ft, node.block)
+ ft.restore()
+ }
+ }
+
+ ft.restore()
+
+ ft.cleanup(f)
+}
+
+// getBranch returns the range restrictions added by p
+// when reaching b. p is the immediate dominator of b.
+func getBranch(sdom SparseTree, p *Block, b *Block) branch {
+ if p == nil {
+ return unknown
+ }
+ switch p.Kind {
+ case BlockIf:
+ // If p and p.Succs[0] are dominators it means that every path
+ // from entry to b passes through p and p.Succs[0]. We care that
+ // no path from entry to b passes through p.Succs[1]. If p.Succs[0]
+ // has one predecessor then (apart from the degenerate case),
+ // there is no path from entry that can reach b through p.Succs[1].
+ // TODO: how about p->yes->b->yes, i.e. a loop in yes.
+ if sdom.IsAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 {
+ return positive
+ }
+ if sdom.IsAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 {
+ return negative
+ }
+ case BlockJumpTable:
+ // TODO: this loop can lead to quadratic behavior, as
+ // getBranch can be called len(p.Succs) times.
+ for i, e := range p.Succs {
+ if sdom.IsAncestorEq(e.b, b) && len(e.b.Preds) == 1 {
+ return jumpTable0 + branch(i)
+ }
+ }
+ }
+ return unknown
+}
+
+// addIndVarRestrictions updates the factsTables ft with the facts
+// learned from the induction variable indVar which drives the loop
+// starting in Block b.
+func addIndVarRestrictions(ft *factsTable, b *Block, iv indVar) {
+ d := signed
+ if ft.isNonNegative(iv.min) && ft.isNonNegative(iv.max) {
+ d |= unsigned
+ }
+
+ if iv.flags&indVarMinExc == 0 {
+ addRestrictions(b, ft, d, iv.min, iv.ind, lt|eq)
+ } else {
+ addRestrictions(b, ft, d, iv.min, iv.ind, lt)
+ }
+
+ if iv.flags&indVarMaxInc == 0 {
+ addRestrictions(b, ft, d, iv.ind, iv.max, lt)
+ } else {
+ addRestrictions(b, ft, d, iv.ind, iv.max, lt|eq)
+ }
+}
+
+// addBranchRestrictions updates the factsTables ft with the facts learned when
+// branching from Block b in direction br.
+func addBranchRestrictions(ft *factsTable, b *Block, br branch) {
+ c := b.Controls[0]
+ switch {
+ case br == negative:
+ addRestrictions(b, ft, boolean, nil, c, eq)
+ case br == positive:
+ addRestrictions(b, ft, boolean, nil, c, lt|gt)
+ case br >= jumpTable0:
+ idx := br - jumpTable0
+ val := int64(idx)
+ if v, off := isConstDelta(c); v != nil {
+ // Establish the bound on the underlying value we're switching on,
+ // not on the offset-ed value used as the jump table index.
+ c = v
+ val -= off
+ }
+ old, ok := ft.limits[c.ID]
+ if !ok {
+ old = noLimit
+ }
+ ft.limitStack = append(ft.limitStack, limitFact{c.ID, old})
+ if val < old.min || val > old.max || uint64(val) < old.umin || uint64(val) > old.umax {
+ ft.unsat = true
+ if b.Func.pass.debug > 2 {
+ b.Func.Warnl(b.Pos, "block=%s outedge=%d %s=%d unsat", b, idx, c, val)
+ }
+ } else {
+ ft.limits[c.ID] = limit{val, val, uint64(val), uint64(val)}
+ if b.Func.pass.debug > 2 {
+ b.Func.Warnl(b.Pos, "block=%s outedge=%d %s=%d", b, idx, c, val)
+ }
+ }
+ default:
+ panic("unknown branch")
+ }
+ if tr, has := domainRelationTable[c.Op]; has {
+ // When we branched from parent we learned a new set of
+ // restrictions. Update the factsTable accordingly.
+ d := tr.d
+ if d == signed && ft.isNonNegative(c.Args[0]) && ft.isNonNegative(c.Args[1]) {
+ d |= unsigned
+ }
+ switch c.Op {
+ case OpIsInBounds, OpIsSliceInBounds:
+ // 0 <= a0 < a1 (or 0 <= a0 <= a1)
+ //
+ // On the positive branch, we learn:
+ // signed: 0 <= a0 < a1 (or 0 <= a0 <= a1)
+ // unsigned: a0 < a1 (or a0 <= a1)
+ //
+ // On the negative branch, we learn (0 > a0 ||
+ // a0 >= a1). In the unsigned domain, this is
+ // simply a0 >= a1 (which is the reverse of the
+ // positive branch, so nothing surprising).
+ // But in the signed domain, we can't express the ||
+ // condition, so check if a0 is non-negative instead,
+ // to be able to learn something.
+ switch br {
+ case negative:
+ d = unsigned
+ if ft.isNonNegative(c.Args[0]) {
+ d |= signed
+ }
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))
+ case positive:
+ addRestrictions(b, ft, signed, ft.zero, c.Args[0], lt|eq)
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)
+ }
+ default:
+ switch br {
+ case negative:
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))
+ case positive:
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)
+ }
+ }
+
+ }
+}
+
+// addRestrictions updates restrictions from the immediate
+// dominating block (p) using r.
+func addRestrictions(parent *Block, ft *factsTable, t domain, v, w *Value, r relation) {
+ if t == 0 {
+ // Trivial case: nothing to do.
+ // Should not happen, but just in case.
+ return
+ }
+ for i := domain(1); i <= t; i <<= 1 {
+ if t&i == 0 {
+ continue
+ }
+ ft.update(parent, v, w, i, r)
+ }
+}
+
+// addLocalInductiveFacts adds inductive facts when visiting b, where
+// b is a join point in a loop. In contrast with findIndVar, this
+// depends on facts established for b, which is why it happens when
+// visiting b.
+//
+// TODO: It would be nice to combine this with findIndVar.
+func addLocalInductiveFacts(ft *factsTable, b *Block) {
+ // This looks for a specific pattern of induction:
+ //
+ // 1. i1 = OpPhi(min, i2) in b
+ // 2. i2 = i1 + 1
+ // 3. i2 < max at exit from b.Preds[1]
+ // 4. min < max
+ //
+ // If all of these conditions are true, then i1 < max and i1 >= min.
+
+ // To ensure this is a loop header node.
+ if len(b.Preds) != 2 {
+ return
+ }
+
+ for _, i1 := range b.Values {
+ if i1.Op != OpPhi {
+ continue
+ }
+
+ // Check for conditions 1 and 2. This is easy to do
+ // and will throw out most phis.
+ min, i2 := i1.Args[0], i1.Args[1]
+ if i1q, delta := isConstDelta(i2); i1q != i1 || delta != 1 {
+ continue
+ }
+
+ // Try to prove condition 3. We can't just query the
+ // fact table for this because we don't know what the
+ // facts of b.Preds[1] are (in general, b.Preds[1] is
+ // a loop-back edge, so we haven't even been there
+ // yet). As a conservative approximation, we look for
+ // this condition in the predecessor chain until we
+ // hit a join point.
+ uniquePred := func(b *Block) *Block {
+ if len(b.Preds) == 1 {
+ return b.Preds[0].b
+ }
+ return nil
+ }
+ pred, child := b.Preds[1].b, b
+ for ; pred != nil; pred, child = uniquePred(pred), pred {
+ if pred.Kind != BlockIf {
+ continue
+ }
+ control := pred.Controls[0]
+
+ br := unknown
+ if pred.Succs[0].b == child {
+ br = positive
+ }
+ if pred.Succs[1].b == child {
+ if br != unknown {
+ continue
+ }
+ br = negative
+ }
+ if br == unknown {
+ continue
+ }
+
+ tr, has := domainRelationTable[control.Op]
+ if !has {
+ continue
+ }
+ r := tr.r
+ if br == negative {
+ // Negative branch taken to reach b.
+ // Complement the relations.
+ r = (lt | eq | gt) ^ r
+ }
+
+ // Check for i2 < max or max > i2.
+ var max *Value
+ if r == lt && control.Args[0] == i2 {
+ max = control.Args[1]
+ } else if r == gt && control.Args[1] == i2 {
+ max = control.Args[0]
+ } else {
+ continue
+ }
+
+ // Check condition 4 now that we have a
+ // candidate max. For this we can query the
+ // fact table. We "prove" min < max by showing
+ // that min >= max is unsat. (This may simply
+ // compare two constants; that's fine.)
+ ft.checkpoint()
+ ft.update(b, min, max, tr.d, gt|eq)
+ proved := ft.unsat
+ ft.restore()
+
+ if proved {
+ // We know that min <= i1 < max.
+ if b.Func.pass.debug > 0 {
+ printIndVar(b, i1, min, max, 1, 0)
+ }
+ ft.update(b, min, i1, tr.d, lt|eq)
+ ft.update(b, i1, max, tr.d, lt)
+ }
+ }
+ }
+}
+
+var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero}
+var mostNegativeDividend = map[Op]int64{
+ OpDiv16: -1 << 15,
+ OpMod16: -1 << 15,
+ OpDiv32: -1 << 31,
+ OpMod32: -1 << 31,
+ OpDiv64: -1 << 63,
+ OpMod64: -1 << 63}
+
+// simplifyBlock simplifies some constant values in b and evaluates
+// branches to non-uniquely dominated successors of b.
+func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpSlicemask:
+ // Replace OpSlicemask operations in b with constants where possible.
+ x, delta := isConstDelta(v.Args[0])
+ if x == nil {
+ break
+ }
+ // slicemask(x + y)
+ // if x is larger than -y (y is negative), then slicemask is -1.
+ lim, ok := ft.limits[x.ID]
+ if !ok {
+ break
+ }
+ if lim.umin > uint64(-delta) {
+ if v.Args[0].Op == OpAdd64 {
+ v.reset(OpConst64)
+ } else {
+ v.reset(OpConst32)
+ }
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved slicemask not needed")
+ }
+ v.AuxInt = -1
+ }
+ case OpCtz8, OpCtz16, OpCtz32, OpCtz64:
+ // On some architectures, notably amd64, we can generate much better
+ // code for CtzNN if we know that the argument is non-zero.
+ // Capture that information here for use in arch-specific optimizations.
+ x := v.Args[0]
+ lim, ok := ft.limits[x.ID]
+ if !ok {
+ break
+ }
+ if lim.umin > 0 || lim.min > 0 || lim.max < 0 {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v non-zero", v.Op)
+ }
+ v.Op = ctzNonZeroOp[v.Op]
+ }
+ case OpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64,
+ OpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64,
+ OpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64,
+ OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64:
+ // Check whether, for a >> b, we know that a is non-negative
+ // and b is all of a's bits except the MSB. If so, a is shifted to zero.
+ bits := 8 * v.Type.Size()
+ if v.Args[1].isGenericIntConst() && v.Args[1].AuxInt >= bits-1 && ft.isNonNegative(v.Args[0]) {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v shifts to zero", v.Op)
+ }
+ switch bits {
+ case 64:
+ v.reset(OpConst64)
+ case 32:
+ v.reset(OpConst32)
+ case 16:
+ v.reset(OpConst16)
+ case 8:
+ v.reset(OpConst8)
+ default:
+ panic("unexpected integer size")
+ }
+ v.AuxInt = 0
+ break // Be sure not to fallthrough - this is no longer OpRsh.
+ }
+ // If the Rsh hasn't been replaced with 0, still check if it is bounded.
+ fallthrough
+ case OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64,
+ OpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64,
+ OpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64,
+ OpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64,
+ OpRsh8Ux8, OpRsh8Ux16, OpRsh8Ux32, OpRsh8Ux64,
+ OpRsh16Ux8, OpRsh16Ux16, OpRsh16Ux32, OpRsh16Ux64,
+ OpRsh32Ux8, OpRsh32Ux16, OpRsh32Ux32, OpRsh32Ux64,
+ OpRsh64Ux8, OpRsh64Ux16, OpRsh64Ux32, OpRsh64Ux64:
+ // Check whether, for a << b, we know that b
+ // is strictly less than the number of bits in a.
+ by := v.Args[1]
+ lim, ok := ft.limits[by.ID]
+ if !ok {
+ break
+ }
+ bits := 8 * v.Args[0].Type.Size()
+ if lim.umax < uint64(bits) || (lim.max < bits && ft.isNonNegative(by)) {
+ v.AuxInt = 1 // see shiftIsBounded
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op)
+ }
+ }
+ case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64:
+ // On amd64 and 386 fix-up code can be avoided if we know
+ // the divisor is not -1 or the dividend > MinIntNN.
+ // Don't modify AuxInt on other architectures,
+ // as that can interfere with CSE.
+ // TODO: add other architectures?
+ if b.Func.Config.arch != "386" && b.Func.Config.arch != "amd64" {
+ break
+ }
+ divr := v.Args[1]
+ divrLim, divrLimok := ft.limits[divr.ID]
+ divd := v.Args[0]
+ divdLim, divdLimok := ft.limits[divd.ID]
+ if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) ||
+ (divdLimok && divdLim.min > mostNegativeDividend[v.Op]) {
+ // See DivisionNeedsFixUp in rewrite.go.
+ // v.AuxInt = 1 means we have proved both that the divisor is not -1
+ // and that the dividend is not the most negative integer,
+ // so we do not need to add fix-up code.
+ v.AuxInt = 1
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op)
+ }
+ }
+ }
+ // Fold provable constant results.
+ // Helps in cases where we reuse a value after branching on its equality.
+ for i, arg := range v.Args {
+ switch arg.Op {
+ case OpConst64, OpConst32, OpConst16, OpConst8:
+ continue
+ }
+ lim, ok := ft.limits[arg.ID]
+ if !ok {
+ continue
+ }
+
+ var constValue int64
+ typ := arg.Type
+ bits := 8 * typ.Size()
+ switch {
+ case lim.min == lim.max:
+ constValue = lim.min
+ case lim.umin == lim.umax:
+ // truncate then sign extand
+ switch bits {
+ case 64:
+ constValue = int64(lim.umin)
+ case 32:
+ constValue = int64(int32(lim.umin))
+ case 16:
+ constValue = int64(int16(lim.umin))
+ case 8:
+ constValue = int64(int8(lim.umin))
+ default:
+ panic("unexpected integer size")
+ }
+ default:
+ continue
+ }
+ var c *Value
+ f := b.Func
+ switch bits {
+ case 64:
+ c = f.ConstInt64(typ, constValue)
+ case 32:
+ c = f.ConstInt32(typ, int32(constValue))
+ case 16:
+ c = f.ConstInt16(typ, int16(constValue))
+ case 8:
+ c = f.ConstInt8(typ, int8(constValue))
+ default:
+ panic("unexpected integer size")
+ }
+ v.SetArg(i, c)
+ if b.Func.pass.debug > 1 {
+ b.Func.Warnl(v.Pos, "Proved %v's arg %d (%v) is constant %d", v, i, arg, constValue)
+ }
+ }
+ }
+
+ if b.Kind != BlockIf {
+ return
+ }
+
+ // Consider outgoing edges from this block.
+ parent := b
+ for i, branch := range [...]branch{positive, negative} {
+ child := parent.Succs[i].b
+ if getBranch(sdom, parent, child) != unknown {
+ // For edges to uniquely dominated blocks, we
+ // already did this when we visited the child.
+ continue
+ }
+ // For edges to other blocks, this can trim a branch
+ // even if we couldn't get rid of the child itself.
+ ft.checkpoint()
+ addBranchRestrictions(ft, parent, branch)
+ unsat := ft.unsat
+ ft.restore()
+ if unsat {
+ // This branch is impossible, so remove it
+ // from the block.
+ removeBranch(parent, branch)
+ // No point in considering the other branch.
+ // (It *is* possible for both to be
+ // unsatisfiable since the fact table is
+ // incomplete. We could turn this into a
+ // BlockExit, but it doesn't seem worth it.)
+ break
+ }
+ }
+}
+
+func removeBranch(b *Block, branch branch) {
+ c := b.Controls[0]
+ if b.Func.pass.debug > 0 {
+ verb := "Proved"
+ if branch == positive {
+ verb = "Disproved"
+ }
+ if b.Func.pass.debug > 1 {
+ b.Func.Warnl(b.Pos, "%s %s (%s)", verb, c.Op, c)
+ } else {
+ b.Func.Warnl(b.Pos, "%s %s", verb, c.Op)
+ }
+ }
+ if c != nil && c.Pos.IsStmt() == src.PosIsStmt && c.Pos.SameFileAndLine(b.Pos) {
+ // attempt to preserve statement marker.
+ b.Pos = b.Pos.WithIsStmt()
+ }
+ if branch == positive || branch == negative {
+ b.Kind = BlockFirst
+ b.ResetControls()
+ if branch == positive {
+ b.swapSuccessors()
+ }
+ } else {
+ // TODO: figure out how to remove an entry from a jump table
+ }
+}
+
+// isNonNegative reports whether v is known to be greater or equal to zero.
+func isNonNegative(v *Value) bool {
+ if !v.Type.IsInteger() {
+ v.Fatalf("isNonNegative bad type: %v", v.Type)
+ }
+ // TODO: return true if !v.Type.IsSigned()
+ // SSA isn't type-safe enough to do that now (issue 37753).
+ // The checks below depend only on the pattern of bits.
+
+ switch v.Op {
+ case OpConst64:
+ return v.AuxInt >= 0
+
+ case OpConst32:
+ return int32(v.AuxInt) >= 0
+
+ case OpConst16:
+ return int16(v.AuxInt) >= 0
+
+ case OpConst8:
+ return int8(v.AuxInt) >= 0
+
+ case OpStringLen, OpSliceLen, OpSliceCap,
+ OpZeroExt8to64, OpZeroExt16to64, OpZeroExt32to64,
+ OpZeroExt8to32, OpZeroExt16to32, OpZeroExt8to16,
+ OpCtz64, OpCtz32, OpCtz16, OpCtz8,
+ OpCtz64NonZero, OpCtz32NonZero, OpCtz16NonZero, OpCtz8NonZero,
+ OpBitLen64, OpBitLen32, OpBitLen16, OpBitLen8:
+ return true
+
+ case OpRsh64Ux64, OpRsh32Ux64:
+ by := v.Args[1]
+ return by.Op == OpConst64 && by.AuxInt > 0
+
+ case OpRsh64x64, OpRsh32x64, OpRsh8x64, OpRsh16x64, OpRsh32x32, OpRsh64x32,
+ OpSignExt32to64, OpSignExt16to64, OpSignExt8to64, OpSignExt16to32, OpSignExt8to32:
+ return isNonNegative(v.Args[0])
+
+ case OpAnd64, OpAnd32, OpAnd16, OpAnd8:
+ return isNonNegative(v.Args[0]) || isNonNegative(v.Args[1])
+
+ case OpMod64, OpMod32, OpMod16, OpMod8,
+ OpDiv64, OpDiv32, OpDiv16, OpDiv8,
+ OpOr64, OpOr32, OpOr16, OpOr8,
+ OpXor64, OpXor32, OpXor16, OpXor8:
+ return isNonNegative(v.Args[0]) && isNonNegative(v.Args[1])
+
+ // We could handle OpPhi here, but the improvements from doing
+ // so are very minor, and it is neither simple nor cheap.
+ }
+ return false
+}
+
+// isConstDelta returns non-nil if v is equivalent to w+delta (signed).
+func isConstDelta(v *Value) (w *Value, delta int64) {
+ cop := OpConst64
+ switch v.Op {
+ case OpAdd32, OpSub32:
+ cop = OpConst32
+ case OpAdd16, OpSub16:
+ cop = OpConst16
+ case OpAdd8, OpSub8:
+ cop = OpConst8
+ }
+ switch v.Op {
+ case OpAdd64, OpAdd32, OpAdd16, OpAdd8:
+ if v.Args[0].Op == cop {
+ return v.Args[1], v.Args[0].AuxInt
+ }
+ if v.Args[1].Op == cop {
+ return v.Args[0], v.Args[1].AuxInt
+ }
+ case OpSub64, OpSub32, OpSub16, OpSub8:
+ if v.Args[1].Op == cop {
+ aux := v.Args[1].AuxInt
+ if aux != -aux { // Overflow; too bad
+ return v.Args[0], -aux
+ }
+ }
+ }
+ return nil, 0
+}
+
+// isCleanExt reports whether v is the result of a value-preserving
+// sign or zero extension.
+func isCleanExt(v *Value) bool {
+ switch v.Op {
+ case OpSignExt8to16, OpSignExt8to32, OpSignExt8to64,
+ OpSignExt16to32, OpSignExt16to64, OpSignExt32to64:
+ // signed -> signed is the only value-preserving sign extension
+ return v.Args[0].Type.IsSigned() && v.Type.IsSigned()
+
+ case OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64,
+ OpZeroExt16to32, OpZeroExt16to64, OpZeroExt32to64:
+ // unsigned -> signed/unsigned are value-preserving zero extensions
+ return !v.Args[0].Type.IsSigned()
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
new file mode 100644
index 0000000..2325b9e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -0,0 +1,2947 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Register allocation.
+//
+// We use a version of a linear scan register allocator. We treat the
+// whole function as a single long basic block and run through
+// it using a greedy register allocator. Then all merge edges
+// (those targeting a block with len(Preds)>1) are processed to
+// shuffle data into the place that the target of the edge expects.
+//
+// The greedy allocator moves values into registers just before they
+// are used, spills registers only when necessary, and spills the
+// value whose next use is farthest in the future.
+//
+// The register allocator requires that a block is not scheduled until
+// at least one of its predecessors have been scheduled. The most recent
+// such predecessor provides the starting register state for a block.
+//
+// It also requires that there are no critical edges (critical =
+// comes from a block with >1 successor and goes to a block with >1
+// predecessor). This makes it easy to add fixup code on merge edges -
+// the source of a merge edge has only one successor, so we can add
+// fixup code to the end of that block.
+
+// Spilling
+//
+// During the normal course of the allocator, we might throw a still-live
+// value out of all registers. When that value is subsequently used, we must
+// load it from a slot on the stack. We must also issue an instruction to
+// initialize that stack location with a copy of v.
+//
+// pre-regalloc:
+// (1) v = Op ...
+// (2) x = Op ...
+// (3) ... = Op v ...
+//
+// post-regalloc:
+// (1) v = Op ... : AX // computes v, store result in AX
+// s = StoreReg v // spill v to a stack slot
+// (2) x = Op ... : AX // some other op uses AX
+// c = LoadReg s : CX // restore v from stack slot
+// (3) ... = Op c ... // use the restored value
+//
+// Allocation occurs normally until we reach (3) and we realize we have
+// a use of v and it isn't in any register. At that point, we allocate
+// a spill (a StoreReg) for v. We can't determine the correct place for
+// the spill at this point, so we allocate the spill as blockless initially.
+// The restore is then generated to load v back into a register so it can
+// be used. Subsequent uses of v will use the restored value c instead.
+//
+// What remains is the question of where to schedule the spill.
+// During allocation, we keep track of the dominator of all restores of v.
+// The spill of v must dominate that block. The spill must also be issued at
+// a point where v is still in a register.
+//
+// To find the right place, start at b, the block which dominates all restores.
+// - If b is v.Block, then issue the spill right after v.
+// It is known to be in a register at that point, and dominates any restores.
+// - Otherwise, if v is in a register at the start of b,
+// put the spill of v at the start of b.
+// - Otherwise, set b = immediate dominator of b, and repeat.
+//
+// Phi values are special, as always. We define two kinds of phis, those
+// where the merge happens in a register (a "register" phi) and those where
+// the merge happens in a stack location (a "stack" phi).
+//
+// A register phi must have the phi and all of its inputs allocated to the
+// same register. Register phis are spilled similarly to regular ops.
+//
+// A stack phi must have the phi and all of its inputs allocated to the same
+// stack location. Stack phis start out life already spilled - each phi
+// input must be a store (using StoreReg) at the end of the corresponding
+// predecessor block.
+// b1: y = ... : AX b2: z = ... : BX
+// y2 = StoreReg y z2 = StoreReg z
+// goto b3 goto b3
+// b3: x = phi(y2, z2)
+// The stack allocator knows that StoreReg args of stack-allocated phis
+// must be allocated to the same stack slot as the phi that uses them.
+// x is now a spilled value and a restore must appear before its first use.
+
+// TODO
+
+// Use an affinity graph to mark two values which should use the
+// same register. This affinity graph will be used to prefer certain
+// registers for allocation. This affinity helps eliminate moves that
+// are required for phi implementations and helps generate allocations
+// for 2-register architectures.
+
+// Note: regalloc generates a not-quite-SSA output. If we have:
+//
+// b1: x = ... : AX
+// x2 = StoreReg x
+// ... AX gets reused for something else ...
+// if ... goto b3 else b4
+//
+// b3: x3 = LoadReg x2 : BX b4: x4 = LoadReg x2 : CX
+// ... use x3 ... ... use x4 ...
+//
+// b2: ... use x3 ...
+//
+// If b3 is the primary predecessor of b2, then we use x3 in b2 and
+// add a x4:CX->BX copy at the end of b4.
+// But the definition of x3 doesn't dominate b2. We should really
+// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
+// SSA form. For now, we ignore this problem as remaining in strict
+// SSA form isn't needed after regalloc. We'll just leave the use
+// of x3 not dominated by the definition of x3, and the CX->BX copy
+// will have no use (so don't run deadcode after regalloc!).
+// TODO: maybe we should introduce these extra phis?
+
+package ssa
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "fmt"
+ "internal/buildcfg"
+ "math/bits"
+ "unsafe"
+)
+
+const (
+ moveSpills = iota
+ logSpills
+ regDebug
+ stackDebug
+)
+
+// distance is a measure of how far into the future values are used.
+// distance is measured in units of instructions.
+const (
+ likelyDistance = 1
+ normalDistance = 10
+ unlikelyDistance = 100
+)
+
+// regalloc performs register allocation on f. It sets f.RegAlloc
+// to the resulting allocation.
+func regalloc(f *Func) {
+ var s regAllocState
+ s.init(f)
+ s.regalloc(f)
+ s.close()
+}
+
+type register uint8
+
+const noRegister register = 255
+
+// For bulk initializing
+var noRegisters [32]register = [32]register{
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+}
+
+// A regMask encodes a set of machine registers.
+// TODO: regMask -> regSet?
+type regMask uint64
+
+func (m regMask) String() string {
+ s := ""
+ for r := register(0); m != 0; r++ {
+ if m>>r&1 == 0 {
+ continue
+ }
+ m &^= regMask(1) << r
+ if s != "" {
+ s += " "
+ }
+ s += fmt.Sprintf("r%d", r)
+ }
+ return s
+}
+
+func (s *regAllocState) RegMaskString(m regMask) string {
+ str := ""
+ for r := register(0); m != 0; r++ {
+ if m>>r&1 == 0 {
+ continue
+ }
+ m &^= regMask(1) << r
+ if str != "" {
+ str += " "
+ }
+ str += s.registers[r].String()
+ }
+ return str
+}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ return bits.OnesCount64(uint64(r))
+}
+
+// pickReg picks an arbitrary register from the register mask.
+func pickReg(r regMask) register {
+ if r == 0 {
+ panic("can't pick a register from an empty set")
+ }
+ // pick the lowest one
+ return register(bits.TrailingZeros64(uint64(r)))
+}
+
+type use struct {
+ dist int32 // distance from start of the block to a use of a value
+ pos src.XPos // source position of the use
+ next *use // linked list of uses of a value in nondecreasing dist order
+}
+
+// A valState records the register allocation state for a (pre-regalloc) value.
+type valState struct {
+ regs regMask // the set of registers holding a Value (usually just one)
+ uses *use // list of uses in this block
+ spill *Value // spilled copy of the Value (if any)
+ restoreMin int32 // minimum of all restores' blocks' sdom.entry
+ restoreMax int32 // maximum of all restores' blocks' sdom.exit
+ needReg bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !.v.Type.IsFlags()
+ rematerializeable bool // cached value of v.rematerializeable()
+}
+
+type regState struct {
+ v *Value // Original (preregalloc) Value stored in this register.
+ c *Value // A Value equal to v which is currently in a register. Might be v or a copy of it.
+ // If a register is unused, v==c==nil
+}
+
+type regAllocState struct {
+ f *Func
+
+ sdom SparseTree
+ registers []Register
+ numRegs register
+ SPReg register
+ SBReg register
+ GReg register
+ allocatable regMask
+
+ // live values at the end of each block. live[b.ID] is a list of value IDs
+ // which are live at the end of b, together with a count of how many instructions
+ // forward to the next use.
+ live [][]liveInfo
+ // desired register assignments at the end of each block.
+ // Note that this is a static map computed before allocation occurs. Dynamic
+ // register desires (from partially completed allocations) will trump
+ // this information.
+ desired []desiredState
+
+ // current state of each (preregalloc) Value
+ values []valState
+
+ // ID of SP, SB values
+ sp, sb ID
+
+ // For each Value, map from its value ID back to the
+ // preregalloc Value it was derived from.
+ orig []*Value
+
+ // current state of each register
+ regs []regState
+
+ // registers that contain values which can't be kicked out
+ nospill regMask
+
+ // mask of registers currently in use
+ used regMask
+
+ // mask of registers used since the start of the current block
+ usedSinceBlockStart regMask
+
+ // mask of registers used in the current instruction
+ tmpused regMask
+
+ // current block we're working on
+ curBlock *Block
+
+ // cache of use records
+ freeUseRecords *use
+
+ // endRegs[blockid] is the register state at the end of each block.
+ // encoded as a set of endReg records.
+ endRegs [][]endReg
+
+ // startRegs[blockid] is the register state at the start of merge blocks.
+ // saved state does not include the state of phi ops in the block.
+ startRegs [][]startReg
+
+ // startRegsMask is a mask of the registers in startRegs[curBlock.ID].
+ // Registers dropped from startRegsMask are later synchronoized back to
+ // startRegs by dropping from there as well.
+ startRegsMask regMask
+
+ // spillLive[blockid] is the set of live spills at the end of each block
+ spillLive [][]ID
+
+ // a set of copies we generated to move things around, and
+ // whether it is used in shuffle. Unused copies will be deleted.
+ copies map[*Value]bool
+
+ loopnest *loopnest
+
+ // choose a good order in which to visit blocks for allocation purposes.
+ visitOrder []*Block
+
+ // blockOrder[b.ID] corresponds to the index of block b in visitOrder.
+ blockOrder []int32
+
+ // whether to insert instructions that clobber dead registers at call sites
+ doClobber bool
+}
+
+type endReg struct {
+ r register
+ v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
+ c *Value // cached version of the value
+}
+
+type startReg struct {
+ r register
+ v *Value // pre-regalloc value needed in this register
+ c *Value // cached version of the value
+ pos src.XPos // source position of use of this register
+}
+
+// freeReg frees up register r. Any current user of r is kicked out.
+func (s *regAllocState) freeReg(r register) {
+ v := s.regs[r].v
+ if v == nil {
+ s.f.Fatalf("tried to free an already free register %d\n", r)
+ }
+
+ // Mark r as unused.
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c)
+ }
+ s.regs[r] = regState{}
+ s.values[v.ID].regs &^= regMask(1) << r
+ s.used &^= regMask(1) << r
+}
+
+// freeRegs frees up all registers listed in m.
+func (s *regAllocState) freeRegs(m regMask) {
+ for m&s.used != 0 {
+ s.freeReg(pickReg(m & s.used))
+ }
+}
+
+// clobberRegs inserts instructions that clobber registers listed in m.
+func (s *regAllocState) clobberRegs(m regMask) {
+ m &= s.allocatable & s.f.Config.gpRegMask // only integer register can contain pointers, only clobber them
+ for m != 0 {
+ r := pickReg(m)
+ m &^= 1 << r
+ x := s.curBlock.NewValue0(src.NoXPos, OpClobberReg, types.TypeVoid)
+ s.f.setHome(x, &s.registers[r])
+ }
+}
+
+// setOrig records that c's original value is the same as
+// v's original value.
+func (s *regAllocState) setOrig(c *Value, v *Value) {
+ if int(c.ID) >= cap(s.orig) {
+ x := s.f.Cache.allocValueSlice(int(c.ID) + 1)
+ copy(x, s.orig)
+ s.f.Cache.freeValueSlice(s.orig)
+ s.orig = x
+ }
+ for int(c.ID) >= len(s.orig) {
+ s.orig = append(s.orig, nil)
+ }
+ if s.orig[c.ID] != nil {
+ s.f.Fatalf("orig value set twice %s %s", c, v)
+ }
+ s.orig[c.ID] = s.orig[v.ID]
+}
+
+// assignReg assigns register r to hold c, a copy of v.
+// r must be unused.
+func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("assignReg %s %s/%s\n", &s.registers[r], v, c)
+ }
+ if s.regs[r].v != nil {
+ s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v)
+ }
+
+ // Update state.
+ s.regs[r] = regState{v, c}
+ s.values[v.ID].regs |= regMask(1) << r
+ s.used |= regMask(1) << r
+ s.f.setHome(c, &s.registers[r])
+}
+
+// allocReg chooses a register from the set of registers in mask.
+// If there is no unused register, a Value will be kicked out of
+// a register to make room.
+func (s *regAllocState) allocReg(mask regMask, v *Value) register {
+ if v.OnWasmStack {
+ return noRegister
+ }
+
+ mask &= s.allocatable
+ mask &^= s.nospill
+ if mask == 0 {
+ s.f.Fatalf("no register available for %s", v.LongString())
+ }
+
+ // Pick an unused register if one is available.
+ if mask&^s.used != 0 {
+ r := pickReg(mask &^ s.used)
+ s.usedSinceBlockStart |= regMask(1) << r
+ return r
+ }
+
+ // Pick a value to spill. Spill the value with the
+ // farthest-in-the-future use.
+ // TODO: Prefer registers with already spilled Values?
+ // TODO: Modify preference using affinity graph.
+ // TODO: if a single value is in multiple registers, spill one of them
+ // before spilling a value in just a single register.
+
+ // Find a register to spill. We spill the register containing the value
+ // whose next use is as far in the future as possible.
+ // https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
+ var r register
+ maxuse := int32(-1)
+ for t := register(0); t < s.numRegs; t++ {
+ if mask>>t&1 == 0 {
+ continue
+ }
+ v := s.regs[t].v
+ if n := s.values[v.ID].uses.dist; n > maxuse {
+ // v's next use is farther in the future than any value
+ // we've seen so far. A new best spill candidate.
+ r = t
+ maxuse = n
+ }
+ }
+ if maxuse == -1 {
+ s.f.Fatalf("couldn't find register to spill")
+ }
+
+ if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm {
+ // TODO(neelance): In theory this should never happen, because all wasm registers are equal.
+ // So if there is still a free register, the allocation should have picked that one in the first place instead of
+ // trying to kick some other value out. In practice, this case does happen and it breaks the stack optimization.
+ s.freeReg(r)
+ return r
+ }
+
+ // Try to move it around before kicking out, if there is a free register.
+ // We generate a Copy and record it. It will be deleted if never used.
+ v2 := s.regs[r].v
+ m := s.compatRegs(v2.Type) &^ s.used &^ s.tmpused &^ (regMask(1) << r)
+ if m != 0 && !s.values[v2.ID].rematerializeable && countRegs(s.values[v2.ID].regs) == 1 {
+ s.usedSinceBlockStart |= regMask(1) << r
+ r2 := pickReg(m)
+ c := s.curBlock.NewValue1(v2.Pos, OpCopy, v2.Type, s.regs[r].c)
+ s.copies[c] = false
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("copy %s to %s : %s\n", v2, c, &s.registers[r2])
+ }
+ s.setOrig(c, v2)
+ s.assignReg(r2, v2, c)
+ }
+
+ // If the evicted register isn't used between the start of the block
+ // and now then there is no reason to even request it on entry. We can
+ // drop from startRegs in that case.
+ if s.usedSinceBlockStart&(regMask(1)<<r) == 0 {
+ if s.startRegsMask&(regMask(1)<<r) == 1 {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("dropped from startRegs: %s\n", &s.registers[r])
+ }
+ s.startRegsMask &^= regMask(1) << r
+ }
+ }
+
+ s.freeReg(r)
+ s.usedSinceBlockStart |= regMask(1) << r
+ return r
+}
+
+// makeSpill returns a Value which represents the spilled value of v.
+// b is the block in which the spill is used.
+func (s *regAllocState) makeSpill(v *Value, b *Block) *Value {
+ vi := &s.values[v.ID]
+ if vi.spill != nil {
+ // Final block not known - keep track of subtree where restores reside.
+ vi.restoreMin = min32(vi.restoreMin, s.sdom[b.ID].entry)
+ vi.restoreMax = max32(vi.restoreMax, s.sdom[b.ID].exit)
+ return vi.spill
+ }
+ // Make a spill for v. We don't know where we want
+ // to put it yet, so we leave it blockless for now.
+ spill := s.f.newValueNoBlock(OpStoreReg, v.Type, v.Pos)
+ // We also don't know what the spill's arg will be.
+ // Leave it argless for now.
+ s.setOrig(spill, v)
+ vi.spill = spill
+ vi.restoreMin = s.sdom[b.ID].entry
+ vi.restoreMax = s.sdom[b.ID].exit
+ return spill
+}
+
+// allocValToReg allocates v to a register selected from regMask and
+// returns the register copy of v. Any previous user is kicked out and spilled
+// (if necessary). Load code is added at the current pc. If nospill is set the
+// allocated register is marked nospill so the assignment cannot be
+// undone until the caller allows it by clearing nospill. Returns a
+// *Value which is either v or a copy of v allocated to the chosen register.
+func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos src.XPos) *Value {
+ if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm && v.rematerializeable() {
+ c := v.copyIntoWithXPos(s.curBlock, pos)
+ c.OnWasmStack = true
+ s.setOrig(c, v)
+ return c
+ }
+ if v.OnWasmStack {
+ return v
+ }
+
+ vi := &s.values[v.ID]
+ pos = pos.WithNotStmt()
+ // Check if v is already in a requested register.
+ if mask&vi.regs != 0 {
+ r := pickReg(mask & vi.regs)
+ if s.regs[r].v != v || s.regs[r].c == nil {
+ panic("bad register state")
+ }
+ if nospill {
+ s.nospill |= regMask(1) << r
+ }
+ s.usedSinceBlockStart |= regMask(1) << r
+ return s.regs[r].c
+ }
+
+ var r register
+ // If nospill is set, the value is used immediately, so it can live on the WebAssembly stack.
+ onWasmStack := nospill && s.f.Config.ctxt.Arch.Arch == sys.ArchWasm
+ if !onWasmStack {
+ // Allocate a register.
+ r = s.allocReg(mask, v)
+ }
+
+ // Allocate v to the new register.
+ var c *Value
+ if vi.regs != 0 {
+ // Copy from a register that v is already in.
+ r2 := pickReg(vi.regs)
+ if s.regs[r2].v != v {
+ panic("bad register state")
+ }
+ s.usedSinceBlockStart |= regMask(1) << r2
+ c = s.curBlock.NewValue1(pos, OpCopy, v.Type, s.regs[r2].c)
+ } else if v.rematerializeable() {
+ // Rematerialize instead of loading from the spill location.
+ c = v.copyIntoWithXPos(s.curBlock, pos)
+ } else {
+ // Load v from its spill location.
+ spill := s.makeSpill(v, s.curBlock)
+ if s.f.pass.debug > logSpills {
+ s.f.Warnl(vi.spill.Pos, "load spill for %v from %v", v, spill)
+ }
+ c = s.curBlock.NewValue1(pos, OpLoadReg, v.Type, spill)
+ }
+
+ s.setOrig(c, v)
+
+ if onWasmStack {
+ c.OnWasmStack = true
+ return c
+ }
+
+ s.assignReg(r, v, c)
+ if c.Op == OpLoadReg && s.isGReg(r) {
+ s.f.Fatalf("allocValToReg.OpLoadReg targeting g: " + c.LongString())
+ }
+ if nospill {
+ s.nospill |= regMask(1) << r
+ }
+ return c
+}
+
+// isLeaf reports whether f performs any calls.
+func isLeaf(f *Func) bool {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.IsCall() && !v.Op.IsTailCall() {
+ // tail call is not counted as it does not save the return PC or need a frame
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// needRegister reports whether v needs a register.
+func (v *Value) needRegister() bool {
+ return !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple()
+}
+
+func (s *regAllocState) init(f *Func) {
+ s.f = f
+ s.f.RegAlloc = s.f.Cache.locs[:0]
+ s.registers = f.Config.registers
+ if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
+ s.f.Fatalf("bad number of registers: %d", nr)
+ } else {
+ s.numRegs = register(nr)
+ }
+ // Locate SP, SB, and g registers.
+ s.SPReg = noRegister
+ s.SBReg = noRegister
+ s.GReg = noRegister
+ for r := register(0); r < s.numRegs; r++ {
+ switch s.registers[r].String() {
+ case "SP":
+ s.SPReg = r
+ case "SB":
+ s.SBReg = r
+ case "g":
+ s.GReg = r
+ }
+ }
+ // Make sure we found all required registers.
+ switch noRegister {
+ case s.SPReg:
+ s.f.Fatalf("no SP register found")
+ case s.SBReg:
+ s.f.Fatalf("no SB register found")
+ case s.GReg:
+ if f.Config.hasGReg {
+ s.f.Fatalf("no g register found")
+ }
+ }
+
+ // Figure out which registers we're allowed to use.
+ s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
+ s.allocatable &^= 1 << s.SPReg
+ s.allocatable &^= 1 << s.SBReg
+ if s.f.Config.hasGReg {
+ s.allocatable &^= 1 << s.GReg
+ }
+ if buildcfg.FramePointerEnabled && s.f.Config.FPReg >= 0 {
+ s.allocatable &^= 1 << uint(s.f.Config.FPReg)
+ }
+ if s.f.Config.LinkReg != -1 {
+ if isLeaf(f) {
+ // Leaf functions don't save/restore the link register.
+ s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
+ }
+ }
+ if s.f.Config.ctxt.Flag_dynlink {
+ switch s.f.Config.arch {
+ case "386":
+ // nothing to do.
+ // Note that for Flag_shared (position independent code)
+ // we do need to be careful, but that carefulness is hidden
+ // in the rewrite rules so we always have a free register
+ // available for global load/stores. See _gen/386.rules (search for Flag_shared).
+ case "amd64":
+ s.allocatable &^= 1 << 15 // R15
+ case "arm":
+ s.allocatable &^= 1 << 9 // R9
+ case "arm64":
+ // nothing to do
+ case "loong64": // R2 (aka TP) already reserved.
+ // nothing to do
+ case "ppc64le": // R2 already reserved.
+ // nothing to do
+ case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved.
+ // nothing to do
+ case "s390x":
+ s.allocatable &^= 1 << 11 // R11
+ default:
+ s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
+ }
+ }
+
+ // Linear scan register allocation can be influenced by the order in which blocks appear.
+ // Decouple the register allocation order from the generated block order.
+ // This also creates an opportunity for experiments to find a better order.
+ s.visitOrder = layoutRegallocOrder(f)
+
+ // Compute block order. This array allows us to distinguish forward edges
+ // from backward edges and compute how far they go.
+ s.blockOrder = make([]int32, f.NumBlocks())
+ for i, b := range s.visitOrder {
+ s.blockOrder[b.ID] = int32(i)
+ }
+
+ s.regs = make([]regState, s.numRegs)
+ nv := f.NumValues()
+ if cap(s.f.Cache.regallocValues) >= nv {
+ s.f.Cache.regallocValues = s.f.Cache.regallocValues[:nv]
+ } else {
+ s.f.Cache.regallocValues = make([]valState, nv)
+ }
+ s.values = s.f.Cache.regallocValues
+ s.orig = s.f.Cache.allocValueSlice(nv)
+ s.copies = make(map[*Value]bool)
+ for _, b := range s.visitOrder {
+ for _, v := range b.Values {
+ if v.needRegister() {
+ s.values[v.ID].needReg = true
+ s.values[v.ID].rematerializeable = v.rematerializeable()
+ s.orig[v.ID] = v
+ }
+ // Note: needReg is false for values returning Tuple types.
+ // Instead, we mark the corresponding Selects as needReg.
+ }
+ }
+ s.computeLive()
+
+ s.endRegs = make([][]endReg, f.NumBlocks())
+ s.startRegs = make([][]startReg, f.NumBlocks())
+ s.spillLive = make([][]ID, f.NumBlocks())
+ s.sdom = f.Sdom()
+
+ // wasm: Mark instructions that can be optimized to have their values only on the WebAssembly stack.
+ if f.Config.ctxt.Arch.Arch == sys.ArchWasm {
+ canLiveOnStack := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(canLiveOnStack)
+ for _, b := range f.Blocks {
+ // New block. Clear candidate set.
+ canLiveOnStack.clear()
+ for _, c := range b.ControlValues() {
+ if c.Uses == 1 && !opcodeTable[c.Op].generic {
+ canLiveOnStack.add(c.ID)
+ }
+ }
+ // Walking backwards.
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if canLiveOnStack.contains(v.ID) {
+ v.OnWasmStack = true
+ } else {
+ // Value can not live on stack. Values are not allowed to be reordered, so clear candidate set.
+ canLiveOnStack.clear()
+ }
+ for _, arg := range v.Args {
+ // Value can live on the stack if:
+ // - it is only used once
+ // - it is used in the same basic block
+ // - it is not a "mem" value
+ // - it is a WebAssembly op
+ if arg.Uses == 1 && arg.Block == v.Block && !arg.Type.IsMemory() && !opcodeTable[arg.Op].generic {
+ canLiveOnStack.add(arg.ID)
+ }
+ }
+ }
+ }
+ }
+
+ // The clobberdeadreg experiment inserts code to clobber dead registers
+ // at call sites.
+ // Ignore huge functions to avoid doing too much work.
+ if base.Flag.ClobberDeadReg && len(s.f.Blocks) <= 10000 {
+ // TODO: honor GOCLOBBERDEADHASH, or maybe GOSSAHASH.
+ s.doClobber = true
+ }
+}
+
+func (s *regAllocState) close() {
+ s.f.Cache.freeValueSlice(s.orig)
+}
+
+// Adds a use record for id at distance dist from the start of the block.
+// All calls to addUse must happen with nonincreasing dist.
+func (s *regAllocState) addUse(id ID, dist int32, pos src.XPos) {
+ r := s.freeUseRecords
+ if r != nil {
+ s.freeUseRecords = r.next
+ } else {
+ r = &use{}
+ }
+ r.dist = dist
+ r.pos = pos
+ r.next = s.values[id].uses
+ s.values[id].uses = r
+ if r.next != nil && dist > r.next.dist {
+ s.f.Fatalf("uses added in wrong order")
+ }
+}
+
+// advanceUses advances the uses of v's args from the state before v to the state after v.
+// Any values which have no more uses are deallocated from registers.
+func (s *regAllocState) advanceUses(v *Value) {
+ for _, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ continue
+ }
+ ai := &s.values[a.ID]
+ r := ai.uses
+ ai.uses = r.next
+ if r.next == nil {
+ // Value is dead, free all registers that hold it.
+ s.freeRegs(ai.regs)
+ }
+ r.next = s.freeUseRecords
+ s.freeUseRecords = r
+ }
+}
+
+// liveAfterCurrentInstruction reports whether v is live after
+// the current instruction is completed. v must be used by the
+// current instruction.
+func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
+ u := s.values[v.ID].uses
+ if u == nil {
+ panic(fmt.Errorf("u is nil, v = %s, s.values[v.ID] = %v", v.LongString(), s.values[v.ID]))
+ }
+ d := u.dist
+ for u != nil && u.dist == d {
+ u = u.next
+ }
+ return u != nil && u.dist > d
+}
+
+// Sets the state of the registers to that encoded in regs.
+func (s *regAllocState) setState(regs []endReg) {
+ s.freeRegs(s.used)
+ for _, x := range regs {
+ s.assignReg(x.r, x.v, x.c)
+ }
+}
+
+// compatRegs returns the set of registers which can store a type t.
+func (s *regAllocState) compatRegs(t *types.Type) regMask {
+ var m regMask
+ if t.IsTuple() || t.IsFlags() {
+ return 0
+ }
+ if t.IsFloat() || t == types.TypeInt128 {
+ if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
+ m = s.f.Config.fp32RegMask
+ } else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
+ m = s.f.Config.fp64RegMask
+ } else {
+ m = s.f.Config.fpRegMask
+ }
+ } else {
+ m = s.f.Config.gpRegMask
+ }
+ return m & s.allocatable
+}
+
+// regspec returns the regInfo for operation op.
+func (s *regAllocState) regspec(v *Value) regInfo {
+ op := v.Op
+ if op == OpConvert {
+ // OpConvert is a generic op, so it doesn't have a
+ // register set in the static table. It can use any
+ // allocatable integer register.
+ m := s.allocatable & s.f.Config.gpRegMask
+ return regInfo{inputs: []inputInfo{{regs: m}}, outputs: []outputInfo{{regs: m}}}
+ }
+ if op == OpArgIntReg {
+ reg := v.Block.Func.Config.intParamRegs[v.AuxInt8()]
+ return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}}
+ }
+ if op == OpArgFloatReg {
+ reg := v.Block.Func.Config.floatParamRegs[v.AuxInt8()]
+ return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}}
+ }
+ if op.IsCall() {
+ if ac, ok := v.Aux.(*AuxCall); ok && ac.reg != nil {
+ return *ac.Reg(&opcodeTable[op].reg, s.f.Config)
+ }
+ }
+ if op == OpMakeResult && s.f.OwnAux.reg != nil {
+ return *s.f.OwnAux.ResultReg(s.f.Config)
+ }
+ return opcodeTable[op].reg
+}
+
+func (s *regAllocState) isGReg(r register) bool {
+ return s.f.Config.hasGReg && s.GReg == r
+}
+
+// Dummy value used to represent the value being held in a temporary register.
+var tmpVal Value
+
+func (s *regAllocState) regalloc(f *Func) {
+ regValLiveSet := f.newSparseSet(f.NumValues()) // set of values that may be live in register
+ defer f.retSparseSet(regValLiveSet)
+ var oldSched []*Value
+ var phis []*Value
+ var phiRegs []register
+ var args []*Value
+
+ // Data structure used for computing desired registers.
+ var desired desiredState
+
+ // Desired registers for inputs & outputs for each instruction in the block.
+ type dentry struct {
+ out [4]register // desired output registers
+ in [3][4]register // desired input registers (for inputs 0,1, and 2)
+ }
+ var dinfo []dentry
+
+ if f.Entry != f.Blocks[0] {
+ f.Fatalf("entry block must be first")
+ }
+
+ for _, b := range s.visitOrder {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("Begin processing block %v\n", b)
+ }
+ s.curBlock = b
+ s.startRegsMask = 0
+ s.usedSinceBlockStart = 0
+
+ // Initialize regValLiveSet and uses fields for this block.
+ // Walk backwards through the block doing liveness analysis.
+ regValLiveSet.clear()
+ for _, e := range s.live[b.ID] {
+ s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block
+ regValLiveSet.add(e.ID)
+ }
+ for _, v := range b.ControlValues() {
+ if s.values[v.ID].needReg {
+ s.addUse(v.ID, int32(len(b.Values)), b.Pos) // pseudo-use by control values
+ regValLiveSet.add(v.ID)
+ }
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ regValLiveSet.remove(v.ID)
+ if v.Op == OpPhi {
+ // Remove v from the live set, but don't add
+ // any inputs. This is the state the len(b.Preds)>1
+ // case below desires; it wants to process phis specially.
+ continue
+ }
+ if opcodeTable[v.Op].call {
+ // Function call clobbers all the registers but SP and SB.
+ regValLiveSet.clear()
+ if s.sp != 0 && s.values[s.sp].uses != nil {
+ regValLiveSet.add(s.sp)
+ }
+ if s.sb != 0 && s.values[s.sb].uses != nil {
+ regValLiveSet.add(s.sb)
+ }
+ }
+ for _, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ continue
+ }
+ s.addUse(a.ID, int32(i), v.Pos)
+ regValLiveSet.add(a.ID)
+ }
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("use distances for %s\n", b)
+ for i := range s.values {
+ vi := &s.values[i]
+ u := vi.uses
+ if u == nil {
+ continue
+ }
+ fmt.Printf(" v%d:", i)
+ for u != nil {
+ fmt.Printf(" %d", u.dist)
+ u = u.next
+ }
+ fmt.Println()
+ }
+ }
+
+ // Make a copy of the block schedule so we can generate a new one in place.
+ // We make a separate copy for phis and regular values.
+ nphi := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ nphi++
+ }
+ phis = append(phis[:0], b.Values[:nphi]...)
+ oldSched = append(oldSched[:0], b.Values[nphi:]...)
+ b.Values = b.Values[:0]
+
+ // Initialize start state of block.
+ if b == f.Entry {
+ // Regalloc state is empty to start.
+ if nphi > 0 {
+ f.Fatalf("phis in entry block")
+ }
+ } else if len(b.Preds) == 1 {
+ // Start regalloc state with the end state of the previous block.
+ s.setState(s.endRegs[b.Preds[0].b.ID])
+ if nphi > 0 {
+ f.Fatalf("phis in single-predecessor block")
+ }
+ // Drop any values which are no longer live.
+ // This may happen because at the end of p, a value may be
+ // live but only used by some other successor of p.
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v != nil && !regValLiveSet.contains(v.ID) {
+ s.freeReg(r)
+ }
+ }
+ } else {
+ // This is the complicated case. We have more than one predecessor,
+ // which means we may have Phi ops.
+
+ // Start with the final register state of the predecessor with least spill values.
+ // This is based on the following points:
+ // 1, The less spill value indicates that the register pressure of this path is smaller,
+ // so the values of this block are more likely to be allocated to registers.
+ // 2, Avoid the predecessor that contains the function call, because the predecessor that
+ // contains the function call usually generates a lot of spills and lose the previous
+ // allocation state.
+ // TODO: Improve this part. At least the size of endRegs of the predecessor also has
+ // an impact on the code size and compiler speed. But it is not easy to find a simple
+ // and efficient method that combines multiple factors.
+ idx := -1
+ for i, p := range b.Preds {
+ // If the predecessor has not been visited yet, skip it because its end state
+ // (redRegs and spillLive) has not been computed yet.
+ pb := p.b
+ if s.blockOrder[pb.ID] >= s.blockOrder[b.ID] {
+ continue
+ }
+ if idx == -1 {
+ idx = i
+ continue
+ }
+ pSel := b.Preds[idx].b
+ if len(s.spillLive[pb.ID]) < len(s.spillLive[pSel.ID]) {
+ idx = i
+ } else if len(s.spillLive[pb.ID]) == len(s.spillLive[pSel.ID]) {
+ // Use a bit of likely information. After critical pass, pb and pSel must
+ // be plain blocks, so check edge pb->pb.Preds instead of edge pb->b.
+ // TODO: improve the prediction of the likely predecessor. The following
+ // method is only suitable for the simplest cases. For complex cases,
+ // the prediction may be inaccurate, but this does not affect the
+ // correctness of the program.
+ // According to the layout algorithm, the predecessor with the
+ // smaller blockOrder is the true branch, and the test results show
+ // that it is better to choose the predecessor with a smaller
+ // blockOrder than no choice.
+ if pb.likelyBranch() && !pSel.likelyBranch() || s.blockOrder[pb.ID] < s.blockOrder[pSel.ID] {
+ idx = i
+ }
+ }
+ }
+ if idx < 0 {
+ f.Fatalf("bad visitOrder, no predecessor of %s has been visited before it", b)
+ }
+ p := b.Preds[idx].b
+ s.setState(s.endRegs[p.ID])
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("starting merge block %s with end state of %s:\n", b, p)
+ for _, x := range s.endRegs[p.ID] {
+ fmt.Printf(" %s: orig:%s cache:%s\n", &s.registers[x.r], x.v, x.c)
+ }
+ }
+
+ // Decide on registers for phi ops. Use the registers determined
+ // by the primary predecessor if we can.
+ // TODO: pick best of (already processed) predecessors?
+ // Majority vote? Deepest nesting level?
+ phiRegs = phiRegs[:0]
+ var phiUsed regMask
+
+ for _, v := range phis {
+ if !s.values[v.ID].needReg {
+ phiRegs = append(phiRegs, noRegister)
+ continue
+ }
+ a := v.Args[idx]
+ // Some instructions target not-allocatable registers.
+ // They're not suitable for further (phi-function) allocation.
+ m := s.values[a.ID].regs &^ phiUsed & s.allocatable
+ if m != 0 {
+ r := pickReg(m)
+ phiUsed |= regMask(1) << r
+ phiRegs = append(phiRegs, r)
+ } else {
+ phiRegs = append(phiRegs, noRegister)
+ }
+ }
+
+ // Second pass - deallocate all in-register phi inputs.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ a := v.Args[idx]
+ r := phiRegs[i]
+ if r == noRegister {
+ continue
+ }
+ if regValLiveSet.contains(a.ID) {
+ // Input value is still live (it is used by something other than Phi).
+ // Try to move it around before kicking out, if there is a free register.
+ // We generate a Copy in the predecessor block and record it. It will be
+ // deleted later if never used.
+ //
+ // Pick a free register. At this point some registers used in the predecessor
+ // block may have been deallocated. Those are the ones used for Phis. Exclude
+ // them (and they are not going to be helpful anyway).
+ m := s.compatRegs(a.Type) &^ s.used &^ phiUsed
+ if m != 0 && !s.values[a.ID].rematerializeable && countRegs(s.values[a.ID].regs) == 1 {
+ r2 := pickReg(m)
+ c := p.NewValue1(a.Pos, OpCopy, a.Type, s.regs[r].c)
+ s.copies[c] = false
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("copy %s to %s : %s\n", a, c, &s.registers[r2])
+ }
+ s.setOrig(c, a)
+ s.assignReg(r2, a, c)
+ s.endRegs[p.ID] = append(s.endRegs[p.ID], endReg{r2, a, c})
+ }
+ }
+ s.freeReg(r)
+ }
+
+ // Copy phi ops into new schedule.
+ b.Values = append(b.Values, phis...)
+
+ // Third pass - pick registers for phis whose input
+ // was not in a register in the primary predecessor.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ if phiRegs[i] != noRegister {
+ continue
+ }
+ m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
+ // If one of the other inputs of v is in a register, and the register is available,
+ // select this register, which can save some unnecessary copies.
+ for i, pe := range b.Preds {
+ if i == idx {
+ continue
+ }
+ ri := noRegister
+ for _, er := range s.endRegs[pe.b.ID] {
+ if er.v == s.orig[v.Args[i].ID] {
+ ri = er.r
+ break
+ }
+ }
+ if ri != noRegister && m>>ri&1 != 0 {
+ m = regMask(1) << ri
+ break
+ }
+ }
+ if m != 0 {
+ r := pickReg(m)
+ phiRegs[i] = r
+ phiUsed |= regMask(1) << r
+ }
+ }
+
+ // Set registers for phis. Add phi spill code.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ r := phiRegs[i]
+ if r == noRegister {
+ // stack-based phi
+ // Spills will be inserted in all the predecessors below.
+ s.values[v.ID].spill = v // v starts life spilled
+ continue
+ }
+ // register-based phi
+ s.assignReg(r, v, v)
+ }
+
+ // Deallocate any values which are no longer live. Phis are excluded.
+ for r := register(0); r < s.numRegs; r++ {
+ if phiUsed>>r&1 != 0 {
+ continue
+ }
+ v := s.regs[r].v
+ if v != nil && !regValLiveSet.contains(v.ID) {
+ s.freeReg(r)
+ }
+ }
+
+ // Save the starting state for use by merge edges.
+ // We append to a stack allocated variable that we'll
+ // later copy into s.startRegs in one fell swoop, to save
+ // on allocations.
+ regList := make([]startReg, 0, 32)
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ if phiUsed>>r&1 != 0 {
+ // Skip registers that phis used, we'll handle those
+ // specially during merge edge processing.
+ continue
+ }
+ regList = append(regList, startReg{r, v, s.regs[r].c, s.values[v.ID].uses.pos})
+ s.startRegsMask |= regMask(1) << r
+ }
+ s.startRegs[b.ID] = make([]startReg, len(regList))
+ copy(s.startRegs[b.ID], regList)
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("after phis\n")
+ for _, x := range s.startRegs[b.ID] {
+ fmt.Printf(" %s: v%d\n", &s.registers[x.r], x.v.ID)
+ }
+ }
+ }
+
+ // Allocate space to record the desired registers for each value.
+ if l := len(oldSched); cap(dinfo) < l {
+ dinfo = make([]dentry, l)
+ } else {
+ dinfo = dinfo[:l]
+ for i := range dinfo {
+ dinfo[i] = dentry{}
+ }
+ }
+
+ // Load static desired register info at the end of the block.
+ desired.copy(&s.desired[b.ID])
+
+ // Check actual assigned registers at the start of the next block(s).
+ // Dynamically assigned registers will trump the static
+ // desired registers computed during liveness analysis.
+ // Note that we do this phase after startRegs is set above, so that
+ // we get the right behavior for a block which branches to itself.
+ for _, e := range b.Succs {
+ succ := e.b
+ // TODO: prioritize likely successor?
+ for _, x := range s.startRegs[succ.ID] {
+ desired.add(x.v.ID, x.r)
+ }
+ // Process phi ops in succ.
+ pidx := e.i
+ for _, v := range succ.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ rp, ok := s.f.getHome(v.ID).(*Register)
+ if !ok {
+ // If v is not assigned a register, pick a register assigned to one of v's inputs.
+ // Hopefully v will get assigned that register later.
+ // If the inputs have allocated register information, add it to desired,
+ // which may reduce spill or copy operations when the register is available.
+ for _, a := range v.Args {
+ rp, ok = s.f.getHome(a.ID).(*Register)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+ desired.add(v.Args[pidx].ID, register(rp.num))
+ }
+ }
+ // Walk values backwards computing desired register info.
+ // See computeLive for more comments.
+ for i := len(oldSched) - 1; i >= 0; i-- {
+ v := oldSched[i]
+ prefs := desired.remove(v.ID)
+ regspec := s.regspec(v)
+ desired.clobber(regspec.clobbers)
+ for _, j := range regspec.inputs {
+ if countRegs(j.regs) != 1 {
+ continue
+ }
+ desired.clobber(j.regs)
+ desired.add(v.Args[j.idx].ID, pickReg(j.regs))
+ }
+ if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 {
+ if opcodeTable[v.Op].commutative {
+ desired.addList(v.Args[1].ID, prefs)
+ }
+ desired.addList(v.Args[0].ID, prefs)
+ }
+ // Save desired registers for this value.
+ dinfo[i].out = prefs
+ for j, a := range v.Args {
+ if j >= len(dinfo[i].in) {
+ break
+ }
+ dinfo[i].in[j] = desired.get(a.ID)
+ }
+ }
+
+ // Process all the non-phi values.
+ for idx, v := range oldSched {
+ tmpReg := noRegister
+ if s.f.pass.debug > regDebug {
+ fmt.Printf(" processing %s\n", v.LongString())
+ }
+ regspec := s.regspec(v)
+ if v.Op == OpPhi {
+ f.Fatalf("phi %s not at start of block", v)
+ }
+ if v.Op == OpSP {
+ s.assignReg(s.SPReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ s.sp = v.ID
+ continue
+ }
+ if v.Op == OpSB {
+ s.assignReg(s.SBReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ s.sb = v.ID
+ continue
+ }
+ if v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN {
+ if s.values[v.ID].needReg {
+ if v.Op == OpSelectN {
+ s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocResults)[int(v.AuxInt)].(*Register).num), v, v)
+ } else {
+ var i = 0
+ if v.Op == OpSelect1 {
+ i = 1
+ }
+ s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
+ }
+ }
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+ if v.Op == OpGetG && s.f.Config.hasGReg {
+ // use hardware g register
+ if s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // kick out the old value
+ }
+ s.assignReg(s.GReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+ if v.Op == OpArg {
+ // Args are "pre-spilled" values. We don't allocate
+ // any register here. We just set up the spill pointer to
+ // point at itself and any later user will restore it to use it.
+ s.values[v.ID].spill = v
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+ if v.Op == OpKeepAlive {
+ // Make sure the argument to v is still live here.
+ s.advanceUses(v)
+ a := v.Args[0]
+ vi := &s.values[a.ID]
+ if vi.regs == 0 && !vi.rematerializeable {
+ // Use the spill location.
+ // This forces later liveness analysis to make the
+ // value live at this point.
+ v.SetArg(0, s.makeSpill(a, b))
+ } else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable {
+ // Rematerializeable value with a gc.Node. This is the address of
+ // a stack object (e.g. an LEAQ). Keep the object live.
+ // Change it to VarLive, which is what plive expects for locals.
+ v.Op = OpVarLive
+ v.SetArgs1(v.Args[1])
+ v.Aux = a.Aux
+ } else {
+ // In-register and rematerializeable values are already live.
+ // These are typically rematerializeable constants like nil,
+ // or values of a variable that were modified since the last call.
+ v.Op = OpCopy
+ v.SetArgs1(v.Args[1])
+ }
+ b.Values = append(b.Values, v)
+ continue
+ }
+ if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
+ // No register allocation required (or none specified yet)
+ if s.doClobber && v.Op.IsCall() {
+ s.clobberRegs(regspec.clobbers)
+ }
+ s.freeRegs(regspec.clobbers)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+
+ if s.values[v.ID].rematerializeable {
+ // Value is rematerializeable, don't issue it here.
+ // It will get issued just before each use (see
+ // allocValueToReg).
+ for _, a := range v.Args {
+ a.Uses--
+ }
+ s.advanceUses(v)
+ continue
+ }
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("value %s\n", v.LongString())
+ fmt.Printf(" out:")
+ for _, r := range dinfo[idx].out {
+ if r != noRegister {
+ fmt.Printf(" %s", &s.registers[r])
+ }
+ }
+ fmt.Println()
+ for i := 0; i < len(v.Args) && i < 3; i++ {
+ fmt.Printf(" in%d:", i)
+ for _, r := range dinfo[idx].in[i] {
+ if r != noRegister {
+ fmt.Printf(" %s", &s.registers[r])
+ }
+ }
+ fmt.Println()
+ }
+ }
+
+ // Move arguments to registers.
+ // First, if an arg must be in a specific register and it is already
+ // in place, keep it.
+ args = append(args[:0], make([]*Value, len(v.Args))...)
+ for i, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ args[i] = a
+ }
+ }
+ for _, i := range regspec.inputs {
+ mask := i.regs
+ if countRegs(mask) == 1 && mask&s.values[v.Args[i.idx].ID].regs != 0 {
+ args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
+ }
+ }
+ // Then, if an arg must be in a specific register and that
+ // register is free, allocate that one. Otherwise when processing
+ // another input we may kick a value into the free register, which
+ // then will be kicked out again.
+ // This is a common case for passing-in-register arguments for
+ // function calls.
+ for {
+ freed := false
+ for _, i := range regspec.inputs {
+ if args[i.idx] != nil {
+ continue // already allocated
+ }
+ mask := i.regs
+ if countRegs(mask) == 1 && mask&^s.used != 0 {
+ args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
+ // If the input is in other registers that will be clobbered by v,
+ // or the input is dead, free the registers. This may make room
+ // for other inputs.
+ oldregs := s.values[v.Args[i.idx].ID].regs
+ if oldregs&^regspec.clobbers == 0 || !s.liveAfterCurrentInstruction(v.Args[i.idx]) {
+ s.freeRegs(oldregs &^ mask &^ s.nospill)
+ freed = true
+ }
+ }
+ }
+ if !freed {
+ break
+ }
+ }
+ // Last, allocate remaining ones, in an ordering defined
+ // by the register specification (most constrained first).
+ for _, i := range regspec.inputs {
+ if args[i.idx] != nil {
+ continue // already allocated
+ }
+ mask := i.regs
+ if mask&s.values[v.Args[i.idx].ID].regs == 0 {
+ // Need a new register for the input.
+ mask &= s.allocatable
+ mask &^= s.nospill
+ // Used desired register if available.
+ if i.idx < 3 {
+ for _, r := range dinfo[idx].in[i.idx] {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
+ }
+ }
+ }
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid != 0 {
+ mask &^= desired.avoid
+ }
+ }
+ args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
+ }
+
+ // If the output clobbers the input register, make sure we have
+ // at least two copies of the input register so we don't
+ // have to reload the value from the spill location.
+ if opcodeTable[v.Op].resultInArg0 {
+ var m regMask
+ if !s.liveAfterCurrentInstruction(v.Args[0]) {
+ // arg0 is dead. We can clobber its register.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && !s.liveAfterCurrentInstruction(v.Args[1]) {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ if s.values[v.Args[0].ID].rematerializeable {
+ // We can rematerialize the input, don't worry about clobbering it.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && s.values[v.Args[1].ID].rematerializeable {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
+ // we have at least 2 copies of arg0. We can afford to clobber one.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && countRegs(s.values[v.Args[1].ID].regs) >= 2 {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+
+ // We can't overwrite arg0 (or arg1, if commutative). So we
+ // need to make a copy of an input so we have a register we can modify.
+
+ // Possible new registers to copy into.
+ m = s.compatRegs(v.Args[0].Type) &^ s.used
+ if m == 0 {
+ // No free registers. In this case we'll just clobber
+ // an input and future uses of that input must use a restore.
+ // TODO(khr): We should really do this like allocReg does it,
+ // spilling the value with the most distant next use.
+ goto ok
+ }
+
+ // Try to move an input to the desired output, if allowed.
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && (m&regspec.outputs[0].regs)>>r&1 != 0 {
+ m = regMask(1) << r
+ args[0] = s.allocValToReg(v.Args[0], m, true, v.Pos)
+ // Note: we update args[0] so the instruction will
+ // use the register copy we just made.
+ goto ok
+ }
+ }
+ // Try to copy input to its desired location & use its old
+ // location as the result register.
+ for _, r := range dinfo[idx].in[0] {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ c := s.allocValToReg(v.Args[0], m, true, v.Pos)
+ s.copies[c] = false
+ // Note: no update to args[0] so the instruction will
+ // use the original copy.
+ goto ok
+ }
+ }
+ if opcodeTable[v.Op].commutative {
+ for _, r := range dinfo[idx].in[1] {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ c := s.allocValToReg(v.Args[1], m, true, v.Pos)
+ s.copies[c] = false
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ }
+ }
+
+ // Avoid future fixed uses if we can.
+ if m&^desired.avoid != 0 {
+ m &^= desired.avoid
+ }
+ // Save input 0 to a new register so we can clobber it.
+ c := s.allocValToReg(v.Args[0], m, true, v.Pos)
+ s.copies[c] = false
+
+ // Normally we use the register of the old copy of input 0 as the target.
+ // However, if input 0 is already in its desired register then we use
+ // the register of the new copy instead.
+ if regspec.outputs[0].regs>>s.f.getHome(c.ID).(*Register).num&1 != 0 {
+ if rp, ok := s.f.getHome(args[0].ID).(*Register); ok {
+ r := register(rp.num)
+ for _, r2 := range dinfo[idx].in[0] {
+ if r == r2 {
+ args[0] = c
+ break
+ }
+ }
+ }
+ }
+ }
+
+ ok:
+ // Pick a temporary register if needed.
+ // It should be distinct from all the input registers, so we
+ // allocate it after all the input registers, but before
+ // the input registers are freed via advanceUses below.
+ // (Not all instructions need that distinct part, but it is conservative.)
+ if opcodeTable[v.Op].needIntTemp {
+ m := s.allocatable & s.f.Config.gpRegMask
+ if m&^desired.avoid&^s.nospill != 0 {
+ m &^= desired.avoid
+ }
+ tmpReg = s.allocReg(m, &tmpVal)
+ s.nospill |= regMask(1) << tmpReg
+ }
+
+ // Now that all args are in regs, we're ready to issue the value itself.
+ // Before we pick a register for the output value, allow input registers
+ // to be deallocated. We do this here so that the output can use the
+ // same register as a dying input.
+ if !opcodeTable[v.Op].resultNotInArgs {
+ s.tmpused = s.nospill
+ s.nospill = 0
+ s.advanceUses(v) // frees any registers holding args that are no longer live
+ }
+
+ // Dump any registers which will be clobbered
+ if s.doClobber && v.Op.IsCall() {
+ // clobber registers that are marked as clobber in regmask, but
+ // don't clobber inputs.
+ s.clobberRegs(regspec.clobbers &^ s.tmpused &^ s.nospill)
+ }
+ s.freeRegs(regspec.clobbers)
+ s.tmpused |= regspec.clobbers
+
+ // Pick registers for outputs.
+ {
+ outRegs := noRegisters // TODO if this is costly, hoist and clear incrementally below.
+ maxOutIdx := -1
+ var used regMask
+ if tmpReg != noRegister {
+ // Ensure output registers are distinct from the temporary register.
+ // (Not all instructions need that distinct part, but it is conservative.)
+ used |= regMask(1) << tmpReg
+ }
+ for _, out := range regspec.outputs {
+ mask := out.regs & s.allocatable &^ used
+ if mask == 0 {
+ continue
+ }
+ if opcodeTable[v.Op].resultInArg0 && out.idx == 0 {
+ if !opcodeTable[v.Op].commutative {
+ // Output must use the same register as input 0.
+ r := register(s.f.getHome(args[0].ID).(*Register).num)
+ if mask>>r&1 == 0 {
+ s.f.Fatalf("resultInArg0 value's input %v cannot be an output of %s", s.f.getHome(args[0].ID).(*Register), v.LongString())
+ }
+ mask = regMask(1) << r
+ } else {
+ // Output must use the same register as input 0 or 1.
+ r0 := register(s.f.getHome(args[0].ID).(*Register).num)
+ r1 := register(s.f.getHome(args[1].ID).(*Register).num)
+ // Check r0 and r1 for desired output register.
+ found := false
+ for _, r := range dinfo[idx].out {
+ if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
+ mask = regMask(1) << r
+ found = true
+ if r == r1 {
+ args[0], args[1] = args[1], args[0]
+ }
+ break
+ }
+ }
+ if !found {
+ // Neither are desired, pick r0.
+ mask = regMask(1) << r0
+ }
+ }
+ }
+ if out.idx == 0 { // desired registers only apply to the first element of a tuple result
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
+ }
+ }
+ }
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid&^s.nospill&^s.used != 0 {
+ mask &^= desired.avoid
+ }
+ r := s.allocReg(mask, v)
+ if out.idx > maxOutIdx {
+ maxOutIdx = out.idx
+ }
+ outRegs[out.idx] = r
+ used |= regMask(1) << r
+ s.tmpused |= regMask(1) << r
+ }
+ // Record register choices
+ if v.Type.IsTuple() {
+ var outLocs LocPair
+ if r := outRegs[0]; r != noRegister {
+ outLocs[0] = &s.registers[r]
+ }
+ if r := outRegs[1]; r != noRegister {
+ outLocs[1] = &s.registers[r]
+ }
+ s.f.setHome(v, outLocs)
+ // Note that subsequent SelectX instructions will do the assignReg calls.
+ } else if v.Type.IsResults() {
+ // preallocate outLocs to the right size, which is maxOutIdx+1
+ outLocs := make(LocResults, maxOutIdx+1, maxOutIdx+1)
+ for i := 0; i <= maxOutIdx; i++ {
+ if r := outRegs[i]; r != noRegister {
+ outLocs[i] = &s.registers[r]
+ }
+ }
+ s.f.setHome(v, outLocs)
+ } else {
+ if r := outRegs[0]; r != noRegister {
+ s.assignReg(r, v, v)
+ }
+ }
+ if tmpReg != noRegister {
+ // Remember the temp register allocation, if any.
+ if s.f.tempRegs == nil {
+ s.f.tempRegs = map[ID]*Register{}
+ }
+ s.f.tempRegs[v.ID] = &s.registers[tmpReg]
+ }
+ }
+
+ // deallocate dead args, if we have not done so
+ if opcodeTable[v.Op].resultNotInArgs {
+ s.nospill = 0
+ s.advanceUses(v) // frees any registers holding args that are no longer live
+ }
+ s.tmpused = 0
+
+ // Issue the Value itself.
+ for i, a := range args {
+ v.SetArg(i, a) // use register version of arguments
+ }
+ b.Values = append(b.Values, v)
+ }
+
+ // Copy the control values - we need this so we can reduce the
+ // uses property of these values later.
+ controls := append(make([]*Value, 0, 2), b.ControlValues()...)
+
+ // Load control values into registers.
+ for i, v := range b.ControlValues() {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf(" processing control %s\n", v.LongString())
+ }
+ // We assume that a control input can be passed in any
+ // type-compatible register. If this turns out not to be true,
+ // we'll need to introduce a regspec for a block's control value.
+ b.ReplaceControl(i, s.allocValToReg(v, s.compatRegs(v.Type), false, b.Pos))
+ }
+
+ // Reduce the uses of the control values once registers have been loaded.
+ // This loop is equivalent to the advanceUses method.
+ for _, v := range controls {
+ vi := &s.values[v.ID]
+ if !vi.needReg {
+ continue
+ }
+ // Remove this use from the uses list.
+ u := vi.uses
+ vi.uses = u.next
+ if u.next == nil {
+ s.freeRegs(vi.regs) // value is dead
+ }
+ u.next = s.freeUseRecords
+ s.freeUseRecords = u
+ }
+
+ // If we are approaching a merge point and we are the primary
+ // predecessor of it, find live values that we use soon after
+ // the merge point and promote them to registers now.
+ if len(b.Succs) == 1 {
+ if s.f.Config.hasGReg && s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // Spill value in G register before any merge.
+ }
+ // For this to be worthwhile, the loop must have no calls in it.
+ top := b.Succs[0].b
+ loop := s.loopnest.b2l[top.ID]
+ if loop == nil || loop.header != top || loop.containsUnavoidableCall {
+ goto badloop
+ }
+
+ // TODO: sort by distance, pick the closest ones?
+ for _, live := range s.live[b.ID] {
+ if live.dist >= unlikelyDistance {
+ // Don't preload anything live after the loop.
+ continue
+ }
+ vid := live.ID
+ vi := &s.values[vid]
+ if vi.regs != 0 {
+ continue
+ }
+ if vi.rematerializeable {
+ continue
+ }
+ v := s.orig[vid]
+ m := s.compatRegs(v.Type) &^ s.used
+ // Used desired register if available.
+ outerloop:
+ for _, e := range desired.entries {
+ if e.ID != v.ID {
+ continue
+ }
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ break outerloop
+ }
+ }
+ }
+ if m&^desired.avoid != 0 {
+ m &^= desired.avoid
+ }
+ if m != 0 {
+ s.allocValToReg(v, m, false, b.Pos)
+ }
+ }
+ }
+ badloop:
+ ;
+
+ // Save end-of-block register state.
+ // First count how many, this cuts allocations in half.
+ k := 0
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ k++
+ }
+ regList := make([]endReg, 0, k)
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ regList = append(regList, endReg{r, v, s.regs[r].c})
+ }
+ s.endRegs[b.ID] = regList
+
+ if checkEnabled {
+ regValLiveSet.clear()
+ for _, x := range s.live[b.ID] {
+ regValLiveSet.add(x.ID)
+ }
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ if !regValLiveSet.contains(v.ID) {
+ s.f.Fatalf("val %s is in reg but not live at end of %s", v, b)
+ }
+ }
+ }
+
+ // If a value is live at the end of the block and
+ // isn't in a register, generate a use for the spill location.
+ // We need to remember this information so that
+ // the liveness analysis in stackalloc is correct.
+ for _, e := range s.live[b.ID] {
+ vi := &s.values[e.ID]
+ if vi.regs != 0 {
+ // in a register, we'll use that source for the merge.
+ continue
+ }
+ if vi.rematerializeable {
+ // we'll rematerialize during the merge.
+ continue
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
+ }
+ spill := s.makeSpill(s.orig[e.ID], b)
+ s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
+ }
+
+ // Clear any final uses.
+ // All that is left should be the pseudo-uses added for values which
+ // are live at the end of b.
+ for _, e := range s.live[b.ID] {
+ u := s.values[e.ID].uses
+ if u == nil {
+ f.Fatalf("live at end, no uses v%d", e.ID)
+ }
+ if u.next != nil {
+ f.Fatalf("live at end, too many uses v%d", e.ID)
+ }
+ s.values[e.ID].uses = nil
+ u.next = s.freeUseRecords
+ s.freeUseRecords = u
+ }
+
+ // allocReg may have dropped registers from startRegsMask that
+ // aren't actually needed in startRegs. Synchronize back to
+ // startRegs.
+ //
+ // This must be done before placing spills, which will look at
+ // startRegs to decide if a block is a valid block for a spill.
+ if c := countRegs(s.startRegsMask); c != len(s.startRegs[b.ID]) {
+ regs := make([]startReg, 0, c)
+ for _, sr := range s.startRegs[b.ID] {
+ if s.startRegsMask&(regMask(1)<<sr.r) == 0 {
+ continue
+ }
+ regs = append(regs, sr)
+ }
+ s.startRegs[b.ID] = regs
+ }
+ }
+
+ // Decide where the spills we generated will go.
+ s.placeSpills()
+
+ // Anything that didn't get a register gets a stack location here.
+ // (StoreReg, stack-based phis, inputs, ...)
+ stacklive := stackalloc(s.f, s.spillLive)
+
+ // Fix up all merge edges.
+ s.shuffle(stacklive)
+
+ // Erase any copies we never used.
+ // Also, an unused copy might be the only use of another copy,
+ // so continue erasing until we reach a fixed point.
+ for {
+ progress := false
+ for c, used := range s.copies {
+ if !used && c.Uses == 0 {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("delete copied value %s\n", c.LongString())
+ }
+ c.resetArgs()
+ f.freeValue(c)
+ delete(s.copies, c)
+ progress = true
+ }
+ }
+ if !progress {
+ break
+ }
+ }
+
+ for _, b := range s.visitOrder {
+ i := 0
+ for _, v := range b.Values {
+ if v.Op == OpInvalid {
+ continue
+ }
+ b.Values[i] = v
+ i++
+ }
+ b.Values = b.Values[:i]
+ }
+}
+
+func (s *regAllocState) placeSpills() {
+ mustBeFirst := func(op Op) bool {
+ return op.isLoweredGetClosurePtr() || op == OpPhi || op == OpArgIntReg || op == OpArgFloatReg
+ }
+
+ // Start maps block IDs to the list of spills
+ // that go at the start of the block (but after any phis).
+ start := map[ID][]*Value{}
+ // After maps value IDs to the list of spills
+ // that go immediately after that value ID.
+ after := map[ID][]*Value{}
+
+ for i := range s.values {
+ vi := s.values[i]
+ spill := vi.spill
+ if spill == nil {
+ continue
+ }
+ if spill.Block != nil {
+ // Some spills are already fully set up,
+ // like OpArgs and stack-based phis.
+ continue
+ }
+ v := s.orig[i]
+
+ // Walk down the dominator tree looking for a good place to
+ // put the spill of v. At the start "best" is the best place
+ // we have found so far.
+ // TODO: find a way to make this O(1) without arbitrary cutoffs.
+ if v == nil {
+ panic(fmt.Errorf("nil v, s.orig[%d], vi = %v, spill = %s", i, vi, spill.LongString()))
+ }
+ best := v.Block
+ bestArg := v
+ var bestDepth int16
+ if l := s.loopnest.b2l[best.ID]; l != nil {
+ bestDepth = l.depth
+ }
+ b := best
+ const maxSpillSearch = 100
+ for i := 0; i < maxSpillSearch; i++ {
+ // Find the child of b in the dominator tree which
+ // dominates all restores.
+ p := b
+ b = nil
+ for c := s.sdom.Child(p); c != nil && i < maxSpillSearch; c, i = s.sdom.Sibling(c), i+1 {
+ if s.sdom[c.ID].entry <= vi.restoreMin && s.sdom[c.ID].exit >= vi.restoreMax {
+ // c also dominates all restores. Walk down into c.
+ b = c
+ break
+ }
+ }
+ if b == nil {
+ // Ran out of blocks which dominate all restores.
+ break
+ }
+
+ var depth int16
+ if l := s.loopnest.b2l[b.ID]; l != nil {
+ depth = l.depth
+ }
+ if depth > bestDepth {
+ // Don't push the spill into a deeper loop.
+ continue
+ }
+
+ // If v is in a register at the start of b, we can
+ // place the spill here (after the phis).
+ if len(b.Preds) == 1 {
+ for _, e := range s.endRegs[b.Preds[0].b.ID] {
+ if e.v == v {
+ // Found a better spot for the spill.
+ best = b
+ bestArg = e.c
+ bestDepth = depth
+ break
+ }
+ }
+ } else {
+ for _, e := range s.startRegs[b.ID] {
+ if e.v == v {
+ // Found a better spot for the spill.
+ best = b
+ bestArg = e.c
+ bestDepth = depth
+ break
+ }
+ }
+ }
+ }
+
+ // Put the spill in the best block we found.
+ spill.Block = best
+ spill.AddArg(bestArg)
+ if best == v.Block && !mustBeFirst(v.Op) {
+ // Place immediately after v.
+ after[v.ID] = append(after[v.ID], spill)
+ } else {
+ // Place at the start of best block.
+ start[best.ID] = append(start[best.ID], spill)
+ }
+ }
+
+ // Insert spill instructions into the block schedules.
+ var oldSched []*Value
+ for _, b := range s.visitOrder {
+ nfirst := 0
+ for _, v := range b.Values {
+ if !mustBeFirst(v.Op) {
+ break
+ }
+ nfirst++
+ }
+ oldSched = append(oldSched[:0], b.Values[nfirst:]...)
+ b.Values = b.Values[:nfirst]
+ b.Values = append(b.Values, start[b.ID]...)
+ for _, v := range oldSched {
+ b.Values = append(b.Values, v)
+ b.Values = append(b.Values, after[v.ID]...)
+ }
+ }
+}
+
+// shuffle fixes up all the merge edges (those going into blocks of indegree > 1).
+func (s *regAllocState) shuffle(stacklive [][]ID) {
+ var e edgeState
+ e.s = s
+ e.cache = map[ID][]*Value{}
+ e.contents = map[Location]contentRecord{}
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("shuffle %s\n", s.f.Name)
+ fmt.Println(s.f.String())
+ }
+
+ for _, b := range s.visitOrder {
+ if len(b.Preds) <= 1 {
+ continue
+ }
+ e.b = b
+ for i, edge := range b.Preds {
+ p := edge.b
+ e.p = p
+ e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID])
+ e.process()
+ }
+ }
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("post shuffle %s\n", s.f.Name)
+ fmt.Println(s.f.String())
+ }
+}
+
+type edgeState struct {
+ s *regAllocState
+ p, b *Block // edge goes from p->b.
+
+ // for each pre-regalloc value, a list of equivalent cached values
+ cache map[ID][]*Value
+ cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
+
+ // map from location to the value it contains
+ contents map[Location]contentRecord
+
+ // desired destination locations
+ destinations []dstRecord
+ extra []dstRecord
+
+ usedRegs regMask // registers currently holding something
+ uniqueRegs regMask // registers holding the only copy of a value
+ finalRegs regMask // registers holding final target
+ rematerializeableRegs regMask // registers that hold rematerializeable values
+}
+
+type contentRecord struct {
+ vid ID // pre-regalloc value
+ c *Value // cached value
+ final bool // this is a satisfied destination
+ pos src.XPos // source position of use of the value
+}
+
+type dstRecord struct {
+ loc Location // register or stack slot
+ vid ID // pre-regalloc value it should contain
+ splice **Value // place to store reference to the generating instruction
+ pos src.XPos // source position of use of this location
+}
+
+// setup initializes the edge state for shuffling.
+func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) {
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("edge %s->%s\n", e.p, e.b)
+ }
+
+ // Clear state.
+ for _, vid := range e.cachedVals {
+ delete(e.cache, vid)
+ }
+ e.cachedVals = e.cachedVals[:0]
+ for k := range e.contents {
+ delete(e.contents, k)
+ }
+ e.usedRegs = 0
+ e.uniqueRegs = 0
+ e.finalRegs = 0
+ e.rematerializeableRegs = 0
+
+ // Live registers can be sources.
+ for _, x := range srcReg {
+ e.set(&e.s.registers[x.r], x.v.ID, x.c, false, src.NoXPos) // don't care the position of the source
+ }
+ // So can all of the spill locations.
+ for _, spillID := range stacklive {
+ v := e.s.orig[spillID]
+ spill := e.s.values[v.ID].spill
+ if !e.s.sdom.IsAncestorEq(spill.Block, e.p) {
+ // Spills were placed that only dominate the uses found
+ // during the first regalloc pass. The edge fixup code
+ // can't use a spill location if the spill doesn't dominate
+ // the edge.
+ // We are guaranteed that if the spill doesn't dominate this edge,
+ // then the value is available in a register (because we called
+ // makeSpill for every value not in a register at the start
+ // of an edge).
+ continue
+ }
+ e.set(e.s.f.getHome(spillID), v.ID, spill, false, src.NoXPos) // don't care the position of the source
+ }
+
+ // Figure out all the destinations we need.
+ dsts := e.destinations[:0]
+ for _, x := range dstReg {
+ dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.v.ID, nil, x.pos})
+ }
+ // Phis need their args to end up in a specific location.
+ for _, v := range e.b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ loc := e.s.f.getHome(v.ID)
+ if loc == nil {
+ continue
+ }
+ dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx], v.Pos})
+ }
+ e.destinations = dsts
+
+ if e.s.f.pass.debug > regDebug {
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID), vid, c)
+ }
+ }
+ for _, d := range e.destinations {
+ fmt.Printf("dst %s: v%d\n", d.loc, d.vid)
+ }
+ }
+}
+
+// process generates code to move all the values to the right destination locations.
+func (e *edgeState) process() {
+ dsts := e.destinations
+
+ // Process the destinations until they are all satisfied.
+ for len(dsts) > 0 {
+ i := 0
+ for _, d := range dsts {
+ if !e.processDest(d.loc, d.vid, d.splice, d.pos) {
+ // Failed - save for next iteration.
+ dsts[i] = d
+ i++
+ }
+ }
+ if i < len(dsts) {
+ // Made some progress. Go around again.
+ dsts = dsts[:i]
+
+ // Append any extras destinations we generated.
+ dsts = append(dsts, e.extra...)
+ e.extra = e.extra[:0]
+ continue
+ }
+
+ // We made no progress. That means that any
+ // remaining unsatisfied moves are in simple cycles.
+ // For example, A -> B -> C -> D -> A.
+ // A ----> B
+ // ^ |
+ // | |
+ // | v
+ // D <---- C
+
+ // To break the cycle, we pick an unused register, say R,
+ // and put a copy of B there.
+ // A ----> B
+ // ^ |
+ // | |
+ // | v
+ // D <---- C <---- R=copyofB
+ // When we resume the outer loop, the A->B move can now proceed,
+ // and eventually the whole cycle completes.
+
+ // Copy any cycle location to a temp register. This duplicates
+ // one of the cycle entries, allowing the just duplicated value
+ // to be overwritten and the cycle to proceed.
+ d := dsts[0]
+ loc := d.loc
+ vid := e.contents[loc].vid
+ c := e.contents[loc].c
+ r := e.findRegFor(c.Type)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc, c)
+ }
+ e.erase(r)
+ pos := d.pos.WithNotStmt()
+ if _, isReg := loc.(*Register); isReg {
+ c = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ c = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ }
+ e.set(r, vid, c, false, pos)
+ if c.Op == OpLoadReg && e.s.isGReg(register(r.(*Register).num)) {
+ e.s.f.Fatalf("process.OpLoadReg targeting g: " + c.LongString())
+ }
+ }
+}
+
+// processDest generates code to put value vid into location loc. Returns true
+// if progress was made.
+func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XPos) bool {
+ pos = pos.WithNotStmt()
+ occupant := e.contents[loc]
+ if occupant.vid == vid {
+ // Value is already in the correct place.
+ e.contents[loc] = contentRecord{vid, occupant.c, true, pos}
+ if splice != nil {
+ (*splice).Uses--
+ *splice = occupant.c
+ occupant.c.Uses++
+ }
+ // Note: if splice==nil then c will appear dead. This is
+ // non-SSA formed code, so be careful after this pass not to run
+ // deadcode elimination.
+ if _, ok := e.s.copies[occupant.c]; ok {
+ // The copy at occupant.c was used to avoid spill.
+ e.s.copies[occupant.c] = true
+ }
+ return true
+ }
+
+ // Check if we're allowed to clobber the destination location.
+ if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable {
+ // We can't overwrite the last copy
+ // of a value that needs to survive.
+ return false
+ }
+
+ // Copy from a source of v, register preferred.
+ v := e.s.orig[vid]
+ var c *Value
+ var src Location
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("moving v%d to %s\n", vid, loc)
+ fmt.Printf("sources of v%d:", vid)
+ }
+ for _, w := range e.cache[vid] {
+ h := e.s.f.getHome(w.ID)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf(" %s:%s", h, w)
+ }
+ _, isreg := h.(*Register)
+ if src == nil || isreg {
+ c = w
+ src = h
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ if src != nil {
+ fmt.Printf(" [use %s]\n", src)
+ } else {
+ fmt.Printf(" [no source]\n")
+ }
+ }
+ _, dstReg := loc.(*Register)
+
+ // Pre-clobber destination. This avoids the
+ // following situation:
+ // - v is currently held in R0 and stacktmp0.
+ // - We want to copy stacktmp1 to stacktmp0.
+ // - We choose R0 as the temporary register.
+ // During the copy, both R0 and stacktmp0 are
+ // clobbered, losing both copies of v. Oops!
+ // Erasing the destination early means R0 will not
+ // be chosen as the temp register, as it will then
+ // be the last copy of v.
+ e.erase(loc)
+ var x *Value
+ if c == nil || e.s.values[vid].rematerializeable {
+ if !e.s.values[vid].rematerializeable {
+ e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString())
+ }
+ if dstReg {
+ x = v.copyInto(e.p)
+ } else {
+ // Rematerialize into stack slot. Need a free
+ // register to accomplish this.
+ r := e.findRegFor(v.Type)
+ e.erase(r)
+ x = v.copyIntoWithXPos(e.p, pos)
+ e.set(r, vid, x, false, pos)
+ // Make sure we spill with the size of the slot, not the
+ // size of x (which might be wider due to our dropping
+ // of narrowing conversions).
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, x)
+ }
+ } else {
+ // Emit move from src to dst.
+ _, srcReg := src.(*Register)
+ if srcReg {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, c)
+ }
+ } else {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ } else {
+ // mem->mem. Use temp register.
+ r := e.findRegFor(c.Type)
+ e.erase(r)
+ t := e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ e.set(r, vid, t, false, pos)
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, t)
+ }
+ }
+ }
+ e.set(loc, vid, x, true, pos)
+ if x.Op == OpLoadReg && e.s.isGReg(register(loc.(*Register).num)) {
+ e.s.f.Fatalf("processDest.OpLoadReg targeting g: " + x.LongString())
+ }
+ if splice != nil {
+ (*splice).Uses--
+ *splice = x
+ x.Uses++
+ }
+ return true
+}
+
+// set changes the contents of location loc to hold the given value and its cached representative.
+func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos) {
+ e.s.f.setHome(c, loc)
+ e.contents[loc] = contentRecord{vid, c, final, pos}
+ a := e.cache[vid]
+ if len(a) == 0 {
+ e.cachedVals = append(e.cachedVals, vid)
+ }
+ a = append(a, c)
+ e.cache[vid] = a
+ if r, ok := loc.(*Register); ok {
+ if e.usedRegs&(regMask(1)<<uint(r.num)) != 0 {
+ e.s.f.Fatalf("%v is already set (v%d/%v)", r, vid, c)
+ }
+ e.usedRegs |= regMask(1) << uint(r.num)
+ if final {
+ e.finalRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 2 {
+ if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs &^= regMask(1) << uint(t.num)
+ }
+ }
+ if e.s.values[vid].rematerializeable {
+ e.rematerializeableRegs |= regMask(1) << uint(r.num)
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("%s\n", c.LongString())
+ fmt.Printf("v%d now available in %s:%s\n", vid, loc, c)
+ }
+}
+
+// erase removes any user of loc.
+func (e *edgeState) erase(loc Location) {
+ cr := e.contents[loc]
+ if cr.c == nil {
+ return
+ }
+ vid := cr.vid
+
+ if cr.final {
+ // Add a destination to move this value back into place.
+ // Make sure it gets added to the tail of the destination queue
+ // so we make progress on other moves first.
+ e.extra = append(e.extra, dstRecord{loc, cr.vid, nil, cr.pos})
+ }
+
+ // Remove c from the list of cached values.
+ a := e.cache[vid]
+ for i, c := range a {
+ if e.s.f.getHome(c.ID) == loc {
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c)
+ }
+ a[i], a = a[len(a)-1], a[:len(a)-1]
+ break
+ }
+ }
+ e.cache[vid] = a
+
+ // Update register masks.
+ if r, ok := loc.(*Register); ok {
+ e.usedRegs &^= regMask(1) << uint(r.num)
+ if cr.final {
+ e.finalRegs &^= regMask(1) << uint(r.num)
+ }
+ e.rematerializeableRegs &^= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ }
+}
+
+// findRegFor finds a register we can use to make a temp copy of type typ.
+func (e *edgeState) findRegFor(typ *types.Type) Location {
+ // Which registers are possibilities.
+ types := &e.s.f.Config.Types
+ m := e.s.compatRegs(typ)
+
+ // Pick a register. In priority order:
+ // 1) an unused register
+ // 2) a non-unique register not holding a final value
+ // 3) a non-unique register
+ // 4) a register holding a rematerializeable value
+ x := m &^ e.usedRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m &^ e.uniqueRegs &^ e.finalRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m &^ e.uniqueRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m & e.rematerializeableRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+
+ // No register is available.
+ // Pick a register to spill.
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.num)&1 != 0 {
+ if !c.rematerializeable() {
+ x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c)
+ // Allocate a temp location to spill a register to.
+ // The type of the slot is immaterial - it will not be live across
+ // any safepoint. Just use a type big enough to hold any register.
+ t := LocalSlot{N: e.s.f.NewLocal(c.Pos, types.Int64), Type: types.Int64}
+ // TODO: reuse these slots. They'll need to be erased first.
+ e.set(t, vid, x, false, c.Pos)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf(" SPILL %s->%s %s\n", r, t, x.LongString())
+ }
+ }
+ // r will now be overwritten by the caller. At some point
+ // later, the newly saved value will be moved back to its
+ // final destination in processDest.
+ return r
+ }
+ }
+ }
+
+ fmt.Printf("m:%d unique:%d final:%d rematerializable:%d\n", m, e.uniqueRegs, e.finalRegs, e.rematerializeableRegs)
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID))
+ }
+ }
+ e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b)
+ return nil
+}
+
+// rematerializeable reports whether the register allocator should recompute
+// a value instead of spilling/restoring it.
+func (v *Value) rematerializeable() bool {
+ if !opcodeTable[v.Op].rematerializeable {
+ return false
+ }
+ for _, a := range v.Args {
+ // SP and SB (generated by OpSP and OpSB) are always available.
+ if a.Op != OpSP && a.Op != OpSB {
+ return false
+ }
+ }
+ return true
+}
+
+type liveInfo struct {
+ ID ID // ID of value
+ dist int32 // # of instructions before next use
+ pos src.XPos // source position of next use
+}
+
+// computeLive computes a map from block ID to a list of value IDs live at the end
+// of that block. Together with the value ID is a count of how many instructions
+// to the next use of that value. The resulting map is stored in s.live.
+// computeLive also computes the desired register information at the end of each block.
+// This desired register information is stored in s.desired.
+// TODO: this could be quadratic if lots of variables are live across lots of
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// of this function) require only linear size & time.
+func (s *regAllocState) computeLive() {
+ f := s.f
+ s.live = make([][]liveInfo, f.NumBlocks())
+ s.desired = make([]desiredState, f.NumBlocks())
+ var phis []*Value
+
+ live := f.newSparseMapPos(f.NumValues())
+ defer f.retSparseMapPos(live)
+ t := f.newSparseMapPos(f.NumValues())
+ defer f.retSparseMapPos(t)
+
+ // Keep track of which value we want in each register.
+ var desired desiredState
+
+ // Instead of iterating over f.Blocks, iterate over their postordering.
+ // Liveness information flows backward, so starting at the end
+ // increases the probability that we will stabilize quickly.
+ // TODO: Do a better job yet. Here's one possibility:
+ // Calculate the dominator tree and locate all strongly connected components.
+ // If a value is live in one block of an SCC, it is live in all.
+ // Walk the dominator tree from end to beginning, just once, treating SCC
+ // components as single blocks, duplicated calculated liveness information
+ // out to all of them.
+ po := f.postorder()
+ s.loopnest = f.loopnest()
+ s.loopnest.calculateDepths()
+ for {
+ changed := false
+
+ for _, b := range po {
+ // Start with known live values at the end of the block.
+ // Add len(b.Values) to adjust from end-of-block distance
+ // to beginning-of-block distance.
+ live.clear()
+ for _, e := range s.live[b.ID] {
+ live.set(e.ID, e.dist+int32(len(b.Values)), e.pos)
+ }
+
+ // Mark control values as live
+ for _, c := range b.ControlValues() {
+ if s.values[c.ID].needReg {
+ live.set(c.ID, int32(len(b.Values)), b.Pos)
+ }
+ }
+
+ // Propagate backwards to the start of the block
+ // Assumes Values have been scheduled.
+ phis = phis[:0]
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ live.remove(v.ID)
+ if v.Op == OpPhi {
+ // save phi ops for later
+ phis = append(phis, v)
+ continue
+ }
+ if opcodeTable[v.Op].call {
+ c := live.contents()
+ for i := range c {
+ c[i].val += unlikelyDistance
+ }
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needReg {
+ live.set(a.ID, int32(i), v.Pos)
+ }
+ }
+ }
+ // Propagate desired registers backwards.
+ desired.copy(&s.desired[b.ID])
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ prefs := desired.remove(v.ID)
+ if v.Op == OpPhi {
+ // TODO: if v is a phi, save desired register for phi inputs.
+ // For now, we just drop it and don't propagate
+ // desired registers back though phi nodes.
+ continue
+ }
+ regspec := s.regspec(v)
+ // Cancel desired registers if they get clobbered.
+ desired.clobber(regspec.clobbers)
+ // Update desired registers if there are any fixed register inputs.
+ for _, j := range regspec.inputs {
+ if countRegs(j.regs) != 1 {
+ continue
+ }
+ desired.clobber(j.regs)
+ desired.add(v.Args[j.idx].ID, pickReg(j.regs))
+ }
+ // Set desired register of input 0 if this is a 2-operand instruction.
+ if opcodeTable[v.Op].resultInArg0 || v.Op == OpAMD64ADDQconst || v.Op == OpAMD64ADDLconst || v.Op == OpSelect0 {
+ // ADDQconst is added here because we want to treat it as resultInArg0 for
+ // the purposes of desired registers, even though it is not an absolute requirement.
+ // This is because we'd rather implement it as ADDQ instead of LEAQ.
+ // Same for ADDLconst
+ // Select0 is added here to propagate the desired register to the tuple-generating instruction.
+ if opcodeTable[v.Op].commutative {
+ desired.addList(v.Args[1].ID, prefs)
+ }
+ desired.addList(v.Args[0].ID, prefs)
+ }
+ }
+
+ // For each predecessor of b, expand its list of live-at-end values.
+ // invariant: live contains the values live at the start of b (excluding phi inputs)
+ for i, e := range b.Preds {
+ p := e.b
+ // Compute additional distance for the edge.
+ // Note: delta must be at least 1 to distinguish the control
+ // value use from the first user in a successor block.
+ delta := int32(normalDistance)
+ if len(p.Succs) == 2 {
+ if p.Succs[0].b == b && p.Likely == BranchLikely ||
+ p.Succs[1].b == b && p.Likely == BranchUnlikely {
+ delta = likelyDistance
+ }
+ if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
+ p.Succs[1].b == b && p.Likely == BranchLikely {
+ delta = unlikelyDistance
+ }
+ }
+
+ // Update any desired registers at the end of p.
+ s.desired[p.ID].merge(&desired)
+
+ // Start t off with the previously known live values at the end of p.
+ t.clear()
+ for _, e := range s.live[p.ID] {
+ t.set(e.ID, e.dist, e.pos)
+ }
+ update := false
+
+ // Add new live values from scanning this block.
+ for _, e := range live.contents() {
+ d := e.val + delta
+ if !t.contains(e.key) || d < t.get(e.key) {
+ update = true
+ t.set(e.key, d, e.pos)
+ }
+ }
+ // Also add the correct arg from the saved phi values.
+ // All phis are at distance delta (we consider them
+ // simultaneously happening at the start of the block).
+ for _, v := range phis {
+ id := v.Args[i].ID
+ if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
+ update = true
+ t.set(id, delta, v.Pos)
+ }
+ }
+
+ if !update {
+ continue
+ }
+ // The live set has changed, update it.
+ l := s.live[p.ID][:0]
+ if cap(l) < t.size() {
+ l = make([]liveInfo, 0, t.size())
+ }
+ for _, e := range t.contents() {
+ l = append(l, liveInfo{e.key, e.val, e.pos})
+ }
+ s.live[p.ID] = l
+ changed = true
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+ if f.pass.debug > regDebug {
+ fmt.Println("live values at end of each block")
+ for _, b := range f.Blocks {
+ fmt.Printf(" %s:", b)
+ for _, x := range s.live[b.ID] {
+ fmt.Printf(" v%d(%d)", x.ID, x.dist)
+ for _, e := range s.desired[b.ID].entries {
+ if e.ID != x.ID {
+ continue
+ }
+ fmt.Printf("[")
+ first := true
+ for _, r := range e.regs {
+ if r == noRegister {
+ continue
+ }
+ if !first {
+ fmt.Printf(",")
+ }
+ fmt.Print(&s.registers[r])
+ first = false
+ }
+ fmt.Printf("]")
+ }
+ }
+ if avoid := s.desired[b.ID].avoid; avoid != 0 {
+ fmt.Printf(" avoid=%v", s.RegMaskString(avoid))
+ }
+ fmt.Println()
+ }
+ }
+}
+
+// A desiredState represents desired register assignments.
+type desiredState struct {
+ // Desired assignments will be small, so we just use a list
+ // of valueID+registers entries.
+ entries []desiredStateEntry
+ // Registers that other values want to be in. This value will
+ // contain at least the union of the regs fields of entries, but
+ // may contain additional entries for values that were once in
+ // this data structure but are no longer.
+ avoid regMask
+}
+type desiredStateEntry struct {
+ // (pre-regalloc) value
+ ID ID
+ // Registers it would like to be in, in priority order.
+ // Unused slots are filled with noRegister.
+ // For opcodes that return tuples, we track desired registers only
+ // for the first element of the tuple.
+ regs [4]register
+}
+
+func (d *desiredState) clear() {
+ d.entries = d.entries[:0]
+ d.avoid = 0
+}
+
+// get returns a list of desired registers for value vid.
+func (d *desiredState) get(vid ID) [4]register {
+ for _, e := range d.entries {
+ if e.ID == vid {
+ return e.regs
+ }
+ }
+ return [4]register{noRegister, noRegister, noRegister, noRegister}
+}
+
+// add records that we'd like value vid to be in register r.
+func (d *desiredState) add(vid ID, r register) {
+ d.avoid |= regMask(1) << r
+ for i := range d.entries {
+ e := &d.entries[i]
+ if e.ID != vid {
+ continue
+ }
+ if e.regs[0] == r {
+ // Already known and highest priority
+ return
+ }
+ for j := 1; j < len(e.regs); j++ {
+ if e.regs[j] == r {
+ // Move from lower priority to top priority
+ copy(e.regs[1:], e.regs[:j])
+ e.regs[0] = r
+ return
+ }
+ }
+ copy(e.regs[1:], e.regs[:])
+ e.regs[0] = r
+ return
+ }
+ d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}})
+}
+
+func (d *desiredState) addList(vid ID, regs [4]register) {
+ // regs is in priority order, so iterate in reverse order.
+ for i := len(regs) - 1; i >= 0; i-- {
+ r := regs[i]
+ if r != noRegister {
+ d.add(vid, r)
+ }
+ }
+}
+
+// clobber erases any desired registers in the set m.
+func (d *desiredState) clobber(m regMask) {
+ for i := 0; i < len(d.entries); {
+ e := &d.entries[i]
+ j := 0
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 == 0 {
+ e.regs[j] = r
+ j++
+ }
+ }
+ if j == 0 {
+ // No more desired registers for this value.
+ d.entries[i] = d.entries[len(d.entries)-1]
+ d.entries = d.entries[:len(d.entries)-1]
+ continue
+ }
+ for ; j < len(e.regs); j++ {
+ e.regs[j] = noRegister
+ }
+ i++
+ }
+ d.avoid &^= m
+}
+
+// copy copies a desired state from another desiredState x.
+func (d *desiredState) copy(x *desiredState) {
+ d.entries = append(d.entries[:0], x.entries...)
+ d.avoid = x.avoid
+}
+
+// remove removes the desired registers for vid and returns them.
+func (d *desiredState) remove(vid ID) [4]register {
+ for i := range d.entries {
+ if d.entries[i].ID == vid {
+ regs := d.entries[i].regs
+ d.entries[i] = d.entries[len(d.entries)-1]
+ d.entries = d.entries[:len(d.entries)-1]
+ return regs
+ }
+ }
+ return [4]register{noRegister, noRegister, noRegister, noRegister}
+}
+
+// merge merges another desired state x into d.
+func (d *desiredState) merge(x *desiredState) {
+ d.avoid |= x.avoid
+ // There should only be a few desired registers, so
+ // linear insert is ok.
+ for _, e := range x.entries {
+ d.addList(e.ID, e.regs)
+ }
+}
+
+func min32(x, y int32) int32 {
+ if x < y {
+ return x
+ }
+ return y
+}
+func max32(x, y int32) int32 {
+ if x > y {
+ return x
+ }
+ return y
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
new file mode 100644
index 0000000..7d804a0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestLiveControlOps(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpAMD64MOVLconst, c.config.Types.Int8, 1, nil),
+ Valu("y", OpAMD64MOVLconst, c.config.Types.Int8, 2, nil),
+ Valu("a", OpAMD64TESTB, types.TypeFlags, 0, nil, "x", "y"),
+ Valu("b", OpAMD64TESTB, types.TypeFlags, 0, nil, "y", "x"),
+ Eq("a", "if", "exit"),
+ ),
+ Bloc("if",
+ Eq("b", "plain", "exit"),
+ ),
+ Bloc("plain",
+ Goto("exit"),
+ ),
+ Bloc("exit",
+ Exit("mem"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+}
+
+// Test to make sure G register is never reloaded from spill (spill of G is okay)
+// See #25504
+func TestNoGetgLoadReg(t *testing.T) {
+ /*
+ Original:
+ func fff3(i int) *g {
+ gee := getg()
+ if i == 0 {
+ fff()
+ }
+ return gee // here
+ }
+ */
+ c := testConfigARM64(t)
+ f := c.Fun("b1",
+ Bloc("b1",
+ Valu("v1", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v6", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+ Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
+ Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
+ Eq("v11", "b2", "b4"),
+ ),
+ Bloc("b4",
+ Goto("b3"),
+ ),
+ Bloc("b3",
+ Valu("v14", OpPhi, types.TypeMem, 0, nil, "v1", "v12"),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v16", OpARM64MOVDstore, types.TypeMem, 0, nil, "v8", "sb", "v14"),
+ Exit("v16"),
+ ),
+ Bloc("b2",
+ Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "v1"),
+ Goto("b3"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ // Double-check that we never restore to the G register. Regalloc should catch it, but check again anyway.
+ r := f.f.RegAlloc
+ for _, b := range f.blocks {
+ for _, v := range b.Values {
+ if v.Op == OpLoadReg && r[v.ID].String() == "g" {
+ t.Errorf("Saw OpLoadReg targeting g register: %s", v.LongString())
+ }
+ }
+ }
+}
+
+// Test to make sure we don't push spills into loops.
+// See issue #19595.
+func TestSpillWithLoop(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64)),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, c.Temp(c.config.Types.Bool)),
+ Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
+ Goto("loop"),
+ ),
+ Bloc("loop",
+ Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"),
+ Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "memphi"),
+ Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"),
+ Eq("test", "next", "exit"),
+ ),
+ Bloc("next",
+ Goto("loop"),
+ ),
+ Bloc("exit",
+ Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "ptr", "ld", "call"),
+ Exit("store"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ for _, v := range f.blocks["loop"].Values {
+ if v.Op == OpStoreReg {
+ t.Errorf("spill inside loop %s", v.LongString())
+ }
+ }
+}
+
+func TestSpillMove1(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
+ Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+ Goto("loop1"),
+ ),
+ Bloc("loop1",
+ Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+ Eq("a", "loop2", "exit1"),
+ ),
+ Bloc("loop2",
+ Eq("a", "loop1", "exit2"),
+ ),
+ Bloc("exit1",
+ // store before call, y is available in a register
+ Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem"),
+ Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem2"),
+ Exit("mem3"),
+ ),
+ Bloc("exit2",
+ // store after call, y must be loaded from a spill location
+ Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+ Exit("mem5"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // Spill should be moved to exit2.
+ if numSpills(f.blocks["loop1"]) != 0 {
+ t.Errorf("spill present from loop1")
+ }
+ if numSpills(f.blocks["loop2"]) != 0 {
+ t.Errorf("spill present in loop2")
+ }
+ if numSpills(f.blocks["exit1"]) != 0 {
+ t.Errorf("spill present in exit1")
+ }
+ if numSpills(f.blocks["exit2"]) != 1 {
+ t.Errorf("spill missing in exit2")
+ }
+
+}
+
+func TestSpillMove2(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Temp(c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Temp(c.config.Types.Int64.PtrTo())),
+ Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+ Goto("loop1"),
+ ),
+ Bloc("loop1",
+ Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+ Eq("a", "loop2", "exit1"),
+ ),
+ Bloc("loop2",
+ Eq("a", "loop1", "exit2"),
+ ),
+ Bloc("exit1",
+ // store after call, y must be loaded from a spill location
+ Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"),
+ Exit("mem3"),
+ ),
+ Bloc("exit2",
+ // store after call, y must be loaded from a spill location
+ Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+ Exit("mem5"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // There should be a spill in loop1, and nowhere else.
+ // TODO: resurrect moving spills out of loops? We could put spills at the start of both exit1 and exit2.
+ if numSpills(f.blocks["loop1"]) != 1 {
+ t.Errorf("spill missing from loop1")
+ }
+ if numSpills(f.blocks["loop2"]) != 0 {
+ t.Errorf("spill present in loop2")
+ }
+ if numSpills(f.blocks["exit1"]) != 0 {
+ t.Errorf("spill present in exit1")
+ }
+ if numSpills(f.blocks["exit2"]) != 0 {
+ t.Errorf("spill present in exit2")
+ }
+
+}
+
+func numSpills(b *Block) int {
+ n := 0
+ for _, v := range b.Values {
+ if v.Op == OpStoreReg {
+ n++
+ }
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
new file mode 100644
index 0000000..bb09c6c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -0,0 +1,2211 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "encoding/binary"
+ "fmt"
+ "internal/buildcfg"
+ "io"
+ "math"
+ "math/bits"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+type deadValueChoice bool
+
+const (
+ leaveDeadValues deadValueChoice = false
+ removeDeadValues = true
+)
+
+// deadcode indicates whether rewrite should try to remove any values that become dead.
+func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) {
+ // repeat rewrites until we find no more rewrites
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+ debug := f.pass.debug
+ if debug > 1 {
+ fmt.Printf("%s: rewriting for %s\n", f.pass.name, f.Name)
+ }
+ var iters int
+ var states map[string]bool
+ for {
+ change := false
+ deadChange := false
+ for _, b := range f.Blocks {
+ var b0 *Block
+ if debug > 1 {
+ b0 = new(Block)
+ *b0 = *b
+ b0.Succs = append([]Edge{}, b.Succs...) // make a new copy, not aliasing
+ }
+ for i, c := range b.ControlValues() {
+ for c.Op == OpCopy {
+ c = c.Args[0]
+ b.ReplaceControl(i, c)
+ }
+ }
+ if rb(b) {
+ change = true
+ if debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", b0.LongString(), b.LongString())
+ }
+ }
+ for j, v := range b.Values {
+ var v0 *Value
+ if debug > 1 {
+ v0 = new(Value)
+ *v0 = *v
+ v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing
+ }
+ if v.Uses == 0 && v.removeable() {
+ if v.Op != OpInvalid && deadcode == removeDeadValues {
+ // Reset any values that are now unused, so that we decrement
+ // the use count of all of its arguments.
+ // Not quite a deadcode pass, because it does not handle cycles.
+ // But it should help Uses==1 rules to fire.
+ v.reset(OpInvalid)
+ deadChange = true
+ }
+ // No point rewriting values which aren't used.
+ continue
+ }
+
+ vchange := phielimValue(v)
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+
+ // Eliminate copy inputs.
+ // If any copy input becomes unused, mark it
+ // as invalid and discard its argument. Repeat
+ // recursively on the discarded argument.
+ // This phase helps remove phantom "dead copy" uses
+ // of a value so that a x.Uses==1 rule condition
+ // fires reliably.
+ for i, a := range v.Args {
+ if a.Op != OpCopy {
+ continue
+ }
+ aa := copySource(a)
+ v.SetArg(i, aa)
+ // If a, a copy, has a line boundary indicator, attempt to find a new value
+ // to hold it. The first candidate is the value that will replace a (aa),
+ // if it shares the same block and line and is eligible.
+ // The second option is v, which has a as an input. Because aa is earlier in
+ // the data flow, it is the better choice.
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ if aa.Block == a.Block && aa.Pos.Line() == a.Pos.Line() && aa.Pos.IsStmt() != src.PosNotStmt {
+ aa.Pos = aa.Pos.WithIsStmt()
+ } else if v.Block == a.Block && v.Pos.Line() == a.Pos.Line() && v.Pos.IsStmt() != src.PosNotStmt {
+ v.Pos = v.Pos.WithIsStmt()
+ } else {
+ // Record the lost line and look for a new home after all rewrites are complete.
+ // TODO: it's possible (in FOR loops, in particular) for statement boundaries for the same
+ // line to appear in more than one block, but only one block is stored, so if both end
+ // up here, then one will be lost.
+ pendingLines.set(a.Pos, int32(a.Block.ID))
+ }
+ a.Pos = a.Pos.WithNotStmt()
+ }
+ vchange = true
+ for a.Uses == 0 {
+ b := a.Args[0]
+ a.reset(OpInvalid)
+ a = b
+ }
+ }
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+
+ // apply rewrite function
+ if rv(v) {
+ vchange = true
+ // If value changed to a poor choice for a statement boundary, move the boundary
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ if k := nextGoodStatementIndex(v, j, b); k != j {
+ v.Pos = v.Pos.WithNotStmt()
+ b.Values[k].Pos = b.Values[k].Pos.WithIsStmt()
+ }
+ }
+ }
+
+ change = change || vchange
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+ }
+ }
+ if !change && !deadChange {
+ break
+ }
+ iters++
+ if (iters > 1000 || debug >= 2) && change {
+ // We've done a suspiciously large number of rewrites (or we're in debug mode).
+ // As of Sep 2021, 90% of rewrites complete in 4 iterations or fewer
+ // and the maximum value encountered during make.bash is 12.
+ // Start checking for cycles. (This is too expensive to do routinely.)
+ // Note: we avoid this path for deadChange-only iterations, to fix #51639.
+ if states == nil {
+ states = make(map[string]bool)
+ }
+ h := f.rewriteHash()
+ if _, ok := states[h]; ok {
+ // We've found a cycle.
+ // To diagnose it, set debug to 2 and start again,
+ // so that we'll print all rules applied until we complete another cycle.
+ // If debug is already >= 2, we've already done that, so it's time to crash.
+ if debug < 2 {
+ debug = 2
+ states = make(map[string]bool)
+ } else {
+ f.Fatalf("rewrite cycle detected")
+ }
+ }
+ states[h] = true
+ }
+ }
+ // remove clobbered values
+ for _, b := range f.Blocks {
+ j := 0
+ for i, v := range b.Values {
+ vl := v.Pos
+ if v.Op == OpInvalid {
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.set(vl, int32(b.ID))
+ }
+ f.freeValue(v)
+ continue
+ }
+ if v.Pos.IsStmt() != src.PosNotStmt && !notStmtBoundary(v.Op) && pendingLines.get(vl) == int32(b.ID) {
+ pendingLines.remove(vl)
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ if i != j {
+ b.Values[j] = v
+ }
+ j++
+ }
+ if pendingLines.get(b.Pos) == int32(b.ID) {
+ b.Pos = b.Pos.WithIsStmt()
+ pendingLines.remove(b.Pos)
+ }
+ b.truncateValues(j)
+ }
+}
+
+// Common functions called from rewriting rules
+
+func is64BitFloat(t *types.Type) bool {
+ return t.Size() == 8 && t.IsFloat()
+}
+
+func is32BitFloat(t *types.Type) bool {
+ return t.Size() == 4 && t.IsFloat()
+}
+
+func is64BitInt(t *types.Type) bool {
+ return t.Size() == 8 && t.IsInteger()
+}
+
+func is32BitInt(t *types.Type) bool {
+ return t.Size() == 4 && t.IsInteger()
+}
+
+func is16BitInt(t *types.Type) bool {
+ return t.Size() == 2 && t.IsInteger()
+}
+
+func is8BitInt(t *types.Type) bool {
+ return t.Size() == 1 && t.IsInteger()
+}
+
+func isPtr(t *types.Type) bool {
+ return t.IsPtrShaped()
+}
+
+// mergeSym merges two symbolic offsets. There is no real merging of
+// offsets, we just pick the non-nil one.
+func mergeSym(x, y Sym) Sym {
+ if x == nil {
+ return y
+ }
+ if y == nil {
+ return x
+ }
+ panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
+}
+
+func canMergeSym(x, y Sym) bool {
+ return x == nil || y == nil
+}
+
+// canMergeLoadClobber reports whether the load can be merged into target without
+// invalidating the schedule.
+// It also checks that the other non-load argument x is something we
+// are ok with clobbering.
+func canMergeLoadClobber(target, load, x *Value) bool {
+ // The register containing x is going to get clobbered.
+ // Don't merge if we still need the value of x.
+ // We don't have liveness information here, but we can
+ // approximate x dying with:
+ // 1) target is x's only use.
+ // 2) target is not in a deeper loop than x.
+ if x.Uses != 1 {
+ return false
+ }
+ loopnest := x.Block.Func.loopnest()
+ loopnest.calculateDepths()
+ if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
+ return false
+ }
+ return canMergeLoad(target, load)
+}
+
+// canMergeLoad reports whether the load can be merged into target without
+// invalidating the schedule.
+func canMergeLoad(target, load *Value) bool {
+ if target.Block.ID != load.Block.ID {
+ // If the load is in a different block do not merge it.
+ return false
+ }
+
+ // We can't merge the load into the target if the load
+ // has more than one use.
+ if load.Uses != 1 {
+ return false
+ }
+
+ mem := load.MemoryArg()
+
+ // We need the load's memory arg to still be alive at target. That
+ // can't be the case if one of target's args depends on a memory
+ // state that is a successor of load's memory arg.
+ //
+ // For example, it would be invalid to merge load into target in
+ // the following situation because newmem has killed oldmem
+ // before target is reached:
+ // load = read ... oldmem
+ // newmem = write ... oldmem
+ // arg0 = read ... newmem
+ // target = add arg0 load
+ //
+ // If the argument comes from a different block then we can exclude
+ // it immediately because it must dominate load (which is in the
+ // same block as target).
+ var args []*Value
+ for _, a := range target.Args {
+ if a != load && a.Block.ID == target.Block.ID {
+ args = append(args, a)
+ }
+ }
+
+ // memPreds contains memory states known to be predecessors of load's
+ // memory state. It is lazily initialized.
+ var memPreds map[*Value]bool
+ for i := 0; len(args) > 0; i++ {
+ const limit = 100
+ if i >= limit {
+ // Give up if we have done a lot of iterations.
+ return false
+ }
+ v := args[len(args)-1]
+ args = args[:len(args)-1]
+ if target.Block.ID != v.Block.ID {
+ // Since target and load are in the same block
+ // we can stop searching when we leave the block.
+ continue
+ }
+ if v.Op == OpPhi {
+ // A Phi implies we have reached the top of the block.
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ continue
+ }
+ if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+ // We could handle this situation however it is likely
+ // to be very rare.
+ return false
+ }
+ if v.Op.SymEffect()&SymAddr != 0 {
+ // This case prevents an operation that calculates the
+ // address of a local variable from being forced to schedule
+ // before its corresponding VarDef.
+ // See issue 28445.
+ // v1 = LOAD ...
+ // v2 = VARDEF
+ // v3 = LEAQ
+ // v4 = CMPQ v1 v3
+ // We don't want to combine the CMPQ with the load, because
+ // that would force the CMPQ to schedule before the VARDEF, which
+ // in turn requires the LEAQ to schedule before the VARDEF.
+ return false
+ }
+ if v.Type.IsMemory() {
+ if memPreds == nil {
+ // Initialise a map containing memory states
+ // known to be predecessors of load's memory
+ // state.
+ memPreds = make(map[*Value]bool)
+ m := mem
+ const limit = 50
+ for i := 0; i < limit; i++ {
+ if m.Op == OpPhi {
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ break
+ }
+ if m.Block.ID != target.Block.ID {
+ break
+ }
+ if !m.Type.IsMemory() {
+ break
+ }
+ memPreds[m] = true
+ if len(m.Args) == 0 {
+ break
+ }
+ m = m.MemoryArg()
+ }
+ }
+
+ // We can merge if v is a predecessor of mem.
+ //
+ // For example, we can merge load into target in the
+ // following scenario:
+ // x = read ... v
+ // mem = write ... v
+ // load = read ... mem
+ // target = add x load
+ if memPreds[v] {
+ continue
+ }
+ return false
+ }
+ if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
+ // If v takes mem as an input then we know mem
+ // is valid at this point.
+ continue
+ }
+ for _, a := range v.Args {
+ if target.Block.ID == a.Block.ID {
+ args = append(args, a)
+ }
+ }
+ }
+
+ return true
+}
+
+// isSameCall reports whether sym is the same as the given named symbol.
+func isSameCall(sym interface{}, name string) bool {
+ fn := sym.(*AuxCall).Fn
+ return fn != nil && fn.String() == name
+}
+
+// canLoadUnaligned reports if the architecture supports unaligned load operations.
+func canLoadUnaligned(c *Config) bool {
+ return c.ctxt.Arch.Alignment == 1
+}
+
+// nlzX returns the number of leading zeros.
+func nlz64(x int64) int { return bits.LeadingZeros64(uint64(x)) }
+func nlz32(x int32) int { return bits.LeadingZeros32(uint32(x)) }
+func nlz16(x int16) int { return bits.LeadingZeros16(uint16(x)) }
+func nlz8(x int8) int { return bits.LeadingZeros8(uint8(x)) }
+
+// ntzX returns the number of trailing zeros.
+func ntz64(x int64) int { return bits.TrailingZeros64(uint64(x)) }
+func ntz32(x int32) int { return bits.TrailingZeros32(uint32(x)) }
+func ntz16(x int16) int { return bits.TrailingZeros16(uint16(x)) }
+func ntz8(x int8) int { return bits.TrailingZeros8(uint8(x)) }
+
+func oneBit(x int64) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit8(x int8) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit16(x int16) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit32(x int32) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit64(x int64) bool { return x&(x-1) == 0 && x != 0 }
+
+// nto returns the number of trailing ones.
+func nto(x int64) int64 {
+ return int64(ntz64(^x))
+}
+
+// logX returns logarithm of n base 2.
+// n must be a positive power of 2 (isPowerOfTwoX returns true).
+func log8(n int8) int64 {
+ return int64(bits.Len8(uint8(n))) - 1
+}
+func log16(n int16) int64 {
+ return int64(bits.Len16(uint16(n))) - 1
+}
+func log32(n int32) int64 {
+ return int64(bits.Len32(uint32(n))) - 1
+}
+func log64(n int64) int64 {
+ return int64(bits.Len64(uint64(n))) - 1
+}
+
+// log2uint32 returns logarithm in base 2 of uint32(n), with log2(0) = -1.
+// Rounds down.
+func log2uint32(n int64) int64 {
+ return int64(bits.Len32(uint32(n))) - 1
+}
+
+// isPowerOfTwoX functions report whether n is a power of 2.
+func isPowerOfTwo8(n int8) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo16(n int16) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo32(n int32) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo64(n int64) bool {
+ return n > 0 && n&(n-1) == 0
+}
+
+// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
+func isUint64PowerOfTwo(in int64) bool {
+ n := uint64(in)
+ return n > 0 && n&(n-1) == 0
+}
+
+// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
+func isUint32PowerOfTwo(in int64) bool {
+ n := uint64(uint32(in))
+ return n > 0 && n&(n-1) == 0
+}
+
+// is32Bit reports whether n can be represented as a signed 32 bit integer.
+func is32Bit(n int64) bool {
+ return n == int64(int32(n))
+}
+
+// is16Bit reports whether n can be represented as a signed 16 bit integer.
+func is16Bit(n int64) bool {
+ return n == int64(int16(n))
+}
+
+// is8Bit reports whether n can be represented as a signed 8 bit integer.
+func is8Bit(n int64) bool {
+ return n == int64(int8(n))
+}
+
+// isU8Bit reports whether n can be represented as an unsigned 8 bit integer.
+func isU8Bit(n int64) bool {
+ return n == int64(uint8(n))
+}
+
+// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
+func isU12Bit(n int64) bool {
+ return 0 <= n && n < (1<<12)
+}
+
+// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
+func isU16Bit(n int64) bool {
+ return n == int64(uint16(n))
+}
+
+// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
+func isU32Bit(n int64) bool {
+ return n == int64(uint32(n))
+}
+
+// is20Bit reports whether n can be represented as a signed 20 bit integer.
+func is20Bit(n int64) bool {
+ return -(1<<19) <= n && n < (1<<19)
+}
+
+// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
+func b2i(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// b2i32 translates a boolean value to 0 or 1.
+func b2i32(b bool) int32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
+// A shift is bounded if it is shifting by less than the width of the shifted value.
+func shiftIsBounded(v *Value) bool {
+ return v.AuxInt != 0
+}
+
+// canonLessThan returns whether x is "ordered" less than y, for purposes of normalizing
+// generated code as much as possible.
+func canonLessThan(x, y *Value) bool {
+ if x.Op != y.Op {
+ return x.Op < y.Op
+ }
+ if !x.Pos.SameFileAndLine(y.Pos) {
+ return x.Pos.Before(y.Pos)
+ }
+ return x.ID < y.ID
+}
+
+// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
+// of the mantissa. It will panic if the truncation results in lost information.
+func truncate64Fto32F(f float64) float32 {
+ if !isExactFloat32(f) {
+ panic("truncate64Fto32F: truncation is not exact")
+ }
+ if !math.IsNaN(f) {
+ return float32(f)
+ }
+ // NaN bit patterns aren't necessarily preserved across conversion
+ // instructions so we need to do the conversion manually.
+ b := math.Float64bits(f)
+ m := b & ((1 << 52) - 1) // mantissa (a.k.a. significand)
+ // | sign | exponent | mantissa |
+ r := uint32(((b >> 32) & (1 << 31)) | 0x7f800000 | (m >> (52 - 23)))
+ return math.Float32frombits(r)
+}
+
+// extend32Fto64F converts a float32 value to a float64 value preserving the bit
+// pattern of the mantissa.
+func extend32Fto64F(f float32) float64 {
+ if !math.IsNaN(float64(f)) {
+ return float64(f)
+ }
+ // NaN bit patterns aren't necessarily preserved across conversion
+ // instructions so we need to do the conversion manually.
+ b := uint64(math.Float32bits(f))
+ // | sign | exponent | mantissa |
+ r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23))
+ return math.Float64frombits(r)
+}
+
+// DivisionNeedsFixUp reports whether the division needs fix-up code.
+func DivisionNeedsFixUp(v *Value) bool {
+ return v.AuxInt == 0
+}
+
+// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
+func auxFrom64F(f float64) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
+ return int64(math.Float64bits(f))
+}
+
+// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
+func auxFrom32F(f float32) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
+ return int64(math.Float64bits(extend32Fto64F(f)))
+}
+
+// auxTo32F decodes a float32 from the AuxInt value provided.
+func auxTo32F(i int64) float32 {
+ return truncate64Fto32F(math.Float64frombits(uint64(i)))
+}
+
+// auxTo64F decodes a float64 from the AuxInt value provided.
+func auxTo64F(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+
+func auxIntToBool(i int64) bool {
+ if i == 0 {
+ return false
+ }
+ return true
+}
+func auxIntToInt8(i int64) int8 {
+ return int8(i)
+}
+func auxIntToInt16(i int64) int16 {
+ return int16(i)
+}
+func auxIntToInt32(i int64) int32 {
+ return int32(i)
+}
+func auxIntToInt64(i int64) int64 {
+ return i
+}
+func auxIntToUint8(i int64) uint8 {
+ return uint8(i)
+}
+func auxIntToFloat32(i int64) float32 {
+ return float32(math.Float64frombits(uint64(i)))
+}
+func auxIntToFloat64(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+func auxIntToValAndOff(i int64) ValAndOff {
+ return ValAndOff(i)
+}
+func auxIntToArm64BitField(i int64) arm64BitField {
+ return arm64BitField(i)
+}
+func auxIntToInt128(x int64) int128 {
+ if x != 0 {
+ panic("nonzero int128 not allowed")
+ }
+ return 0
+}
+func auxIntToFlagConstant(x int64) flagConstant {
+ return flagConstant(x)
+}
+
+func auxIntToOp(cc int64) Op {
+ return Op(cc)
+}
+
+func boolToAuxInt(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+func int8ToAuxInt(i int8) int64 {
+ return int64(i)
+}
+func int16ToAuxInt(i int16) int64 {
+ return int64(i)
+}
+func int32ToAuxInt(i int32) int64 {
+ return int64(i)
+}
+func int64ToAuxInt(i int64) int64 {
+ return int64(i)
+}
+func uint8ToAuxInt(i uint8) int64 {
+ return int64(int8(i))
+}
+func float32ToAuxInt(f float32) int64 {
+ return int64(math.Float64bits(float64(f)))
+}
+func float64ToAuxInt(f float64) int64 {
+ return int64(math.Float64bits(f))
+}
+func valAndOffToAuxInt(v ValAndOff) int64 {
+ return int64(v)
+}
+func arm64BitFieldToAuxInt(v arm64BitField) int64 {
+ return int64(v)
+}
+func int128ToAuxInt(x int128) int64 {
+ if x != 0 {
+ panic("nonzero int128 not allowed")
+ }
+ return 0
+}
+func flagConstantToAuxInt(x flagConstant) int64 {
+ return int64(x)
+}
+
+func opToAuxInt(o Op) int64 {
+ return int64(o)
+}
+
+// Aux is an interface to hold miscellaneous data in Blocks and Values.
+type Aux interface {
+ CanBeAnSSAAux()
+}
+
+// for now only used to mark moves that need to avoid clobbering flags
+type auxMark bool
+
+func (auxMark) CanBeAnSSAAux() {}
+
+var AuxMark auxMark
+
+// stringAux wraps string values for use in Aux.
+type stringAux string
+
+func (stringAux) CanBeAnSSAAux() {}
+
+func auxToString(i Aux) string {
+ return string(i.(stringAux))
+}
+func auxToSym(i Aux) Sym {
+ // TODO: kind of a hack - allows nil interface through
+ s, _ := i.(Sym)
+ return s
+}
+func auxToType(i Aux) *types.Type {
+ return i.(*types.Type)
+}
+func auxToCall(i Aux) *AuxCall {
+ return i.(*AuxCall)
+}
+func auxToS390xCCMask(i Aux) s390x.CCMask {
+ return i.(s390x.CCMask)
+}
+func auxToS390xRotateParams(i Aux) s390x.RotateParams {
+ return i.(s390x.RotateParams)
+}
+
+func StringToAux(s string) Aux {
+ return stringAux(s)
+}
+func symToAux(s Sym) Aux {
+ return s
+}
+func callToAux(s *AuxCall) Aux {
+ return s
+}
+func typeToAux(t *types.Type) Aux {
+ return t
+}
+func s390xCCMaskToAux(c s390x.CCMask) Aux {
+ return c
+}
+func s390xRotateParamsToAux(r s390x.RotateParams) Aux {
+ return r
+}
+
+// uaddOvf reports whether unsigned a+b would overflow.
+func uaddOvf(a, b int64) bool {
+ return uint64(a)+uint64(b) < uint64(a)
+}
+
+// loadLSymOffset simulates reading a word at an offset into a
+// read-only symbol's runtime memory. If it would read a pointer to
+// another symbol, that symbol is returned. Otherwise, it returns nil.
+func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
+ if lsym.Type != objabi.SRODATA {
+ return nil
+ }
+
+ for _, r := range lsym.R {
+ if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
+ return r.Sym
+ }
+ }
+
+ return nil
+}
+
+func devirtLECall(v *Value, sym *obj.LSym) *Value {
+ v.Op = OpStaticLECall
+ auxcall := v.Aux.(*AuxCall)
+ auxcall.Fn = sym
+ // Remove first arg
+ v.Args[0].Uses--
+ copy(v.Args[0:], v.Args[1:])
+ v.Args[len(v.Args)-1] = nil // aid GC
+ v.Args = v.Args[:len(v.Args)-1]
+ if f := v.Block.Func; f.pass.debug > 0 {
+ f.Warnl(v.Pos, "de-virtualizing call")
+ }
+ return v
+}
+
+// isSamePtr reports whether p1 and p2 point to the same address.
+func isSamePtr(p1, p2 *Value) bool {
+ if p1 == p2 {
+ return true
+ }
+ if p1.Op != p2.Op {
+ return false
+ }
+ switch p1.Op {
+ case OpOffPtr:
+ return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
+ case OpAddr, OpLocalAddr:
+ return p1.Aux == p2.Aux
+ case OpAddPtr:
+ return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
+ }
+ return false
+}
+
+func isStackPtr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr {
+ v = v.Args[0]
+ }
+ return v.Op == OpSP || v.Op == OpLocalAddr
+}
+
+// disjoint reports whether the memory region specified by [p1:p1+n1)
+// does not overlap with [p2:p2+n2).
+// A return value of false does not imply the regions overlap.
+func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
+ if n1 == 0 || n2 == 0 {
+ return true
+ }
+ if p1 == p2 {
+ return false
+ }
+ baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
+ base, offset = ptr, 0
+ for base.Op == OpOffPtr {
+ offset += base.AuxInt
+ base = base.Args[0]
+ }
+ if opcodeTable[base.Op].nilCheck {
+ base = base.Args[0]
+ }
+ return base, offset
+ }
+ p1, off1 := baseAndOffset(p1)
+ p2, off2 := baseAndOffset(p2)
+ if isSamePtr(p1, p2) {
+ return !overlap(off1, n1, off2, n2)
+ }
+ // p1 and p2 are not the same, so if they are both OpAddrs then
+ // they point to different variables.
+ // If one pointer is on the stack and the other is an argument
+ // then they can't overlap.
+ switch p1.Op {
+ case OpAddr, OpLocalAddr:
+ if p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpSP {
+ return true
+ }
+ return (p2.Op == OpArg || p2.Op == OpArgIntReg) && p1.Args[0].Op == OpSP
+ case OpArg, OpArgIntReg:
+ if p2.Op == OpSP || p2.Op == OpLocalAddr {
+ return true
+ }
+ case OpSP:
+ return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpArgIntReg || p2.Op == OpSP
+ }
+ return false
+}
+
+// moveSize returns the number of bytes an aligned MOV instruction moves.
+func moveSize(align int64, c *Config) int64 {
+ switch {
+ case align%8 == 0 && c.PtrSize == 8:
+ return 8
+ case align%4 == 0:
+ return 4
+ case align%2 == 0:
+ return 2
+ }
+ return 1
+}
+
+// mergePoint finds a block among a's blocks which dominates b and is itself
+// dominated by all of a's blocks. Returns nil if it can't find one.
+// Might return nil even if one does exist.
+func mergePoint(b *Block, a ...*Value) *Block {
+ // Walk backward from b looking for one of the a's blocks.
+
+ // Max distance
+ d := 100
+
+ for d > 0 {
+ for _, x := range a {
+ if b == x.Block {
+ goto found
+ }
+ }
+ if len(b.Preds) > 1 {
+ // Don't know which way to go back. Abort.
+ return nil
+ }
+ b = b.Preds[0].b
+ d--
+ }
+ return nil // too far away
+found:
+ // At this point, r is the first value in a that we find by walking backwards.
+ // if we return anything, r will be it.
+ r := b
+
+ // Keep going, counting the other a's that we find. They must all dominate r.
+ na := 0
+ for d > 0 {
+ for _, x := range a {
+ if b == x.Block {
+ na++
+ }
+ }
+ if na == len(a) {
+ // Found all of a in a backwards walk. We can return r.
+ return r
+ }
+ if len(b.Preds) > 1 {
+ return nil
+ }
+ b = b.Preds[0].b
+ d--
+
+ }
+ return nil // too far away
+}
+
+// clobber invalidates values. Returns true.
+// clobber is used by rewrite rules to:
+//
+// A) make sure the values are really dead and never used again.
+// B) decrement use counts of the values' args.
+func clobber(vv ...*Value) bool {
+ for _, v := range vv {
+ v.reset(OpInvalid)
+ // Note: leave v.Block intact. The Block field is used after clobber.
+ }
+ return true
+}
+
+// clobberIfDead resets v when use count is 1. Returns true.
+// clobberIfDead is used by rewrite rules to decrement
+// use counts of v's args when v is dead and never used.
+func clobberIfDead(v *Value) bool {
+ if v.Uses == 1 {
+ v.reset(OpInvalid)
+ }
+ // Note: leave v.Block intact. The Block field is used after clobberIfDead.
+ return true
+}
+
+// noteRule is an easy way to track if a rule is matched when writing
+// new ones. Make the rule of interest also conditional on
+//
+// noteRule("note to self: rule of interest matched")
+//
+// and that message will print when the rule matches.
+func noteRule(s string) bool {
+ fmt.Println(s)
+ return true
+}
+
+// countRule increments Func.ruleMatches[key].
+// If Func.ruleMatches is non-nil at the end
+// of compilation, it will be printed to stdout.
+// This is intended to make it easier to find which functions
+// which contain lots of rules matches when developing new rules.
+func countRule(v *Value, key string) bool {
+ f := v.Block.Func
+ if f.ruleMatches == nil {
+ f.ruleMatches = make(map[string]int)
+ }
+ f.ruleMatches[key]++
+ return true
+}
+
+// warnRule generates compiler debug output with string s when
+// v is not in autogenerated code, cond is true and the rule has fired.
+func warnRule(cond bool, v *Value, s string) bool {
+ if pos := v.Pos; pos.Line() > 1 && cond {
+ v.Block.Func.Warnl(pos, s)
+ }
+ return true
+}
+
+// for a pseudo-op like (LessThan x), extract x.
+func flagArg(v *Value) *Value {
+ if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
+ return nil
+ }
+ return v.Args[0]
+}
+
+// arm64Negate finds the complement to an ARM64 condition code,
+// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
+//
+// For floating point, it's more subtle because NaN is unordered. We do
+// !LessThanF -> NotLessThanF, the latter takes care of NaNs.
+func arm64Negate(op Op) Op {
+ switch op {
+ case OpARM64LessThan:
+ return OpARM64GreaterEqual
+ case OpARM64LessThanU:
+ return OpARM64GreaterEqualU
+ case OpARM64GreaterThan:
+ return OpARM64LessEqual
+ case OpARM64GreaterThanU:
+ return OpARM64LessEqualU
+ case OpARM64LessEqual:
+ return OpARM64GreaterThan
+ case OpARM64LessEqualU:
+ return OpARM64GreaterThanU
+ case OpARM64GreaterEqual:
+ return OpARM64LessThan
+ case OpARM64GreaterEqualU:
+ return OpARM64LessThanU
+ case OpARM64Equal:
+ return OpARM64NotEqual
+ case OpARM64NotEqual:
+ return OpARM64Equal
+ case OpARM64LessThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessThanF:
+ return OpARM64LessThanF
+ case OpARM64LessEqualF:
+ return OpARM64NotLessEqualF
+ case OpARM64NotLessEqualF:
+ return OpARM64LessEqualF
+ case OpARM64GreaterThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64GreaterThanF
+ case OpARM64GreaterEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64GreaterEqualF
+ default:
+ panic("unreachable")
+ }
+}
+
+// arm64Invert evaluates (InvertFlags op), which
+// is the same as altering the condition codes such
+// that the same result would be produced if the arguments
+// to the flag-generating instruction were reversed, e.g.
+// (InvertFlags (CMP x y)) -> (CMP y x)
+func arm64Invert(op Op) Op {
+ switch op {
+ case OpARM64LessThan:
+ return OpARM64GreaterThan
+ case OpARM64LessThanU:
+ return OpARM64GreaterThanU
+ case OpARM64GreaterThan:
+ return OpARM64LessThan
+ case OpARM64GreaterThanU:
+ return OpARM64LessThanU
+ case OpARM64LessEqual:
+ return OpARM64GreaterEqual
+ case OpARM64LessEqualU:
+ return OpARM64GreaterEqualU
+ case OpARM64GreaterEqual:
+ return OpARM64LessEqual
+ case OpARM64GreaterEqualU:
+ return OpARM64LessEqualU
+ case OpARM64Equal, OpARM64NotEqual:
+ return op
+ case OpARM64LessThanF:
+ return OpARM64GreaterThanF
+ case OpARM64GreaterThanF:
+ return OpARM64LessThanF
+ case OpARM64LessEqualF:
+ return OpARM64GreaterEqualF
+ case OpARM64GreaterEqualF:
+ return OpARM64LessEqualF
+ case OpARM64NotLessThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64NotLessEqualF
+ default:
+ panic("unreachable")
+ }
+}
+
+// evaluate an ARM64 op against a flags value
+// that is potentially constant; return 1 for true,
+// -1 for false, and 0 for not constant.
+func ccARM64Eval(op Op, flags *Value) int {
+ fop := flags.Op
+ if fop == OpARM64InvertFlags {
+ return -ccARM64Eval(op, flags.Args[0])
+ }
+ if fop != OpARM64FlagConstant {
+ return 0
+ }
+ fc := flagConstant(flags.AuxInt)
+ b2i := func(b bool) int {
+ if b {
+ return 1
+ }
+ return -1
+ }
+ switch op {
+ case OpARM64Equal:
+ return b2i(fc.eq())
+ case OpARM64NotEqual:
+ return b2i(fc.ne())
+ case OpARM64LessThan:
+ return b2i(fc.lt())
+ case OpARM64LessThanU:
+ return b2i(fc.ult())
+ case OpARM64GreaterThan:
+ return b2i(fc.gt())
+ case OpARM64GreaterThanU:
+ return b2i(fc.ugt())
+ case OpARM64LessEqual:
+ return b2i(fc.le())
+ case OpARM64LessEqualU:
+ return b2i(fc.ule())
+ case OpARM64GreaterEqual:
+ return b2i(fc.ge())
+ case OpARM64GreaterEqualU:
+ return b2i(fc.uge())
+ }
+ return 0
+}
+
+// logRule logs the use of the rule s. This will only be enabled if
+// rewrite rules were generated with the -log option, see _gen/rulegen.go.
+func logRule(s string) {
+ if ruleFile == nil {
+ // Open a log file to write log to. We open in append
+ // mode because all.bash runs the compiler lots of times,
+ // and we want the concatenation of all of those logs.
+ // This means, of course, that users need to rm the old log
+ // to get fresh data.
+ // TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
+ w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
+ os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ panic(err)
+ }
+ ruleFile = w
+ }
+ _, err := fmt.Fprintln(ruleFile, s)
+ if err != nil {
+ panic(err)
+ }
+}
+
+var ruleFile io.Writer
+
+func min(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+func max(x, y int64) int64 {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+func isConstZero(v *Value) bool {
+ switch v.Op {
+ case OpConstNil:
+ return true
+ case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
+ return v.AuxInt == 0
+ }
+ return false
+}
+
+// reciprocalExact64 reports whether 1/c is exactly representable.
+func reciprocalExact64(c float64) bool {
+ b := math.Float64bits(c)
+ man := b & (1<<52 - 1)
+ if man != 0 {
+ return false // not a power of 2, denormal, or NaN
+ }
+ exp := b >> 52 & (1<<11 - 1)
+ // exponent bias is 0x3ff. So taking the reciprocal of a number
+ // changes the exponent to 0x7fe-exp.
+ switch exp {
+ case 0:
+ return false // ±0
+ case 0x7ff:
+ return false // ±inf
+ case 0x7fe:
+ return false // exponent is not representable
+ default:
+ return true
+ }
+}
+
+// reciprocalExact32 reports whether 1/c is exactly representable.
+func reciprocalExact32(c float32) bool {
+ b := math.Float32bits(c)
+ man := b & (1<<23 - 1)
+ if man != 0 {
+ return false // not a power of 2, denormal, or NaN
+ }
+ exp := b >> 23 & (1<<8 - 1)
+ // exponent bias is 0x7f. So taking the reciprocal of a number
+ // changes the exponent to 0xfe-exp.
+ switch exp {
+ case 0:
+ return false // ±0
+ case 0xff:
+ return false // ±inf
+ case 0xfe:
+ return false // exponent is not representable
+ default:
+ return true
+ }
+}
+
+// check if an immediate can be directly encoded into an ARM's instruction.
+func isARMImmRot(v uint32) bool {
+ for i := 0; i < 16; i++ {
+ if v&^0xff == 0 {
+ return true
+ }
+ v = v<<2 | v>>30
+ }
+
+ return false
+}
+
+// overlap reports whether the ranges given by the given offset and
+// size pairs overlap.
+func overlap(offset1, size1, offset2, size2 int64) bool {
+ if offset1 >= offset2 && offset2+size2 > offset1 {
+ return true
+ }
+ if offset2 >= offset1 && offset1+size1 > offset2 {
+ return true
+ }
+ return false
+}
+
+func areAdjacentOffsets(off1, off2, size int64) bool {
+ return off1+size == off2 || off1 == off2+size
+}
+
+// check if value zeroes out upper 32-bit of 64-bit register.
+// depth limits recursion depth. In AMD64.rules 3 is used as limit,
+// because it catches same amount of cases as 4.
+func zeroUpper32Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1,
+ OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1,
+ OpAMD64MOVLloadidx4, OpAMD64ADDLload, OpAMD64SUBLload, OpAMD64ANDLload,
+ OpAMD64ORLload, OpAMD64XORLload, OpAMD64CVTTSD2SL,
+ OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst,
+ OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst,
+ OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL,
+ OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
+ OpAMD64SHLL, OpAMD64SHLLconst:
+ return true
+ case OpARM64REV16W, OpARM64REVW, OpARM64RBITW, OpARM64CLZW, OpARM64EXTRWconst,
+ OpARM64MULW, OpARM64MNEGW, OpARM64UDIVW, OpARM64DIVW, OpARM64UMODW,
+ OpARM64MADDW, OpARM64MSUBW, OpARM64RORW, OpARM64RORWconst:
+ return true
+ case OpArg:
+ return x.Type.Size() == 4
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each-other as an arguments, instead of tracking visited values,
+ // just limit recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper32Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits.
+func zeroUpper48Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2:
+ return true
+ case OpArg:
+ return x.Type.Size() == 2
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each-other as an arguments, instead of tracking visited values,
+ // just limit recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper48Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits.
+func zeroUpper56Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1:
+ return true
+ case OpArg:
+ return x.Type.Size() == 1
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each-other as an arguments, instead of tracking visited values,
+ // just limit recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper56Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+func isInlinableMemclr(c *Config, sz int64) bool {
+ if sz < 0 {
+ return false
+ }
+ // TODO: expand this check to allow other architectures
+ // see CL 454255 and issue 56997
+ switch c.arch {
+ case "amd64", "arm64":
+ return true
+ case "ppc64le", "ppc64":
+ return sz < 512
+ }
+ return false
+}
+
+// isInlinableMemmove reports whether the given arch performs a Move of the given size
+// faster than memmove. It will only return true if replacing the memmove with a Move is
+// safe, either because Move will do all of its loads before any of its stores, or
+// because the arguments are known to be disjoint.
+// This is used as a check for replacing memmove with Move ops.
+func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
+ // It is always safe to convert memmove into Move when its arguments are disjoint.
+ // Move ops may or may not be faster for large sizes depending on how the platform
+ // lowers them, so we only perform this optimization on platforms that we know to
+ // have fast Move ops.
+ switch c.arch {
+ case "amd64":
+ return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
+ case "386", "arm64":
+ return sz <= 8
+ case "s390x", "ppc64", "ppc64le":
+ return sz <= 8 || disjoint(dst, sz, src, sz)
+ case "arm", "loong64", "mips", "mips64", "mipsle", "mips64le":
+ return sz <= 4
+ }
+ return false
+}
+func IsInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
+ return isInlinableMemmove(dst, src, sz, c)
+}
+
+// logLargeCopy logs the occurrence of a large copy.
+// The best place to do this is in the rewrite rules where the size of the move is easy to find.
+// "Large" is arbitrarily chosen to be 128 bytes; this may change.
+func logLargeCopy(v *Value, s int64) bool {
+ if s < 128 {
+ return true
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "copy", "lower", v.Block.Func.Name, fmt.Sprintf("%d bytes", s))
+ }
+ return true
+}
+func LogLargeCopy(funcName string, pos src.XPos, s int64) {
+ if s < 128 {
+ return
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(pos, "copy", "lower", funcName, fmt.Sprintf("%d bytes", s))
+ }
+}
+
+// hasSmallRotate reports whether the architecture has rotate instructions
+// for sizes < 32-bit. This is used to decide whether to promote some rotations.
+func hasSmallRotate(c *Config) bool {
+ switch c.arch {
+ case "amd64", "386":
+ return true
+ default:
+ return false
+ }
+}
+
+func supportsPPC64PCRel() bool {
+ // PCRel is currently supported for >= power10, linux only
+ // Internal and external linking supports this on ppc64le; internal linking on ppc64.
+ return buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
+}
+
+func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 {
+ if sh < 0 || sh >= sz {
+ panic("PPC64 shift arg sh out of range")
+ }
+ if mb < 0 || mb >= sz {
+ panic("PPC64 shift arg mb out of range")
+ }
+ if me < 0 || me >= sz {
+ panic("PPC64 shift arg me out of range")
+ }
+ return int32(sh<<16 | mb<<8 | me)
+}
+
+func GetPPC64Shiftsh(auxint int64) int64 {
+ return int64(int8(auxint >> 16))
+}
+
+func GetPPC64Shiftmb(auxint int64) int64 {
+ return int64(int8(auxint >> 8))
+}
+
+func GetPPC64Shiftme(auxint int64) int64 {
+ return int64(int8(auxint))
+}
+
+// Test if this value can encoded as a mask for a rlwinm like
+// operation. Masks can also extend from the msb and wrap to
+// the lsb too. That is, the valid masks are 32 bit strings
+// of the form: 0..01..10..0 or 1..10..01..1 or 1...1
+func isPPC64WordRotateMask(v64 int64) bool {
+ // Isolate rightmost 1 (if none 0) and add.
+ v := uint32(v64)
+ vp := (v & -v) + v
+ // Likewise, for the wrapping case.
+ vn := ^v
+ vpn := (vn & -vn) + vn
+ return (v&vp == 0 || vn&vpn == 0) && v != 0
+}
+
+// Compress mask and shift into single value of the form
+// me | mb<<8 | rotate<<16 | nbits<<24 where me and mb can
+// be used to regenerate the input mask.
+func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
+ var mb, me, mbn, men int
+
+ // Determine boundaries and then decode them
+ if mask == 0 || ^mask == 0 || rotate >= nbits {
+ panic(fmt.Sprintf("invalid PPC64 rotate mask: %x %d %d", uint64(mask), rotate, nbits))
+ } else if nbits == 32 {
+ mb = bits.LeadingZeros32(uint32(mask))
+ me = 32 - bits.TrailingZeros32(uint32(mask))
+ mbn = bits.LeadingZeros32(^uint32(mask))
+ men = 32 - bits.TrailingZeros32(^uint32(mask))
+ } else {
+ mb = bits.LeadingZeros64(uint64(mask))
+ me = 64 - bits.TrailingZeros64(uint64(mask))
+ mbn = bits.LeadingZeros64(^uint64(mask))
+ men = 64 - bits.TrailingZeros64(^uint64(mask))
+ }
+ // Check for a wrapping mask (e.g bits at 0 and 63)
+ if mb == 0 && me == int(nbits) {
+ // swap the inverted values
+ mb, me = men, mbn
+ }
+
+ return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
+}
+
+// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x)
+// SRDconst on PPC64 is an extended mnemonic of RLDICL. If the input to an
+// RLDICL is an SRDconst, and the RLDICL does not rotate its value, the two
+// operations can be combined. This functions assumes the two opcodes can
+// be merged, and returns an encoded rotate+mask value of the combined RLDICL.
+func mergePPC64RLDICLandSRDconst(encoded, s int64) int64 {
+ mb := s
+ r := 64 - s
+ // A larger mb is a smaller mask.
+ if (encoded>>8)&0xFF < mb {
+ encoded = (encoded &^ 0xFF00) | mb<<8
+ }
+ // The rotate is expected to be 0.
+ if (encoded & 0xFF0000) != 0 {
+ panic("non-zero rotate")
+ }
+ return encoded | r<<16
+}
+
+// DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask. The values returned as
+// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
+func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
+ auxint := uint64(sauxint)
+ rotate = int64((auxint >> 16) & 0xFF)
+ mb = int64((auxint >> 8) & 0xFF)
+ me = int64((auxint >> 0) & 0xFF)
+ nbits := int64((auxint >> 24) & 0xFF)
+ mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1)
+ if mb > me {
+ mask = ^mask
+ }
+ if nbits == 32 {
+ mask = uint64(uint32(mask))
+ }
+
+ // Fixup ME to match ISA definition. The second argument to MASK(..,me)
+ // is inclusive.
+ me = (me - 1) & (nbits - 1)
+ return
+}
+
+// This verifies that the mask is a set of
+// consecutive bits including the least
+// significant bit.
+func isPPC64ValidShiftMask(v int64) bool {
+ if (v != 0) && ((v+1)&v) == 0 {
+ return true
+ }
+ return false
+}
+
+func getPPC64ShiftMaskLength(v int64) int64 {
+ return int64(bits.Len64(uint64(v)))
+}
+
+// Decompose a shift right into an equivalent rotate/mask,
+// and return mask & m.
+func mergePPC64RShiftMask(m, s, nbits int64) int64 {
+ smask := uint64((1<<uint(nbits))-1) >> uint(s)
+ return m & int64(smask)
+}
+
+// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0
+func mergePPC64AndSrwi(m, s int64) int64 {
+ mask := mergePPC64RShiftMask(m, s, 32)
+ if !isPPC64WordRotateMask(mask) {
+ return 0
+ }
+ return encodePPC64RotateMask((32-s)&31, mask, 32)
+}
+
+// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM.
+// Return the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
+ mask_1 := uint64(0xFFFFFFFF >> uint(srw))
+ // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // Rewrite mask to apply after the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
+
+ r_1 := 32 - srw
+ r_2 := GetPPC64Shiftsh(sld)
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
+ return 0
+ }
+ return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
+}
+
+// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
+// the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
+ r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw)
+ // for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // combine the masks, and adjust for the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(int64(sld)))
+ r_2 := GetPPC64Shiftsh(int64(sld))
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_3)) || uint64(uint32(mask_3)) != mask_3 {
+ return 0
+ }
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
+}
+
+// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
+// or return 0 if they cannot be combined.
+func mergePPC64SldiSrw(sld, srw int64) int64 {
+ if sld > srw || srw >= 32 {
+ return 0
+ }
+ mask_r := uint32(0xFFFFFFFF) >> uint(srw)
+ mask_l := uint32(0xFFFFFFFF) >> uint(sld)
+ mask := (mask_r & mask_l) << uint(sld)
+ return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
+}
+
+// Convert a PPC64 opcode from the Op to OpCC form. This converts (op x y)
+// to (Select0 (opCC x y)) without having to explicitly fixup every user
+// of op.
+//
+// E.g consider the case:
+// a = (ADD x y)
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// A rule like (CMPconst [0] (ADD x y)) => (CMPconst [0] (Select0 (ADDCC x y)))
+// would produce:
+// a = (ADD x y)
+// a' = (ADDCC x y)
+// a” = (Select0 a')
+// b = (CMPconst [0] a”)
+// c = (OR a z)
+//
+// which makes it impossible to rewrite the second user. Instead the result
+// of this conversion is:
+// a' = (ADDCC x y)
+// a = (Select0 a')
+// b = (CMPconst [0] a)
+// c = (OR a z)
+//
+// Which makes it trivial to rewrite b using a lowering rule.
+func convertPPC64OpToOpCC(op *Value) *Value {
+ ccOpMap := map[Op]Op{
+ OpPPC64ADD: OpPPC64ADDCC,
+ OpPPC64ADDconst: OpPPC64ADDCCconst,
+ OpPPC64AND: OpPPC64ANDCC,
+ OpPPC64ANDN: OpPPC64ANDNCC,
+ OpPPC64CNTLZD: OpPPC64CNTLZDCC,
+ OpPPC64OR: OpPPC64ORCC,
+ OpPPC64SUB: OpPPC64SUBCC,
+ OpPPC64NEG: OpPPC64NEGCC,
+ OpPPC64NOR: OpPPC64NORCC,
+ OpPPC64XOR: OpPPC64XORCC,
+ }
+ b := op.Block
+ opCC := b.NewValue0I(op.Pos, ccOpMap[op.Op], types.NewTuple(op.Type, types.TypeFlags), op.AuxInt)
+ opCC.AddArgs(op.Args...)
+ op.reset(OpSelect0)
+ op.AddArgs(opCC)
+ return op
+}
+
+// Convenience function to rotate a 32 bit constant value by another constant.
+func rotateLeft32(v, rotate int64) int64 {
+ return int64(bits.RotateLeft32(uint32(v), int(rotate)))
+}
+
+func rotateRight64(v, rotate int64) int64 {
+ return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
+}
+
+// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
+func armBFAuxInt(lsb, width int64) arm64BitField {
+ if lsb < 0 || lsb > 63 {
+ panic("ARM(64) bit field lsb constant out of range")
+ }
+ if width < 1 || lsb+width > 64 {
+ panic("ARM(64) bit field width constant out of range")
+ }
+ return arm64BitField(width | lsb<<8)
+}
+
+// returns the lsb part of the auxInt field of arm64 bitfield ops.
+func (bfc arm64BitField) getARM64BFlsb() int64 {
+ return int64(uint64(bfc) >> 8)
+}
+
+// returns the width part of the auxInt field of arm64 bitfield ops.
+func (bfc arm64BitField) getARM64BFwidth() int64 {
+ return int64(bfc) & 0xff
+}
+
+// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
+func isARM64BFMask(lsb, mask, rshift int64) bool {
+ shiftedMask := int64(uint64(mask) >> uint64(rshift))
+ return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64
+}
+
+// returns the bitfield width of mask >> rshift for arm64 bitfield ops.
+func arm64BFWidth(mask, rshift int64) int64 {
+ shiftedMask := int64(uint64(mask) >> uint64(rshift))
+ if shiftedMask == 0 {
+ panic("ARM64 BF mask is zero")
+ }
+ return nto(shiftedMask)
+}
+
+// sizeof returns the size of t in bytes.
+// It will panic if t is not a *types.Type.
+func sizeof(t interface{}) int64 {
+ return t.(*types.Type).Size()
+}
+
+// registerizable reports whether t is a primitive type that fits in
+// a register. It assumes float64 values will always fit into registers
+// even if that isn't strictly true.
+func registerizable(b *Block, typ *types.Type) bool {
+ if typ.IsPtrShaped() || typ.IsFloat() || typ.IsBoolean() {
+ return true
+ }
+ if typ.IsInteger() {
+ return typ.Size() <= b.Func.Config.RegSize
+ }
+ return false
+}
+
+// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed.
+func needRaceCleanup(sym *AuxCall, v *Value) bool {
+ f := v.Block.Func
+ if !f.Config.Race {
+ return false
+ }
+ if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncexit") {
+ return false
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStaticCall, OpStaticLECall:
+ // Check for racefuncenter will encounter racefuncexit and vice versa.
+ // Allow calls to panic*
+ s := v.Aux.(*AuxCall).Fn.String()
+ switch s {
+ case "runtime.racefuncenter", "runtime.racefuncexit",
+ "runtime.panicdivide", "runtime.panicwrap",
+ "runtime.panicshift":
+ continue
+ }
+ // If we encountered any call, we need to keep racefunc*,
+ // for accurate stacktraces.
+ return false
+ case OpPanicBounds, OpPanicExtend:
+ // Note: these are panic generators that are ok (like the static calls above).
+ case OpClosureCall, OpInterCall, OpClosureLECall, OpInterLECall:
+ // We must keep the race functions if there are any other call types.
+ return false
+ }
+ }
+ }
+ if isSameCall(sym, "runtime.racefuncenter") {
+ // TODO REGISTER ABI this needs to be cleaned up.
+ // If we're removing racefuncenter, remove its argument as well.
+ if v.Args[0].Op != OpStore {
+ if v.Op == OpStaticLECall {
+ // there is no store, yet.
+ return true
+ }
+ return false
+ }
+ mem := v.Args[0].Args[2]
+ v.Args[0].reset(OpCopy)
+ v.Args[0].AddArg(mem)
+ }
+ return true
+}
+
+// symIsRO reports whether sym is a read-only global.
+func symIsRO(sym interface{}) bool {
+ lsym := sym.(*obj.LSym)
+ return lsym.Type == objabi.SRODATA && len(lsym.R) == 0
+}
+
+// symIsROZero reports whether sym is a read-only global whose data contains all zeros.
+func symIsROZero(sym Sym) bool {
+ lsym := sym.(*obj.LSym)
+ if lsym.Type != objabi.SRODATA || len(lsym.R) != 0 {
+ return false
+ }
+ for _, b := range lsym.P {
+ if b != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// isFixed32 returns true if the int32 at offset off in symbol sym
+// is known and constant.
+func isFixed32(c *Config, sym Sym, off int64) bool {
+ return isFixed(c, sym, off, 4)
+}
+
+// isFixed returns true if the range [off,off+size] of the symbol sym
+// is known and constant.
+func isFixed(c *Config, sym Sym, off, size int64) bool {
+ lsym := sym.(*obj.LSym)
+ if lsym.Extra == nil {
+ return false
+ }
+ if _, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
+ if off == 2*c.PtrSize && size == 4 {
+ return true // type hash field
+ }
+ }
+ return false
+}
+func fixed32(c *Config, sym Sym, off int64) int32 {
+ lsym := sym.(*obj.LSym)
+ if ti, ok := (*lsym.Extra).(*obj.TypeInfo); ok {
+ if off == 2*c.PtrSize {
+ return int32(types.TypeHash(ti.Type.(*types.Type)))
+ }
+ }
+ base.Fatalf("fixed32 data not known for %s:%d", sym, off)
+ return 0
+}
+
+// isFixedSym returns true if the contents of sym at the given offset
+// is known and is the constant address of another symbol.
+func isFixedSym(sym Sym, off int64) bool {
+ lsym := sym.(*obj.LSym)
+ switch {
+ case lsym.Type == objabi.SRODATA:
+ // itabs, dictionaries
+ default:
+ return false
+ }
+ for _, r := range lsym.R {
+ if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off && r.Add == 0 {
+ return true
+ }
+ }
+ return false
+}
+func fixedSym(f *Func, sym Sym, off int64) Sym {
+ lsym := sym.(*obj.LSym)
+ for _, r := range lsym.R {
+ if (r.Type == objabi.R_ADDR || r.Type == objabi.R_WEAKADDR) && int64(r.Off) == off {
+ if strings.HasPrefix(r.Sym.Name, "type:") {
+ // In case we're loading a type out of a dictionary, we need to record
+ // that the containing function might put that type in an interface.
+ // That information is currently recorded in relocations in the dictionary,
+ // but if we perform this load at compile time then the dictionary
+ // might be dead.
+ reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
+ } else if strings.HasPrefix(r.Sym.Name, "go:itab") {
+ // Same, but if we're using an itab we need to record that the
+ // itab._type might be put in an interface.
+ reflectdata.MarkTypeSymUsedInInterface(r.Sym, f.fe.Func().Linksym())
+ }
+ return r.Sym
+ }
+ }
+ base.Fatalf("fixedSym data not known for %s:%d", sym, off)
+ return nil
+}
+
+// read8 reads one byte from the read-only global sym at offset off.
+func read8(sym interface{}, off int64) uint8 {
+ lsym := sym.(*obj.LSym)
+ if off >= int64(len(lsym.P)) || off < 0 {
+ // Invalid index into the global sym.
+ // This can happen in dead code, so we don't want to panic.
+ // Just return any value, it will eventually get ignored.
+ // See issue 29215.
+ return 0
+ }
+ return lsym.P[off]
+}
+
+// read16 reads two bytes from the read-only global sym at offset off.
+func read16(sym interface{}, off int64, byteorder binary.ByteOrder) uint16 {
+ lsym := sym.(*obj.LSym)
+ // lsym.P is written lazily.
+ // Bytes requested after the end of lsym.P are 0.
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 2)
+ copy(buf, src)
+ return byteorder.Uint16(buf)
+}
+
+// read32 reads four bytes from the read-only global sym at offset off.
+func read32(sym interface{}, off int64, byteorder binary.ByteOrder) uint32 {
+ lsym := sym.(*obj.LSym)
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 4)
+ copy(buf, src)
+ return byteorder.Uint32(buf)
+}
+
+// read64 reads eight bytes from the read-only global sym at offset off.
+func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 {
+ lsym := sym.(*obj.LSym)
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 8)
+ copy(buf, src)
+ return byteorder.Uint64(buf)
+}
+
+// sequentialAddresses reports true if it can prove that x + n == y
+func sequentialAddresses(x, y *Value, n int64) bool {
+ if x == y && n == 0 {
+ return true
+ }
+ if x.Op == Op386ADDL && y.Op == Op386LEAL1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == Op386LEAL1 && y.Op == Op386LEAL1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == OpAMD64ADDQ && y.Op == OpAMD64LEAQ1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == OpAMD64LEAQ1 && y.Op == OpAMD64LEAQ1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ return false
+}
+
+// flagConstant represents the result of a compile-time comparison.
+// The sense of these flags does not necessarily represent the hardware's notion
+// of a flags register - these are just a compile-time construct.
+// We happen to match the semantics to those of arm/arm64.
+// Note that these semantics differ from x86: the carry flag has the opposite
+// sense on a subtraction!
+//
+// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
+// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
+// (because it does x + ^y + C).
+//
+// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
+type flagConstant uint8
+
+// N reports whether the result of an operation is negative (high bit set).
+func (fc flagConstant) N() bool {
+ return fc&1 != 0
+}
+
+// Z reports whether the result of an operation is 0.
+func (fc flagConstant) Z() bool {
+ return fc&2 != 0
+}
+
+// C reports whether an unsigned add overflowed (carry), or an
+// unsigned subtract did not underflow (borrow).
+func (fc flagConstant) C() bool {
+ return fc&4 != 0
+}
+
+// V reports whether a signed operation overflowed or underflowed.
+func (fc flagConstant) V() bool {
+ return fc&8 != 0
+}
+
+func (fc flagConstant) eq() bool {
+ return fc.Z()
+}
+func (fc flagConstant) ne() bool {
+ return !fc.Z()
+}
+func (fc flagConstant) lt() bool {
+ return fc.N() != fc.V()
+}
+func (fc flagConstant) le() bool {
+ return fc.Z() || fc.lt()
+}
+func (fc flagConstant) gt() bool {
+ return !fc.Z() && fc.ge()
+}
+func (fc flagConstant) ge() bool {
+ return fc.N() == fc.V()
+}
+func (fc flagConstant) ult() bool {
+ return !fc.C()
+}
+func (fc flagConstant) ule() bool {
+ return fc.Z() || fc.ult()
+}
+func (fc flagConstant) ugt() bool {
+ return !fc.Z() && fc.uge()
+}
+func (fc flagConstant) uge() bool {
+ return fc.C()
+}
+
+func (fc flagConstant) ltNoov() bool {
+ return fc.lt() && !fc.V()
+}
+func (fc flagConstant) leNoov() bool {
+ return fc.le() && !fc.V()
+}
+func (fc flagConstant) gtNoov() bool {
+ return fc.gt() && !fc.V()
+}
+func (fc flagConstant) geNoov() bool {
+ return fc.ge() && !fc.V()
+}
+
+func (fc flagConstant) String() string {
+ return fmt.Sprintf("N=%v,Z=%v,C=%v,V=%v", fc.N(), fc.Z(), fc.C(), fc.V())
+}
+
+type flagConstantBuilder struct {
+ N bool
+ Z bool
+ C bool
+ V bool
+}
+
+func (fcs flagConstantBuilder) encode() flagConstant {
+ var fc flagConstant
+ if fcs.N {
+ fc |= 1
+ }
+ if fcs.Z {
+ fc |= 2
+ }
+ if fcs.C {
+ fc |= 4
+ }
+ if fcs.V {
+ fc |= 8
+ }
+ return fc
+}
+
+// Note: addFlags(x,y) != subFlags(x,-y) in some situations:
+// - the results of the C flag are different
+// - the results of the V flag when y==minint are different
+
+// addFlags64 returns the flags that would be set from computing x+y.
+func addFlags64(x, y int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x+y == 0
+ fcb.N = x+y < 0
+ fcb.C = uint64(x+y) < uint64(x)
+ fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
+ return fcb.encode()
+}
+
+// subFlags64 returns the flags that would be set from computing x-y.
+func subFlags64(x, y int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x-y == 0
+ fcb.N = x-y < 0
+ fcb.C = uint64(y) <= uint64(x) // This code follows the arm carry flag model.
+ fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
+ return fcb.encode()
+}
+
+// addFlags32 returns the flags that would be set from computing x+y.
+func addFlags32(x, y int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x+y == 0
+ fcb.N = x+y < 0
+ fcb.C = uint32(x+y) < uint32(x)
+ fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
+ return fcb.encode()
+}
+
+// subFlags32 returns the flags that would be set from computing x-y.
+func subFlags32(x, y int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x-y == 0
+ fcb.N = x-y < 0
+ fcb.C = uint32(y) <= uint32(x) // This code follows the arm carry flag model.
+ fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
+ return fcb.encode()
+}
+
+// logicFlags64 returns flags set to the sign/zeroness of x.
+// C and V are set to false.
+func logicFlags64(x int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x == 0
+ fcb.N = x < 0
+ return fcb.encode()
+}
+
+// logicFlags32 returns flags set to the sign/zeroness of x.
+// C and V are set to false.
+func logicFlags32(x int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x == 0
+ fcb.N = x < 0
+ return fcb.encode()
+}
+
+func makeJumpTableSym(b *Block) *obj.LSym {
+ s := base.Ctxt.Lookup(fmt.Sprintf("%s.jump%d", b.Func.fe.Func().LSym.Name, b.ID))
+ // The jump table symbol is accessed only from the function symbol.
+ s.Set(obj.AttrStatic, true)
+ return s
+}
+
+// canRotate reports whether the architecture supports
+// rotates of integer registers with the given number of bits.
+func canRotate(c *Config, bits int64) bool {
+ if bits > c.PtrSize*8 {
+ // Don't rewrite to rotates bigger than the machine word.
+ return false
+ }
+ switch c.arch {
+ case "386", "amd64", "arm64":
+ return true
+ case "arm", "s390x", "ppc64", "ppc64le", "wasm", "loong64":
+ return bits >= 32
+ default:
+ return false
+ }
+}
+
+// isARM64bitcon reports whether a constant can be encoded into a logical instruction.
+func isARM64bitcon(x uint64) bool {
+ if x == 1<<64-1 || x == 0 {
+ return false
+ }
+ // determine the period and sign-extend a unit to 64 bits
+ switch {
+ case x != x>>32|x<<32:
+ // period is 64
+ // nothing to do
+ case x != x>>16|x<<48:
+ // period is 32
+ x = uint64(int64(int32(x)))
+ case x != x>>8|x<<56:
+ // period is 16
+ x = uint64(int64(int16(x)))
+ case x != x>>4|x<<60:
+ // period is 8
+ x = uint64(int64(int8(x)))
+ default:
+ // period is 4 or 2, always true
+ // 0001, 0010, 0100, 1000 -- 0001 rotate
+ // 0011, 0110, 1100, 1001 -- 0011 rotate
+ // 0111, 1011, 1101, 1110 -- 0111 rotate
+ // 0101, 1010 -- 01 rotate, repeat
+ return true
+ }
+ return sequenceOfOnes(x) || sequenceOfOnes(^x)
+}
+
+// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros.
+func sequenceOfOnes(x uint64) bool {
+ y := x & -x // lowest set bit of x. x is good iff x+y is a power of 2
+ y += x
+ return (y-1)&y == 0
+}
+
+// isARM64addcon reports whether x can be encoded as the immediate value in an ADD or SUB instruction.
+func isARM64addcon(v int64) bool {
+ /* uimm12 or uimm24? */
+ if v < 0 {
+ return false
+ }
+ if (v & 0xFFF) == 0 {
+ v >>= 12
+ }
+ return v <= 0xFFF
+}
+
+// setPos sets the position of v to pos, then returns true.
+// Useful for setting the result of a rewrite's position to
+// something other than the default.
+func setPos(v *Value, pos src.XPos) bool {
+ v.Pos = pos
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
new file mode 100644
index 0000000..b051267
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -0,0 +1,11602 @@
+// Code generated from _gen/386.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValue386(v *Value) bool {
+ switch v.Op {
+ case Op386ADCL:
+ return rewriteValue386_Op386ADCL(v)
+ case Op386ADDL:
+ return rewriteValue386_Op386ADDL(v)
+ case Op386ADDLcarry:
+ return rewriteValue386_Op386ADDLcarry(v)
+ case Op386ADDLconst:
+ return rewriteValue386_Op386ADDLconst(v)
+ case Op386ADDLconstmodify:
+ return rewriteValue386_Op386ADDLconstmodify(v)
+ case Op386ADDLload:
+ return rewriteValue386_Op386ADDLload(v)
+ case Op386ADDLmodify:
+ return rewriteValue386_Op386ADDLmodify(v)
+ case Op386ADDSD:
+ return rewriteValue386_Op386ADDSD(v)
+ case Op386ADDSDload:
+ return rewriteValue386_Op386ADDSDload(v)
+ case Op386ADDSS:
+ return rewriteValue386_Op386ADDSS(v)
+ case Op386ADDSSload:
+ return rewriteValue386_Op386ADDSSload(v)
+ case Op386ANDL:
+ return rewriteValue386_Op386ANDL(v)
+ case Op386ANDLconst:
+ return rewriteValue386_Op386ANDLconst(v)
+ case Op386ANDLconstmodify:
+ return rewriteValue386_Op386ANDLconstmodify(v)
+ case Op386ANDLload:
+ return rewriteValue386_Op386ANDLload(v)
+ case Op386ANDLmodify:
+ return rewriteValue386_Op386ANDLmodify(v)
+ case Op386CMPB:
+ return rewriteValue386_Op386CMPB(v)
+ case Op386CMPBconst:
+ return rewriteValue386_Op386CMPBconst(v)
+ case Op386CMPBload:
+ return rewriteValue386_Op386CMPBload(v)
+ case Op386CMPL:
+ return rewriteValue386_Op386CMPL(v)
+ case Op386CMPLconst:
+ return rewriteValue386_Op386CMPLconst(v)
+ case Op386CMPLload:
+ return rewriteValue386_Op386CMPLload(v)
+ case Op386CMPW:
+ return rewriteValue386_Op386CMPW(v)
+ case Op386CMPWconst:
+ return rewriteValue386_Op386CMPWconst(v)
+ case Op386CMPWload:
+ return rewriteValue386_Op386CMPWload(v)
+ case Op386DIVSD:
+ return rewriteValue386_Op386DIVSD(v)
+ case Op386DIVSDload:
+ return rewriteValue386_Op386DIVSDload(v)
+ case Op386DIVSS:
+ return rewriteValue386_Op386DIVSS(v)
+ case Op386DIVSSload:
+ return rewriteValue386_Op386DIVSSload(v)
+ case Op386LEAL:
+ return rewriteValue386_Op386LEAL(v)
+ case Op386LEAL1:
+ return rewriteValue386_Op386LEAL1(v)
+ case Op386LEAL2:
+ return rewriteValue386_Op386LEAL2(v)
+ case Op386LEAL4:
+ return rewriteValue386_Op386LEAL4(v)
+ case Op386LEAL8:
+ return rewriteValue386_Op386LEAL8(v)
+ case Op386MOVBLSX:
+ return rewriteValue386_Op386MOVBLSX(v)
+ case Op386MOVBLSXload:
+ return rewriteValue386_Op386MOVBLSXload(v)
+ case Op386MOVBLZX:
+ return rewriteValue386_Op386MOVBLZX(v)
+ case Op386MOVBload:
+ return rewriteValue386_Op386MOVBload(v)
+ case Op386MOVBstore:
+ return rewriteValue386_Op386MOVBstore(v)
+ case Op386MOVBstoreconst:
+ return rewriteValue386_Op386MOVBstoreconst(v)
+ case Op386MOVLload:
+ return rewriteValue386_Op386MOVLload(v)
+ case Op386MOVLstore:
+ return rewriteValue386_Op386MOVLstore(v)
+ case Op386MOVLstoreconst:
+ return rewriteValue386_Op386MOVLstoreconst(v)
+ case Op386MOVSDconst:
+ return rewriteValue386_Op386MOVSDconst(v)
+ case Op386MOVSDload:
+ return rewriteValue386_Op386MOVSDload(v)
+ case Op386MOVSDstore:
+ return rewriteValue386_Op386MOVSDstore(v)
+ case Op386MOVSSconst:
+ return rewriteValue386_Op386MOVSSconst(v)
+ case Op386MOVSSload:
+ return rewriteValue386_Op386MOVSSload(v)
+ case Op386MOVSSstore:
+ return rewriteValue386_Op386MOVSSstore(v)
+ case Op386MOVWLSX:
+ return rewriteValue386_Op386MOVWLSX(v)
+ case Op386MOVWLSXload:
+ return rewriteValue386_Op386MOVWLSXload(v)
+ case Op386MOVWLZX:
+ return rewriteValue386_Op386MOVWLZX(v)
+ case Op386MOVWload:
+ return rewriteValue386_Op386MOVWload(v)
+ case Op386MOVWstore:
+ return rewriteValue386_Op386MOVWstore(v)
+ case Op386MOVWstoreconst:
+ return rewriteValue386_Op386MOVWstoreconst(v)
+ case Op386MULL:
+ return rewriteValue386_Op386MULL(v)
+ case Op386MULLconst:
+ return rewriteValue386_Op386MULLconst(v)
+ case Op386MULLload:
+ return rewriteValue386_Op386MULLload(v)
+ case Op386MULSD:
+ return rewriteValue386_Op386MULSD(v)
+ case Op386MULSDload:
+ return rewriteValue386_Op386MULSDload(v)
+ case Op386MULSS:
+ return rewriteValue386_Op386MULSS(v)
+ case Op386MULSSload:
+ return rewriteValue386_Op386MULSSload(v)
+ case Op386NEGL:
+ return rewriteValue386_Op386NEGL(v)
+ case Op386NOTL:
+ return rewriteValue386_Op386NOTL(v)
+ case Op386ORL:
+ return rewriteValue386_Op386ORL(v)
+ case Op386ORLconst:
+ return rewriteValue386_Op386ORLconst(v)
+ case Op386ORLconstmodify:
+ return rewriteValue386_Op386ORLconstmodify(v)
+ case Op386ORLload:
+ return rewriteValue386_Op386ORLload(v)
+ case Op386ORLmodify:
+ return rewriteValue386_Op386ORLmodify(v)
+ case Op386ROLB:
+ return rewriteValue386_Op386ROLB(v)
+ case Op386ROLBconst:
+ return rewriteValue386_Op386ROLBconst(v)
+ case Op386ROLL:
+ return rewriteValue386_Op386ROLL(v)
+ case Op386ROLLconst:
+ return rewriteValue386_Op386ROLLconst(v)
+ case Op386ROLW:
+ return rewriteValue386_Op386ROLW(v)
+ case Op386ROLWconst:
+ return rewriteValue386_Op386ROLWconst(v)
+ case Op386SARB:
+ return rewriteValue386_Op386SARB(v)
+ case Op386SARBconst:
+ return rewriteValue386_Op386SARBconst(v)
+ case Op386SARL:
+ return rewriteValue386_Op386SARL(v)
+ case Op386SARLconst:
+ return rewriteValue386_Op386SARLconst(v)
+ case Op386SARW:
+ return rewriteValue386_Op386SARW(v)
+ case Op386SARWconst:
+ return rewriteValue386_Op386SARWconst(v)
+ case Op386SBBL:
+ return rewriteValue386_Op386SBBL(v)
+ case Op386SBBLcarrymask:
+ return rewriteValue386_Op386SBBLcarrymask(v)
+ case Op386SETA:
+ return rewriteValue386_Op386SETA(v)
+ case Op386SETAE:
+ return rewriteValue386_Op386SETAE(v)
+ case Op386SETB:
+ return rewriteValue386_Op386SETB(v)
+ case Op386SETBE:
+ return rewriteValue386_Op386SETBE(v)
+ case Op386SETEQ:
+ return rewriteValue386_Op386SETEQ(v)
+ case Op386SETG:
+ return rewriteValue386_Op386SETG(v)
+ case Op386SETGE:
+ return rewriteValue386_Op386SETGE(v)
+ case Op386SETL:
+ return rewriteValue386_Op386SETL(v)
+ case Op386SETLE:
+ return rewriteValue386_Op386SETLE(v)
+ case Op386SETNE:
+ return rewriteValue386_Op386SETNE(v)
+ case Op386SHLL:
+ return rewriteValue386_Op386SHLL(v)
+ case Op386SHLLconst:
+ return rewriteValue386_Op386SHLLconst(v)
+ case Op386SHRB:
+ return rewriteValue386_Op386SHRB(v)
+ case Op386SHRBconst:
+ return rewriteValue386_Op386SHRBconst(v)
+ case Op386SHRL:
+ return rewriteValue386_Op386SHRL(v)
+ case Op386SHRLconst:
+ return rewriteValue386_Op386SHRLconst(v)
+ case Op386SHRW:
+ return rewriteValue386_Op386SHRW(v)
+ case Op386SHRWconst:
+ return rewriteValue386_Op386SHRWconst(v)
+ case Op386SUBL:
+ return rewriteValue386_Op386SUBL(v)
+ case Op386SUBLcarry:
+ return rewriteValue386_Op386SUBLcarry(v)
+ case Op386SUBLconst:
+ return rewriteValue386_Op386SUBLconst(v)
+ case Op386SUBLload:
+ return rewriteValue386_Op386SUBLload(v)
+ case Op386SUBLmodify:
+ return rewriteValue386_Op386SUBLmodify(v)
+ case Op386SUBSD:
+ return rewriteValue386_Op386SUBSD(v)
+ case Op386SUBSDload:
+ return rewriteValue386_Op386SUBSDload(v)
+ case Op386SUBSS:
+ return rewriteValue386_Op386SUBSS(v)
+ case Op386SUBSSload:
+ return rewriteValue386_Op386SUBSSload(v)
+ case Op386XORL:
+ return rewriteValue386_Op386XORL(v)
+ case Op386XORLconst:
+ return rewriteValue386_Op386XORLconst(v)
+ case Op386XORLconstmodify:
+ return rewriteValue386_Op386XORLconstmodify(v)
+ case Op386XORLload:
+ return rewriteValue386_Op386XORLload(v)
+ case Op386XORLmodify:
+ return rewriteValue386_Op386XORLmodify(v)
+ case OpAdd16:
+ v.Op = Op386ADDL
+ return true
+ case OpAdd32:
+ v.Op = Op386ADDL
+ return true
+ case OpAdd32F:
+ v.Op = Op386ADDSS
+ return true
+ case OpAdd32carry:
+ v.Op = Op386ADDLcarry
+ return true
+ case OpAdd32withcarry:
+ v.Op = Op386ADCL
+ return true
+ case OpAdd64F:
+ v.Op = Op386ADDSD
+ return true
+ case OpAdd8:
+ v.Op = Op386ADDL
+ return true
+ case OpAddPtr:
+ v.Op = Op386ADDL
+ return true
+ case OpAddr:
+ return rewriteValue386_OpAddr(v)
+ case OpAnd16:
+ v.Op = Op386ANDL
+ return true
+ case OpAnd32:
+ v.Op = Op386ANDL
+ return true
+ case OpAnd8:
+ v.Op = Op386ANDL
+ return true
+ case OpAndB:
+ v.Op = Op386ANDL
+ return true
+ case OpAvg32u:
+ v.Op = Op386AVGLU
+ return true
+ case OpBswap16:
+ return rewriteValue386_OpBswap16(v)
+ case OpBswap32:
+ v.Op = Op386BSWAPL
+ return true
+ case OpClosureCall:
+ v.Op = Op386CALLclosure
+ return true
+ case OpCom16:
+ v.Op = Op386NOTL
+ return true
+ case OpCom32:
+ v.Op = Op386NOTL
+ return true
+ case OpCom8:
+ v.Op = Op386NOTL
+ return true
+ case OpConst16:
+ return rewriteValue386_OpConst16(v)
+ case OpConst32:
+ v.Op = Op386MOVLconst
+ return true
+ case OpConst32F:
+ v.Op = Op386MOVSSconst
+ return true
+ case OpConst64F:
+ v.Op = Op386MOVSDconst
+ return true
+ case OpConst8:
+ return rewriteValue386_OpConst8(v)
+ case OpConstBool:
+ return rewriteValue386_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValue386_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValue386_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = Op386BSFL
+ return true
+ case OpCtz32:
+ v.Op = Op386LoweredCtz32
+ return true
+ case OpCtz32NonZero:
+ v.Op = Op386BSFL
+ return true
+ case OpCtz8:
+ return rewriteValue386_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = Op386BSFL
+ return true
+ case OpCvt32Fto32:
+ v.Op = Op386CVTTSS2SL
+ return true
+ case OpCvt32Fto64F:
+ v.Op = Op386CVTSS2SD
+ return true
+ case OpCvt32to32F:
+ v.Op = Op386CVTSL2SS
+ return true
+ case OpCvt32to64F:
+ v.Op = Op386CVTSL2SD
+ return true
+ case OpCvt64Fto32:
+ v.Op = Op386CVTTSD2SL
+ return true
+ case OpCvt64Fto32F:
+ v.Op = Op386CVTSD2SS
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ v.Op = Op386DIVW
+ return true
+ case OpDiv16u:
+ v.Op = Op386DIVWU
+ return true
+ case OpDiv32:
+ v.Op = Op386DIVL
+ return true
+ case OpDiv32F:
+ v.Op = Op386DIVSS
+ return true
+ case OpDiv32u:
+ v.Op = Op386DIVLU
+ return true
+ case OpDiv64F:
+ v.Op = Op386DIVSD
+ return true
+ case OpDiv8:
+ return rewriteValue386_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValue386_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValue386_OpEq16(v)
+ case OpEq32:
+ return rewriteValue386_OpEq32(v)
+ case OpEq32F:
+ return rewriteValue386_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValue386_OpEq64F(v)
+ case OpEq8:
+ return rewriteValue386_OpEq8(v)
+ case OpEqB:
+ return rewriteValue386_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValue386_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = Op386LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = Op386LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = Op386LoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = Op386LoweredGetG
+ return true
+ case OpHmul32:
+ v.Op = Op386HMULL
+ return true
+ case OpHmul32u:
+ v.Op = Op386HMULLU
+ return true
+ case OpInterCall:
+ v.Op = Op386CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValue386_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValue386_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValue386_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValue386_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValue386_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValue386_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValue386_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValue386_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValue386_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValue386_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValue386_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValue386_OpLess16(v)
+ case OpLess16U:
+ return rewriteValue386_OpLess16U(v)
+ case OpLess32:
+ return rewriteValue386_OpLess32(v)
+ case OpLess32F:
+ return rewriteValue386_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValue386_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValue386_OpLess64F(v)
+ case OpLess8:
+ return rewriteValue386_OpLess8(v)
+ case OpLess8U:
+ return rewriteValue386_OpLess8U(v)
+ case OpLoad:
+ return rewriteValue386_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValue386_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValue386_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValue386_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValue386_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValue386_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValue386_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValue386_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValue386_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValue386_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValue386_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValue386_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValue386_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValue386_OpLsh8x8(v)
+ case OpMod16:
+ v.Op = Op386MODW
+ return true
+ case OpMod16u:
+ v.Op = Op386MODWU
+ return true
+ case OpMod32:
+ v.Op = Op386MODL
+ return true
+ case OpMod32u:
+ v.Op = Op386MODLU
+ return true
+ case OpMod8:
+ return rewriteValue386_OpMod8(v)
+ case OpMod8u:
+ return rewriteValue386_OpMod8u(v)
+ case OpMove:
+ return rewriteValue386_OpMove(v)
+ case OpMul16:
+ v.Op = Op386MULL
+ return true
+ case OpMul32:
+ v.Op = Op386MULL
+ return true
+ case OpMul32F:
+ v.Op = Op386MULSS
+ return true
+ case OpMul32uhilo:
+ v.Op = Op386MULLQU
+ return true
+ case OpMul64F:
+ v.Op = Op386MULSD
+ return true
+ case OpMul8:
+ v.Op = Op386MULL
+ return true
+ case OpNeg16:
+ v.Op = Op386NEGL
+ return true
+ case OpNeg32:
+ v.Op = Op386NEGL
+ return true
+ case OpNeg32F:
+ return rewriteValue386_OpNeg32F(v)
+ case OpNeg64F:
+ return rewriteValue386_OpNeg64F(v)
+ case OpNeg8:
+ v.Op = Op386NEGL
+ return true
+ case OpNeq16:
+ return rewriteValue386_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValue386_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValue386_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValue386_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValue386_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValue386_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValue386_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = Op386LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValue386_OpNot(v)
+ case OpOffPtr:
+ return rewriteValue386_OpOffPtr(v)
+ case OpOr16:
+ v.Op = Op386ORL
+ return true
+ case OpOr32:
+ v.Op = Op386ORL
+ return true
+ case OpOr8:
+ v.Op = Op386ORL
+ return true
+ case OpOrB:
+ v.Op = Op386ORL
+ return true
+ case OpPanicBounds:
+ return rewriteValue386_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValue386_OpPanicExtend(v)
+ case OpRotateLeft16:
+ v.Op = Op386ROLW
+ return true
+ case OpRotateLeft32:
+ v.Op = Op386ROLL
+ return true
+ case OpRotateLeft8:
+ v.Op = Op386ROLB
+ return true
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValue386_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValue386_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValue386_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValue386_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValue386_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValue386_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValue386_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValue386_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValue386_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValue386_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValue386_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValue386_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValue386_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValue386_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValue386_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValue386_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValue386_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValue386_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValue386_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValue386_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValue386_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValue386_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValue386_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValue386_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValue386_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValue386_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = Op386MOVWLSX
+ return true
+ case OpSignExt8to16:
+ v.Op = Op386MOVBLSX
+ return true
+ case OpSignExt8to32:
+ v.Op = Op386MOVBLSX
+ return true
+ case OpSignmask:
+ return rewriteValue386_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValue386_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = Op386SQRTSD
+ return true
+ case OpSqrt32:
+ v.Op = Op386SQRTSS
+ return true
+ case OpStaticCall:
+ v.Op = Op386CALLstatic
+ return true
+ case OpStore:
+ return rewriteValue386_OpStore(v)
+ case OpSub16:
+ v.Op = Op386SUBL
+ return true
+ case OpSub32:
+ v.Op = Op386SUBL
+ return true
+ case OpSub32F:
+ v.Op = Op386SUBSS
+ return true
+ case OpSub32carry:
+ v.Op = Op386SUBLcarry
+ return true
+ case OpSub32withcarry:
+ v.Op = Op386SBBL
+ return true
+ case OpSub64F:
+ v.Op = Op386SUBSD
+ return true
+ case OpSub8:
+ v.Op = Op386SUBL
+ return true
+ case OpSubPtr:
+ v.Op = Op386SUBL
+ return true
+ case OpTailCall:
+ v.Op = Op386CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = Op386LoweredWB
+ return true
+ case OpXor16:
+ v.Op = Op386XORL
+ return true
+ case OpXor32:
+ v.Op = Op386XORL
+ return true
+ case OpXor8:
+ v.Op = Op386XORL
+ return true
+ case OpZero:
+ return rewriteValue386_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = Op386MOVWLZX
+ return true
+ case OpZeroExt8to16:
+ v.Op = Op386MOVBLZX
+ return true
+ case OpZeroExt8to32:
+ v.Op = Op386MOVBLZX
+ return true
+ case OpZeromask:
+ return rewriteValue386_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValue386_Op386ADCL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCL x (MOVLconst [c]) f)
+ // result: (ADCLconst [c] x f)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ f := v_2
+ v.reset(Op386ADCLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, f)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDL x (MOVLconst <t> [c]))
+ // cond: !t.IsPtr()
+ // result: (ADDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // result: (LEAL8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // result: (LEAL4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL y y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386ADDL {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL x y))
+ // result: (LEAL2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386ADDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(Op386LEAL2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // result: (LEAL1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386ADDLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (NEGL y))
+ // result: (SUBL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386NEGL {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SUBL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLcarry x (MOVLconst [c]))
+ // result: (ADDLconstcarry [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ADDLconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDLconst [c] (ADDL x y))
+ // result: (LEAL1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] x:(SP))
+ // result: (LEAL [c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // result: (ADDLconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDL x (MOVLconst [c]))
+ // result: (ANDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: c==0
+ // result: (MOVLconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: c==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPB x (MOVLconst [c]))
+ // result: (CMPBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPB y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPBload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPBload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int8(m) && int8(m) < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTB x y)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPBconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPL x (MOVLconst [c]))
+ // result: (CMPLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPL y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPLload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386SHRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPLconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTLconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVLconst [c]))
+ // result: (CMPWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v0.AuxInt = int16ToAuxInt(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPWload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPWload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPWload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int16(m) && int16(m) < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTW x y)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386ADDLconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386LEAL {
+ continue
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAL1 [0] {nil} x y)
+ // result: (ADDL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || auxToSym(v.Aux) != nil {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(Op386ADDL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(int64(off1)+2*int64(off2))
+ // result: (LEAL4 [off1+2*off2] {sym} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 2*int64(off2))) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(int64(off1)+4*int64(off2))
+ // result: (LEAL8 [off1+4*off2] {sym} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 4*int64(off2))) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBLSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBLSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVBLSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBLSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBLZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVBLZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386SUBL {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSDconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSDconst2 (MOVSDconst1 [c]))
+ for {
+ c := auxIntToFloat64(v.AuxInt)
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSDconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32)
+ v0.AuxInt = float64ToAuxInt(c)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSSconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSSconst2 (MOVSSconst1 [c]))
+ for {
+ c := auxIntToFloat32(v.AuxInt)
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSSconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32)
+ v0.AuxInt = float32ToAuxInt(c)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWLSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVWLSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWLSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVWLZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULL x (MOVLconst [c]))
+ // result: (MULLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386MULLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // result: (MULLconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MULLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386MULLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [-9] x)
+ // result: (NEGL (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-5] x)
+ // result: (NEGL (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-3] x)
+ // result: (NEGL (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-1] x)
+ // result: (NEGL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [0] _)
+ // result: (MOVLconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (MULLconst [1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULLconst [3] x)
+ // result: (LEAL2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [5] x)
+ // result: (LEAL4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [7] x)
+ // result: (LEAL2 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [9] x)
+ // result: (LEAL8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [19] x)
+ // result: (LEAL2 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [27] x)
+ // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [37] x)
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [45] x)
+ // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [81] x)
+ // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c+1) && c >= 15) {
+ break
+ }
+ v.reset(Op386SUBL)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NEGL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGL (MOVLconst [c]))
+ // result: (MOVLconst [-c])
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NOTL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTL (MOVLconst [c]))
+ // result: (MOVLconst [^c])
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORL x (MOVLconst [c]))
+ // result: (ORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: c==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == -1) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLB x (MOVLconst [c]))
+ // result: (ROLBconst [int8(c&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLL x (MOVLconst [c]))
+ // result: (ROLLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLLconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLW x (MOVLconst [c]))
+ // result: (ROLWconst [int16(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLWconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARB x (MOVLconst [c]))
+ // result: (SARBconst [int8(min(int64(c&31),7))] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c&31), 7)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARBconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARL x (MOVLconst [c]))
+ // result: (SARLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ANDLconst [31] y))
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARW x (MOVLconst [c]))
+ // result: (SARWconst [int16(min(int64(c&31),15))] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(int16(min(int64(c&31), 15)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARWconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBL x (MOVLconst [c]) f)
+ // result: (SBBLconst [c] x f)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ f := v_2
+ v.reset(Op386SBBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, f)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBLcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBLcarrymask (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETA (InvertFlags x))
+ // result: (SETB x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETAE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETAE (InvertFlags x))
+ // result: (SETBE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETB(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETB (InvertFlags x))
+ // result: (SETA x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETBE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETBE (InvertFlags x))
+ // result: (SETAE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETEQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETEQ (InvertFlags x))
+ // result: (SETEQ x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETG (InvertFlags x))
+ // result: (SETL x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETGE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETGE (InvertFlags x))
+ // result: (SETLE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETL (InvertFlags x))
+ // result: (SETG x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETLE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETLE (InvertFlags x))
+ // result: (SETGE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETNE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETNE (InvertFlags x))
+ // result: (SETNE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHLL x (MOVLconst [c]))
+ // result: (SHLLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [31] y))
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRL x (MOVLconst [c]))
+ // result: (SHRLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SHRLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [31] y))
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int16(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = int16ToAuxInt(int16(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBL x (MOVLconst [c]))
+ // result: (SUBLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SUBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBL (MOVLconst [c]) x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386SUBLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBLload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBLcarry x (MOVLconst [c]))
+ // result: (SUBLconstcarry [c] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SUBLconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // result: (ADDLconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_Op386SUBLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORL x (MOVLconst [c]))
+ // result: (XORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LEAL {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpBswap16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Bswap16 x)
+ // result: (ROLWconst [8] x)
+ for {
+ x := v_0
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(8)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (MOVLconst [b2i32(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(b2i32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVLconst [0])
+ for {
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValue386_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (BSFL (ORLconst <typ.UInt32> [0x10000] x))
+ for {
+ x := v_0
+ v.reset(Op386BSFL)
+ v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0x10000)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (BSFL (ORLconst <typ.UInt32> [0x100] x))
+ for {
+ x := v_0
+ v.reset(Op386BSFL)
+ v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0x100)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq16 x y)
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq8 x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqB x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (SETB (CMPL idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil p)
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v_0
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags)
+ v0.AddArg2(p, p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (SETBE (CMPL idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 x y)
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U x y)
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 x y)
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U x y)
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 x y)
+ // result: (SETL (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U x y)
+ // result: (SETB (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (SETL (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (SETB (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 x y)
+ // result: (SETL (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U x y)
+ // result: (SETB (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (LEAL {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (LEAL {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386MODWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s%4 != 0
+ // result: (Move [s-s%4] (ADDLconst <dst.Type> dst [int32(s%4)]) (ADDLconst <src.Type> src [int32(s%4)]) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s%4 != 0) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%4)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, Op386ADDLconst, src.Type)
+ v1.AuxInt = int32ToAuxInt(int32(s % 4))
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [10*(128-s/4)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(Op386DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(10 * (128 - s/4))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)
+ // result: (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !((s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(Op386REPMOVSL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s / 4))
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32F x)
+ // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+ for {
+ x := v_0
+ v.reset(Op386PXOR)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
+ v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64F x)
+ // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+ for {
+ x := v_0
+ v.reset(Op386PXOR)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq16 x y)
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq8 x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqB x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORLconst [1] x)
+ for {
+ x := v_0
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr)
+ // result: (ADDLconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValue386_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHRWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SARWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SARWconst x [15])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(15)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SARLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SARLconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHRBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SARBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SARBconst x [7])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(7)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul32uover x y))
+ // result: (Select0 <typ.UInt32> (MULLU x y))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul32uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(Op386SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SARLconst x [31])
+ for {
+ x := v_0
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SARLconst (NEGL <t> x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, Op386NEGL, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (MOVSDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (MOVSSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVLstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVLstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
+ v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%4 != 0 && s > 4
+ // result: (Zero [s-s%4] (ADDLconst destptr [int32(s%4)]) (MOVLstoreconst [0] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%4 != 0 && s > 4) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%4)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(0)
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [12] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,12)] destptr (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 12))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFZERO)
+ v.AuxInt = int64ToAuxInt(1 * (128 - s/4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0
+ // result: (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !((s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0) {
+ break
+ }
+ v.reset(Op386REPSTOSL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s / 4))
+ v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg4(destptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zeromask <t> x)
+ // result: (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ v0 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v1 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlock386(b *Block) bool {
+ switch b.Kind {
+ case Block386EQ:
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386SETL {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386SETLE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386SETG {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386SETGE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386SETEQ {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386SETNE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386SETB {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386SETBE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386SETA {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386SETAE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (If (SETO cmp) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == Op386SETO {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386OS, cmp)
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386SETGF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386SETGEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == Op386SETEQF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQF, cmp)
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == Op386SETNEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NEF, cmp)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(Block386NE, v0)
+ return true
+ }
+ case Block386LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386NE:
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETL {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETLE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETG {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETEQ {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETNE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETB {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETBE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETA {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETAE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETO {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386OS, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETEQF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386EQF, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETNEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386NEF, cmp)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386UGE:
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386ULT:
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go
new file mode 100644
index 0000000..a8bd6aa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go
@@ -0,0 +1,159 @@
+// Code generated from _gen/386splitload.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+func rewriteValue386splitload(v *Value) bool {
+ switch v.Op {
+ case Op386CMPBconstload:
+ return rewriteValue386splitload_Op386CMPBconstload(v)
+ case Op386CMPBload:
+ return rewriteValue386splitload_Op386CMPBload(v)
+ case Op386CMPLconstload:
+ return rewriteValue386splitload_Op386CMPLconstload(v)
+ case Op386CMPLload:
+ return rewriteValue386splitload_Op386CMPLload(v)
+ case Op386CMPWconstload:
+ return rewriteValue386splitload_Op386CMPWconstload(v)
+ case Op386CMPWload:
+ return rewriteValue386splitload_Op386CMPWload(v)
+ }
+ return false
+}
+func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPB)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPW)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteBlock386splitload(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
new file mode 100644
index 0000000..5332512
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -0,0 +1,31785 @@
+// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "internal/buildcfg"
+import "math"
+import "cmd/internal/obj"
+import "cmd/compile/internal/types"
+
+func rewriteValueAMD64(v *Value) bool {
+ switch v.Op {
+ case OpAMD64ADCQ:
+ return rewriteValueAMD64_OpAMD64ADCQ(v)
+ case OpAMD64ADCQconst:
+ return rewriteValueAMD64_OpAMD64ADCQconst(v)
+ case OpAMD64ADDL:
+ return rewriteValueAMD64_OpAMD64ADDL(v)
+ case OpAMD64ADDLconst:
+ return rewriteValueAMD64_OpAMD64ADDLconst(v)
+ case OpAMD64ADDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
+ case OpAMD64ADDLload:
+ return rewriteValueAMD64_OpAMD64ADDLload(v)
+ case OpAMD64ADDLmodify:
+ return rewriteValueAMD64_OpAMD64ADDLmodify(v)
+ case OpAMD64ADDQ:
+ return rewriteValueAMD64_OpAMD64ADDQ(v)
+ case OpAMD64ADDQcarry:
+ return rewriteValueAMD64_OpAMD64ADDQcarry(v)
+ case OpAMD64ADDQconst:
+ return rewriteValueAMD64_OpAMD64ADDQconst(v)
+ case OpAMD64ADDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
+ case OpAMD64ADDQload:
+ return rewriteValueAMD64_OpAMD64ADDQload(v)
+ case OpAMD64ADDQmodify:
+ return rewriteValueAMD64_OpAMD64ADDQmodify(v)
+ case OpAMD64ADDSD:
+ return rewriteValueAMD64_OpAMD64ADDSD(v)
+ case OpAMD64ADDSDload:
+ return rewriteValueAMD64_OpAMD64ADDSDload(v)
+ case OpAMD64ADDSS:
+ return rewriteValueAMD64_OpAMD64ADDSS(v)
+ case OpAMD64ADDSSload:
+ return rewriteValueAMD64_OpAMD64ADDSSload(v)
+ case OpAMD64ANDL:
+ return rewriteValueAMD64_OpAMD64ANDL(v)
+ case OpAMD64ANDLconst:
+ return rewriteValueAMD64_OpAMD64ANDLconst(v)
+ case OpAMD64ANDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
+ case OpAMD64ANDLload:
+ return rewriteValueAMD64_OpAMD64ANDLload(v)
+ case OpAMD64ANDLmodify:
+ return rewriteValueAMD64_OpAMD64ANDLmodify(v)
+ case OpAMD64ANDNL:
+ return rewriteValueAMD64_OpAMD64ANDNL(v)
+ case OpAMD64ANDNQ:
+ return rewriteValueAMD64_OpAMD64ANDNQ(v)
+ case OpAMD64ANDQ:
+ return rewriteValueAMD64_OpAMD64ANDQ(v)
+ case OpAMD64ANDQconst:
+ return rewriteValueAMD64_OpAMD64ANDQconst(v)
+ case OpAMD64ANDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
+ case OpAMD64ANDQload:
+ return rewriteValueAMD64_OpAMD64ANDQload(v)
+ case OpAMD64ANDQmodify:
+ return rewriteValueAMD64_OpAMD64ANDQmodify(v)
+ case OpAMD64BSFQ:
+ return rewriteValueAMD64_OpAMD64BSFQ(v)
+ case OpAMD64BSWAPL:
+ return rewriteValueAMD64_OpAMD64BSWAPL(v)
+ case OpAMD64BSWAPQ:
+ return rewriteValueAMD64_OpAMD64BSWAPQ(v)
+ case OpAMD64BTCQconst:
+ return rewriteValueAMD64_OpAMD64BTCQconst(v)
+ case OpAMD64BTLconst:
+ return rewriteValueAMD64_OpAMD64BTLconst(v)
+ case OpAMD64BTQconst:
+ return rewriteValueAMD64_OpAMD64BTQconst(v)
+ case OpAMD64BTRQconst:
+ return rewriteValueAMD64_OpAMD64BTRQconst(v)
+ case OpAMD64BTSQconst:
+ return rewriteValueAMD64_OpAMD64BTSQconst(v)
+ case OpAMD64CMOVLCC:
+ return rewriteValueAMD64_OpAMD64CMOVLCC(v)
+ case OpAMD64CMOVLCS:
+ return rewriteValueAMD64_OpAMD64CMOVLCS(v)
+ case OpAMD64CMOVLEQ:
+ return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
+ case OpAMD64CMOVLGE:
+ return rewriteValueAMD64_OpAMD64CMOVLGE(v)
+ case OpAMD64CMOVLGT:
+ return rewriteValueAMD64_OpAMD64CMOVLGT(v)
+ case OpAMD64CMOVLHI:
+ return rewriteValueAMD64_OpAMD64CMOVLHI(v)
+ case OpAMD64CMOVLLE:
+ return rewriteValueAMD64_OpAMD64CMOVLLE(v)
+ case OpAMD64CMOVLLS:
+ return rewriteValueAMD64_OpAMD64CMOVLLS(v)
+ case OpAMD64CMOVLLT:
+ return rewriteValueAMD64_OpAMD64CMOVLLT(v)
+ case OpAMD64CMOVLNE:
+ return rewriteValueAMD64_OpAMD64CMOVLNE(v)
+ case OpAMD64CMOVQCC:
+ return rewriteValueAMD64_OpAMD64CMOVQCC(v)
+ case OpAMD64CMOVQCS:
+ return rewriteValueAMD64_OpAMD64CMOVQCS(v)
+ case OpAMD64CMOVQEQ:
+ return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
+ case OpAMD64CMOVQGE:
+ return rewriteValueAMD64_OpAMD64CMOVQGE(v)
+ case OpAMD64CMOVQGT:
+ return rewriteValueAMD64_OpAMD64CMOVQGT(v)
+ case OpAMD64CMOVQHI:
+ return rewriteValueAMD64_OpAMD64CMOVQHI(v)
+ case OpAMD64CMOVQLE:
+ return rewriteValueAMD64_OpAMD64CMOVQLE(v)
+ case OpAMD64CMOVQLS:
+ return rewriteValueAMD64_OpAMD64CMOVQLS(v)
+ case OpAMD64CMOVQLT:
+ return rewriteValueAMD64_OpAMD64CMOVQLT(v)
+ case OpAMD64CMOVQNE:
+ return rewriteValueAMD64_OpAMD64CMOVQNE(v)
+ case OpAMD64CMOVWCC:
+ return rewriteValueAMD64_OpAMD64CMOVWCC(v)
+ case OpAMD64CMOVWCS:
+ return rewriteValueAMD64_OpAMD64CMOVWCS(v)
+ case OpAMD64CMOVWEQ:
+ return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
+ case OpAMD64CMOVWGE:
+ return rewriteValueAMD64_OpAMD64CMOVWGE(v)
+ case OpAMD64CMOVWGT:
+ return rewriteValueAMD64_OpAMD64CMOVWGT(v)
+ case OpAMD64CMOVWHI:
+ return rewriteValueAMD64_OpAMD64CMOVWHI(v)
+ case OpAMD64CMOVWLE:
+ return rewriteValueAMD64_OpAMD64CMOVWLE(v)
+ case OpAMD64CMOVWLS:
+ return rewriteValueAMD64_OpAMD64CMOVWLS(v)
+ case OpAMD64CMOVWLT:
+ return rewriteValueAMD64_OpAMD64CMOVWLT(v)
+ case OpAMD64CMOVWNE:
+ return rewriteValueAMD64_OpAMD64CMOVWNE(v)
+ case OpAMD64CMPB:
+ return rewriteValueAMD64_OpAMD64CMPB(v)
+ case OpAMD64CMPBconst:
+ return rewriteValueAMD64_OpAMD64CMPBconst(v)
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64_OpAMD64CMPBconstload(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64_OpAMD64CMPBload(v)
+ case OpAMD64CMPL:
+ return rewriteValueAMD64_OpAMD64CMPL(v)
+ case OpAMD64CMPLconst:
+ return rewriteValueAMD64_OpAMD64CMPLconst(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64_OpAMD64CMPLconstload(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64_OpAMD64CMPLload(v)
+ case OpAMD64CMPQ:
+ return rewriteValueAMD64_OpAMD64CMPQ(v)
+ case OpAMD64CMPQconst:
+ return rewriteValueAMD64_OpAMD64CMPQconst(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64_OpAMD64CMPQconstload(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64_OpAMD64CMPQload(v)
+ case OpAMD64CMPW:
+ return rewriteValueAMD64_OpAMD64CMPW(v)
+ case OpAMD64CMPWconst:
+ return rewriteValueAMD64_OpAMD64CMPWconst(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64_OpAMD64CMPWconstload(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64_OpAMD64CMPWload(v)
+ case OpAMD64CMPXCHGLlock:
+ return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
+ case OpAMD64CMPXCHGQlock:
+ return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
+ case OpAMD64DIVSD:
+ return rewriteValueAMD64_OpAMD64DIVSD(v)
+ case OpAMD64DIVSDload:
+ return rewriteValueAMD64_OpAMD64DIVSDload(v)
+ case OpAMD64DIVSS:
+ return rewriteValueAMD64_OpAMD64DIVSS(v)
+ case OpAMD64DIVSSload:
+ return rewriteValueAMD64_OpAMD64DIVSSload(v)
+ case OpAMD64HMULL:
+ return rewriteValueAMD64_OpAMD64HMULL(v)
+ case OpAMD64HMULLU:
+ return rewriteValueAMD64_OpAMD64HMULLU(v)
+ case OpAMD64HMULQ:
+ return rewriteValueAMD64_OpAMD64HMULQ(v)
+ case OpAMD64HMULQU:
+ return rewriteValueAMD64_OpAMD64HMULQU(v)
+ case OpAMD64LEAL:
+ return rewriteValueAMD64_OpAMD64LEAL(v)
+ case OpAMD64LEAL1:
+ return rewriteValueAMD64_OpAMD64LEAL1(v)
+ case OpAMD64LEAL2:
+ return rewriteValueAMD64_OpAMD64LEAL2(v)
+ case OpAMD64LEAL4:
+ return rewriteValueAMD64_OpAMD64LEAL4(v)
+ case OpAMD64LEAL8:
+ return rewriteValueAMD64_OpAMD64LEAL8(v)
+ case OpAMD64LEAQ:
+ return rewriteValueAMD64_OpAMD64LEAQ(v)
+ case OpAMD64LEAQ1:
+ return rewriteValueAMD64_OpAMD64LEAQ1(v)
+ case OpAMD64LEAQ2:
+ return rewriteValueAMD64_OpAMD64LEAQ2(v)
+ case OpAMD64LEAQ4:
+ return rewriteValueAMD64_OpAMD64LEAQ4(v)
+ case OpAMD64LEAQ8:
+ return rewriteValueAMD64_OpAMD64LEAQ8(v)
+ case OpAMD64MOVBELstore:
+ return rewriteValueAMD64_OpAMD64MOVBELstore(v)
+ case OpAMD64MOVBEQstore:
+ return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
+ case OpAMD64MOVBEWstore:
+ return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
+ case OpAMD64MOVBQSX:
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v)
+ case OpAMD64MOVBQSXload:
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
+ case OpAMD64MOVBQZX:
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v)
+ case OpAMD64MOVBatomicload:
+ return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
+ case OpAMD64MOVBload:
+ return rewriteValueAMD64_OpAMD64MOVBload(v)
+ case OpAMD64MOVBstore:
+ return rewriteValueAMD64_OpAMD64MOVBstore(v)
+ case OpAMD64MOVBstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
+ case OpAMD64MOVLQSX:
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v)
+ case OpAMD64MOVLQSXload:
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
+ case OpAMD64MOVLQZX:
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v)
+ case OpAMD64MOVLatomicload:
+ return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
+ case OpAMD64MOVLf2i:
+ return rewriteValueAMD64_OpAMD64MOVLf2i(v)
+ case OpAMD64MOVLi2f:
+ return rewriteValueAMD64_OpAMD64MOVLi2f(v)
+ case OpAMD64MOVLload:
+ return rewriteValueAMD64_OpAMD64MOVLload(v)
+ case OpAMD64MOVLstore:
+ return rewriteValueAMD64_OpAMD64MOVLstore(v)
+ case OpAMD64MOVLstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
+ case OpAMD64MOVOload:
+ return rewriteValueAMD64_OpAMD64MOVOload(v)
+ case OpAMD64MOVOstore:
+ return rewriteValueAMD64_OpAMD64MOVOstore(v)
+ case OpAMD64MOVOstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
+ case OpAMD64MOVQatomicload:
+ return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
+ case OpAMD64MOVQf2i:
+ return rewriteValueAMD64_OpAMD64MOVQf2i(v)
+ case OpAMD64MOVQi2f:
+ return rewriteValueAMD64_OpAMD64MOVQi2f(v)
+ case OpAMD64MOVQload:
+ return rewriteValueAMD64_OpAMD64MOVQload(v)
+ case OpAMD64MOVQstore:
+ return rewriteValueAMD64_OpAMD64MOVQstore(v)
+ case OpAMD64MOVQstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
+ case OpAMD64MOVSDload:
+ return rewriteValueAMD64_OpAMD64MOVSDload(v)
+ case OpAMD64MOVSDstore:
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v)
+ case OpAMD64MOVSSload:
+ return rewriteValueAMD64_OpAMD64MOVSSload(v)
+ case OpAMD64MOVSSstore:
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v)
+ case OpAMD64MOVWQSX:
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v)
+ case OpAMD64MOVWQSXload:
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
+ case OpAMD64MOVWQZX:
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v)
+ case OpAMD64MOVWload:
+ return rewriteValueAMD64_OpAMD64MOVWload(v)
+ case OpAMD64MOVWstore:
+ return rewriteValueAMD64_OpAMD64MOVWstore(v)
+ case OpAMD64MOVWstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
+ case OpAMD64MULL:
+ return rewriteValueAMD64_OpAMD64MULL(v)
+ case OpAMD64MULLconst:
+ return rewriteValueAMD64_OpAMD64MULLconst(v)
+ case OpAMD64MULQ:
+ return rewriteValueAMD64_OpAMD64MULQ(v)
+ case OpAMD64MULQconst:
+ return rewriteValueAMD64_OpAMD64MULQconst(v)
+ case OpAMD64MULSD:
+ return rewriteValueAMD64_OpAMD64MULSD(v)
+ case OpAMD64MULSDload:
+ return rewriteValueAMD64_OpAMD64MULSDload(v)
+ case OpAMD64MULSS:
+ return rewriteValueAMD64_OpAMD64MULSS(v)
+ case OpAMD64MULSSload:
+ return rewriteValueAMD64_OpAMD64MULSSload(v)
+ case OpAMD64NEGL:
+ return rewriteValueAMD64_OpAMD64NEGL(v)
+ case OpAMD64NEGQ:
+ return rewriteValueAMD64_OpAMD64NEGQ(v)
+ case OpAMD64NOTL:
+ return rewriteValueAMD64_OpAMD64NOTL(v)
+ case OpAMD64NOTQ:
+ return rewriteValueAMD64_OpAMD64NOTQ(v)
+ case OpAMD64ORL:
+ return rewriteValueAMD64_OpAMD64ORL(v)
+ case OpAMD64ORLconst:
+ return rewriteValueAMD64_OpAMD64ORLconst(v)
+ case OpAMD64ORLconstmodify:
+ return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
+ case OpAMD64ORLload:
+ return rewriteValueAMD64_OpAMD64ORLload(v)
+ case OpAMD64ORLmodify:
+ return rewriteValueAMD64_OpAMD64ORLmodify(v)
+ case OpAMD64ORQ:
+ return rewriteValueAMD64_OpAMD64ORQ(v)
+ case OpAMD64ORQconst:
+ return rewriteValueAMD64_OpAMD64ORQconst(v)
+ case OpAMD64ORQconstmodify:
+ return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
+ case OpAMD64ORQload:
+ return rewriteValueAMD64_OpAMD64ORQload(v)
+ case OpAMD64ORQmodify:
+ return rewriteValueAMD64_OpAMD64ORQmodify(v)
+ case OpAMD64ROLB:
+ return rewriteValueAMD64_OpAMD64ROLB(v)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v)
+ case OpAMD64ROLL:
+ return rewriteValueAMD64_OpAMD64ROLL(v)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v)
+ case OpAMD64ROLQ:
+ return rewriteValueAMD64_OpAMD64ROLQ(v)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v)
+ case OpAMD64ROLW:
+ return rewriteValueAMD64_OpAMD64ROLW(v)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v)
+ case OpAMD64RORB:
+ return rewriteValueAMD64_OpAMD64RORB(v)
+ case OpAMD64RORL:
+ return rewriteValueAMD64_OpAMD64RORL(v)
+ case OpAMD64RORQ:
+ return rewriteValueAMD64_OpAMD64RORQ(v)
+ case OpAMD64RORW:
+ return rewriteValueAMD64_OpAMD64RORW(v)
+ case OpAMD64SARB:
+ return rewriteValueAMD64_OpAMD64SARB(v)
+ case OpAMD64SARBconst:
+ return rewriteValueAMD64_OpAMD64SARBconst(v)
+ case OpAMD64SARL:
+ return rewriteValueAMD64_OpAMD64SARL(v)
+ case OpAMD64SARLconst:
+ return rewriteValueAMD64_OpAMD64SARLconst(v)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64_OpAMD64SARQ(v)
+ case OpAMD64SARQconst:
+ return rewriteValueAMD64_OpAMD64SARQconst(v)
+ case OpAMD64SARW:
+ return rewriteValueAMD64_OpAMD64SARW(v)
+ case OpAMD64SARWconst:
+ return rewriteValueAMD64_OpAMD64SARWconst(v)
+ case OpAMD64SARXLload:
+ return rewriteValueAMD64_OpAMD64SARXLload(v)
+ case OpAMD64SARXQload:
+ return rewriteValueAMD64_OpAMD64SARXQload(v)
+ case OpAMD64SBBLcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
+ case OpAMD64SBBQ:
+ return rewriteValueAMD64_OpAMD64SBBQ(v)
+ case OpAMD64SBBQcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
+ case OpAMD64SBBQconst:
+ return rewriteValueAMD64_OpAMD64SBBQconst(v)
+ case OpAMD64SETA:
+ return rewriteValueAMD64_OpAMD64SETA(v)
+ case OpAMD64SETAE:
+ return rewriteValueAMD64_OpAMD64SETAE(v)
+ case OpAMD64SETAEstore:
+ return rewriteValueAMD64_OpAMD64SETAEstore(v)
+ case OpAMD64SETAstore:
+ return rewriteValueAMD64_OpAMD64SETAstore(v)
+ case OpAMD64SETB:
+ return rewriteValueAMD64_OpAMD64SETB(v)
+ case OpAMD64SETBE:
+ return rewriteValueAMD64_OpAMD64SETBE(v)
+ case OpAMD64SETBEstore:
+ return rewriteValueAMD64_OpAMD64SETBEstore(v)
+ case OpAMD64SETBstore:
+ return rewriteValueAMD64_OpAMD64SETBstore(v)
+ case OpAMD64SETEQ:
+ return rewriteValueAMD64_OpAMD64SETEQ(v)
+ case OpAMD64SETEQstore:
+ return rewriteValueAMD64_OpAMD64SETEQstore(v)
+ case OpAMD64SETG:
+ return rewriteValueAMD64_OpAMD64SETG(v)
+ case OpAMD64SETGE:
+ return rewriteValueAMD64_OpAMD64SETGE(v)
+ case OpAMD64SETGEstore:
+ return rewriteValueAMD64_OpAMD64SETGEstore(v)
+ case OpAMD64SETGstore:
+ return rewriteValueAMD64_OpAMD64SETGstore(v)
+ case OpAMD64SETL:
+ return rewriteValueAMD64_OpAMD64SETL(v)
+ case OpAMD64SETLE:
+ return rewriteValueAMD64_OpAMD64SETLE(v)
+ case OpAMD64SETLEstore:
+ return rewriteValueAMD64_OpAMD64SETLEstore(v)
+ case OpAMD64SETLstore:
+ return rewriteValueAMD64_OpAMD64SETLstore(v)
+ case OpAMD64SETNE:
+ return rewriteValueAMD64_OpAMD64SETNE(v)
+ case OpAMD64SETNEstore:
+ return rewriteValueAMD64_OpAMD64SETNEstore(v)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64_OpAMD64SHLL(v)
+ case OpAMD64SHLLconst:
+ return rewriteValueAMD64_OpAMD64SHLLconst(v)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64_OpAMD64SHLQ(v)
+ case OpAMD64SHLQconst:
+ return rewriteValueAMD64_OpAMD64SHLQconst(v)
+ case OpAMD64SHLXLload:
+ return rewriteValueAMD64_OpAMD64SHLXLload(v)
+ case OpAMD64SHLXQload:
+ return rewriteValueAMD64_OpAMD64SHLXQload(v)
+ case OpAMD64SHRB:
+ return rewriteValueAMD64_OpAMD64SHRB(v)
+ case OpAMD64SHRBconst:
+ return rewriteValueAMD64_OpAMD64SHRBconst(v)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64_OpAMD64SHRL(v)
+ case OpAMD64SHRLconst:
+ return rewriteValueAMD64_OpAMD64SHRLconst(v)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64_OpAMD64SHRQ(v)
+ case OpAMD64SHRQconst:
+ return rewriteValueAMD64_OpAMD64SHRQconst(v)
+ case OpAMD64SHRW:
+ return rewriteValueAMD64_OpAMD64SHRW(v)
+ case OpAMD64SHRWconst:
+ return rewriteValueAMD64_OpAMD64SHRWconst(v)
+ case OpAMD64SHRXLload:
+ return rewriteValueAMD64_OpAMD64SHRXLload(v)
+ case OpAMD64SHRXQload:
+ return rewriteValueAMD64_OpAMD64SHRXQload(v)
+ case OpAMD64SUBL:
+ return rewriteValueAMD64_OpAMD64SUBL(v)
+ case OpAMD64SUBLconst:
+ return rewriteValueAMD64_OpAMD64SUBLconst(v)
+ case OpAMD64SUBLload:
+ return rewriteValueAMD64_OpAMD64SUBLload(v)
+ case OpAMD64SUBLmodify:
+ return rewriteValueAMD64_OpAMD64SUBLmodify(v)
+ case OpAMD64SUBQ:
+ return rewriteValueAMD64_OpAMD64SUBQ(v)
+ case OpAMD64SUBQborrow:
+ return rewriteValueAMD64_OpAMD64SUBQborrow(v)
+ case OpAMD64SUBQconst:
+ return rewriteValueAMD64_OpAMD64SUBQconst(v)
+ case OpAMD64SUBQload:
+ return rewriteValueAMD64_OpAMD64SUBQload(v)
+ case OpAMD64SUBQmodify:
+ return rewriteValueAMD64_OpAMD64SUBQmodify(v)
+ case OpAMD64SUBSD:
+ return rewriteValueAMD64_OpAMD64SUBSD(v)
+ case OpAMD64SUBSDload:
+ return rewriteValueAMD64_OpAMD64SUBSDload(v)
+ case OpAMD64SUBSS:
+ return rewriteValueAMD64_OpAMD64SUBSS(v)
+ case OpAMD64SUBSSload:
+ return rewriteValueAMD64_OpAMD64SUBSSload(v)
+ case OpAMD64TESTB:
+ return rewriteValueAMD64_OpAMD64TESTB(v)
+ case OpAMD64TESTBconst:
+ return rewriteValueAMD64_OpAMD64TESTBconst(v)
+ case OpAMD64TESTL:
+ return rewriteValueAMD64_OpAMD64TESTL(v)
+ case OpAMD64TESTLconst:
+ return rewriteValueAMD64_OpAMD64TESTLconst(v)
+ case OpAMD64TESTQ:
+ return rewriteValueAMD64_OpAMD64TESTQ(v)
+ case OpAMD64TESTQconst:
+ return rewriteValueAMD64_OpAMD64TESTQconst(v)
+ case OpAMD64TESTW:
+ return rewriteValueAMD64_OpAMD64TESTW(v)
+ case OpAMD64TESTWconst:
+ return rewriteValueAMD64_OpAMD64TESTWconst(v)
+ case OpAMD64XADDLlock:
+ return rewriteValueAMD64_OpAMD64XADDLlock(v)
+ case OpAMD64XADDQlock:
+ return rewriteValueAMD64_OpAMD64XADDQlock(v)
+ case OpAMD64XCHGL:
+ return rewriteValueAMD64_OpAMD64XCHGL(v)
+ case OpAMD64XCHGQ:
+ return rewriteValueAMD64_OpAMD64XCHGQ(v)
+ case OpAMD64XORL:
+ return rewriteValueAMD64_OpAMD64XORL(v)
+ case OpAMD64XORLconst:
+ return rewriteValueAMD64_OpAMD64XORLconst(v)
+ case OpAMD64XORLconstmodify:
+ return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
+ case OpAMD64XORLload:
+ return rewriteValueAMD64_OpAMD64XORLload(v)
+ case OpAMD64XORLmodify:
+ return rewriteValueAMD64_OpAMD64XORLmodify(v)
+ case OpAMD64XORQ:
+ return rewriteValueAMD64_OpAMD64XORQ(v)
+ case OpAMD64XORQconst:
+ return rewriteValueAMD64_OpAMD64XORQconst(v)
+ case OpAMD64XORQconstmodify:
+ return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
+ case OpAMD64XORQload:
+ return rewriteValueAMD64_OpAMD64XORQload(v)
+ case OpAMD64XORQmodify:
+ return rewriteValueAMD64_OpAMD64XORQmodify(v)
+ case OpAdd16:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAdd32:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAdd32F:
+ v.Op = OpAMD64ADDSS
+ return true
+ case OpAdd64:
+ v.Op = OpAMD64ADDQ
+ return true
+ case OpAdd64F:
+ v.Op = OpAMD64ADDSD
+ return true
+ case OpAdd8:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAddPtr:
+ v.Op = OpAMD64ADDQ
+ return true
+ case OpAddr:
+ return rewriteValueAMD64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAnd32:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAnd64:
+ v.Op = OpAMD64ANDQ
+ return true
+ case OpAnd8:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAndB:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAtomicAdd32:
+ return rewriteValueAMD64_OpAtomicAdd32(v)
+ case OpAtomicAdd64:
+ return rewriteValueAMD64_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ return rewriteValueAMD64_OpAtomicAnd32(v)
+ case OpAtomicAnd8:
+ return rewriteValueAMD64_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
+ case OpAtomicExchange32:
+ return rewriteValueAMD64_OpAtomicExchange32(v)
+ case OpAtomicExchange64:
+ return rewriteValueAMD64_OpAtomicExchange64(v)
+ case OpAtomicLoad32:
+ return rewriteValueAMD64_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValueAMD64_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValueAMD64_OpAtomicLoad8(v)
+ case OpAtomicLoadPtr:
+ return rewriteValueAMD64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ return rewriteValueAMD64_OpAtomicOr32(v)
+ case OpAtomicOr8:
+ return rewriteValueAMD64_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ return rewriteValueAMD64_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValueAMD64_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValueAMD64_OpAtomicStore8(v)
+ case OpAtomicStorePtrNoWB:
+ return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
+ case OpAvg64u:
+ v.Op = OpAMD64AVGQU
+ return true
+ case OpBitLen16:
+ return rewriteValueAMD64_OpBitLen16(v)
+ case OpBitLen32:
+ return rewriteValueAMD64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValueAMD64_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueAMD64_OpBitLen8(v)
+ case OpBswap16:
+ return rewriteValueAMD64_OpBswap16(v)
+ case OpBswap32:
+ v.Op = OpAMD64BSWAPL
+ return true
+ case OpBswap64:
+ v.Op = OpAMD64BSWAPQ
+ return true
+ case OpCeil:
+ return rewriteValueAMD64_OpCeil(v)
+ case OpClosureCall:
+ v.Op = OpAMD64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCom32:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCom64:
+ v.Op = OpAMD64NOTQ
+ return true
+ case OpCom8:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCondSelect:
+ return rewriteValueAMD64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueAMD64_OpConst16(v)
+ case OpConst32:
+ v.Op = OpAMD64MOVLconst
+ return true
+ case OpConst32F:
+ v.Op = OpAMD64MOVSSconst
+ return true
+ case OpConst64:
+ v.Op = OpAMD64MOVQconst
+ return true
+ case OpConst64F:
+ v.Op = OpAMD64MOVSDconst
+ return true
+ case OpConst8:
+ return rewriteValueAMD64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueAMD64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueAMD64_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueAMD64_OpCtz16(v)
+ case OpCtz16NonZero:
+ return rewriteValueAMD64_OpCtz16NonZero(v)
+ case OpCtz32:
+ return rewriteValueAMD64_OpCtz32(v)
+ case OpCtz32NonZero:
+ return rewriteValueAMD64_OpCtz32NonZero(v)
+ case OpCtz64:
+ return rewriteValueAMD64_OpCtz64(v)
+ case OpCtz64NonZero:
+ return rewriteValueAMD64_OpCtz64NonZero(v)
+ case OpCtz8:
+ return rewriteValueAMD64_OpCtz8(v)
+ case OpCtz8NonZero:
+ return rewriteValueAMD64_OpCtz8NonZero(v)
+ case OpCvt32Fto32:
+ v.Op = OpAMD64CVTTSS2SL
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpAMD64CVTTSS2SQ
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpAMD64CVTSS2SD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpAMD64CVTSL2SS
+ return true
+ case OpCvt32to64F:
+ v.Op = OpAMD64CVTSL2SD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpAMD64CVTTSD2SL
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpAMD64CVTSD2SS
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpAMD64CVTTSD2SQ
+ return true
+ case OpCvt64to32F:
+ v.Op = OpAMD64CVTSQ2SS
+ return true
+ case OpCvt64to64F:
+ v.Op = OpAMD64CVTSQ2SD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv128u:
+ v.Op = OpAMD64DIVQU2
+ return true
+ case OpDiv16:
+ return rewriteValueAMD64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueAMD64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueAMD64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpAMD64DIVSS
+ return true
+ case OpDiv32u:
+ return rewriteValueAMD64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueAMD64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpAMD64DIVSD
+ return true
+ case OpDiv64u:
+ return rewriteValueAMD64_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValueAMD64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueAMD64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueAMD64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueAMD64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueAMD64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueAMD64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueAMD64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueAMD64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueAMD64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueAMD64_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueAMD64_OpFMA(v)
+ case OpFloor:
+ return rewriteValueAMD64_OpFloor(v)
+ case OpGetCallerPC:
+ v.Op = OpAMD64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpAMD64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpAMD64LoweredGetClosurePtr
+ return true
+ case OpGetG:
+ return rewriteValueAMD64_OpGetG(v)
+ case OpHasCPUFeature:
+ return rewriteValueAMD64_OpHasCPUFeature(v)
+ case OpHmul32:
+ v.Op = OpAMD64HMULL
+ return true
+ case OpHmul32u:
+ v.Op = OpAMD64HMULLU
+ return true
+ case OpHmul64:
+ v.Op = OpAMD64HMULQ
+ return true
+ case OpHmul64u:
+ v.Op = OpAMD64HMULQU
+ return true
+ case OpInterCall:
+ v.Op = OpAMD64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueAMD64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueAMD64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueAMD64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueAMD64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueAMD64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueAMD64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueAMD64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueAMD64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueAMD64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueAMD64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueAMD64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueAMD64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueAMD64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueAMD64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueAMD64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueAMD64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueAMD64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueAMD64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueAMD64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueAMD64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueAMD64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueAMD64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueAMD64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueAMD64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueAMD64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueAMD64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueAMD64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueAMD64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueAMD64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueAMD64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueAMD64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueAMD64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueAMD64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueAMD64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueAMD64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueAMD64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueAMD64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueAMD64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueAMD64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueAMD64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueAMD64_OpLsh8x8(v)
+ case OpMax32F:
+ return rewriteValueAMD64_OpMax32F(v)
+ case OpMax64F:
+ return rewriteValueAMD64_OpMax64F(v)
+ case OpMin32F:
+ return rewriteValueAMD64_OpMin32F(v)
+ case OpMin64F:
+ return rewriteValueAMD64_OpMin64F(v)
+ case OpMod16:
+ return rewriteValueAMD64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueAMD64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueAMD64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueAMD64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueAMD64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValueAMD64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValueAMD64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueAMD64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueAMD64_OpMove(v)
+ case OpMul16:
+ v.Op = OpAMD64MULL
+ return true
+ case OpMul32:
+ v.Op = OpAMD64MULL
+ return true
+ case OpMul32F:
+ v.Op = OpAMD64MULSS
+ return true
+ case OpMul64:
+ v.Op = OpAMD64MULQ
+ return true
+ case OpMul64F:
+ v.Op = OpAMD64MULSD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpAMD64MULQU2
+ return true
+ case OpMul8:
+ v.Op = OpAMD64MULL
+ return true
+ case OpNeg16:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeg32:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeg32F:
+ return rewriteValueAMD64_OpNeg32F(v)
+ case OpNeg64:
+ v.Op = OpAMD64NEGQ
+ return true
+ case OpNeg64F:
+ return rewriteValueAMD64_OpNeg64F(v)
+ case OpNeg8:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeq16:
+ return rewriteValueAMD64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueAMD64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueAMD64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueAMD64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueAMD64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueAMD64_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueAMD64_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueAMD64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpAMD64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueAMD64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueAMD64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOr32:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOr64:
+ v.Op = OpAMD64ORQ
+ return true
+ case OpOr8:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOrB:
+ v.Op = OpAMD64ORL
+ return true
+ case OpPanicBounds:
+ return rewriteValueAMD64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueAMD64_OpPopCount16(v)
+ case OpPopCount32:
+ v.Op = OpAMD64POPCNTL
+ return true
+ case OpPopCount64:
+ v.Op = OpAMD64POPCNTQ
+ return true
+ case OpPopCount8:
+ return rewriteValueAMD64_OpPopCount8(v)
+ case OpPrefetchCache:
+ v.Op = OpAMD64PrefetchT0
+ return true
+ case OpPrefetchCacheStreamed:
+ v.Op = OpAMD64PrefetchNTA
+ return true
+ case OpRotateLeft16:
+ v.Op = OpAMD64ROLW
+ return true
+ case OpRotateLeft32:
+ v.Op = OpAMD64ROLL
+ return true
+ case OpRotateLeft64:
+ v.Op = OpAMD64ROLQ
+ return true
+ case OpRotateLeft8:
+ v.Op = OpAMD64ROLB
+ return true
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRoundToEven:
+ return rewriteValueAMD64_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValueAMD64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueAMD64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueAMD64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueAMD64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueAMD64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueAMD64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueAMD64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueAMD64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueAMD64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueAMD64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueAMD64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueAMD64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueAMD64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueAMD64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueAMD64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueAMD64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueAMD64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueAMD64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueAMD64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueAMD64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueAMD64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueAMD64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueAMD64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueAMD64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueAMD64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueAMD64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueAMD64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueAMD64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueAMD64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueAMD64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueAMD64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueAMD64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueAMD64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueAMD64_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValueAMD64_OpSelectN(v)
+ case OpSignExt16to32:
+ v.Op = OpAMD64MOVWQSX
+ return true
+ case OpSignExt16to64:
+ v.Op = OpAMD64MOVWQSX
+ return true
+ case OpSignExt32to64:
+ v.Op = OpAMD64MOVLQSX
+ return true
+ case OpSignExt8to16:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSignExt8to32:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSignExt8to64:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSlicemask:
+ return rewriteValueAMD64_OpSlicemask(v)
+ case OpSpectreIndex:
+ return rewriteValueAMD64_OpSpectreIndex(v)
+ case OpSpectreSliceIndex:
+ return rewriteValueAMD64_OpSpectreSliceIndex(v)
+ case OpSqrt:
+ v.Op = OpAMD64SQRTSD
+ return true
+ case OpSqrt32:
+ v.Op = OpAMD64SQRTSS
+ return true
+ case OpStaticCall:
+ v.Op = OpAMD64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueAMD64_OpStore(v)
+ case OpSub16:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSub32:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSub32F:
+ v.Op = OpAMD64SUBSS
+ return true
+ case OpSub64:
+ v.Op = OpAMD64SUBQ
+ return true
+ case OpSub64F:
+ v.Op = OpAMD64SUBSD
+ return true
+ case OpSub8:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSubPtr:
+ v.Op = OpAMD64SUBQ
+ return true
+ case OpTailCall:
+ v.Op = OpAMD64CALLtail
+ return true
+ case OpTrunc:
+ return rewriteValueAMD64_OpTrunc(v)
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpAMD64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpAMD64XORL
+ return true
+ case OpXor32:
+ v.Op = OpAMD64XORL
+ return true
+ case OpXor64:
+ v.Op = OpAMD64XORQ
+ return true
+ case OpXor8:
+ v.Op = OpAMD64XORL
+ return true
+ case OpZero:
+ return rewriteValueAMD64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpAMD64MOVWQZX
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpAMD64MOVWQZX
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpAMD64MOVLQZX
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCQ x (MOVQconst [c]) carry)
+ // cond: is32Bit(c)
+ // result: (ADCQconst x [int32(c)] carry)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ carry := v_2
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADCQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, carry)
+ return true
+ }
+ break
+ }
+ // match: (ADCQ x y (FlagEQ))
+ // result: (ADDQcarry x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64ADDQcarry)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCQconst x [c] (FlagEQ))
+ // result: (ADDQconstcarry x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64ADDQconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDL x (MOVLconst [c]))
+ // result: (ADDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // result: (LEAL8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // result: (LEAL4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL y y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDL {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL x y))
+ // result: (LEAL2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // result: (LEAL1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (NEGL y))
+ // result: (SUBL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SUBL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDLconst [c] (ADDL x y))
+ // result: (LEAL1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (SHLLconst [1] x))
+ // result: (LEAL1 [c] x x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // result: (ADDLconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [off] x:(SP))
+ // result: (LEAL [off] x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(off)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (ADDL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQ x (MOVQconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
+ // result: (ADDQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && !t.IsPtr()) {
+ continue
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (MOVLconst [c]))
+ // result: (ADDQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [3] y))
+ // result: (LEAQ8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [2] y))
+ // result: (LEAQ4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [1] y))
+ // result: (LEAQ2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (ADDQ y y))
+ // result: (LEAQ2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQ {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (ADDQ x y))
+ // result: (LEAQ2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDQ (ADDQconst [c] x) y)
+ // result: (LEAQ1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDQconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (LEAQ [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (NEGQ y))
+ // result: (SUBQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SUBQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQcarry x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDQconstcarry x [int32(c)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQconstcarry)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDQconst [c] (ADDQ x y))
+ // result: (LEAQ1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDQ {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (SHLQconst [1] x))
+ // result: (LEAQ1 [c] x x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDQconst [c] (ADDQconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (ADDQconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDQconst [off] x:(SP))
+ // result: (LEAQ [off] x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (ADDQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (ADDSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (ADDSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
+ // result: (BTRL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTL {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (MOVLconst [c]))
+ // result: (ANDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (NOTL y))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (ANDNL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NOTL {
+ continue
+ }
+ y := v_1.Args[0]
+ if !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64ANDNL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (NEGL x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSIL x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSIL)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL <t> x (ADDLconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (Select0 <t> (BLSRL x))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpSelect0)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [ 0xFF] x)
+ // result: (MOVBQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [0xFFFF] x)
+ // result: (MOVWQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFFFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: c==0
+ // result: (MOVLconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: c==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (ANDL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDNL x (SHLL (MOVLconst [1]) y))
+ // result: (BTRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLL {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
+ // result: (BTRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQ {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
+ // result: (BTRQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTQ {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
+ // result: (BTRQconst [int8(log64(^c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
+ continue
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(^c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (NOTQ y))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (ANDNQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NOTQ {
+ continue
+ }
+ y := v_1.Args[0]
+ if !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64ANDNQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (NEGQ x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSIQ x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSIQ)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ <t> x (ADDQconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (Select0 <t> (BLSRQ x))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpSelect0)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDQconst [c] (ANDQconst [d] x))
+ // result: (ANDQconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [ 0xFF] x)
+ // result: (MOVBQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0xFFFF] x)
+ // result: (MOVWQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFFFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0] _)
+ // result: (MOVQconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDQconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (ANDQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
+ // result: (BSFQ (ORQconst <t> [1<<8] x))
+ for {
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1<<8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpAMD64BSFQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
+ v0.AuxInt = int32ToAuxInt(1 << 8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
+ // result: (BSFQ (ORQconst <t> [1<<16] x))
+ for {
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1<<16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpAMD64BSFQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
+ v0.AuxInt = int32ToAuxInt(1 << 16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BSWAPL (BSWAPL p))
+ // result: p
+ for {
+ if v_0.Op != OpAMD64BSWAPL {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ // match: (BSWAPL x:(MOVLload [i] {s} p mem))
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: @x.Block (MOVBELload [i] {s} p mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ i := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (BSWAPL x:(MOVBELload [i] {s} p mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVLload [i] {s} p mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBELload {
+ break
+ }
+ i := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BSWAPQ (BSWAPQ p))
+ // result: p
+ for {
+ if v_0.Op != OpAMD64BSWAPQ {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ // match: (BSWAPQ x:(MOVQload [i] {s} p mem))
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: @x.Block (MOVBEQload [i] {s} p mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ i := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVQload [i] {s} p mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBEQload {
+ break
+ }
+ i := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTCQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTLconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (BTLconst [c] (SHRLconst [d] x))
+ // cond: (c+d)<32
+ // result: (BTLconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 32) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLLconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRL x y))
+ // result: (BTL y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTL)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRXL x y))
+ // result: (BTL y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRXL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTL)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTQconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTQconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [0] s:(SHRQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTRQconst [c] (BTSQconst [c] x))
+ // result: (BTRQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTCQconst [c] x))
+ // result: (BTRQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d&^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTSQconst [c] (BTRQconst [c] x))
+ // result: (BTSQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTCQconst [c] x))
+ // result: (BTSQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d|(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLCC x y (InvertFlags cond))
+ // result: (CMOVLLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLCS x y (InvertFlags cond))
+ // result: (CMOVLHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMOVLEQ x y (InvertFlags cond))
+ // result: (CMOVLEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
+ // result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVLEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ // match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
+ // result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVLEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLGE x y (InvertFlags cond))
+ // result: (CMOVLLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLGT x y (InvertFlags cond))
+ // result: (CMOVLLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLHI x y (InvertFlags cond))
+ // result: (CMOVLCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLE x y (InvertFlags cond))
+ // result: (CMOVLGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLS x y (InvertFlags cond))
+ // result: (CMOVLCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLT x y (InvertFlags cond))
+ // result: (CMOVLGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMOVLNE x y (InvertFlags cond))
+ // result: (CMOVLNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
+ // result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ // match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
+ // result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQCC x y (InvertFlags cond))
+ // result: (CMOVQLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQCS x y (InvertFlags cond))
+ // result: (CMOVQHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMOVQEQ x y (InvertFlags cond))
+ // result: (CMOVQEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
+ // cond: c != 0
+ // result: x
+ for {
+ x := v_0
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpAMD64BSFQ {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
+ // cond: c != 0
+ // result: x
+ for {
+ x := v_0
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpAMD64BSRQ {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
+ // result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ // match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
+ // result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQGE x y (InvertFlags cond))
+ // result: (CMOVQLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQGT x y (InvertFlags cond))
+ // result: (CMOVQLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQHI x y (InvertFlags cond))
+ // result: (CMOVQCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLE x y (InvertFlags cond))
+ // result: (CMOVQGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLS x y (InvertFlags cond))
+ // result: (CMOVQCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLT x y (InvertFlags cond))
+ // result: (CMOVQGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMOVQNE x y (InvertFlags cond))
+ // result: (CMOVQNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
+ // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ // match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
+ // result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ v_2_1 := v_2.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
+ s := v_2_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
+ continue
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWCC x y (InvertFlags cond))
+ // result: (CMOVWLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWCS x y (InvertFlags cond))
+ // result: (CMOVWHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWEQ x y (InvertFlags cond))
+ // result: (CMOVWEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWGE x y (InvertFlags cond))
+ // result: (CMOVWLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWGT x y (InvertFlags cond))
+ // result: (CMOVWLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWHI x y (InvertFlags cond))
+ // result: (CMOVWCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLE x y (InvertFlags cond))
+ // result: (CMOVWGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLS x y (InvertFlags cond))
+ // result: (CMOVWCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLT x y (InvertFlags cond))
+ // result: (CMOVWGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWNE x y (InvertFlags cond))
+ // result: (CMOVWNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPB x (MOVLconst [c]))
+ // result: (CMPBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPB y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPBload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int8(m) && int8(m) < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTB x y)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPBconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPBload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPL x (MOVLconst [c]))
+ // result: (CMPLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPL y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPLload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPLconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTLconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPLload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPQconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPQconst x [int32(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPQ x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPQ y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)<uint64(y)
+ // result: (FlagLT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x < y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)>uint64(y)
+ // result: (FlagLT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x < y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)<uint64(y)
+ // result: (FlagGT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x > y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)>uint64(y)
+ // result: (FlagGT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x > y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPQload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<int64(y) && uint64(x)<uint64(int64(y))
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<int64(y) && uint64(x)>uint64(int64(y))
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>int64(y) && uint64(x)<uint64(int64(y))
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>int64(y) && uint64(x)>uint64(int64(y))
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPQconst (MOVBQZX _) [c])
+ // cond: 0xFF < c
+ // result: (FlagLT_ULT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVWQZX _) [c])
+ // cond: 0xFFFF < c
+ // result: (FlagLT_ULT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (SHRQconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (ANDQconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst a:(ANDQ x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTQ x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDQ {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPQconst a:(ANDQconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTQconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPQconst x [0])
+ // result: (TESTQ x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPQload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
+ // cond: validVal(c)
+ // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(validVal(c)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVLconst [c]))
+ // result: (CMPWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int16ToAuxInt(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPWload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int16(m) && int16(m) < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTW x y)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPWload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULL x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULL y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULL)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULLU x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULLU y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULLU)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULQ x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULQ y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULQU x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULQU y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULQU)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDLconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAQ [c] {s} (ADDQconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ [c] {s} (ADDQ x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ2 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ4 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ8 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDQconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64LEAQ {
+ continue
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAQ1 [0] x y)
+ // cond: v.Aux == nil
+ // result: (ADDQ x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if !(v.Aux == nil) {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAQ2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
+ // result: (LEAQ4 [off1+2*off2] {sym1} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym1)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAQ4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
+ // result: (LEAQ8 [off1+4*off2] {sym1} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym1)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAQ8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
+ // cond: x.Uses == 1
+ // result: (MOVLstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64BSWAPL {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
+ // cond: x.Uses == 1
+ // result: (MOVQstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64BSWAPQ {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
+ // cond: x.Uses == 1
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x)
+ // cond: zeroUpper56Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper56Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETL {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETLE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETG {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETGE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
+ // cond: y.Uses == 1
+ // result: (SETEQstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETEQ {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETNEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETNE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETB {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETBE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETA {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETAE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQSX (ANDLconst [c] x))
+ // cond: uint32(c) & 0x80000000 == 0
+ // result: (ANDLconst [c & 0x7fffffff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint32(c)&0x80000000 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVLQSX x))
+ // result: (MOVLQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVLQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVLQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVWQSX x))
+ // result: (MOVWQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVLQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQZX x)
+ // cond: zeroUpper32Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper32Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVLQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVLQZX x))
+ // result: (MOVLQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVLQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVWQZX x))
+ // result: (MOVWQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVLQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
+ // result: (MOVLf2i val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVLf2i)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
+ // result: (MOVLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
+ // result: (MOVLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SUBL {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ORLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64XORLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
+ // result: (MOVSSstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLf2i {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBELstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64BSWAPL {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBELstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVOload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVOload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVOload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVOstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
+ // cond: symIsRO(srcSym)
+ // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
+ for {
+ dstOff := auxIntToInt32(v.AuxInt)
+ dstSym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVOload {
+ break
+ }
+ srcOff := auxIntToInt32(v_1.AuxInt)
+ srcSym := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ mem := v_2
+ if !(symIsRO(srcSym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(dstOff + 8)
+ v.Aux = symToAux(dstSym)
+ v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
+ v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(dstOff)
+ v1.Aux = symToAux(dstSym)
+ v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
+ v1.AddArg3(ptr, v2, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQf2i <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQi2f <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
+ // result: (MOVQf2i val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVQf2i)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVQload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // cond: validVal(c)
+ // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(validVal(c)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SUBQ {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr x:(BTSQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+ // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+ // result: (BTSQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpAMD64BTSQconst {
+ break
+ }
+ c := auxIntToInt8(x.AuxInt)
+ l := x.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+ break
+ }
+ v.reset(OpAMD64BTSQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr x:(BTRQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+ // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+ // result: (BTRQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpAMD64BTRQconst {
+ break
+ }
+ c := auxIntToInt8(x.AuxInt)
+ l := x.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+ break
+ }
+ v.reset(OpAMD64BTRQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr x:(BTCQconst [c] l:(MOVQload {sym} [off] ptr mem)) mem)
+ // cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
+ // result: (BTCQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpAMD64BTCQconst {
+ break
+ }
+ c := auxIntToInt8(x.AuxInt)
+ l := x.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
+ break
+ }
+ v.reset(OpAMD64BTCQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64XORQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
+ // result: (MOVSDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQf2i {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBEQstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64BSWAPQ {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBEQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
+ // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
+ // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
+ // cond: config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
+ // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(config.useSSE && x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
+ // result: (MOVQi2f val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVQi2f)
+ v.AddArg(val)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
+ // result: (MOVQstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQi2f {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
+ // result: (MOVLi2f val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVLi2f)
+ v.AddArg(val)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
+ // result: (MOVLstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLi2f {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSX (MOVWQSX x))
+ // result: (MOVWQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x)
+ // cond: zeroUpper48Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper48Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQZX (MOVWQZX x))
+ // result: (MOVWQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBEWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBEWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULL x (MOVLconst [c]))
+ // result: (MULLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // result: (MULLconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MULLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [-9] x)
+ // result: (NEGL (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-5] x)
+ // result: (NEGL (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-3] x)
+ // result: (NEGL (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-1] x)
+ // result: (NEGL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [ 0] _)
+ // result: (MOVLconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (MULLconst [ 1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULLconst [ 3] x)
+ // result: (LEAL2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [ 5] x)
+ // result: (LEAL4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [ 7] x)
+ // result: (LEAL2 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [ 9] x)
+ // result: (LEAL8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [19] x)
+ // result: (LEAL2 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [27] x)
+ // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [37] x)
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [45] x)
+ // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [81] x)
+ // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpAMD64LEAL1)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULQconst [c] (MULQconst [d] x))
+ // cond: is32Bit(int64(c)*int64(d))
+ // result: (MULQconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MULQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) * int64(d))) {
+ break
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULQconst [-9] x)
+ // result: (NEGQ (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-5] x)
+ // result: (NEGQ (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-3] x)
+ // result: (NEGQ (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-1] x)
+ // result: (NEGQ x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULQconst [ 0] _)
+ // result: (MOVQconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MULQconst [ 1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULQconst [ 3] x)
+ // result: (LEAQ2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [ 5] x)
+ // result: (LEAQ4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [ 7] x)
+ // result: (LEAQ2 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [ 9] x)
+ // result: (LEAQ8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [11] x)
+ // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [13] x)
+ // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [19] x)
+ // result: (LEAQ2 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [21] x)
+ // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [25] x)
+ // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [27] x)
+ // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [37] x)
+ // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [41] x)
+ // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [45] x)
+ // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [73] x)
+ // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [81] x)
+ // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
+ // result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
+ return true
+ }
+ // match: (MULQconst [c] (NEGQ x))
+ // cond: c != -(1<<31)
+ // result: (MULQconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (MULSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MULSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (MULSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MULSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGL (NEGL x))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGL {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEGL s:(SUBL x y))
+ // cond: s.Uses == 1
+ // result: (SUBL y x)
+ for {
+ s := v_0
+ if s.Op != OpAMD64SUBL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if !(s.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEGL (MOVLconst [c]))
+ // result: (MOVLconst [-c])
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGQ (NEGQ x))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEGQ s:(SUBQ x y))
+ // cond: s.Uses == 1
+ // result: (SUBQ y x)
+ for {
+ s := v_0
+ if s.Op != OpAMD64SUBQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if !(s.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEGQ (MOVQconst [c]))
+ // result: (MOVQconst [-c])
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEGQ (ADDQconst [c] (NEGQ x)))
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
+ for {
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTL (MOVLconst [c]))
+ // result: (MOVLconst [^c])
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTQ (MOVQconst [c]))
+ // result: (MOVQconst [^c])
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORL (SHLL (MOVLconst [1]) y) x)
+ // result: (BTSL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL x (MOVLconst [c]))
+ // result: (ORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORLconst [c] (ORLconst [d] x))
+ // result: (ORLconst [c | d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: c==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: ( ORL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ORL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
+ // result: (BTSQconst [int8(log64(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
+ continue
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVLconst [c]))
+ // result: (ORQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
+ // result: (SHRDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHLQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHRDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
+ // result: (SHLDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHLDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
+ // result: (SHRDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRXQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHLXQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHRDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
+ // result: (SHLDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLXQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRXQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHLDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
+ // result: (MOVQconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORQconst [c] (ORQconst [d] x))
+ // result: (ORQconst [c | d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORQconst [-1] _)
+ // result: (MOVQconst [-1])
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: ( ORQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ORQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLB x (NEGQ y))
+ // result: (RORB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLB x (NEGL y))
+ // result: (RORB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLB x (MOVQconst [c]))
+ // result: (ROLBconst [int8(c&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLB x (MOVLconst [c]))
+ // result: (ROLBconst [int8(c&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLL x (NEGQ y))
+ // result: (RORL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLL x (NEGL y))
+ // result: (RORL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLL x (MOVQconst [c]))
+ // result: (ROLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLL x (MOVLconst [c]))
+ // result: (ROLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLQ x (NEGQ y))
+ // result: (RORQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLQ x (NEGL y))
+ // result: (RORQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLQ x (MOVQconst [c]))
+ // result: (ROLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLQ x (MOVLconst [c]))
+ // result: (ROLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLW x (NEGQ y))
+ // result: (RORW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLW x (NEGL y))
+ // result: (RORW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLW x (MOVQconst [c]))
+ // result: (ROLWconst [int8(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLW x (MOVLconst [c]))
+ // result: (ROLWconst [int8(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORB x (NEGQ y))
+ // result: (ROLB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORB x (NEGL y))
+ // result: (ROLB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORB x (MOVQconst [c]))
+ // result: (ROLBconst [int8((-c)&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 7))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORB x (MOVLconst [c]))
+ // result: (ROLBconst [int8((-c)&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORL x (NEGQ y))
+ // result: (ROLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORL x (NEGL y))
+ // result: (ROLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORL x (MOVQconst [c]))
+ // result: (ROLLconst [int8((-c)&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORL x (MOVLconst [c]))
+ // result: (ROLLconst [int8((-c)&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORQ x (NEGQ y))
+ // result: (ROLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORQ x (NEGL y))
+ // result: (ROLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORQ x (MOVQconst [c]))
+ // result: (ROLQconst [int8((-c)&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORQ x (MOVLconst [c]))
+ // result: (ROLQconst [int8((-c)&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORW x (NEGQ y))
+ // result: (ROLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORW x (NEGL y))
+ // result: (ROLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORW x (MOVQconst [c]))
+ // result: (ROLWconst [int8((-c)&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 15))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORW x (MOVLconst [c]))
+ // result: (ROLWconst [int8((-c)&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARB x (MOVQconst [c]))
+ // result: (SARBconst [int8(min(int64(c)&31,7))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARB x (MOVLconst [c]))
+ // result: (SARBconst [int8(min(int64(c)&31,7))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARBconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int8(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARL x (MOVQconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (MOVLconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SARXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SARXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARLconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int32(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARQ x (MOVQconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARQ x (MOVLconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SARXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SARXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARW x (MOVQconst [c]))
+ // result: (SARWconst [int8(min(int64(c)&31,15))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARW x (MOVLconst [c]))
+ // result: (SARWconst [int8(min(int64(c)&31,15))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARWconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int16(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBLcarrymask (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBQ x (MOVQconst [c]) borrow)
+ // cond: is32Bit(c)
+ // result: (SBBQconst x [int32(c)] borrow)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ borrow := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SBBQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, borrow)
+ return true
+ }
+ // match: (SBBQ x y (FlagEQ))
+ // result: (SUBQborrow x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64SUBQborrow)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBQcarrymask (FlagEQ))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagLT_ULT))
+ // result: (MOVQconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagLT_UGT))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagGT_ULT))
+ // result: (MOVQconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagGT_UGT))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBQconst x [c] (FlagEQ))
+ // result: (SUBQconstborrow x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64SUBQconstborrow)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETA (InvertFlags x))
+ // result: (SETB x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETAE (TESTQ x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTL x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTW x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTW {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTB x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTB {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (InvertFlags x))
+ // result: (SETBE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETBEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETAEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETAstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETB (TESTQ x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTL x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTW x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTW {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTB x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTB {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (BTLconst [0] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (BTQconst [0] x))
+ // result: (ANDQconst [1] x)
+ for {
+ if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (InvertFlags x))
+ // result: (SETA x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETBE (InvertFlags x))
+ // result: (SETAE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETAEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETBEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETAstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETBstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (SETAE (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (SETAE (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETAE (BTLconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETAE (BTQconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETAE (BTQconst [int8(log64(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
+ // result: (SETNE (CMPLconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
+ // result: (SETNE (CMPQconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTLconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTLconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (InvertFlags x))
+ // result: (SETEQ x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
+ // result: (SETEQ (Select1 <types.TypeFlags> blsr))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
+ continue
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s))
+ // result: (SETEQ (Select1 <types.TypeFlags> blsr))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
+ continue
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ mem := v_2
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETEQstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETEQstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETG (InvertFlags x))
+ // result: (SETL x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETGE (InvertFlags x))
+ // result: (SETLE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETLEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETGEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETGstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETL (InvertFlags x))
+ // result: (SETG x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETLE (InvertFlags x))
+ // result: (SETGE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETGEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETLEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETGstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETLstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SETNE (TESTBconst [1] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (TESTWconst [1] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (SETB (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (SETB (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETB (BTLconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETB (BTQconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETB (BTQconst [int8(log64(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
+ // result: (SETEQ (CMPLconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
+ // result: (SETEQ (CMPQconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTLconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (SETB (BTLconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (InvertFlags x))
+ // result: (SETNE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
+ // result: (SETNE (Select1 <types.TypeFlags> blsr))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
+ continue
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL s:(Select0 blsr:(BLSRL _)) s))
+ // result: (SETNE (Select1 <types.TypeFlags> blsr))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
+ continue
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ mem := v_2
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETNEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETNEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLL x (MOVQconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (MOVLconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLLconst [1] (SHRLconst [1] x))
+ // result: (ANDLconst [-2] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(-2)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SHLLconst [d] (MOVLconst [c]))
+ // result: (MOVLconst [c << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLQ x (MOVQconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQ x (MOVLconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHLXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHLXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLQconst [1] (SHRQconst [1] x))
+ // result: (ANDQconst [-2] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(-2)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SHLQconst [d] (MOVQconst [c]))
+ // result: (MOVQconst [c << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (SHLQconst [d] (MOVLconst [c]))
+ // result: (MOVQconst [int64(c) << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRB x (MOVQconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(OpAMD64SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(OpAMD64SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB _ (MOVQconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRL x (MOVQconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (MOVLconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL l:(MOVLload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXLload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRLconst [1] (SHLLconst [1] x))
+ // result: (ANDLconst [0x7fffffff] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(0x7fffffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRQ x (MOVQconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQ x (MOVLconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ l:(MOVQload [off] {sym} ptr mem) x)
+ // cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
+ // result: (SHRXQload [off] {sym} ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SHRXQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRQconst [1] (SHLQconst [1] x))
+ // result: (BTRQconst [63] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRW x (MOVQconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW _ (MOVQconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBL x (MOVLconst [c]))
+ // result: (SUBLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SUBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBL (MOVLconst [c]) x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBLload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // result: (ADDLconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (SUBL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SUBQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBQ x x)
+ // result: (MOVQconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBQload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBQborrow x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconstborrow x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SUBQconstborrow)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBQconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBQconst (MOVQconst [d]) [c])
+ // result: (MOVQconst [d-int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d - int64(c))
+ return true
+ }
+ // match: (SUBQconst (SUBQconst x [d]) [c])
+ // cond: is32Bit(int64(-c)-int64(d))
+ // result: (ADDQconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SUBQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(-c) - int64(d))) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (SUBQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (SUBSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (SUBSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTB (MOVLconst [c]) x)
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTBconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTL (MOVLconst [c]) x)
+ // result: (TESTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
+ // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
+ // result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if a.Op != OpAMD64ANDLload {
+ continue
+ }
+ off := auxIntToInt32(a.AuxInt)
+ sym := auxToSym(a.Aux)
+ mem := a.Args[2]
+ x := a.Args[0]
+ ptr := a.Args[1]
+ if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
+ continue
+ }
+ v.reset(OpAMD64TESTL)
+ v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c == 0
+ // result: (FlagEQ)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c < 0
+ // result: (FlagLT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c > 0
+ // result: (FlagGT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (TESTLconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (TESTQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
+ // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
+ // result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if a.Op != OpAMD64ANDQload {
+ continue
+ }
+ off := auxIntToInt32(a.AuxInt)
+ sym := auxToSym(a.Aux)
+ mem := a.Args[2]
+ x := a.Args[0]
+ ptr := a.Args[1]
+ if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
+ continue
+ }
+ v.reset(OpAMD64TESTQ)
+ v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c == 0
+ // result: (FlagEQ)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c == 0) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c < 0
+ // result: (FlagLT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c < 0) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c > 0
+ // result: (FlagGT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c > 0) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (TESTQconst [-1] x)
+ // cond: x.Op != OpAMD64MOVQconst
+ // result: (TESTQ x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVQconst) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTW (MOVLconst [c]) x)
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTWconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XADDLlock [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XADDLlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XADDQlock [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XADDQlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XCHGL [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XCHGQ [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORL (SHLL (MOVLconst [1]) y) x)
+ // result: (BTCL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XORL x (MOVLconst [c]))
+ // result: (XORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORL x (ADDLconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSMSKL x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSMSKL)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORLconst [1] (SETNE x))
+ // result: (SETEQ x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETEQ x))
+ // result: (SETNE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETL x))
+ // result: (SETGE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETGE x))
+ // result: (SETL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETLE x))
+ // result: (SETG x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETG x))
+ // result: (SETLE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETB x))
+ // result: (SETAE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETAE x))
+ // result: (SETB x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETBE x))
+ // result: (SETA x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETA x))
+ // result: (SETBE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (XORL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64XORL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTCQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
+ // result: (BTCQconst [int8(log64(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
+ continue
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x x)
+ // result: (MOVQconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x (ADDQconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSMSKQ x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSMSKQ)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (XORQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64XORQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LEAQ {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd32 ptr val mem)
+ // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64AddTupleFirst32)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd64 ptr val mem)
+ // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64AddTupleFirst64)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (ANDLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (ANDBlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDBlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (CMPXCHGLlock ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (CMPXCHGQlock ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange32 ptr val mem)
+ // result: (XCHGL val ptr mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64XCHGL)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange64 ptr val mem)
+ // result: (XCHGQ val ptr mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64XCHGQ)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (MOVLatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (MOVQatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (MOVBatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (MOVQatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr32 ptr val mem)
+ // result: (ORLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr8 ptr val mem)
+ // result: (ORBlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORBlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore32 ptr val mem)
+ // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore64 ptr val mem)
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore8 ptr val mem)
+ // result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen16 x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BitLen16 <t> x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
+ v0.AuxInt = int32ToAuxInt(-32)
+ v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v2.AddArg(x)
+ v1.AddArg2(v2, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BitLen32 <t> x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (NEGQ (ADDQconst <t> [-32] (LZCNTL x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
+ v0.AuxInt = int32ToAuxInt(-32)
+ v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 <t> x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
+ v1 := b.NewValue0(v.Pos, OpSelect0, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v3.AuxInt = int64ToAuxInt(-1)
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4.AddArg(v2)
+ v0.AddArg3(v1, v3, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BitLen64 <t> x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (NEGQ (ADDQconst <t> [-64] (LZCNTQ x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
+ v0.AuxInt = int32ToAuxInt(-64)
+ v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpBitLen8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen8 x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BitLen8 <t> x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVBQZX <x.Type> x))))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
+ v0.AuxInt = int32ToAuxInt(-32)
+ v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpBswap16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Bswap16 x)
+ // result: (ROLWconst [8] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(8)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil x)
+ // result: (ROUNDSD [2] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(2)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 1
+ // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 2
+ // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 4
+ // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ // result: (CMOVLNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ // result: (CMOVWNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (MOVLconst [b2i32(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(b2i32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstNil(v *Value) bool {
+ // match: (ConstNil )
+ // result: (MOVQconst [0])
+ for {
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSFL)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1 << 16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ctz16NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz16NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSFL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSFL)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz32 x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
+ v1.AuxInt = int8ToAuxInt(32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ctz32NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz32NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSFL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSFL)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTQ x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz64 <t> x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTQ x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz64NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (Select0 (BSFQ x))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (BSFL (ORLconst <typ.UInt32> [1<<8 ] x))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSFL)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1 << 8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ctz8NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz8NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSFL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSFL)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [a] x y)
+ // result: (Select0 (DIVW [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select0 (DIVWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 [a] x y)
+ // result: (Select0 (DIVL [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select0 (DIVLU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64 [a] x y)
+ // result: (Select0 (DIVQ [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64u x y)
+ // result: (Select0 (DIVQU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq16 x y)
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq8 x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqB x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (VFMADD231SD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpAMD64VFMADD231SD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor x)
+ // result: (ROUNDSD [1] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GetG mem)
+ // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
+ // result: (LoweredGetG mem)
+ for {
+ mem := v_0
+ if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
+ break
+ }
+ v.reset(OpAMD64LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (HasCPUFeature {s})
+ // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
+ for {
+ s := auxToSym(v.Aux)
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
+ v1.Aux = symToAux(s)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (SETB (CMPQ idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil p)
+ // result: (SETNE (TESTQ p p))
+ for {
+ p := v_0
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
+ v0.AddArg2(p, p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (SETBE (CMPQ idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 x y)
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U x y)
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (SETLE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U x y)
+ // result: (SETBE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 x y)
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U x y)
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 x y)
+ // result: (SETL (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U x y)
+ // result: (SETB (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (SETL (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (SETB (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (SETL (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U x y)
+ // result: (SETB (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 x y)
+ // result: (SETL (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U x y)
+ // result: (SETB (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVQload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (LEAQ {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (LEAQ {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpMax32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Max32F <t> x y)
+ // result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpNeg32F)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpMin32F, t)
+ v1 := b.NewValue0(v.Pos, OpNeg32F, t)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpNeg32F, t)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMax64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Max64F <t> x y)
+ // result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpNeg64F)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpMin64F, t)
+ v1 := b.NewValue0(v.Pos, OpNeg64F, t)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpNeg64F, t)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMin32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Min32F <t> x y)
+ // result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64POR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
+ v1.AddArg2(x, y)
+ v0.AddArg2(v1, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMin64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Min64F <t> x y)
+ // result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64POR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
+ v1.AddArg2(x, y)
+ v0.AddArg2(v1, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 [a] x y)
+ // result: (Select1 (DIVW [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select1 (DIVWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 [a] x y)
+ // result: (Select1 (DIVL [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select1 (DIVLU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 [a] x y)
+ // result: (Select1 (DIVQ [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // result: (Select1 (DIVQU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVQstore dst (MOVQload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVQstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // cond: config.useSSE
+ // result: (MOVOstore dst (MOVOload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // cond: !config.useSSE
+ // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [32] dst src mem)
+ // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [48] dst src mem)
+ // cond: config.useSSE
+ // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [64] dst src mem)
+ // cond: config.useSSE
+ // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(32)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [9] dst src mem)
+ // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [10] dst src mem)
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [11] dst src mem)
+ // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(7)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(7)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] dst src mem)
+ // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s >= 13 && s <= 15
+ // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s >= 13 && s <= 15) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 <= 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg2(src, mem)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v5.AddArg2(src, mem)
+ v4.AddArg3(dst, v5, mem)
+ v2.AddArg3(dst, v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpAMD64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
+ // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpAMD64REPMOVSQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32F x)
+ // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+ for {
+ x := v_0
+ v.reset(OpAMD64PXOR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
+ v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64F x)
+ // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+ for {
+ x := v_0
+ v.reset(OpAMD64PXOR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq16 x y)
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq8 x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqB x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORLconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDQconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (POPCNTL (MOVWQZX <typ.UInt32> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64POPCNTL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNTL (MOVBQZX <typ.UInt32> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64POPCNTL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven x)
+ // result: (ROUNDSD [0] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uover x y))
+ // result: (Select0 <typ.UInt64> (MULQU x y))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Mul32uover x y))
+ // result: (Select0 <typ.UInt32> (MULLU x y))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst32 val tuple))
+ // result: (ADDL val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpAMD64AddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpAMD64ADDL)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst64 val tuple))
+ // result: (ADDQ val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpAMD64AddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Mul32uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Add64carry x y c))
+ // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64NEGQ)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64NEGQ)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (NEGLflags (MOVQconst [0])))
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64NEGLflags {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGLflags {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64SBBQcarrymask {
+ break
+ }
+ x := v_0_0_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst32 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpAMD64AddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst64 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpAMD64AddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
+ // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
+ // result: (Move [sc.Val64()] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ sc := auxIntToValAndOff(s1.AuxInt)
+ _ = s1.Args[1]
+ s2 := s1.Args[1]
+ if s2.Op != OpAMD64MOVQstore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpAMD64MOVQstore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sc.Val64())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpAMD64MOVQconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SARQconst (NEGQ <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SpectreIndex <t> x y)
+ // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64CMOVQCC)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v1.AddArg2(x, y)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SpectreSliceIndex <t> x y)
+ // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64CMOVQHI)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v1.AddArg2(x, y)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (MOVSDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (MOVSSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVQstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVLstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc x)
+ // result: (ROUNDSD [3] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(3)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 != 0 && s > 8 && !config.useSSE
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 != 0 && s > 8 && !config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [24] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [9] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [10] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [11] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [12] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 12 && s < 16 && config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 12 && s < 16 && config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [48] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [64] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [s] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpAMD64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
+ // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
+ break
+ }
+ v.reset(OpAMD64REPSTOSQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s / 8)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(destptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockAMD64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockAMD64EQ:
+ // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (UGE (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (UGE (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (UGE (BTLconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTLconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ // match: (EQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (UGE (BTQconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ // match: (EQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (UGE (BTQconst [int8(log64(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTLconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (UGE (BTLconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
+ // result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ b.resetWithControl(BlockAMD64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
+ // result: (EQ (Select1 <types.TypeFlags> blsr) yes no)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ b.resetWithControl(BlockAMD64EQ, v0)
+ return true
+ }
+ break
+ }
+ case BlockAMD64GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETL {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETLE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETG {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETEQ {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETNE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETB {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETBE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETA {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETAE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (If (SETO cmp) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETO {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64OS, cmp)
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETEQF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQF, cmp)
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETNEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NEF, cmp)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(BlockAMD64NE, v0)
+ return true
+ }
+ case BlockJumpTable:
+ // match: (JumpTable idx)
+ // result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
+ for {
+ idx := b.Controls[0]
+ v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
+ v0.Aux = symToAux(makeJumpTableSym(b))
+ v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
+ b.Aux = symToAux(makeJumpTableSym(b))
+ return true
+ }
+ case BlockAMD64LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64NE:
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETL {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETLE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETG {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETEQ {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETNE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETB {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETBE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETA {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETAE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETO {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64OS, cmp)
+ return true
+ }
+ // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (ULT (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (ULT (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (ULT (BTLconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTLconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ // match: (NE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (ULT (BTQconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ // match: (NE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (ULT (BTQconst [int8(log64(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTLconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (ULT (BTLconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETEQF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64EQF, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETNEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64NEF, cmp)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
+ // result: (NE (Select1 <types.TypeFlags> blsr) yes no)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ b.resetWithControl(BlockAMD64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
+ // result: (NE (Select1 <types.TypeFlags> blsr) yes no)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ s := v_0_0
+ if s.Op != OpSelect0 {
+ continue
+ }
+ blsr := s.Args[0]
+ if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(blsr)
+ b.resetWithControl(BlockAMD64NE, v0)
+ return true
+ }
+ break
+ }
+ case BlockAMD64UGE:
+ // match: (UGE (TESTQ x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTL x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTW x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTW {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTB x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64ULT:
+ // match: (ULT (TESTQ x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTL x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTW x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTW {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTB x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go b/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go
new file mode 100644
index 0000000..d3dd263
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64latelower.go
@@ -0,0 +1,134 @@
+// Code generated from _gen/AMD64latelower.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "internal/buildcfg"
+
+func rewriteValueAMD64latelower(v *Value) bool {
+ switch v.Op {
+ case OpAMD64SARL:
+ return rewriteValueAMD64latelower_OpAMD64SARL(v)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64latelower_OpAMD64SARQ(v)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64latelower_OpAMD64SHLL(v)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64latelower_OpAMD64SHLQ(v)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64latelower_OpAMD64SHRL(v)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64latelower_OpAMD64SHRQ(v)
+ }
+ return false
+}
+func rewriteValueAMD64latelower_OpAMD64SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SARXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SARXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64latelower_OpAMD64SARQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SARXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SARXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64latelower_OpAMD64SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHLL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHLXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHLXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64latelower_OpAMD64SHLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHLQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHLXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHLXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64latelower_OpAMD64SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRL x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHRXL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHRXL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64latelower_OpAMD64SHRQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRQ x y)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (SHRXQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64SHRXQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteBlockAMD64latelower(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
new file mode 100644
index 0000000..0dcb1b4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
@@ -0,0 +1,850 @@
+// Code generated from _gen/AMD64splitload.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+func rewriteValueAMD64splitload(v *Value) bool {
+ switch v.Op {
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstload(v)
+ case OpAMD64CMPBconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBload(v)
+ case OpAMD64CMPBloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstload(v)
+ case OpAMD64CMPLconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v)
+ case OpAMD64CMPLconstloadidx4:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLload(v)
+ case OpAMD64CMPLloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v)
+ case OpAMD64CMPLloadidx4:
+ return rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstload(v)
+ case OpAMD64CMPQconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v)
+ case OpAMD64CMPQconstloadidx8:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQload(v)
+ case OpAMD64CMPQloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v)
+ case OpAMD64CMPQloadidx8:
+ return rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstload(v)
+ case OpAMD64CMPWconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v)
+ case OpAMD64CMPWconstloadidx2:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWload(v)
+ case OpAMD64CMPWloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v)
+ case OpAMD64CMPWloadidx2:
+ return rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v)
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTB x:(MOVBload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPB (MOVBloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPL (MOVLloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLloadidx4 {sym} [off] ptr idx x mem)
+ // result: (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQload {sym} [off] ptr x mem)
+ // result: (CMPQ (MOVQload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPQ (MOVQloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQloadidx8 {sym} [off] ptr idx x mem)
+ // result: (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPW (MOVWloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWloadidx2 {sym} [off] ptr idx x mem)
+ // result: (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteBlockAMD64splitload(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
new file mode 100644
index 0000000..971c9a5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -0,0 +1,21838 @@
+// Code generated from _gen/ARM.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "internal/buildcfg"
+import "cmd/compile/internal/types"
+
+func rewriteValueARM(v *Value) bool {
+ switch v.Op {
+ case OpARMADC:
+ return rewriteValueARM_OpARMADC(v)
+ case OpARMADCconst:
+ return rewriteValueARM_OpARMADCconst(v)
+ case OpARMADCshiftLL:
+ return rewriteValueARM_OpARMADCshiftLL(v)
+ case OpARMADCshiftLLreg:
+ return rewriteValueARM_OpARMADCshiftLLreg(v)
+ case OpARMADCshiftRA:
+ return rewriteValueARM_OpARMADCshiftRA(v)
+ case OpARMADCshiftRAreg:
+ return rewriteValueARM_OpARMADCshiftRAreg(v)
+ case OpARMADCshiftRL:
+ return rewriteValueARM_OpARMADCshiftRL(v)
+ case OpARMADCshiftRLreg:
+ return rewriteValueARM_OpARMADCshiftRLreg(v)
+ case OpARMADD:
+ return rewriteValueARM_OpARMADD(v)
+ case OpARMADDD:
+ return rewriteValueARM_OpARMADDD(v)
+ case OpARMADDF:
+ return rewriteValueARM_OpARMADDF(v)
+ case OpARMADDS:
+ return rewriteValueARM_OpARMADDS(v)
+ case OpARMADDSshiftLL:
+ return rewriteValueARM_OpARMADDSshiftLL(v)
+ case OpARMADDSshiftLLreg:
+ return rewriteValueARM_OpARMADDSshiftLLreg(v)
+ case OpARMADDSshiftRA:
+ return rewriteValueARM_OpARMADDSshiftRA(v)
+ case OpARMADDSshiftRAreg:
+ return rewriteValueARM_OpARMADDSshiftRAreg(v)
+ case OpARMADDSshiftRL:
+ return rewriteValueARM_OpARMADDSshiftRL(v)
+ case OpARMADDSshiftRLreg:
+ return rewriteValueARM_OpARMADDSshiftRLreg(v)
+ case OpARMADDconst:
+ return rewriteValueARM_OpARMADDconst(v)
+ case OpARMADDshiftLL:
+ return rewriteValueARM_OpARMADDshiftLL(v)
+ case OpARMADDshiftLLreg:
+ return rewriteValueARM_OpARMADDshiftLLreg(v)
+ case OpARMADDshiftRA:
+ return rewriteValueARM_OpARMADDshiftRA(v)
+ case OpARMADDshiftRAreg:
+ return rewriteValueARM_OpARMADDshiftRAreg(v)
+ case OpARMADDshiftRL:
+ return rewriteValueARM_OpARMADDshiftRL(v)
+ case OpARMADDshiftRLreg:
+ return rewriteValueARM_OpARMADDshiftRLreg(v)
+ case OpARMAND:
+ return rewriteValueARM_OpARMAND(v)
+ case OpARMANDconst:
+ return rewriteValueARM_OpARMANDconst(v)
+ case OpARMANDshiftLL:
+ return rewriteValueARM_OpARMANDshiftLL(v)
+ case OpARMANDshiftLLreg:
+ return rewriteValueARM_OpARMANDshiftLLreg(v)
+ case OpARMANDshiftRA:
+ return rewriteValueARM_OpARMANDshiftRA(v)
+ case OpARMANDshiftRAreg:
+ return rewriteValueARM_OpARMANDshiftRAreg(v)
+ case OpARMANDshiftRL:
+ return rewriteValueARM_OpARMANDshiftRL(v)
+ case OpARMANDshiftRLreg:
+ return rewriteValueARM_OpARMANDshiftRLreg(v)
+ case OpARMBFX:
+ return rewriteValueARM_OpARMBFX(v)
+ case OpARMBFXU:
+ return rewriteValueARM_OpARMBFXU(v)
+ case OpARMBIC:
+ return rewriteValueARM_OpARMBIC(v)
+ case OpARMBICconst:
+ return rewriteValueARM_OpARMBICconst(v)
+ case OpARMBICshiftLL:
+ return rewriteValueARM_OpARMBICshiftLL(v)
+ case OpARMBICshiftLLreg:
+ return rewriteValueARM_OpARMBICshiftLLreg(v)
+ case OpARMBICshiftRA:
+ return rewriteValueARM_OpARMBICshiftRA(v)
+ case OpARMBICshiftRAreg:
+ return rewriteValueARM_OpARMBICshiftRAreg(v)
+ case OpARMBICshiftRL:
+ return rewriteValueARM_OpARMBICshiftRL(v)
+ case OpARMBICshiftRLreg:
+ return rewriteValueARM_OpARMBICshiftRLreg(v)
+ case OpARMCMN:
+ return rewriteValueARM_OpARMCMN(v)
+ case OpARMCMNconst:
+ return rewriteValueARM_OpARMCMNconst(v)
+ case OpARMCMNshiftLL:
+ return rewriteValueARM_OpARMCMNshiftLL(v)
+ case OpARMCMNshiftLLreg:
+ return rewriteValueARM_OpARMCMNshiftLLreg(v)
+ case OpARMCMNshiftRA:
+ return rewriteValueARM_OpARMCMNshiftRA(v)
+ case OpARMCMNshiftRAreg:
+ return rewriteValueARM_OpARMCMNshiftRAreg(v)
+ case OpARMCMNshiftRL:
+ return rewriteValueARM_OpARMCMNshiftRL(v)
+ case OpARMCMNshiftRLreg:
+ return rewriteValueARM_OpARMCMNshiftRLreg(v)
+ case OpARMCMOVWHSconst:
+ return rewriteValueARM_OpARMCMOVWHSconst(v)
+ case OpARMCMOVWLSconst:
+ return rewriteValueARM_OpARMCMOVWLSconst(v)
+ case OpARMCMP:
+ return rewriteValueARM_OpARMCMP(v)
+ case OpARMCMPD:
+ return rewriteValueARM_OpARMCMPD(v)
+ case OpARMCMPF:
+ return rewriteValueARM_OpARMCMPF(v)
+ case OpARMCMPconst:
+ return rewriteValueARM_OpARMCMPconst(v)
+ case OpARMCMPshiftLL:
+ return rewriteValueARM_OpARMCMPshiftLL(v)
+ case OpARMCMPshiftLLreg:
+ return rewriteValueARM_OpARMCMPshiftLLreg(v)
+ case OpARMCMPshiftRA:
+ return rewriteValueARM_OpARMCMPshiftRA(v)
+ case OpARMCMPshiftRAreg:
+ return rewriteValueARM_OpARMCMPshiftRAreg(v)
+ case OpARMCMPshiftRL:
+ return rewriteValueARM_OpARMCMPshiftRL(v)
+ case OpARMCMPshiftRLreg:
+ return rewriteValueARM_OpARMCMPshiftRLreg(v)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v)
+ case OpARMMOVBUload:
+ return rewriteValueARM_OpARMMOVBUload(v)
+ case OpARMMOVBUloadidx:
+ return rewriteValueARM_OpARMMOVBUloadidx(v)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v)
+ case OpARMMOVBload:
+ return rewriteValueARM_OpARMMOVBload(v)
+ case OpARMMOVBloadidx:
+ return rewriteValueARM_OpARMMOVBloadidx(v)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v)
+ case OpARMMOVBstore:
+ return rewriteValueARM_OpARMMOVBstore(v)
+ case OpARMMOVBstoreidx:
+ return rewriteValueARM_OpARMMOVBstoreidx(v)
+ case OpARMMOVDload:
+ return rewriteValueARM_OpARMMOVDload(v)
+ case OpARMMOVDstore:
+ return rewriteValueARM_OpARMMOVDstore(v)
+ case OpARMMOVFload:
+ return rewriteValueARM_OpARMMOVFload(v)
+ case OpARMMOVFstore:
+ return rewriteValueARM_OpARMMOVFstore(v)
+ case OpARMMOVHUload:
+ return rewriteValueARM_OpARMMOVHUload(v)
+ case OpARMMOVHUloadidx:
+ return rewriteValueARM_OpARMMOVHUloadidx(v)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v)
+ case OpARMMOVHload:
+ return rewriteValueARM_OpARMMOVHload(v)
+ case OpARMMOVHloadidx:
+ return rewriteValueARM_OpARMMOVHloadidx(v)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v)
+ case OpARMMOVHstore:
+ return rewriteValueARM_OpARMMOVHstore(v)
+ case OpARMMOVHstoreidx:
+ return rewriteValueARM_OpARMMOVHstoreidx(v)
+ case OpARMMOVWload:
+ return rewriteValueARM_OpARMMOVWload(v)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v)
+ case OpARMMOVWnop:
+ return rewriteValueARM_OpARMMOVWnop(v)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v)
+ case OpARMMOVWstore:
+ return rewriteValueARM_OpARMMOVWstore(v)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v)
+ case OpARMMULD:
+ return rewriteValueARM_OpARMMULD(v)
+ case OpARMMULF:
+ return rewriteValueARM_OpARMMULF(v)
+ case OpARMMULS:
+ return rewriteValueARM_OpARMMULS(v)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v)
+ case OpARMNEGD:
+ return rewriteValueARM_OpARMNEGD(v)
+ case OpARMNEGF:
+ return rewriteValueARM_OpARMNEGF(v)
+ case OpARMNMULD:
+ return rewriteValueARM_OpARMNMULD(v)
+ case OpARMNMULF:
+ return rewriteValueARM_OpARMNMULF(v)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v)
+ case OpARMSRR:
+ return rewriteValueARM_OpARMSRR(v)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v)
+ case OpARMSUBD:
+ return rewriteValueARM_OpARMSUBD(v)
+ case OpARMSUBF:
+ return rewriteValueARM_OpARMSUBF(v)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v)
+ case OpARMTEQ:
+ return rewriteValueARM_OpARMTEQ(v)
+ case OpARMTEQconst:
+ return rewriteValueARM_OpARMTEQconst(v)
+ case OpARMTEQshiftLL:
+ return rewriteValueARM_OpARMTEQshiftLL(v)
+ case OpARMTEQshiftLLreg:
+ return rewriteValueARM_OpARMTEQshiftLLreg(v)
+ case OpARMTEQshiftRA:
+ return rewriteValueARM_OpARMTEQshiftRA(v)
+ case OpARMTEQshiftRAreg:
+ return rewriteValueARM_OpARMTEQshiftRAreg(v)
+ case OpARMTEQshiftRL:
+ return rewriteValueARM_OpARMTEQshiftRL(v)
+ case OpARMTEQshiftRLreg:
+ return rewriteValueARM_OpARMTEQshiftRLreg(v)
+ case OpARMTST:
+ return rewriteValueARM_OpARMTST(v)
+ case OpARMTSTconst:
+ return rewriteValueARM_OpARMTSTconst(v)
+ case OpARMTSTshiftLL:
+ return rewriteValueARM_OpARMTSTshiftLL(v)
+ case OpARMTSTshiftLLreg:
+ return rewriteValueARM_OpARMTSTshiftLLreg(v)
+ case OpARMTSTshiftRA:
+ return rewriteValueARM_OpARMTSTshiftRA(v)
+ case OpARMTSTshiftRAreg:
+ return rewriteValueARM_OpARMTSTshiftRAreg(v)
+ case OpARMTSTshiftRL:
+ return rewriteValueARM_OpARMTSTshiftRL(v)
+ case OpARMTSTshiftRLreg:
+ return rewriteValueARM_OpARMTSTshiftRLreg(v)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v)
+ case OpARMXORshiftRR:
+ return rewriteValueARM_OpARMXORshiftRR(v)
+ case OpAbs:
+ v.Op = OpARMABSD
+ return true
+ case OpAdd16:
+ v.Op = OpARMADD
+ return true
+ case OpAdd32:
+ v.Op = OpARMADD
+ return true
+ case OpAdd32F:
+ v.Op = OpARMADDF
+ return true
+ case OpAdd32carry:
+ v.Op = OpARMADDS
+ return true
+ case OpAdd32withcarry:
+ v.Op = OpARMADC
+ return true
+ case OpAdd64F:
+ v.Op = OpARMADDD
+ return true
+ case OpAdd8:
+ v.Op = OpARMADD
+ return true
+ case OpAddPtr:
+ v.Op = OpARMADD
+ return true
+ case OpAddr:
+ return rewriteValueARM_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpARMAND
+ return true
+ case OpAnd32:
+ v.Op = OpARMAND
+ return true
+ case OpAnd8:
+ v.Op = OpARMAND
+ return true
+ case OpAndB:
+ v.Op = OpARMAND
+ return true
+ case OpAvg32u:
+ return rewriteValueARM_OpAvg32u(v)
+ case OpBitLen32:
+ return rewriteValueARM_OpBitLen32(v)
+ case OpBswap32:
+ return rewriteValueARM_OpBswap32(v)
+ case OpClosureCall:
+ v.Op = OpARMCALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpARMMVN
+ return true
+ case OpCom32:
+ v.Op = OpARMMVN
+ return true
+ case OpCom8:
+ v.Op = OpARMMVN
+ return true
+ case OpConst16:
+ return rewriteValueARM_OpConst16(v)
+ case OpConst32:
+ return rewriteValueARM_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueARM_OpConst32F(v)
+ case OpConst64F:
+ return rewriteValueARM_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueARM_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueARM_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueARM_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueARM_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz32:
+ return rewriteValueARM_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz8:
+ return rewriteValueARM_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpARMMOVFW
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpARMMOVFWU
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpARMMOVFD
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpARMMOVWUF
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpARMMOVWUD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpARMMOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpARMMOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpARMMOVDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpARMMOVDF
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpARMMOVDWU
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueARM_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueARM_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueARM_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpARMDIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueARM_OpDiv32u(v)
+ case OpDiv64F:
+ v.Op = OpARMDIVD
+ return true
+ case OpDiv8:
+ return rewriteValueARM_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueARM_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueARM_OpEq16(v)
+ case OpEq32:
+ return rewriteValueARM_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueARM_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValueARM_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueARM_OpEq8(v)
+ case OpEqB:
+ return rewriteValueARM_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueARM_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueARM_OpFMA(v)
+ case OpGetCallerPC:
+ v.Op = OpARMLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpARMLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpARMLoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ v.Op = OpARMHMUL
+ return true
+ case OpHmul32u:
+ v.Op = OpARMHMULU
+ return true
+ case OpInterCall:
+ v.Op = OpARMCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueARM_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueARM_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueARM_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueARM_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueARM_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueARM_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueARM_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueARM_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValueARM_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValueARM_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueARM_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueARM_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueARM_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueARM_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueARM_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueARM_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValueARM_OpLess64F(v)
+ case OpLess8:
+ return rewriteValueARM_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueARM_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueARM_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueARM_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueARM_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueARM_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueARM_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueARM_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueARM_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueARM_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueARM_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueARM_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValueARM_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueARM_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueARM_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueARM_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueARM_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueARM_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueARM_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueARM_OpMod32u(v)
+ case OpMod8:
+ return rewriteValueARM_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueARM_OpMod8u(v)
+ case OpMove:
+ return rewriteValueARM_OpMove(v)
+ case OpMul16:
+ v.Op = OpARMMUL
+ return true
+ case OpMul32:
+ v.Op = OpARMMUL
+ return true
+ case OpMul32F:
+ v.Op = OpARMMULF
+ return true
+ case OpMul32uhilo:
+ v.Op = OpARMMULLU
+ return true
+ case OpMul64F:
+ v.Op = OpARMMULD
+ return true
+ case OpMul8:
+ v.Op = OpARMMUL
+ return true
+ case OpNeg16:
+ return rewriteValueARM_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValueARM_OpNeg32(v)
+ case OpNeg32F:
+ v.Op = OpARMNEGF
+ return true
+ case OpNeg64F:
+ v.Op = OpARMNEGD
+ return true
+ case OpNeg8:
+ return rewriteValueARM_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValueARM_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueARM_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueARM_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValueARM_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueARM_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpARMXOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueARM_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpARMLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueARM_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueARM_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpARMOR
+ return true
+ case OpOr32:
+ v.Op = OpARMOR
+ return true
+ case OpOr8:
+ v.Op = OpARMOR
+ return true
+ case OpOrB:
+ v.Op = OpARMOR
+ return true
+ case OpPanicBounds:
+ return rewriteValueARM_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValueARM_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValueARM_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueARM_OpRotateLeft32(v)
+ case OpRotateLeft8:
+ return rewriteValueARM_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueARM_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueARM_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueARM_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueARM_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueARM_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueARM_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueARM_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueARM_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueARM_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueARM_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueARM_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueARM_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueARM_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueARM_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueARM_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueARM_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueARM_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueARM_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueARM_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueARM_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueARM_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueARM_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueARM_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueARM_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueARM_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueARM_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpARMMOVHreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpARMMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpARMMOVBreg
+ return true
+ case OpSignmask:
+ return rewriteValueARM_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValueARM_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpARMSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpARMSQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpARMCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueARM_OpStore(v)
+ case OpSub16:
+ v.Op = OpARMSUB
+ return true
+ case OpSub32:
+ v.Op = OpARMSUB
+ return true
+ case OpSub32F:
+ v.Op = OpARMSUBF
+ return true
+ case OpSub32carry:
+ v.Op = OpARMSUBS
+ return true
+ case OpSub32withcarry:
+ v.Op = OpARMSBC
+ return true
+ case OpSub64F:
+ v.Op = OpARMSUBD
+ return true
+ case OpSub8:
+ v.Op = OpARMSUB
+ return true
+ case OpSubPtr:
+ v.Op = OpARMSUB
+ return true
+ case OpTailCall:
+ v.Op = OpARMCALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpARMLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpARMXOR
+ return true
+ case OpXor32:
+ v.Op = OpARMXOR
+ return true
+ case OpXor8:
+ v.Op = OpARMXOR
+ return true
+ case OpZero:
+ return rewriteValueARM_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpARMMOVHUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpARMMOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpARMMOVBUreg
+ return true
+ case OpZeromask:
+ return rewriteValueARM_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValueARM_OpARMADC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADC (MOVWconst [c]) x flags)
+ // result: (ADCconst [c] x flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SLLconst [c] y) flags)
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRLconst [c] y) flags)
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRAconst [c] y) flags)
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SLL y z) flags)
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRL y z) flags)
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRA y z) flags)
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCconst [c] (ADDconst [d] x) flags)
+ // result: (ADCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (ADCconst [c] (SUBconst [d] x) flags)
+ // result: (ADCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADD x (MOVWconst <t> [c]))
+ // cond: !t.IsPtr()
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SLLconst [c] y))
+ // result: (ADDshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRLconst [c] y))
+ // result: (ADDshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRAconst [c] y))
+ // result: (ADDshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SLL y z))
+ // result: (ADDshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRL y z))
+ // result: (ADDshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRA y z))
+ // result: (ADDshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (RSBconst [0] y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD <t> (RSBconst [c] x) (RSBconst [d] y))
+ // result: (RSBconst [c+d] (ADD <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMRSBconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARMRSBconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v0 := b.NewValue0(v.Pos, OpARMADD, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (MUL x y) a)
+ // result: (MULA x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMUL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARMMULA)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDD a (MULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULAD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ continue
+ }
+ v.reset(OpARMMULAD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDD a (NMULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULSD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMNMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ continue
+ }
+ v.reset(OpARMMULSD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDF a (MULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULAF a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMMULF {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ continue
+ }
+ v.reset(OpARMMULAF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDF a (NMULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULSF a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMNMULF {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ continue
+ }
+ v.reset(OpARMMULSF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDS x (MOVWconst [c]))
+ // result: (ADDSconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SLLconst [c] y))
+ // result: (ADDSshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRLconst [c] y))
+ // result: (ADDSshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRAconst [c] y))
+ // result: (ADDSshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SLL y z))
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRL y z))
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRA y z))
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftLL (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLL x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftLLreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRA (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRA x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRAreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRL (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRL x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRLreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))
+ // result: (SUBconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) {
+ break
+ }
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] x)
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // result: (SUBconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (RSBconst [d] x))
+ // result: (RSBconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDshiftLL (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVWconst [c]) [d])
+ // result: (ADDconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: buildcfg.GOARM.Version>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftLLreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRA (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVWconst [c]) [d])
+ // result: (ADDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRAreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRL (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVWconst [c]) [d])
+ // result: (ADDconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRLreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVWconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SLLconst [c] y))
+ // result: (ANDshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRLconst [c] y))
+ // result: (ANDshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRAconst [c] y))
+ // result: (ANDshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SLL y z))
+ // result: (ANDshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRL y z))
+ // result: (ANDshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRA y z))
+ // result: (ANDshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // result: (BIC x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMBIC)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftLL y [c]))
+ // result: (BICshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftLL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftRL y [c]))
+ // result: (BICshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftRL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftRA y [c]))
+ // result: (BICshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftRA {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVWconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))
+ // result: (BICconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) {
+ break
+ }
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // result: (BICconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLL (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVWconst [c]) [d])
+ // result: (ANDconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLLreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRA (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVWconst [c]) [d])
+ // result: (ANDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRAreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRL (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVWconst [c]) [d])
+ // result: (ANDconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRLreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BFX [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBFXU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BFXU [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBIC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BIC x (MOVWconst [c]))
+ // result: (BICconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x (SLLconst [c] y))
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SRLconst [c] y))
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SRAconst [c] y))
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SLL y z))
+ // result: (BICshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x (SRL y z))
+ // result: (BICshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x (SRA y z))
+ // result: (BICshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BICconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (BICconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (BICconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))
+ // result: (ANDconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] x)
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // result: (ANDconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d&^c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d &^ c)
+ return true
+ }
+ // match: (BICconst [c] (BICconst [d] x))
+ // result: (BICconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMBICconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLL x (MOVWconst [c]) [d])
+ // result: (BICconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRA x (MOVWconst [c]) [d])
+ // result: (BICconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRL x (MOVWconst [c]) [d])
+ // result: (BICconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMN x (MOVWconst [c]))
+ // result: (CMNconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SLLconst [c] y))
+ // result: (CMNshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRLconst [c] y))
+ // result: (CMNshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRAconst [c] y))
+ // result: (CMNshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SLL y z))
+ // result: (CMNshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRL y z))
+ // result: (CMNshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRA y z))
+ // result: (CMNshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [addFlags32(x,y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags32(x, y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLL (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVWconst [c]) [d])
+ // result: (CMNconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLLreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRA (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVWconst [c]) [d])
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRAreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRL (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVWconst [c]) [d])
+ // result: (CMNconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRLreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWHSconst _ (FlagConstant [fc]) [c])
+ // cond: fc.uge()
+ // result: (MOVWconst [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagConstant [fc]) [c])
+ // cond: fc.ult()
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWHSconst x (InvertFlags flags) [c])
+ // result: (CMOVWLSconst x flags [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLSconst _ (FlagConstant [fc]) [c])
+ // cond: fc.ule()
+ // result: (MOVWconst [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagConstant [fc]) [c])
+ // cond: fc.ugt()
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLSconst x (InvertFlags flags) [c])
+ // result: (CMOVWHSconst x flags [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVWconst [c]))
+ // result: (CMPconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVWconst [c]) x)
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLLconst [c] y))
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SLLconst [c] y) x)
+ // result: (InvertFlags (CMPshiftLL x y [c]))
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRLconst [c] y))
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SRLconst [c] y) x)
+ // result: (InvertFlags (CMPshiftRL x y [c]))
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRAconst [c] y))
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SRAconst [c] y) x)
+ // result: (InvertFlags (CMPshiftRA x y [c]))
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLL y z))
+ // result: (CMPshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SLL y z) x)
+ // result: (InvertFlags (CMPshiftLLreg x y z))
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRL y z))
+ // result: (CMPshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SRL y z) x)
+ // result: (InvertFlags (CMPshiftRLreg x y z))
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRA y z))
+ // result: (CMPshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SRA y z) x)
+ // result: (InvertFlags (CMPshiftRAreg x y z))
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPD x (MOVDconst [0]))
+ // result: (CMPD0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPF x (MOVFconst [0]))
+ // result: (CMPF0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVFconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMCMPF0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [subFlags32(x,y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(x, y))
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLL (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVWconst [c]) [d])
+ // result: (CMPconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLLreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRA (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVWconst [c]) [d])
+ // result: (CMPconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRAreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRL (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVWconst [c]) [d])
+ // result: (CMPconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRLreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Equal (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.eq())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.eq()))
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ge())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ge()))
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.uge())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.uge()))
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // result: (LessEqualU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThan (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.gt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.gt()))
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ugt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ugt()))
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // result: (LessThanU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.le())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.le()))
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ule())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ule()))
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // result: (GreaterEqualU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThan (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.lt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.lt()))
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ult())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ult()))
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // result: (GreaterThanU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVBUload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVBUloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVBstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVBUload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVBUload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint8(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVBload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVBloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVBstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVBload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVBload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int8(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVBstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVDload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVDstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVDstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVFload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVFstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVFstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVHUload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVHUloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVHstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVHUload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVHUload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint16(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVHload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVHstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int16(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVHstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVWload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftLL {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftRA {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [c>>uint64(d)] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftRL {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWnop (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARMMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVWstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [c>>uint64(d)] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MUL x (MOVWconst [c]))
+ // cond: int32(c) == -1
+ // result: (RSBconst [0] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL _ (MOVWconst [0]))
+ // result: (MOVWconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (SLLconst [int32(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADDshiftLL x x [int32(log32(c-1))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSBshiftLL x x [int32(log32(c+1))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SLLconst [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SLLconst [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (SLLconst [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SLLconst [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // result: (MOVWconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c == -1
+ // result: (SUB a x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULA _ (MOVWconst [0]) a)
+ // result: a
+ for {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [1]) a)
+ // result: (ADD x a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_2
+ v.reset(OpARMADD)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c == -1
+ // result: (SUB a x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULA (MOVWconst [0]) _ a)
+ // result: a
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [1]) x a)
+ // result: (ADD x a)
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ a := v_2
+ v.reset(OpARMADD)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+ // result: (ADDconst [c*d] a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULD (NEGD x) y)
+ // cond: buildcfg.GOARM.Version >= 6
+ // result: (NMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ if !(buildcfg.GOARM.Version >= 6) {
+ continue
+ }
+ v.reset(OpARMNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULF (NEGF x) y)
+ // cond: buildcfg.GOARM.Version >= 6
+ // result: (NMULF x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGF {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ if !(buildcfg.GOARM.Version >= 6) {
+ continue
+ }
+ v.reset(OpARMNMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c == -1
+ // result: (ADD a x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULS _ (MOVWconst [0]) a)
+ // result: a
+ for {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [1]) a)
+ // result: (RSB x a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_2
+ v.reset(OpARMRSB)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c == -1
+ // result: (ADD a x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULS (MOVWconst [0]) _ a)
+ // result: a
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULS (MOVWconst [1]) x a)
+ // result: (RSB x a)
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ a := v_2
+ v.reset(OpARMRSB)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a)
+ // result: (SUBconst [c*d] a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVN(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVN (MOVWconst [c]))
+ // result: (MOVWconst [^c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ // match: (MVN (SLLconst [c] x))
+ // result: (MVNshiftLL x [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRLconst [c] x))
+ // result: (MVNshiftRL x [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRAconst [c] x))
+ // result: (MVNshiftRA x [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SLL x y))
+ // result: (MVNshiftLLreg x y)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLLreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (SRL x y))
+ // result: (MVNshiftRLreg x y)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRLreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (SRA x y))
+ // result: (MVNshiftRAreg x y)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRAreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftLL (MOVWconst [c]) [d])
+ // result: (MOVWconst [^(c<<uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(c << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftLLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftLL x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRA (MOVWconst [c]) [d])
+ // result: (MOVWconst [int32(c)>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(c) >> uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftRAreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftRA x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRL (MOVWconst [c]) [d])
+ // result: (MOVWconst [^int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^int32(uint32(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftRLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftRL x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNEGD(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGD (MULD x y))
+ // cond: buildcfg.GOARM.Version >= 6
+ // result: (NMULD x y)
+ for {
+ if v_0.Op != OpARMMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNEGF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGF (MULF x y))
+ // cond: buildcfg.GOARM.Version >= 6
+ // result: (NMULF x y)
+ for {
+ if v_0.Op != OpARMMULF {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMNMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NMULD (NEGD x) y)
+ // result: (MULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARMMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMNMULF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NMULF (NEGF x) y)
+ // result: (MULF x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGF {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARMMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMNotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NotEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ne())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ne()))
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMNotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVWconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SLLconst [c] y))
+ // result: (ORshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRLconst [c] y))
+ // result: (ORshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRAconst [c] y))
+ // result: (ORshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SLL y z))
+ // result: (ORshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRL y z))
+ // result: (ORshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRA y z))
+ // result: (ORshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORshiftLL (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVWconst [c]) [d])
+ // result: (ORconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: buildcfg.GOARM.Version>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftLLreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRA (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVWconst [c]) [d])
+ // result: (ORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRAreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRL (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVWconst [c]) [d])
+ // result: (ORconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRLreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RSB (MOVWconst [c]) x)
+ // result: (SUBconst [c] x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (MOVWconst [c]))
+ // result: (RSBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (SLLconst [c] y))
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SLLconst [c] y) x)
+ // result: (SUBshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SRLconst [c] y))
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SRLconst [c] y) x)
+ // result: (SUBshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SRAconst [c] y))
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SRAconst [c] y) x)
+ // result: (SUBshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SLL y z))
+ // result: (RSBshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SLL y z) x)
+ // result: (SUBshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x (SRL y z))
+ // result: (RSBshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SRL y z) x)
+ // result: (SUBshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x (SRA y z))
+ // result: (RSBshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SRA y z) x)
+ // result: (SUBshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (RSB (MUL x y) a)
+ // cond: buildcfg.GOARM.Version == 7
+ // result: (MULS x y a)
+ for {
+ if v_0.Op != OpARMMUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(buildcfg.GOARM.Version == 7) {
+ break
+ }
+ v.reset(OpARMMULS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RSBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c-d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ return true
+ }
+ // match: (RSBconst [c] (RSBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (ADDconst [d] x))
+ // result: (RSBconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (SUBconst [d] x))
+ // result: (RSBconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftLL (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLL x (MOVWconst [c]) [d])
+ // result: (RSBconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRA (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRA x (MOVWconst [c]) [d])
+ // result: (RSBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRL (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRL x (MOVWconst [c]) [d])
+ // result: (RSBconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RSCconst [c] (ADDconst [d] x) flags)
+ // result: (RSCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (RSCconst [c] (SUBconst [d] x) flags)
+ // result: (RSCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBC (MOVWconst [c]) x flags)
+ // result: (RSCconst [c] x flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBC x (MOVWconst [c]) flags)
+ // result: (SBCconst [c] x flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBC x (SLLconst [c] y) flags)
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SLLconst [c] y) x flags)
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SRLconst [c] y) flags)
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SRLconst [c] y) x flags)
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SRAconst [c] y) flags)
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SRAconst [c] y) x flags)
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SLL y z) flags)
+ // result: (SBCshiftLLreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SLL y z) x flags)
+ // result: (RSCshiftLLreg x y z flags)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC x (SRL y z) flags)
+ // result: (SBCshiftRLreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SRL y z) x flags)
+ // result: (RSCshiftRLreg x y z flags)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC x (SRA y z) flags)
+ // result: (SBCshiftRAreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SRA y z) x flags)
+ // result: (RSCshiftRAreg x y z flags)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBCconst [c] (ADDconst [d] x) flags)
+ // result: (SBCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBCconst [c] (SUBconst [d] x) flags)
+ // result: (SBCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAcond(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAcond x _ (FlagConstant [fc]))
+ // cond: fc.uge()
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_2.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_2.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagConstant [fc]))
+ // cond: fc.ult()
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_2.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ // match: (SRAconst (SLLconst x [c]) [d])
+ // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // result: (BFX [(d-c)|(32-d)<<8] x)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ break
+ }
+ v.reset(OpARMBFX)
+ v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)>>uint64(c))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint64(c)))
+ return true
+ }
+ // match: (SRLconst (SLLconst x [c]) [d])
+ // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // result: (BFXU [(d-c)|(32-d)<<8] x)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ break
+ }
+ v.reset(OpARMBFXU)
+ v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRR x (MOVWconst [c]))
+ // result: (SRRconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB (MOVWconst [c]) x)
+ // result: (RSBconst [c] x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (SLLconst [c] y))
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SLLconst [c] y) x)
+ // result: (RSBshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SRLconst [c] y) x)
+ // result: (RSBshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SRAconst [c] y) x)
+ // result: (RSBshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SLL y z))
+ // result: (SUBshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SLL y z) x)
+ // result: (RSBshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x (SRL y z))
+ // result: (SUBshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SRL y z) x)
+ // result: (RSBshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x (SRA y z))
+ // result: (SUBshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SRA y z) x)
+ // result: (RSBshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUB a (MUL x y))
+ // cond: buildcfg.GOARM.Version == 7
+ // result: (MULS x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMUL {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(buildcfg.GOARM.Version == 7) {
+ break
+ }
+ v.reset(OpARMMULS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBD a (MULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULSD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMMULSD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUBD a (NMULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULAD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMNMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMMULAD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBF a (MULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULSF a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMULF {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMMULSF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUBF a (NMULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
+ // result: (MULAF a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMNMULF {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMMULAF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBS x (MOVWconst [c]))
+ // result: (SUBSconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (SLLconst [c] y))
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SLLconst [c] y) x)
+ // result: (RSBSshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SRLconst [c] y))
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SRLconst [c] y) x)
+ // result: (RSBSshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SRAconst [c] y))
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SRAconst [c] y) x)
+ // result: (RSBSshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SLL y z))
+ // result: (SUBSshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SLL y z) x)
+ // result: (RSBSshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS x (SRL y z))
+ // result: (SUBSshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SRL y z) x)
+ // result: (RSBSshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS x (SRA y z))
+ // result: (SUBSshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SRA y z) x)
+ // result: (RSBSshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftLL (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLL x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRA (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRL (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off2-off1] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off2 - off1)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) {
+ break
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d-c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (RSBconst [d] x))
+ // result: (RSBconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftLL (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLL x (MOVWconst [c]) [d])
+ // result: (SUBconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRA (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRA x (MOVWconst [c]) [d])
+ // result: (SUBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRL (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRL x (MOVWconst [c]) [d])
+ // result: (SUBconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TEQ x (MOVWconst [c]))
+ // result: (TEQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SLLconst [c] y))
+ // result: (TEQshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRLconst [c] y))
+ // result: (TEQshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRAconst [c] y))
+ // result: (TEQshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SLL y z))
+ // result: (TEQshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRL y z))
+ // result: (TEQshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRA y z))
+ // result: (TEQshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TEQconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(x^y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(x ^ y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftLL (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftLL x (MOVWconst [c]) [d])
+ // result: (TEQconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftLLreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRA (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRA x (MOVWconst [c]) [d])
+ // result: (TEQconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRAreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRL (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRL x (MOVWconst [c]) [d])
+ // result: (TEQconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRLreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTST(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TST x (MOVWconst [c]))
+ // result: (TSTconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SLLconst [c] y))
+ // result: (TSTshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRLconst [c] y))
+ // result: (TSTshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRAconst [c] y))
+ // result: (TSTshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SLL y z))
+ // result: (TSTshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRL y z))
+ // result: (TSTshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRA y z))
+ // result: (TSTshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(x&y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(x & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLL (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVWconst [c]) [d])
+ // result: (TSTconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLLreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRA (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVWconst [c]) [d])
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRAreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRL (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVWconst [c]) [d])
+ // result: (TSTconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRLreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVWconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SLLconst [c] y))
+ // result: (XORshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // result: (XORshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // result: (XORshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRRconst [c] y))
+ // result: (XORshiftRR x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRRconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRR)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SLL y z))
+ // result: (XORshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRL y z))
+ // result: (XORshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRA y z))
+ // result: (XORshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMXORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORshiftLL (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVWconst [c]) [d])
+ // result: (XORconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: buildcfg.GOARM.Version>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftLLreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRA (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVWconst [c]) [d])
+ // result: (XORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRAreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRL (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVWconst [c]) [d])
+ // result: (XORconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRLreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRR (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRRconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRR x (MOVWconst [c]) [d])
+ // result: (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c)>>uint64(d) | uint32(c)<<uint64(32-d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAvg32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg32u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARMSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueARM_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (BitLen32 <t> x)
+ // result: (RSBconst [32] (CLZ <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpBswap32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Bswap32 <t> x)
+ // cond: buildcfg.GOARM.Version==5
+ // result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8]))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version == 5) {
+ break
+ }
+ v.reset(OpARMXOR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(8)
+ v1 := b.NewValue0(v.Pos, OpARMBICconst, t)
+ v1.AuxInt = int32ToAuxInt(0xff0000)
+ v2 := b.NewValue0(v.Pos, OpARMXOR, t)
+ v3 := b.NewValue0(v.Pos, OpARMSRRconst, t)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(x)
+ v2.AddArg2(x, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpARMSRRconst, t)
+ v4.AuxInt = int32ToAuxInt(8)
+ v4.AddArg(x)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Bswap32 x)
+ // cond: buildcfg.GOARM.Version>=6
+ // result: (REV x)
+ for {
+ x := v_0
+ if !(buildcfg.GOARM.Version >= 6) {
+ break
+ }
+ v.reset(OpARMREV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVWconst [b2i32(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(t))
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueARM_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 <t> x)
+ // cond: buildcfg.GOARM.Version<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0x10000)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(0)
+ v4.AddArg(v3)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz16 <t> x)
+ // cond: buildcfg.GOARM.Version==7
+ // result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0x10000)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz32 <t> x)
+ // cond: buildcfg.GOARM.Version<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, t)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, t)
+ v3 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg(x)
+ v2.AddArg2(x, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz32 <t> x)
+ // cond: buildcfg.GOARM.Version==7
+ // result: (CLZ <t> (RBIT <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 <t> x)
+ // cond: buildcfg.GOARM.Version<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0x100)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(0)
+ v4.AddArg(v3)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz8 <t> x)
+ // cond: buildcfg.GOARM.Version==7
+ // result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM.Version == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0x100)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Div32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (SUB (XOR <typ.UInt32> (Select0 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSUB)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v5.AddArg(x)
+ v4.AddArg2(x, v5)
+ v3.AddArg2(v4, v5)
+ v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v8.AddArg(y)
+ v7.AddArg2(y, v8)
+ v6.AddArg2(v7, v8)
+ v2.AddArg2(v3, v6)
+ v1.AddArg(v2)
+ v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v10 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v10.AddArg2(x, y)
+ v9.AddArg(v10)
+ v0.AddArg2(v1, v9)
+ v.AddArg2(v0, v9)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select0 <typ.UInt32> (CALLudiv x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Div32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (CMPF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (CMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMULAD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpARMFMULAD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && t.IsSigned())
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !t.IsSigned())
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && t.IsSigned())
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !t.IsSigned())
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVWaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVWaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (SUB (XOR <typ.UInt32> (Select1 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSUB)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v5.AddArg(x)
+ v4.AddArg2(x, v5)
+ v3.AddArg2(v4, v5)
+ v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v8.AddArg(y)
+ v7.AddArg2(y, v8)
+ v6.AddArg2(v7, v8)
+ v2.AddArg2(v3, v6)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v5)
+ v.AddArg2(v0, v5)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select1 <typ.UInt32> (CALLudiv x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [8 * (128 - s/4)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARMDUFFCOPY)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/4))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)
+ // result: (LoweredMove [t.Alignment()] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARMLoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg16 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg32 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg8 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (CMPF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (CMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVWaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVWconst [c]))
+ // result: (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x y)
+ // result: (SRR x (RSBconst [0] <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRR)
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVWconst [c]))
+ // result: (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(256)
+ v3.AddArg(v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg3(v0, y, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(v0)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // result: (SRAcond x y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(256)
+ v0.AddArg(y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(256)
+ v3.AddArg(v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg3(v0, y, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 (CALLudiv x (MOVWconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select0 (CALLudiv x (MOVWconst [c])))
+ // cond: isPowerOfTwo32(c)
+ // result: (SRLconst [int32(log32(c))] x)
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select1 (CALLudiv _ (MOVWconst [1])))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Select1 (CALLudiv x (MOVWconst [c])))
+ // cond: isPowerOfTwo32(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (RSBconst <t> [0] x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARMDUFFZERO)
+ v.AuxInt = int64ToAuxInt(4 * (128 - s/4))
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg4(ptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zeromask x)
+ // result: (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31])
+ for {
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockARM(b *Block) bool {
+ switch b.Kind {
+ case BlockARMEQ:
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: fc.eq()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: !fc.eq()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cmp)
+ return true
+ }
+ // match: (EQ (CMP x (RSBconst [0] y)))
+ // result: (EQ (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMN x (RSBconst [0] y)))
+ // result: (EQ (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ case BlockARMGE:
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: fc.ge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: !fc.ge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLE, cmp)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ case BlockARMGEnoov:
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.geNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.geNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GEnoov (InvertFlags cmp) yes no)
+ // result: (LEnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLEnoov, cmp)
+ return true
+ }
+ case BlockARMGT:
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: fc.gt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: !fc.gt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLT, cmp)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ case BlockARMGTnoov:
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.gtNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.gtNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GTnoov (InvertFlags cmp) yes no)
+ // result: (LTnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLTnoov, cmp)
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARMEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARMNotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMNE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARMLessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMLT, cc)
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARMLessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMULT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARMLessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMLE, cc)
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARMLessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMULE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARMGreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMGT, cc)
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARMGreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARMGreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMGE, cc)
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARMGreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ case BlockARMLE:
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: fc.le()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: !fc.le()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGE, cmp)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ case BlockARMLEnoov:
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.leNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.leNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LEnoov (InvertFlags cmp) yes no)
+ // result: (GEnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGEnoov, cmp)
+ return true
+ }
+ case BlockARMLT:
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: fc.lt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: !fc.lt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGT, cmp)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ case BlockARMLTnoov:
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.ltNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.ltNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LTnoov (InvertFlags cmp) yes no)
+ // result: (GTnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGTnoov, cmp)
+ return true
+ }
+ case BlockARMNE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMNotEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMNE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMLT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessThanU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMULT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMLE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMULE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMGT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMGE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cc)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: fc.ne()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: !fc.ne()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMNE, cmp)
+ return true
+ }
+ // match: (NE (CMP x (RSBconst [0] y)))
+ // result: (NE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMN x (RSBconst [0] y)))
+ // result: (NE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ case BlockARMUGE:
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: fc.uge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: !fc.uge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMULE, cmp)
+ return true
+ }
+ case BlockARMUGT:
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: fc.ugt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: !fc.ugt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMULT, cmp)
+ return true
+ }
+ case BlockARMULE:
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: fc.ule()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: !fc.ule()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cmp)
+ return true
+ }
+ case BlockARMULT:
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: fc.ult()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: !fc.ult()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cmp)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
new file mode 100644
index 0000000..f0a4425
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -0,0 +1,27265 @@
+// Code generated from _gen/ARM64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueARM64(v *Value) bool {
+ switch v.Op {
+ case OpARM64ADCSflags:
+ return rewriteValueARM64_OpARM64ADCSflags(v)
+ case OpARM64ADD:
+ return rewriteValueARM64_OpARM64ADD(v)
+ case OpARM64ADDSflags:
+ return rewriteValueARM64_OpARM64ADDSflags(v)
+ case OpARM64ADDconst:
+ return rewriteValueARM64_OpARM64ADDconst(v)
+ case OpARM64ADDshiftLL:
+ return rewriteValueARM64_OpARM64ADDshiftLL(v)
+ case OpARM64ADDshiftRA:
+ return rewriteValueARM64_OpARM64ADDshiftRA(v)
+ case OpARM64ADDshiftRL:
+ return rewriteValueARM64_OpARM64ADDshiftRL(v)
+ case OpARM64AND:
+ return rewriteValueARM64_OpARM64AND(v)
+ case OpARM64ANDconst:
+ return rewriteValueARM64_OpARM64ANDconst(v)
+ case OpARM64ANDshiftLL:
+ return rewriteValueARM64_OpARM64ANDshiftLL(v)
+ case OpARM64ANDshiftRA:
+ return rewriteValueARM64_OpARM64ANDshiftRA(v)
+ case OpARM64ANDshiftRL:
+ return rewriteValueARM64_OpARM64ANDshiftRL(v)
+ case OpARM64ANDshiftRO:
+ return rewriteValueARM64_OpARM64ANDshiftRO(v)
+ case OpARM64BIC:
+ return rewriteValueARM64_OpARM64BIC(v)
+ case OpARM64BICshiftLL:
+ return rewriteValueARM64_OpARM64BICshiftLL(v)
+ case OpARM64BICshiftRA:
+ return rewriteValueARM64_OpARM64BICshiftRA(v)
+ case OpARM64BICshiftRL:
+ return rewriteValueARM64_OpARM64BICshiftRL(v)
+ case OpARM64BICshiftRO:
+ return rewriteValueARM64_OpARM64BICshiftRO(v)
+ case OpARM64CMN:
+ return rewriteValueARM64_OpARM64CMN(v)
+ case OpARM64CMNW:
+ return rewriteValueARM64_OpARM64CMNW(v)
+ case OpARM64CMNWconst:
+ return rewriteValueARM64_OpARM64CMNWconst(v)
+ case OpARM64CMNconst:
+ return rewriteValueARM64_OpARM64CMNconst(v)
+ case OpARM64CMNshiftLL:
+ return rewriteValueARM64_OpARM64CMNshiftLL(v)
+ case OpARM64CMNshiftRA:
+ return rewriteValueARM64_OpARM64CMNshiftRA(v)
+ case OpARM64CMNshiftRL:
+ return rewriteValueARM64_OpARM64CMNshiftRL(v)
+ case OpARM64CMP:
+ return rewriteValueARM64_OpARM64CMP(v)
+ case OpARM64CMPW:
+ return rewriteValueARM64_OpARM64CMPW(v)
+ case OpARM64CMPWconst:
+ return rewriteValueARM64_OpARM64CMPWconst(v)
+ case OpARM64CMPconst:
+ return rewriteValueARM64_OpARM64CMPconst(v)
+ case OpARM64CMPshiftLL:
+ return rewriteValueARM64_OpARM64CMPshiftLL(v)
+ case OpARM64CMPshiftRA:
+ return rewriteValueARM64_OpARM64CMPshiftRA(v)
+ case OpARM64CMPshiftRL:
+ return rewriteValueARM64_OpARM64CMPshiftRL(v)
+ case OpARM64CSEL:
+ return rewriteValueARM64_OpARM64CSEL(v)
+ case OpARM64CSEL0:
+ return rewriteValueARM64_OpARM64CSEL0(v)
+ case OpARM64CSETM:
+ return rewriteValueARM64_OpARM64CSETM(v)
+ case OpARM64CSINC:
+ return rewriteValueARM64_OpARM64CSINC(v)
+ case OpARM64CSINV:
+ return rewriteValueARM64_OpARM64CSINV(v)
+ case OpARM64CSNEG:
+ return rewriteValueARM64_OpARM64CSNEG(v)
+ case OpARM64DIV:
+ return rewriteValueARM64_OpARM64DIV(v)
+ case OpARM64DIVW:
+ return rewriteValueARM64_OpARM64DIVW(v)
+ case OpARM64EON:
+ return rewriteValueARM64_OpARM64EON(v)
+ case OpARM64EONshiftLL:
+ return rewriteValueARM64_OpARM64EONshiftLL(v)
+ case OpARM64EONshiftRA:
+ return rewriteValueARM64_OpARM64EONshiftRA(v)
+ case OpARM64EONshiftRL:
+ return rewriteValueARM64_OpARM64EONshiftRL(v)
+ case OpARM64EONshiftRO:
+ return rewriteValueARM64_OpARM64EONshiftRO(v)
+ case OpARM64Equal:
+ return rewriteValueARM64_OpARM64Equal(v)
+ case OpARM64FADDD:
+ return rewriteValueARM64_OpARM64FADDD(v)
+ case OpARM64FADDS:
+ return rewriteValueARM64_OpARM64FADDS(v)
+ case OpARM64FCMPD:
+ return rewriteValueARM64_OpARM64FCMPD(v)
+ case OpARM64FCMPS:
+ return rewriteValueARM64_OpARM64FCMPS(v)
+ case OpARM64FMOVDfpgp:
+ return rewriteValueARM64_OpARM64FMOVDfpgp(v)
+ case OpARM64FMOVDgpfp:
+ return rewriteValueARM64_OpARM64FMOVDgpfp(v)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v)
+ case OpARM64FMOVDloadidx:
+ return rewriteValueARM64_OpARM64FMOVDloadidx(v)
+ case OpARM64FMOVDloadidx8:
+ return rewriteValueARM64_OpARM64FMOVDloadidx8(v)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v)
+ case OpARM64FMOVDstoreidx:
+ return rewriteValueARM64_OpARM64FMOVDstoreidx(v)
+ case OpARM64FMOVDstoreidx8:
+ return rewriteValueARM64_OpARM64FMOVDstoreidx8(v)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v)
+ case OpARM64FMOVSloadidx:
+ return rewriteValueARM64_OpARM64FMOVSloadidx(v)
+ case OpARM64FMOVSloadidx4:
+ return rewriteValueARM64_OpARM64FMOVSloadidx4(v)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v)
+ case OpARM64FMOVSstoreidx:
+ return rewriteValueARM64_OpARM64FMOVSstoreidx(v)
+ case OpARM64FMOVSstoreidx4:
+ return rewriteValueARM64_OpARM64FMOVSstoreidx4(v)
+ case OpARM64FMULD:
+ return rewriteValueARM64_OpARM64FMULD(v)
+ case OpARM64FMULS:
+ return rewriteValueARM64_OpARM64FMULS(v)
+ case OpARM64FNEGD:
+ return rewriteValueARM64_OpARM64FNEGD(v)
+ case OpARM64FNEGS:
+ return rewriteValueARM64_OpARM64FNEGS(v)
+ case OpARM64FNMULD:
+ return rewriteValueARM64_OpARM64FNMULD(v)
+ case OpARM64FNMULS:
+ return rewriteValueARM64_OpARM64FNMULS(v)
+ case OpARM64FSUBD:
+ return rewriteValueARM64_OpARM64FSUBD(v)
+ case OpARM64FSUBS:
+ return rewriteValueARM64_OpARM64FSUBS(v)
+ case OpARM64GreaterEqual:
+ return rewriteValueARM64_OpARM64GreaterEqual(v)
+ case OpARM64GreaterEqualF:
+ return rewriteValueARM64_OpARM64GreaterEqualF(v)
+ case OpARM64GreaterEqualNoov:
+ return rewriteValueARM64_OpARM64GreaterEqualNoov(v)
+ case OpARM64GreaterEqualU:
+ return rewriteValueARM64_OpARM64GreaterEqualU(v)
+ case OpARM64GreaterThan:
+ return rewriteValueARM64_OpARM64GreaterThan(v)
+ case OpARM64GreaterThanF:
+ return rewriteValueARM64_OpARM64GreaterThanF(v)
+ case OpARM64GreaterThanU:
+ return rewriteValueARM64_OpARM64GreaterThanU(v)
+ case OpARM64LDP:
+ return rewriteValueARM64_OpARM64LDP(v)
+ case OpARM64LessEqual:
+ return rewriteValueARM64_OpARM64LessEqual(v)
+ case OpARM64LessEqualF:
+ return rewriteValueARM64_OpARM64LessEqualF(v)
+ case OpARM64LessEqualU:
+ return rewriteValueARM64_OpARM64LessEqualU(v)
+ case OpARM64LessThan:
+ return rewriteValueARM64_OpARM64LessThan(v)
+ case OpARM64LessThanF:
+ return rewriteValueARM64_OpARM64LessThanF(v)
+ case OpARM64LessThanNoov:
+ return rewriteValueARM64_OpARM64LessThanNoov(v)
+ case OpARM64LessThanU:
+ return rewriteValueARM64_OpARM64LessThanU(v)
+ case OpARM64MADD:
+ return rewriteValueARM64_OpARM64MADD(v)
+ case OpARM64MADDW:
+ return rewriteValueARM64_OpARM64MADDW(v)
+ case OpARM64MNEG:
+ return rewriteValueARM64_OpARM64MNEG(v)
+ case OpARM64MNEGW:
+ return rewriteValueARM64_OpARM64MNEGW(v)
+ case OpARM64MOD:
+ return rewriteValueARM64_OpARM64MOD(v)
+ case OpARM64MODW:
+ return rewriteValueARM64_OpARM64MODW(v)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v)
+ case OpARM64MOVBUloadidx:
+ return rewriteValueARM64_OpARM64MOVBUloadidx(v)
+ case OpARM64MOVBUreg:
+ return rewriteValueARM64_OpARM64MOVBUreg(v)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v)
+ case OpARM64MOVBloadidx:
+ return rewriteValueARM64_OpARM64MOVBloadidx(v)
+ case OpARM64MOVBreg:
+ return rewriteValueARM64_OpARM64MOVBreg(v)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v)
+ case OpARM64MOVBstoreidx:
+ return rewriteValueARM64_OpARM64MOVBstoreidx(v)
+ case OpARM64MOVBstorezero:
+ return rewriteValueARM64_OpARM64MOVBstorezero(v)
+ case OpARM64MOVBstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVBstorezeroidx(v)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v)
+ case OpARM64MOVDloadidx:
+ return rewriteValueARM64_OpARM64MOVDloadidx(v)
+ case OpARM64MOVDloadidx8:
+ return rewriteValueARM64_OpARM64MOVDloadidx8(v)
+ case OpARM64MOVDnop:
+ return rewriteValueARM64_OpARM64MOVDnop(v)
+ case OpARM64MOVDreg:
+ return rewriteValueARM64_OpARM64MOVDreg(v)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v)
+ case OpARM64MOVDstoreidx:
+ return rewriteValueARM64_OpARM64MOVDstoreidx(v)
+ case OpARM64MOVDstoreidx8:
+ return rewriteValueARM64_OpARM64MOVDstoreidx8(v)
+ case OpARM64MOVDstorezero:
+ return rewriteValueARM64_OpARM64MOVDstorezero(v)
+ case OpARM64MOVDstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVDstorezeroidx(v)
+ case OpARM64MOVDstorezeroidx8:
+ return rewriteValueARM64_OpARM64MOVDstorezeroidx8(v)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v)
+ case OpARM64MOVHUloadidx:
+ return rewriteValueARM64_OpARM64MOVHUloadidx(v)
+ case OpARM64MOVHUloadidx2:
+ return rewriteValueARM64_OpARM64MOVHUloadidx2(v)
+ case OpARM64MOVHUreg:
+ return rewriteValueARM64_OpARM64MOVHUreg(v)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v)
+ case OpARM64MOVHloadidx:
+ return rewriteValueARM64_OpARM64MOVHloadidx(v)
+ case OpARM64MOVHloadidx2:
+ return rewriteValueARM64_OpARM64MOVHloadidx2(v)
+ case OpARM64MOVHreg:
+ return rewriteValueARM64_OpARM64MOVHreg(v)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v)
+ case OpARM64MOVHstoreidx:
+ return rewriteValueARM64_OpARM64MOVHstoreidx(v)
+ case OpARM64MOVHstoreidx2:
+ return rewriteValueARM64_OpARM64MOVHstoreidx2(v)
+ case OpARM64MOVHstorezero:
+ return rewriteValueARM64_OpARM64MOVHstorezero(v)
+ case OpARM64MOVHstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVHstorezeroidx(v)
+ case OpARM64MOVHstorezeroidx2:
+ return rewriteValueARM64_OpARM64MOVHstorezeroidx2(v)
+ case OpARM64MOVQstorezero:
+ return rewriteValueARM64_OpARM64MOVQstorezero(v)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v)
+ case OpARM64MOVWUloadidx:
+ return rewriteValueARM64_OpARM64MOVWUloadidx(v)
+ case OpARM64MOVWUloadidx4:
+ return rewriteValueARM64_OpARM64MOVWUloadidx4(v)
+ case OpARM64MOVWUreg:
+ return rewriteValueARM64_OpARM64MOVWUreg(v)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v)
+ case OpARM64MOVWloadidx:
+ return rewriteValueARM64_OpARM64MOVWloadidx(v)
+ case OpARM64MOVWloadidx4:
+ return rewriteValueARM64_OpARM64MOVWloadidx4(v)
+ case OpARM64MOVWreg:
+ return rewriteValueARM64_OpARM64MOVWreg(v)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v)
+ case OpARM64MOVWstoreidx:
+ return rewriteValueARM64_OpARM64MOVWstoreidx(v)
+ case OpARM64MOVWstoreidx4:
+ return rewriteValueARM64_OpARM64MOVWstoreidx4(v)
+ case OpARM64MOVWstorezero:
+ return rewriteValueARM64_OpARM64MOVWstorezero(v)
+ case OpARM64MOVWstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVWstorezeroidx(v)
+ case OpARM64MOVWstorezeroidx4:
+ return rewriteValueARM64_OpARM64MOVWstorezeroidx4(v)
+ case OpARM64MSUB:
+ return rewriteValueARM64_OpARM64MSUB(v)
+ case OpARM64MSUBW:
+ return rewriteValueARM64_OpARM64MSUBW(v)
+ case OpARM64MUL:
+ return rewriteValueARM64_OpARM64MUL(v)
+ case OpARM64MULW:
+ return rewriteValueARM64_OpARM64MULW(v)
+ case OpARM64MVN:
+ return rewriteValueARM64_OpARM64MVN(v)
+ case OpARM64MVNshiftLL:
+ return rewriteValueARM64_OpARM64MVNshiftLL(v)
+ case OpARM64MVNshiftRA:
+ return rewriteValueARM64_OpARM64MVNshiftRA(v)
+ case OpARM64MVNshiftRL:
+ return rewriteValueARM64_OpARM64MVNshiftRL(v)
+ case OpARM64MVNshiftRO:
+ return rewriteValueARM64_OpARM64MVNshiftRO(v)
+ case OpARM64NEG:
+ return rewriteValueARM64_OpARM64NEG(v)
+ case OpARM64NEGshiftLL:
+ return rewriteValueARM64_OpARM64NEGshiftLL(v)
+ case OpARM64NEGshiftRA:
+ return rewriteValueARM64_OpARM64NEGshiftRA(v)
+ case OpARM64NEGshiftRL:
+ return rewriteValueARM64_OpARM64NEGshiftRL(v)
+ case OpARM64NotEqual:
+ return rewriteValueARM64_OpARM64NotEqual(v)
+ case OpARM64OR:
+ return rewriteValueARM64_OpARM64OR(v)
+ case OpARM64ORN:
+ return rewriteValueARM64_OpARM64ORN(v)
+ case OpARM64ORNshiftLL:
+ return rewriteValueARM64_OpARM64ORNshiftLL(v)
+ case OpARM64ORNshiftRA:
+ return rewriteValueARM64_OpARM64ORNshiftRA(v)
+ case OpARM64ORNshiftRL:
+ return rewriteValueARM64_OpARM64ORNshiftRL(v)
+ case OpARM64ORNshiftRO:
+ return rewriteValueARM64_OpARM64ORNshiftRO(v)
+ case OpARM64ORconst:
+ return rewriteValueARM64_OpARM64ORconst(v)
+ case OpARM64ORshiftLL:
+ return rewriteValueARM64_OpARM64ORshiftLL(v)
+ case OpARM64ORshiftRA:
+ return rewriteValueARM64_OpARM64ORshiftRA(v)
+ case OpARM64ORshiftRL:
+ return rewriteValueARM64_OpARM64ORshiftRL(v)
+ case OpARM64ORshiftRO:
+ return rewriteValueARM64_OpARM64ORshiftRO(v)
+ case OpARM64REV:
+ return rewriteValueARM64_OpARM64REV(v)
+ case OpARM64REVW:
+ return rewriteValueARM64_OpARM64REVW(v)
+ case OpARM64ROR:
+ return rewriteValueARM64_OpARM64ROR(v)
+ case OpARM64RORW:
+ return rewriteValueARM64_OpARM64RORW(v)
+ case OpARM64SBCSflags:
+ return rewriteValueARM64_OpARM64SBCSflags(v)
+ case OpARM64SLL:
+ return rewriteValueARM64_OpARM64SLL(v)
+ case OpARM64SLLconst:
+ return rewriteValueARM64_OpARM64SLLconst(v)
+ case OpARM64SRA:
+ return rewriteValueARM64_OpARM64SRA(v)
+ case OpARM64SRAconst:
+ return rewriteValueARM64_OpARM64SRAconst(v)
+ case OpARM64SRL:
+ return rewriteValueARM64_OpARM64SRL(v)
+ case OpARM64SRLconst:
+ return rewriteValueARM64_OpARM64SRLconst(v)
+ case OpARM64STP:
+ return rewriteValueARM64_OpARM64STP(v)
+ case OpARM64SUB:
+ return rewriteValueARM64_OpARM64SUB(v)
+ case OpARM64SUBconst:
+ return rewriteValueARM64_OpARM64SUBconst(v)
+ case OpARM64SUBshiftLL:
+ return rewriteValueARM64_OpARM64SUBshiftLL(v)
+ case OpARM64SUBshiftRA:
+ return rewriteValueARM64_OpARM64SUBshiftRA(v)
+ case OpARM64SUBshiftRL:
+ return rewriteValueARM64_OpARM64SUBshiftRL(v)
+ case OpARM64TST:
+ return rewriteValueARM64_OpARM64TST(v)
+ case OpARM64TSTW:
+ return rewriteValueARM64_OpARM64TSTW(v)
+ case OpARM64TSTWconst:
+ return rewriteValueARM64_OpARM64TSTWconst(v)
+ case OpARM64TSTconst:
+ return rewriteValueARM64_OpARM64TSTconst(v)
+ case OpARM64TSTshiftLL:
+ return rewriteValueARM64_OpARM64TSTshiftLL(v)
+ case OpARM64TSTshiftRA:
+ return rewriteValueARM64_OpARM64TSTshiftRA(v)
+ case OpARM64TSTshiftRL:
+ return rewriteValueARM64_OpARM64TSTshiftRL(v)
+ case OpARM64TSTshiftRO:
+ return rewriteValueARM64_OpARM64TSTshiftRO(v)
+ case OpARM64UBFIZ:
+ return rewriteValueARM64_OpARM64UBFIZ(v)
+ case OpARM64UBFX:
+ return rewriteValueARM64_OpARM64UBFX(v)
+ case OpARM64UDIV:
+ return rewriteValueARM64_OpARM64UDIV(v)
+ case OpARM64UDIVW:
+ return rewriteValueARM64_OpARM64UDIVW(v)
+ case OpARM64UMOD:
+ return rewriteValueARM64_OpARM64UMOD(v)
+ case OpARM64UMODW:
+ return rewriteValueARM64_OpARM64UMODW(v)
+ case OpARM64XOR:
+ return rewriteValueARM64_OpARM64XOR(v)
+ case OpARM64XORconst:
+ return rewriteValueARM64_OpARM64XORconst(v)
+ case OpARM64XORshiftLL:
+ return rewriteValueARM64_OpARM64XORshiftLL(v)
+ case OpARM64XORshiftRA:
+ return rewriteValueARM64_OpARM64XORshiftRA(v)
+ case OpARM64XORshiftRL:
+ return rewriteValueARM64_OpARM64XORshiftRL(v)
+ case OpARM64XORshiftRO:
+ return rewriteValueARM64_OpARM64XORshiftRO(v)
+ case OpAbs:
+ v.Op = OpARM64FABSD
+ return true
+ case OpAdd16:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpARM64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpARM64FADDD
+ return true
+ case OpAdd8:
+ v.Op = OpARM64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpARM64ADD
+ return true
+ case OpAddr:
+ return rewriteValueARM64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd32:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd64:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd8:
+ v.Op = OpARM64AND
+ return true
+ case OpAndB:
+ v.Op = OpARM64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpARM64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd32Variant:
+ v.Op = OpARM64LoweredAtomicAdd32Variant
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpARM64LoweredAtomicAdd64
+ return true
+ case OpAtomicAdd64Variant:
+ v.Op = OpARM64LoweredAtomicAdd64Variant
+ return true
+ case OpAtomicAnd32:
+ return rewriteValueARM64_OpAtomicAnd32(v)
+ case OpAtomicAnd32Variant:
+ return rewriteValueARM64_OpAtomicAnd32Variant(v)
+ case OpAtomicAnd8:
+ return rewriteValueARM64_OpAtomicAnd8(v)
+ case OpAtomicAnd8Variant:
+ return rewriteValueARM64_OpAtomicAnd8Variant(v)
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpARM64LoweredAtomicCas32
+ return true
+ case OpAtomicCompareAndSwap32Variant:
+ v.Op = OpARM64LoweredAtomicCas32Variant
+ return true
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpARM64LoweredAtomicCas64
+ return true
+ case OpAtomicCompareAndSwap64Variant:
+ v.Op = OpARM64LoweredAtomicCas64Variant
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpARM64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange32Variant:
+ v.Op = OpARM64LoweredAtomicExchange32Variant
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpARM64LoweredAtomicExchange64
+ return true
+ case OpAtomicExchange64Variant:
+ v.Op = OpARM64LoweredAtomicExchange64Variant
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpARM64LDARW
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpARM64LDAR
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpARM64LDARB
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpARM64LDAR
+ return true
+ case OpAtomicOr32:
+ return rewriteValueARM64_OpAtomicOr32(v)
+ case OpAtomicOr32Variant:
+ return rewriteValueARM64_OpAtomicOr32Variant(v)
+ case OpAtomicOr8:
+ return rewriteValueARM64_OpAtomicOr8(v)
+ case OpAtomicOr8Variant:
+ return rewriteValueARM64_OpAtomicOr8Variant(v)
+ case OpAtomicStore32:
+ v.Op = OpARM64STLRW
+ return true
+ case OpAtomicStore64:
+ v.Op = OpARM64STLR
+ return true
+ case OpAtomicStore8:
+ v.Op = OpARM64STLRB
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpARM64STLR
+ return true
+ case OpAvg64u:
+ return rewriteValueARM64_OpAvg64u(v)
+ case OpBitLen32:
+ return rewriteValueARM64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValueARM64_OpBitLen64(v)
+ case OpBitRev16:
+ return rewriteValueARM64_OpBitRev16(v)
+ case OpBitRev32:
+ v.Op = OpARM64RBITW
+ return true
+ case OpBitRev64:
+ v.Op = OpARM64RBIT
+ return true
+ case OpBitRev8:
+ return rewriteValueARM64_OpBitRev8(v)
+ case OpBswap16:
+ v.Op = OpARM64REV16W
+ return true
+ case OpBswap32:
+ v.Op = OpARM64REVW
+ return true
+ case OpBswap64:
+ v.Op = OpARM64REV
+ return true
+ case OpCeil:
+ v.Op = OpARM64FRINTPD
+ return true
+ case OpClosureCall:
+ v.Op = OpARM64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom32:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom64:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom8:
+ v.Op = OpARM64MVN
+ return true
+ case OpCondSelect:
+ return rewriteValueARM64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueARM64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueARM64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueARM64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueARM64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueARM64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueARM64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueARM64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueARM64_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueARM64_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz32:
+ return rewriteValueARM64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValueARM64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCtz8:
+ return rewriteValueARM64_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpARM64FCVTZSSW
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpARM64FCVTZUSW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpARM64FCVTZSS
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpARM64FCVTSD
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpARM64FCVTZUS
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpARM64UCVTFWS
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpARM64UCVTFWD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpARM64SCVTFWS
+ return true
+ case OpCvt32to64F:
+ v.Op = OpARM64SCVTFWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpARM64FCVTZSDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpARM64FCVTDS
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpARM64FCVTZUDW
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpARM64FCVTZSD
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpARM64FCVTZUD
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpARM64UCVTFS
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpARM64UCVTFD
+ return true
+ case OpCvt64to32F:
+ v.Op = OpARM64SCVTFS
+ return true
+ case OpCvt64to64F:
+ v.Op = OpARM64SCVTFD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueARM64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueARM64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueARM64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpARM64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpARM64UDIVW
+ return true
+ case OpDiv64:
+ return rewriteValueARM64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpARM64FDIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpARM64UDIV
+ return true
+ case OpDiv8:
+ return rewriteValueARM64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueARM64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueARM64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueARM64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueARM64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueARM64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueARM64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueARM64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueARM64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueARM64_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueARM64_OpFMA(v)
+ case OpFloor:
+ v.Op = OpARM64FRINTMD
+ return true
+ case OpGetCallerPC:
+ v.Op = OpARM64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpARM64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpARM64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueARM64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueARM64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpARM64MULH
+ return true
+ case OpHmul64u:
+ v.Op = OpARM64UMULH
+ return true
+ case OpInterCall:
+ v.Op = OpARM64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueARM64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueARM64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueARM64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueARM64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueARM64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueARM64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueARM64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueARM64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueARM64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueARM64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueARM64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueARM64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueARM64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueARM64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueARM64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueARM64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueARM64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueARM64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueARM64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueARM64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueARM64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueARM64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueARM64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueARM64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueARM64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueARM64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueARM64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueARM64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueARM64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueARM64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueARM64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueARM64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueARM64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueARM64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueARM64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueARM64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueARM64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueARM64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueARM64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueARM64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueARM64_OpLsh8x8(v)
+ case OpMax32F:
+ v.Op = OpARM64FMAXS
+ return true
+ case OpMax64F:
+ v.Op = OpARM64FMAXD
+ return true
+ case OpMin32F:
+ v.Op = OpARM64FMINS
+ return true
+ case OpMin64F:
+ v.Op = OpARM64FMIND
+ return true
+ case OpMod16:
+ return rewriteValueARM64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueARM64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueARM64_OpMod32(v)
+ case OpMod32u:
+ v.Op = OpARM64UMODW
+ return true
+ case OpMod64:
+ return rewriteValueARM64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpARM64UMOD
+ return true
+ case OpMod8:
+ return rewriteValueARM64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueARM64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueARM64_OpMove(v)
+ case OpMul16:
+ v.Op = OpARM64MULW
+ return true
+ case OpMul32:
+ v.Op = OpARM64MULW
+ return true
+ case OpMul32F:
+ v.Op = OpARM64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpARM64MUL
+ return true
+ case OpMul64F:
+ v.Op = OpARM64FMULD
+ return true
+ case OpMul8:
+ v.Op = OpARM64MULW
+ return true
+ case OpNeg16:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpARM64FNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpARM64FNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeq16:
+ return rewriteValueARM64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueARM64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueARM64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueARM64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueARM64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueARM64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpARM64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueARM64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpARM64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueARM64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueARM64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpARM64OR
+ return true
+ case OpOr32:
+ v.Op = OpARM64OR
+ return true
+ case OpOr64:
+ v.Op = OpARM64OR
+ return true
+ case OpOr8:
+ v.Op = OpARM64OR
+ return true
+ case OpOrB:
+ v.Op = OpARM64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueARM64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueARM64_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueARM64_OpPopCount32(v)
+ case OpPopCount64:
+ return rewriteValueARM64_OpPopCount64(v)
+ case OpPrefetchCache:
+ return rewriteValueARM64_OpPrefetchCache(v)
+ case OpPrefetchCacheStreamed:
+ return rewriteValueARM64_OpPrefetchCacheStreamed(v)
+ case OpPubBarrier:
+ return rewriteValueARM64_OpPubBarrier(v)
+ case OpRotateLeft16:
+ return rewriteValueARM64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueARM64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueARM64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueARM64_OpRotateLeft8(v)
+ case OpRound:
+ v.Op = OpARM64FRINTAD
+ return true
+ case OpRound32F:
+ v.Op = OpARM64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpARM64LoweredRound64F
+ return true
+ case OpRoundToEven:
+ v.Op = OpARM64FRINTND
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueARM64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueARM64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueARM64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueARM64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueARM64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueARM64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueARM64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueARM64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueARM64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueARM64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueARM64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueARM64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueARM64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueARM64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueARM64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueARM64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueARM64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueARM64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueARM64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueARM64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueARM64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueARM64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueARM64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueARM64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueARM64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueARM64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueARM64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueARM64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueARM64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueARM64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueARM64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueARM64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueARM64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueARM64_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValueARM64_OpSelectN(v)
+ case OpSignExt16to32:
+ v.Op = OpARM64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpARM64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpARM64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueARM64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpARM64FSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpARM64FSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpARM64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueARM64_OpStore(v)
+ case OpSub16:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub32:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpARM64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpARM64FSUBD
+ return true
+ case OpSub8:
+ v.Op = OpARM64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpARM64SUB
+ return true
+ case OpTailCall:
+ v.Op = OpARM64CALLtail
+ return true
+ case OpTrunc:
+ v.Op = OpARM64FRINTZD
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpARM64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor32:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor64:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor8:
+ v.Op = OpARM64XOR
+ return true
+ case OpZero:
+ return rewriteValueARM64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpARM64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpARM64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpARM64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpARM64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpARM64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpARM64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c))))
+ // result: (ADCSflags x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64ADCzerocarry || v_2_0_0.Type != typ.UInt64 {
+ break
+ }
+ c := v_2_0_0.Args[0]
+ v.reset(OpARM64ADCSflags)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0]))))
+ // result: (ADDSflags x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64ADDSflags)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVDconst <t> [c]))
+ // cond: !t.IsPtr()
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MUL x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MADD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MUL {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MADD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MNEG x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MSUB a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEG {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MSUB)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MULW x y))
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
+ // result: (MADDW a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MULW {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MADDW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MNEGW x y))
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
+ // result: (MSUBW a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEGW {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MSUBW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64NEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDSflags(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSflags x (MOVDconst [c]))
+ // result: (ADDSconstflags [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDSconstflags)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVDaddr [int32(off1)+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [c] y)
+ // cond: c < 0
+ // result: (SUBconst [-c] y)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if !(c < 0) {
+ break
+ }
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDshiftLL (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVDconst [c]) [d])
+ // result: (ADDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ // result: (REV16W x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ // result: (REV16 x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ // result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRA (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVDconst [c]) [d])
+ // result: (ADDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRL (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVDconst [c]) [d])
+ // result: (ADDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVDconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // result: (BIC x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64BIC)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWUreg x))
+ // result: (ANDconst [c&(1<<32-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHUreg x))
+ // result: (ANDconst [c&(1<<16-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<16 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBUreg x))
+ // result: (ANDconst [c&(1<<8-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<8 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [ac] (SLLconst [sc] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ for {
+ ac := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [ac] (SRLconst [sc] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ for {
+ ac := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (UBFX [bfc] x))
+ // cond: isARM64BFMask(0, c, 0)
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(0, c, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0))))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLL (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRA (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVDconst [c]) [d])
+ // result: (ANDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRL (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRO (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRO x (MOVDconst [c]) [d])
+ // result: (ANDconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRO y:(RORconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BIC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BIC x (MOVDconst [c]))
+ // result: (ANDconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (BIC x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRA x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRO x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMN x (MOVDconst [c]))
+ // result: (CMNconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMNW x (MOVDconst [c]))
+ // result: (CMNWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNWconst [c] y)
+ // cond: c < 0 && c != -1<<31
+ // result: (CMPWconst [-c] y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpARM64CMPWconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMNWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [addFlags32(int32(x),y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags32(int32(x), y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNconst [c] y)
+ // cond: c < 0 && c != -1<<63
+ // result: (CMPconst [-c] y)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMNconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [addFlags64(x,y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags64(x, y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLL (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVDconst [c]) [d])
+ // result: (CMNconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRA (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVDconst [c]) [d])
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRL (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVDconst [c]) [d])
+ // result: (CMNconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // result: (CMPconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SLLconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftLL x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SRLconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftRL x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SRAconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftRA x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVDconst [c]))
+ // result: (CMPWconst [int32(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWconst [int32(c)] x))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst [c] y)
+ // cond: c < 0 && c != -1<<31
+ // result: (CMNWconst [-c] y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpARM64CMNWconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [subFlags32(int32(x),y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(int32(x), y))
+ return true
+ }
+ // match: (CMPWconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPWconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst [c] y)
+ // cond: c < 0 && c != -1<<63
+ // result: (CMNconst [-c] y)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [subFlags64(x,y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(x, y))
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVWUreg _) [c])
+ // cond: 0xffffffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg || !(0xffffffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLL (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVDconst [c]) [d])
+ // result: (CMPconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRA (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVDconst [c]) [d])
+ // result: (CMPconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRL (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVDconst [c]) [d])
+ // result: (CMPconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag)
+ // result: (CSETM [cc] flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != -1 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSETM)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg(flag)
+ return true
+ }
+ // match: (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag)
+ // result: (CSETM [arm64Negate(cc)] flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSETM)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg(flag)
+ return true
+ }
+ // match: (CSEL [cc] x (MOVDconst [0]) flag)
+ // result: (CSEL0 [cc] x flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg2(x, flag)
+ return true
+ }
+ // match: (CSEL [cc] (MOVDconst [0]) y flag)
+ // result: (CSEL0 [arm64Negate(cc)] y flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ flag := v_2
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg2(y, flag)
+ return true
+ }
+ // match: (CSEL [cc] x (ADDconst [1] a) flag)
+ // result: (CSINC [cc] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_1.Args[0]
+ flag := v_2
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] (ADDconst [1] a) x flag)
+ // result: (CSINC [arm64Negate(cc)] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ a := v_0.Args[0]
+ x := v_1
+ flag := v_2
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] x (MVN a) flag)
+ // result: (CSINV [cc] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ break
+ }
+ a := v_1.Args[0]
+ flag := v_2
+ v.reset(OpARM64CSINV)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] (MVN a) x flag)
+ // result: (CSINV [arm64Negate(cc)] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MVN {
+ break
+ }
+ a := v_0.Args[0]
+ x := v_1
+ flag := v_2
+ v.reset(OpARM64CSINV)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] x (NEG a) flag)
+ // result: (CSNEG [cc] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64NEG {
+ break
+ }
+ a := v_1.Args[0]
+ flag := v_2
+ v.reset(OpARM64CSNEG)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] (NEG a) x flag)
+ // result: (CSNEG [arm64Negate(cc)] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64NEG {
+ break
+ }
+ a := v_0.Args[0]
+ x := v_1
+ flag := v_2
+ v.reset(OpARM64CSNEG)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] x y (InvertFlags cmp))
+ // result: (CSEL [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSEL [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSEL [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: y
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CSEL [cc] x y (CMPWconst [0] boolval))
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
+ // result: (CSEL [boolval.Op] x y flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ boolval := v_2.Args[0]
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ // match: (CSEL [cc] x y (CMPWconst [0] boolval))
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
+ // result: (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ boolval := v_2.Args[0]
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(arm64Negate(boolval.Op))
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSEL0(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSEL0 [cc] x (InvertFlags cmp))
+ // result: (CSEL0 [arm64Invert(cc)] x cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_1.Args[0]
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg2(x, cmp)
+ return true
+ }
+ // match: (CSEL0 [cc] x flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_1
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSEL0 [cc] _ flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (MOVDconst [0])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_1
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (CSEL0 [cc] x (CMPWconst [0] boolval))
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
+ // result: (CSEL0 [boolval.Op] x flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ boolval := v_1.Args[0]
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg2(x, flagArg(boolval))
+ return true
+ }
+ // match: (CSEL0 [cc] x (CMPWconst [0] boolval))
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
+ // result: (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ boolval := v_1.Args[0]
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Negate(boolval.Op))
+ v.AddArg2(x, flagArg(boolval))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSETM(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CSETM [cc] (InvertFlags cmp))
+ // result: (CSETM [arm64Invert(cc)] cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_0.Args[0]
+ v.reset(OpARM64CSETM)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg(cmp)
+ return true
+ }
+ // match: (CSETM [cc] flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: (MOVDconst [-1])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_0
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (CSETM [cc] flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (MOVDconst [0])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_0
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSINC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSINC [cc] x y (InvertFlags cmp))
+ // result: (CSINC [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSINC [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSINC [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (ADDconst [1] y)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSINV(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSINV [cc] x y (InvertFlags cmp))
+ // result: (CSINV [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSINV)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSINV [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSINV [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (Not y)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpNot)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSNEG(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSNEG [cc] x y (InvertFlags cmp))
+ // result: (CSNEG [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSNEG)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSNEG [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSNEG [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (NEG y)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [c/d])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(int32(c)/int32(d)))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) / int32(d))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EON(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EON x (MOVDconst [c]))
+ // result: (XORconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (EON x x)
+ // result: (MOVDconst [-1])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (EON x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftLL x (MOVDconst [c]) [d])
+ // result: (XORconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRA x (MOVDconst [c]) [d])
+ // result: (XORconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRL x (MOVDconst [c]) [d])
+ // result: (XORconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRO x (MOVDconst [c]) [d])
+ // result: (XORconst x [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64Equal(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Equal (CMPconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (TST x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPWconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (Equal (TSTWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPWconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (TSTW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (Equal (TSTconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMP x z:(NEG y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMN x y))
+ for {
+ if v_0.Op != OpARM64CMP {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPW x z:(NEG y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMNW x y))
+ for {
+ if v_0.Op != OpARM64CMPW {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (Equal (CMNconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPWconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (Equal (CMNWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMN x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPWconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMNW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPconst [0] z:(MADD a x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMN a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPconst [0] z:(MSUB a x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMP a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPWconst [0] z:(MADDW a x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMNW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (CMPWconst [0] z:(MSUBW a x y)))
+ // cond: z.Uses == 1
+ // result: (Equal (CMPW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Equal (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.eq())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.eq()))
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64Equal)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDD a (FMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (FADDD a (FNMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FNMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpARM64FMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS a (FMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDS a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpARM64FMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (FADDS a (FNMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBS a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FNMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpARM64FMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FCMPD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPD x (FMOVDconst [0]))
+ // result: (FCMPD0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64FCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPD (FMOVDconst [0]) x)
+ // result: (InvertFlags (FCMPD0 x))
+ for {
+ if v_0.Op != OpARM64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD0, types.TypeFlags)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FCMPS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPS x (FMOVSconst [0]))
+ // result: (FCMPS0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64FMOVSconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64FCMPS0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPS (FMOVSconst [0]) x)
+ // result: (InvertFlags (FCMPS0 x))
+ for {
+ if v_0.Op != OpARM64FMOVSconst || auxIntToFloat64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS0, types.TypeFlags)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FMOVDfpgp <t> (Arg [off] {sym}))
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FMOVDgpfp <t> (Arg [off] {sym}))
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _))
+ // result: (FMOVDgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVDgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDloadidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDloadidx ptr (SLLconst [3] idx) mem)
+ // result: (FMOVDloadidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDloadidx (SLLconst [3] idx) ptr mem)
+ // result: (FMOVDloadidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64FMOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDloadidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (FMOVDload ptr [int32(c)<<3] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem)
+ // result: (MOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDstoreidx8 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx ptr (SLLconst [3] idx) val mem)
+ // result: (FMOVDstoreidx8 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx (SLLconst [3] idx) ptr val mem)
+ // result: (FMOVDstoreidx8 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstoreidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstoreidx8 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<3)
+ // result: (FMOVDstore [int32(c)<<3] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _))
+ // result: (FMOVSgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVSgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSloadidx ptr (SLLconst [2] idx) mem)
+ // result: (FMOVSloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVSloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSloadidx (SLLconst [2] idx) ptr mem)
+ // result: (FMOVSloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64FMOVSloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (FMOVSload ptr [int32(c)<<2] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem)
+ // result: (MOVWstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSstoreidx4 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx ptr (SLLconst [2] idx) val mem)
+ // result: (FMOVSstoreidx4 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVSstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx (SLLconst [2] idx) ptr val mem)
+ // result: (FMOVSstoreidx4 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVSstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstoreidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstoreidx4 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<2)
+ // result: (FMOVSstore [int32(c)<<2] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMULD (FNEGD x) y)
+ // result: (FNMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMULS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMULS (FNEGS x) y)
+ // result: (FNMULS x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGS {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FNMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNEGD(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGD (FMULD x y))
+ // result: (FNMULD x y)
+ for {
+ if v_0.Op != OpARM64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (FNEGD (FNMULD x y))
+ // result: (FMULD x y)
+ for {
+ if v_0.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNEGS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGS (FMULS x y))
+ // result: (FNMULS x y)
+ for {
+ if v_0.Op != OpARM64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FNMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (FNEGS (FNMULS x y))
+ // result: (FMULS x y)
+ for {
+ if v_0.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMULD (FNEGD x) y)
+ // result: (FMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNMULS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMULS (FNEGS x) y)
+ // result: (FMULS x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGS {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBD a (FMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD (FMULD x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMSUBD a x y)
+ for {
+ if v_0.Op != OpARM64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FNMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD a (FNMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD (FNMULD x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMADDD a x y)
+ for {
+ if v_0.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FNMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS a (FMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBS a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS (FMULS x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMSUBS a x y)
+ for {
+ if v_0.Op != OpARM64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FNMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS a (FNMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDS a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS (FNMULS x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMADDS a x y)
+ for {
+ if v_0.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpARM64FNMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (GreaterEqual (CMPconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqual (TST x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPWconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (GreaterEqual (TSTWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPWconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqual (TSTW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (GreaterEqual (TSTconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (GreaterEqualNoov (CMNconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPWconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (GreaterEqualNoov (CMNWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqualNoov (CMN x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPWconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqualNoov (CMNW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPconst [0] z:(MADD a x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqualNoov (CMN a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPconst [0] z:(MSUB a x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqualNoov (CMP a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPWconst [0] z:(MADDW a x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqualNoov (CMNW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (CMPWconst [0] z:(MSUBW a x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterEqualNoov (CMPW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterEqualNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ge())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ge()))
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualF (InvertFlags x))
+ // result: (LessEqualF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualNoov(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (GreaterEqualNoov (InvertFlags x))
+ // result: (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64LessThanNoov, typ.Bool)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(v0, v1, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.uge())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.uge()))
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // result: (LessEqualU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (GreaterThan (CMPconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterThan (TST x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterThan (CMPWconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (GreaterThan (TSTWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterThan (CMPWconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (GreaterThan (TSTW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterThan (CMPconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (GreaterThan (TSTconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64GreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (GreaterThan (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.gt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.gt()))
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanF (InvertFlags x))
+ // result: (LessThanF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ugt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ugt()))
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // result: (LessThanU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LDP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (LDP [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (LDP [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64LDP)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64LDP)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LessEqual (CMPconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (LessEqual (TST x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessEqual (CMPWconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (LessEqual (TSTWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessEqual (CMPWconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (LessEqual (TSTW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessEqual (CMPconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (LessEqual (TSTconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.le())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.le()))
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualF (InvertFlags x))
+ // result: (GreaterEqualF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ule())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ule()))
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // result: (GreaterEqualU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LessThan (CMPconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (LessThan (TST x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPWconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (LessThan (TSTWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPWconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (LessThan (TSTW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (LessThan (TSTconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (LessThanNoov (CMNconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPWconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (LessThanNoov (CMNWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (LessThanNoov (CMN x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPWconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (LessThanNoov (CMNW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPconst [0] z:(MADD a x y)))
+ // cond: z.Uses == 1
+ // result: (LessThanNoov (CMN a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPconst [0] z:(MSUB a x y)))
+ // cond: z.Uses == 1
+ // result: (LessThanNoov (CMP a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPWconst [0] z:(MADDW a x y)))
+ // cond: z.Uses == 1
+ // result: (LessThanNoov (CMNW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (CMPWconst [0] z:(MSUBW a x y)))
+ // cond: z.Uses == 1
+ // result: (LessThanNoov (CMPW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64LessThanNoov)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LessThan (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.lt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.lt()))
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanF (InvertFlags x))
+ // result: (GreaterThanF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanNoov(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LessThanNoov (InvertFlags x))
+ // result: (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64GreaterEqualNoov, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ult())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ult()))
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // result: (GreaterThanU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MADD a x (MOVDconst [-1]))
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a _ (MOVDconst [0]))
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [1]))
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [-1]) x)
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [0]) _)
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADD a (MOVDconst [1]) x)
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MUL <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (ADDconst [c*d] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (MOVWUreg (SUB <a.Type> a x))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVWUreg a)
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (MOVWUreg (ADD <a.Type> a x))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (MOVWUreg (SUB <a.Type> a x))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: (MOVWUreg a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (MOVWUreg (ADD <a.Type> a x))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW (MOVDconst [c]) x y)
+ // result: (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVWUreg (ADDconst <a.Type> [c*d] a))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, a.Type)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v0.AddArg(a)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MNEG x (MOVDconst [-1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEG _ (MOVDconst [0]))
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [1]))
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c >= 3
+ // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c >= 7
+ // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [-c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (MOVWUreg x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (MOVWUreg (NEG <x.Type> x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 1) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
+ // result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
+ // result: (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c / 5))
+ v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v2.AuxInt = int64ToAuxInt(2)
+ v2.AddArg2(x, x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c / 9))
+ v2 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v2.AuxInt = int64ToAuxInt(3)
+ v2.AddArg2(x, x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [int64(uint32(-c*d))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(-c * d)))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [c%d])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MODW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(int32(c)%int32(d)))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(int32(c) % int32(d))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVBUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<8-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<8 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBUreg x:(Equal _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64Equal {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(NotEqual _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64NotEqual {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessThan _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessThan {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessThanU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessThanU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessThanF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessThanF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessEqual _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessEqual {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessEqualU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessEqualU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessEqualF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessEqualF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterThan _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterThan {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterThanU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterThanU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterThanF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterThanF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterEqual _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterEqual {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterEqualU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterEqualU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterEqualF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterEqualF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x)
+ // cond: v.Type.Size() <= 1
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg (SLLconst [lc] x))
+ // cond: lc >= 8
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ if !(lc >= 8) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVBUreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 8) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (SRLconst [rc] x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ rc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (UBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 8
+ // result: (UBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVBload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg x)
+ // cond: v.Type.Size() <= 1
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffffffffff80) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffffffffff80) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 8) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 8
+ // result: (SBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64SBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 8) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVBstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVBstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _))
+ // result: (FMOVDfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVDfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx ptr (SLLconst [3] idx) mem)
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDloadidx (SLLconst [3] idx) ptr mem)
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDload [int32(c)<<3] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDstorezeroidx8 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDnop (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDreg (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem)
+ // result: (FMOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem)
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem)
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDstore [int32(c)<<3] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem))
+ // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x)
+ // result: (MOVQstorezero {s} [i] ptr mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpARM64MOVDstorezero || auxIntToInt32(x.AuxInt) != i+8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem))
+ // cond: x.Uses == 1 && setPos(v, x.Pos) && clobber(x)
+ // result: (MOVQstorezero {s} [i-8] ptr mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ if x.Op != OpARM64MOVDstorezero || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if ptr != x.Args[0] || !(x.Uses == 1 && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDstorezero [int32(c<<3)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 3))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVHUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (ADD idx idx) mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (ADD idx idx) ptr mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHUload [int32(c)<<1] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<16-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<16 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg x)
+ // cond: v.Type.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHUreg (SLLconst [lc] x))
+ // cond: lc >= 16
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ if !(lc >= 16) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVHUreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 16) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (SRLconst [rc] x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ rc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (UBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 16
+ // result: (UBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr (ADD idx idx) mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx (ADD idx idx) ptr mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHload [int32(c)<<1] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg x)
+ // cond: v.Type.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffffffff8000) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffffffff8000) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 16) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 16
+ // result: (SBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64SBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 16) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (ADD idx idx) val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (ADD idx idx) ptr val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHstore [int32(c)<<1] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (ADD idx idx) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (ADD idx idx) ptr mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHstorezero [int32(c<<1)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 1))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _))
+ // result: (FMOVSfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVSfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVWUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWUload [int32(c)<<2] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUloadidx4 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<32-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg x)
+ // cond: v.Type.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWUreg x)
+ // cond: zeroUpper32Bits(x, 3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper32Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWUreg (SLLconst [lc] x))
+ // cond: lc >= 32
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ if !(lc >= 32) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVWUreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 32) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (SRLconst [rc] x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ rc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (UBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 32
+ // result: (UBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWload [int32(c)<<2] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx4 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWloadidx4 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ // match: (MOVWreg x)
+ // cond: v.Type.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if !(v.Type.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg <t> (ANDconst x [c]))
+ // cond: uint64(c) & uint64(0xffffffff80000000) == 0
+ // result: (ANDconst <t> x [c])
+ for {
+ t := v.Type
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint64(c)&uint64(0xffffffff80000000) == 0) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 32) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 32
+ // result: (SBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64SBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 32) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem)
+ // result: (FMOVSstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem)
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem)
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWstore [int32(c)<<2] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx4 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem)
+ // result: (MOVWstoreidx4 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWstorezero [int32(c<<2)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MSUB a x (MOVDconst [-1]))
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a _ (MOVDconst [0]))
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [1]))
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [-1]) x)
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [0]) _)
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [1]) x)
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MNEG <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (SUBconst [c*d] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (MOVWUreg (ADD <a.Type> a x))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVWUreg a)
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (MOVWUreg (SUB <a.Type> a x))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (MOVWUreg (ADD <a.Type> a x))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: (MOVWUreg a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(a)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (MOVWUreg (SUB <a.Type> a x))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg2(a, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUB, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c - 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, a.Type)
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, a.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW (MOVDconst [c]) x y)
+ // result: (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVWUreg (SUBconst <a.Type> [c*d] a))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBconst, a.Type)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v0.AddArg(a)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MUL (NEG x) y)
+ // result: (MNEG x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64NEG {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64MNEG)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [-1]))
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL _ (MOVDconst [0]))
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLconst [log64(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c >= 3
+ // result: (ADDshiftLL x x [log64(c-1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c - 1))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MULW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULW (NEG x) y)
+ // result: (MNEGW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64NEG {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64MNEGW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (MOVWUreg (NEG <x.Type> x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (MOVWUreg x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 1) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
+ // result: (MOVWUreg (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
+ // result: (MOVWUreg (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (ADDshiftLL <x.Type> x x [1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 3))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 7))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v2 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (MOVWUreg (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64MOVWUreg)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [int64(uint32(c*d))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c * d)))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVN(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVN (XOR x y))
+ // result: (EON x y)
+ for {
+ if v_0.Op != OpARM64XOR {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64EON)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (MOVDconst [c]))
+ // result: (MOVDconst [^c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ // match: (MVN x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftLL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRA [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(RORconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRO [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftLL (MOVDconst [c]) [d])
+ // result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRA (MOVDconst [c]) [d])
+ // result: (MOVDconst [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRL (MOVDconst [c]) [d])
+ // result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRO(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRO (MOVDconst [c]) [d])
+ // result: (MOVDconst [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MUL x y))
+ // result: (MNEG x y)
+ for {
+ if v_0.Op != OpARM64MUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MNEG)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (NEG (MULW x y))
+ // cond: v.Type.Size() <= 4
+ // result: (MNEGW x y)
+ for {
+ if v_0.Op != OpARM64MULW {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(v.Type.Size() <= 4) {
+ break
+ }
+ v.reset(OpARM64MNEGW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpARM64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEG (MOVDconst [c]))
+ // result: (MOVDconst [-c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEG x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftLL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRA [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftLL (MOVDconst [c]) [d])
+ // result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftRA (MOVDconst [c]) [d])
+ // result: (MOVDconst [-(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftRL (MOVDconst [c]) [d])
+ // result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NotEqual (CMPconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (TST x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPWconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (NotEqual (TSTWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPWconst [0] z:(AND x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (TSTW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPconst [0] x:(ANDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (NotEqual (TSTconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMP x z:(NEG y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMN x y))
+ for {
+ if v_0.Op != OpARM64CMP {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPW x z:(NEG y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMNW x y))
+ for {
+ if v_0.Op != OpARM64CMPW {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (NotEqual (CMNconst [c] y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPWconst [0] x:(ADDconst [c] y)))
+ // cond: x.Uses == 1
+ // result: (NotEqual (CMNWconst [int32(c)] y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMN x y))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPWconst [0] z:(ADD x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMNW x y))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ y := z.Args[1]
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPconst [0] z:(MADD a x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMN a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPconst [0] z:(MSUB a x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMP a (MUL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPWconst [0] z:(MADDW a x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMNW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (CMPWconst [0] z:(MSUBW a x y)))
+ // cond: z.Uses == 1
+ // result: (NotEqual (CMPW a (MULW <x.Type> x y)))
+ for {
+ if v_0.Op != OpARM64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NotEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ne())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ne()))
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVDconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR x (MVN y))
+ // result: (ORN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64ORN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ // cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ // result: (BFI [bfc] y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64UBFIZ {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ continue
+ }
+ ac := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
+ continue
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ break
+ }
+ // match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
+ // cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ // result: (BFXIL [bfc] y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64UBFX {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ continue
+ }
+ ac := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
+ continue
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORN x (MOVDconst [c]))
+ // result: (ORconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORN x x)
+ // result: (MOVDconst [-1])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftLL x (MOVDconst [c]) [d])
+ // result: (ORconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRA x (MOVDconst [c]) [d])
+ // result: (ORconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRL x (MOVDconst [c]) [d])
+ // result: (ORconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRO x (MOVDconst [c]) [d])
+ // result: (ORconst x [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [c1] (ANDconst [c2] x))
+ // cond: c2|c1 == ^0
+ // result: (ORconst [c1] x)
+ for {
+ c1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c2|c1 == ^0) {
+ break
+ }
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORshiftLL (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVDconst [c]) [d])
+ // result: (ORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ // result: (REV16W x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ // result: (REV16 x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ // result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: ( ORshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
+ // cond: sc == bfc.getARM64BFwidth()
+ // result: (BFXIL [bfc] y x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
+ break
+ }
+ y := v_1.Args[0]
+ if !(sc == bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRA (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVDconst [c]) [d])
+ // result: (ORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRL (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVDconst [c]) [d])
+ // result: (ORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ // cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ // result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
+ break
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x))
+ // cond: lc < rc && ac == ^((1<<uint(64-rc)-1))
+ // result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
+ break
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRO (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRO x (MOVDconst [c]) [d])
+ // result: (ORconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRO y:(RORconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64REV(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (REV (REV p))
+ // result: p
+ for {
+ if v_0.Op != OpARM64REV {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64REVW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (REVW (REVW p))
+ // result: p
+ for {
+ if v_0.Op != OpARM64REVW {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ROR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROR x (MOVDconst [c]))
+ // result: (RORconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORW x (MOVDconst [c]))
+ // result: (RORWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo)))))
+ // result: (SBCSflags x y bo)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64NEGSflags {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64NEG || v_2_0_0.Type != typ.UInt64 {
+ break
+ }
+ v_2_0_0_0 := v_2_0_0.Args[0]
+ if v_2_0_0_0.Op != OpARM64NGCzerocarry || v_2_0_0_0.Type != typ.UInt64 {
+ break
+ }
+ bo := v_2_0_0_0.Args[0]
+ v.reset(OpARM64SBCSflags)
+ v.AddArg3(x, y, bo)
+ return true
+ }
+ // match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0]))))
+ // result: (SUBSflags x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64NEGSflags {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64SUBSflags)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVDconst [c]))
+ // result: (SLLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLL x (ANDconst [63] y))
+ // result: (SLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ // match: (SLLconst [c] (SRLconst [c] x))
+ // cond: 0 < c && c < 64
+ // result: (ANDconst [^(1<<uint(c)-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if !(0 < c && c < 64) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVWreg x))
+ // result: (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVHreg x))
+ // result: (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVBreg x))
+ // result: (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVWUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVHUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVBUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVDconst [c]))
+ // result: (SRAconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRA x (ANDconst [63] y))
+ // result: (SRA x y)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ // match: (SRAconst [rc] (SLLconst [lc] x))
+ // cond: lc > rc
+ // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc > rc) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (SLLconst [lc] x))
+ // cond: lc <= rc
+ // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc <= rc) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVWreg x))
+ // cond: rc < 32
+ // result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVHreg x))
+ // cond: rc < 16
+ // result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVBreg x))
+ // cond: rc < 8
+ // result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [sc] (SBFIZ [bfc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [sc] (SBFIZ [bfc] x))
+ // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVDconst [c]))
+ // result: (SRLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRL x (ANDconst [63] y))
+ // result: (SRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64ANDconst || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ // match: (SRLconst [c] (SLLconst [c] x))
+ // cond: 0 < c && c < 64
+ // result: (ANDconst [1<<uint(64-c)-1] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if !(0 < c && c < 64) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVWUreg x))
+ // cond: rc >= 32
+ // result: (MOVDconst [0])
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if !(rc >= 32) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVHUreg x))
+ // cond: rc >= 16
+ // result: (MOVDconst [0])
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ if !(rc >= 16) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVBUreg x))
+ // cond: rc >= 8
+ // result: (MOVDconst [0])
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ if !(rc >= 8) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc > rc
+ // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc > rc) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc < rc
+ // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < rc) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVWUreg x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVHUreg x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVBUreg x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFX [bfc] x))
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc == bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64STP(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val1 := v_1
+ val2 := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, val1, val2, mem)
+ return true
+ }
+ // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val1 := v_1
+ val2 := v_2
+ mem := v_3
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg4(ptr, val1, val2, mem)
+ return true
+ }
+ // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+ // result: (MOVQstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 || v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB a l:(MUL x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MSUB a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MUL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MSUB)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MNEG x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MADD a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEG {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MADD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MULW x y))
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
+ // result: (MSUBW a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MULW {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MSUBW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MNEGW x y))
+ // cond: v.Type.Size() <= 4 && l.Uses==1 && clobber(l)
+ // result: (MADDW a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEGW {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(v.Type.Size() <= 4 && l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MADDW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUB x (SUB y z))
+ // result: (SUB (ADD <v.Type> x z) y)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64SUB {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (SUB (SUB x y) z)
+ // result: (SUB x (ADD <y.Type> y z))
+ for {
+ if v_0.Op != OpARM64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SUB x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (SUB x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (SUB x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftLL x (MOVDconst [c]) [d])
+ // result: (SUBconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftRA x (MOVDconst [c]) [d])
+ // result: (SUBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftRL x (MOVDconst [c]) [d])
+ // result: (SUBconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TST(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TST x (MOVDconst [c]))
+ // result: (TSTconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TSTW x (MOVDconst [c]))
+ // result: (TSTWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(int32(x)&y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(int32(x) & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [logicFlags64(x&y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags64(x & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLL (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVDconst [c]) [d])
+ // result: (TSTconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRA (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVDconst [c]) [d])
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRL (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVDconst [c]) [d])
+ // result: (TSTconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRO (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRO x (MOVDconst [c]) [d])
+ // result: (TSTconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (UBFIZ [bfc] (SLLconst [sc] x))
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (UBFX [bfc] (ANDconst [c] x))
+ // cond: isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)
+ // result: (UBFX [bfc] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb()+bfc.getARM64BFwidth() <= arm64BFWidth(c, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SRLconst [sc] x))
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc == bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (UDIV x (MOVDconst [1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (UDIV x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLconst [log64(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: (MOVWUreg x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVWUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
+ // result: (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUreg, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (UMOD <typ.UInt64> x y)
+ // result: (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+ for {
+ if v.Type != typ.UInt64 {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MSUB)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (UMOD _ (MOVDconst [1]))
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (UMOD x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (UMODW <typ.UInt32> x y)
+ // result: (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MSUBW)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARM64UDIVW, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (UMODW _ (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (UMODW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVDconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XOR x (MVN y))
+ // result: (EON x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64EON)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (MVN x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORshiftLL (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVDconst [c]) [d])
+ // result: (XORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ // result: (REV16W x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ // result: (REV16 x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ // result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (XORshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRA (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVDconst [c]) [d])
+ // result: (XORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRL (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVDconst [c]) [d])
+ // result: (XORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRO (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRO x (MOVDconst [c]) [d])
+ // result: (XORconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd32Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd8 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd8Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd8Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr32Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr8 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr8Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicOr8Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARM64SUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitRev16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitRev16 x)
+ // result: (SRLconst [48] (RBIT <typ.UInt64> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitRev8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitRev8 x)
+ // result: (SRLconst [56] (RBIT <typ.UInt64> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(56)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CondSelect x y boolval)
+ // cond: flagArg(boolval) != nil
+ // result: (CSEL [boolval.Op] x y flagArg(boolval))
+ for {
+ x := v_0
+ y := v_1
+ boolval := v_2
+ if !(flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ // match: (CondSelect x y boolval)
+ // cond: flagArg(boolval) == nil
+ // result: (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
+ for {
+ x := v_0
+ y := v_1
+ boolval := v_2
+ if !(flagArg(boolval) == nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg(boolval)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (FMOVSconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARM64FMOVSconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (FMOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARM64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVDconst [b2i(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(t))
+ return true
+ }
+}
+func rewriteValueARM64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 <t> x)
+ // result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(0x10000)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz32 <t> x)
+ // result: (CLZW (RBITW <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz64 <t> x)
+ // result: (CLZ (RBIT <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZ)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 <t> x)
+ // result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(0x100)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMADDD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAconst (MULL <typ.Int64> x y) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRAconst (UMULL <typ.UInt64> x y) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x zero:(MOVDconst [0]))
+ // result: (Eq16 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq16)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq16U (MOVDconst [1]) x)
+ // result: (Neq16 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq16U x y)
+ // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (LessEqualF (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x zero:(MOVDconst [0]))
+ // result: (Eq32 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq32)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq32U (MOVDconst [1]) x)
+ // result: (Neq32 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq32U x y)
+ // result: (LessEqualU (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (LessEqualF (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x zero:(MOVDconst [0]))
+ // result: (Eq64 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq64)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq64U (MOVDconst [1]) x)
+ // result: (Neq64 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq64U x y)
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x zero:(MOVDconst [0]))
+ // result: (Eq8 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq8)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq8U (MOVDconst [1]) x)
+ // result: (Neq8 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq8U x y)
+ // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U zero:(MOVDconst [0]) x)
+ // result: (Neq16 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less16U x (MOVDconst [1]))
+ // result: (Eq16 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less16U x y)
+ // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (LessThanF (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U zero:(MOVDconst [0]) x)
+ // result: (Neq32 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less32U x (MOVDconst [1]))
+ // result: (Eq32 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less32U x y)
+ // result: (LessThanU (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (LessThanF (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U zero:(MOVDconst [0]) x)
+ // result: (Neq64 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less64U x (MOVDconst [1]))
+ // result: (Eq64 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less64U x y)
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U zero:(MOVDconst [0]) x)
+ // result: (Neq8 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less8U x (MOVDconst [1]))
+ // result: (Eq8 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less8U x y)
+ // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && t.IsSigned())
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !t.IsSigned())
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && t.IsSigned())
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !t.IsSigned())
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && t.IsSigned())
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !t.IsSigned())
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVDaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVDaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y)
+ // result: (MODW x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (MOD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MOD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVWstore [3] dst (MOVWUload [3] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [9] dst src mem)
+ // result: (MOVBstore [8] dst (MOVBUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [10] dst src mem)
+ // result: (MOVHstore [8] dst (MOVHUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [11] dst src mem)
+ // result: (MOVDstore [3] dst (MOVDload [3] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] dst src mem)
+ // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [13] dst src mem)
+ // result: (MOVDstore [5] dst (MOVDload [5] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 13 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(5)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(5)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [14] dst src mem)
+ // result: (MOVDstore [6] dst (MOVDload [6] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 14 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [15] dst src mem)
+ // result: (MOVDstore [7] dst (MOVDload [7] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 15 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(7)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(7)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64STP)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AddArg2(src, mem)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2.AddArg(v1)
+ v.AddArg4(dst, v0, v2, mem)
+ return true
+ }
+ // match: (Move [32] dst src mem)
+ // result: (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem)) (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AuxInt = int32ToAuxInt(16)
+ v1.AddArg2(src, mem)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v5.AddArg2(src, mem)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v6.AddArg(v5)
+ v3.AddArg4(dst, v4, v6, mem)
+ v.AddArg4(dst, v0, v2, v3)
+ return true
+ }
+ // match: (Move [48] dst src mem)
+ // result: (STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem)) (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem)) (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AuxInt = int32ToAuxInt(32)
+ v1.AddArg2(src, mem)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(16)
+ v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v5.AuxInt = int32ToAuxInt(16)
+ v5.AddArg2(src, mem)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v6.AddArg(v5)
+ v7 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v9 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v9.AddArg2(src, mem)
+ v8.AddArg(v9)
+ v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v10.AddArg(v9)
+ v7.AddArg4(dst, v8, v10, mem)
+ v3.AddArg4(dst, v4, v6, v7)
+ v.AddArg4(dst, v0, v2, v3)
+ return true
+ }
+ // match: (Move [64] dst src mem)
+ // result: (STP [48] dst (Select0 <typ.UInt64> (LDP [48] src mem)) (Select1 <typ.UInt64> (LDP [48] src mem)) (STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem)) (STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem)) (STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AuxInt = int32ToAuxInt(48)
+ v1.AddArg2(src, mem)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(32)
+ v4 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v5 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v5.AuxInt = int32ToAuxInt(32)
+ v5.AddArg2(src, mem)
+ v4.AddArg(v5)
+ v6 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v6.AddArg(v5)
+ v7 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v7.AuxInt = int32ToAuxInt(16)
+ v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v9 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v9.AuxInt = int32ToAuxInt(16)
+ v9.AddArg2(src, mem)
+ v8.AddArg(v9)
+ v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v10.AddArg(v9)
+ v11 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v12 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v13 := b.NewValue0(v.Pos, OpARM64LDP, types.NewTuple(typ.UInt64, typ.UInt64))
+ v13.AddArg2(src, mem)
+ v12.AddArg(v13)
+ v14 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v14.AddArg(v13)
+ v11.AddArg4(dst, v12, v14, mem)
+ v7.AddArg4(dst, v8, v10, v11)
+ v3.AddArg4(dst, v4, v6, v7)
+ v.AddArg4(dst, v0, v2, v3)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%16 != 0 && s%16 <= 8 && s > 16
+ // result: (Move [8] (OffPtr <dst.Type> dst [s-8]) (OffPtr <src.Type> src [s-8]) (Move [s-s%16] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%16 != 0 && s%16 <= 8 && s > 16) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s - 8)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(s - s%16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%16 != 0 && s%16 > 8 && s > 16
+ // result: (Move [16] (OffPtr <dst.Type> dst [s-16]) (OffPtr <src.Type> src [s-16]) (Move [s-s%16] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%16 != 0 && s%16 > 8 && s > 16) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s - 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(s - s%16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(8 * (64 - s/16))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)
+ // result: (LoweredMove dst src (ADDconst <src.Type> src [s-16]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice) && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64LoweredMove)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Not x)
+ // result: (XOR (MOVDconst [1]) x)
+ for {
+ x := v_0
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueARM64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVDaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPopCount64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount64 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPrefetchCache(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCache addr mem)
+ // result: (PRFM [0] addr mem)
+ for {
+ addr := v_0
+ mem := v_1
+ v.reset(OpARM64PRFM)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(addr, mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpPrefetchCacheStreamed(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCacheStreamed addr mem)
+ // result: (PRFM [1] addr mem)
+ for {
+ addr := v_0
+ mem := v_1
+ v.reset(OpARM64PRFM)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(addr, mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpPubBarrier(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (PubBarrier mem)
+ // result: (DMB [0xe] mem)
+ for {
+ mem := v_0
+ v.reset(OpARM64DMB)
+ v.AuxInt = int64ToAuxInt(0xe)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (RotateLeft16 <t> x y)
+ // result: (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64RORW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64ORshiftLL, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(16)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v2 := b.NewValue0(v.Pos, OpARM64NEG, typ.Int64)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x y)
+ // result: (RORW x (NEG <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft64 x y)
+ // result: (ROR x (NEG <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (RotateLeft8 <t> x y)
+ // result: (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64OR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpARM64ANDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(7)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARM64ANDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(7)
+ v5 := b.NewValue0(v.Pos, OpARM64NEG, typ.Int64)
+ v5.AddArg(y)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt32to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL <t> (ZeroExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uhilo x y))
+ // result: (UMULH x y)
+ for {
+ if v_0.Op != OpMul64uhilo {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64UMULH)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y bo))
+ // result: (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ bo := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(bo)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Mul64uover x y))
+ // result: (MUL x y)
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MUL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uhilo x y))
+ // result: (MUL x y)
+ for {
+ if v_0.Op != OpMul64uhilo {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MUL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select1 (Add64carry x y c))
+ // result: (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64ADCzerocarry)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(-1)
+ v3.AddArg(c)
+ v2.AddArg(v3)
+ v1.AddArg3(x, y, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y bo))
+ // result: (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ bo := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64NEG)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v4.AddArg(bo)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Mul64uover x y))
+ // result: (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64UMULH, typ.UInt64)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpARM64MOVDstore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpARM64MOVDstore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpARM64MOVDstore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVHstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVWstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [5] ptr mem)
+ // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] ptr mem)
+ // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [7] ptr mem)
+ // result: (MOVWstore [3] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] ptr mem)
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [9] ptr mem)
+ // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [10] ptr mem)
+ // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [11] ptr mem)
+ // result: (MOVDstore [3] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] ptr mem)
+ // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [13] ptr mem)
+ // result: (MOVDstore [5] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 13 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(5)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [14] ptr mem)
+ // result: (MOVDstore [6] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 14 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [15] ptr mem)
+ // result: (MOVDstore [7] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 15 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(7)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] ptr mem)
+ // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(ptr, v0, v0, mem)
+ return true
+ }
+ // match: (Zero [32] ptr mem)
+ // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg4(ptr, v0, v0, mem)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [48] ptr mem)
+ // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg4(ptr, v0, v0, mem)
+ v1.AddArg4(ptr, v0, v0, v2)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [64] ptr mem)
+ // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(16)
+ v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg4(ptr, v0, v0, mem)
+ v2.AddArg4(ptr, v0, v0, v3)
+ v1.AddArg4(ptr, v0, v0, v2)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 != 0 && s%16 <= 8 && s > 16
+ // result: (Zero [8] (OffPtr <ptr.Type> ptr [s-8]) (Zero [s-s%16] ptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s%16 <= 8 && s > 16) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(s - s%16)
+ v1.AddArg2(ptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 != 0 && s%16 > 8 && s > 16
+ // result: (Zero [16] (OffPtr <ptr.Type> ptr [s-16]) (Zero [s-s%16] ptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s%16 > 8 && s > 16) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(s - s%16)
+ v1.AddArg2(ptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (64 - s/16)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARM64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(4 * (64 - s/16))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice)
+ // result: (LoweredZero ptr (ADDconst <ptr.Type> [s-16] ptr) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) {
+ break
+ }
+ v.reset(OpARM64LoweredZero)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockARM64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockARM64EQ:
+ // match: (EQ (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] x) yes no)
+ // result: (Z x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64Z, x)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] x) yes no)
+ // result: (ZW x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64ZW, x)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (TSTconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (EQ (TSTWconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: fc.eq()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: !fc.eq()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cmp)
+ return true
+ }
+ case BlockARM64FGE:
+ // match: (FGE (InvertFlags cmp) yes no)
+ // result: (FLE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cmp)
+ return true
+ }
+ case BlockARM64FGT:
+ // match: (FGT (InvertFlags cmp) yes no)
+ // result: (FLT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cmp)
+ return true
+ }
+ case BlockARM64FLE:
+ // match: (FLE (InvertFlags cmp) yes no)
+ // result: (FGE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cmp)
+ return true
+ }
+ case BlockARM64FLT:
+ // match: (FLT (InvertFlags cmp) yes no)
+ // result: (FGT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cmp)
+ return true
+ }
+ case BlockARM64GE:
+ // match: (GE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GEnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GEnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GEnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] x) yes no)
+ // result: (TBZ [31] x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(31)
+ return true
+ }
+ // match: (GE (CMPconst [0] x) yes no)
+ // result: (TBZ [63] x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(63)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: fc.ge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: !fc.ge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cmp)
+ return true
+ }
+ case BlockARM64GEnoov:
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.geNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.geNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GEnoov (InvertFlags cmp) yes no)
+ // result: (LEnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LEnoov, cmp)
+ return true
+ }
+ case BlockARM64GT:
+ // match: (GT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GT (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GT (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GTnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GTnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GTnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: fc.gt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: !fc.gt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cmp)
+ return true
+ }
+ case BlockARM64GTnoov:
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.gtNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.gtNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GTnoov (InvertFlags cmp) yes no)
+ // result: (LTnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LTnoov, cmp)
+ return true
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (If (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (If (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (If (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (TBNZ [0] cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockARM64TBNZ, cond)
+ b.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ case BlockJumpTable:
+ // match: (JumpTable idx)
+ // result: (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
+ for {
+ idx := b.Controls[0]
+ v0 := b.NewValue0(b.Pos, OpARM64MOVDaddr, typ.Uintptr)
+ v0.Aux = symToAux(makeJumpTableSym(b))
+ v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ b.resetWithControl2(BlockARM64JUMPTABLE, idx, v0)
+ b.Aux = symToAux(makeJumpTableSym(b))
+ return true
+ }
+ case BlockARM64LE:
+ // match: (LE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LEnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LEnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LEnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: fc.le()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: !fc.le()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cmp)
+ return true
+ }
+ case BlockARM64LEnoov:
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.leNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.leNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LEnoov (InvertFlags cmp) yes no)
+ // result: (GEnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GEnoov, cmp)
+ return true
+ }
+ case BlockARM64LT:
+ // match: (LT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LT (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LT (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LTnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LTnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LTnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] x) yes no)
+ // result: (TBNZ [31] x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(31)
+ return true
+ }
+ // match: (LT (CMPconst [0] x) yes no)
+ // result: (TBNZ [63] x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(63)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: fc.lt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: !fc.lt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cmp)
+ return true
+ }
+ case BlockARM64LTnoov:
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.ltNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.ltNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LTnoov (InvertFlags cmp) yes no)
+ // result: (GTnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GTnoov, cmp)
+ return true
+ }
+ case BlockARM64NE:
+ // match: (NE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] x) yes no)
+ // result: (NZ x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64NZ, x)
+ return true
+ }
+ // match: (NE (CMPWconst [0] x) yes no)
+ // result: (NZW x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64NZW, x)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (TSTconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBNZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (NE (TSTWconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: fc.ne()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: !fc.ne()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cmp)
+ return true
+ }
+ case BlockARM64NZ:
+ // match: (NZ (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (NZ (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (NZ (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (NZ (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (NZ (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (NZ (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (NZ (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (NZ (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (NZ (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (NZ (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (NZ (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ // match: (NZ (ANDconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBNZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (NZ (MOVDconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NZ (MOVDconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockARM64NZW:
+ // match: (NZW (ANDconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (NZW (MOVDconst [c]) yes no)
+ // cond: int32(c) == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NZW (MOVDconst [c]) yes no)
+ // cond: int32(c) != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockARM64TBNZ:
+ // match: (TBNZ [0] (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (TBNZ [0] (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ case BlockARM64UGE:
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: fc.uge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: !fc.uge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cmp)
+ return true
+ }
+ case BlockARM64UGT:
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: fc.ugt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: !fc.ugt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cmp)
+ return true
+ }
+ case BlockARM64ULE:
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: fc.ule()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: !fc.ule()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cmp)
+ return true
+ }
+ case BlockARM64ULT:
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: fc.ult()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: !fc.ult()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cmp)
+ return true
+ }
+ case BlockARM64Z:
+ // match: (Z (ANDconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (Z (MOVDconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (Z (MOVDconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockARM64ZW:
+ // match: (ZW (ANDconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (ZW (MOVDconst [c]) yes no)
+ // cond: int32(c) == 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ZW (MOVDconst [c]) yes no)
+ // cond: int32(c) != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64latelower.go b/src/cmd/compile/internal/ssa/rewriteARM64latelower.go
new file mode 100644
index 0000000..0998757
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM64latelower.go
@@ -0,0 +1,288 @@
+// Code generated from _gen/ARM64latelower.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+func rewriteValueARM64latelower(v *Value) bool {
+ switch v.Op {
+ case OpARM64ADDSconstflags:
+ return rewriteValueARM64latelower_OpARM64ADDSconstflags(v)
+ case OpARM64ADDconst:
+ return rewriteValueARM64latelower_OpARM64ADDconst(v)
+ case OpARM64ANDconst:
+ return rewriteValueARM64latelower_OpARM64ANDconst(v)
+ case OpARM64CMNWconst:
+ return rewriteValueARM64latelower_OpARM64CMNWconst(v)
+ case OpARM64CMNconst:
+ return rewriteValueARM64latelower_OpARM64CMNconst(v)
+ case OpARM64CMPWconst:
+ return rewriteValueARM64latelower_OpARM64CMPWconst(v)
+ case OpARM64CMPconst:
+ return rewriteValueARM64latelower_OpARM64CMPconst(v)
+ case OpARM64ORconst:
+ return rewriteValueARM64latelower_OpARM64ORconst(v)
+ case OpARM64SUBconst:
+ return rewriteValueARM64latelower_OpARM64SUBconst(v)
+ case OpARM64TSTWconst:
+ return rewriteValueARM64latelower_OpARM64TSTWconst(v)
+ case OpARM64TSTconst:
+ return rewriteValueARM64latelower_OpARM64TSTconst(v)
+ case OpARM64XORconst:
+ return rewriteValueARM64latelower_OpARM64XORconst(v)
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64ADDSconstflags(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSconstflags [c] x)
+ // cond: !isARM64addcon(c)
+ // result: (ADDSflags x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(c)) {
+ break
+ }
+ v.reset(OpARM64ADDSflags)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDconst [c] x)
+ // cond: !isARM64addcon(c)
+ // result: (ADD x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(c)) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDconst [c] x)
+ // cond: !isARM64bitcon(uint64(c))
+ // result: (AND x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64bitcon(uint64(c))) {
+ break
+ }
+ v.reset(OpARM64AND)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64CMNWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMNWconst [c] x)
+ // cond: !isARM64addcon(int64(c))
+ // result: (CMNW x (MOVDconst [int64(c)]))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(int64(c))) {
+ break
+ }
+ v.reset(OpARM64CMNW)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64CMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMNconst [c] x)
+ // cond: !isARM64addcon(c)
+ // result: (CMN x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(c)) {
+ break
+ }
+ v.reset(OpARM64CMN)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconst [c] x)
+ // cond: !isARM64addcon(int64(c))
+ // result: (CMPW x (MOVDconst [int64(c)]))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(int64(c))) {
+ break
+ }
+ v.reset(OpARM64CMPW)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPconst [c] x)
+ // cond: !isARM64addcon(c)
+ // result: (CMP x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(c)) {
+ break
+ }
+ v.reset(OpARM64CMP)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORconst [c] x)
+ // cond: !isARM64bitcon(uint64(c))
+ // result: (OR x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64bitcon(uint64(c))) {
+ break
+ }
+ v.reset(OpARM64OR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64SUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBconst [c] x)
+ // cond: !isARM64addcon(c)
+ // result: (SUB x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64addcon(c)) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64TSTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (TSTWconst [c] x)
+ // cond: !isARM64bitcon(uint64(c)|uint64(c)<<32)
+ // result: (TSTW x (MOVDconst [int64(c)]))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARM64bitcon(uint64(c) | uint64(c)<<32)) {
+ break
+ }
+ v.reset(OpARM64TSTW)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64TSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (TSTconst [c] x)
+ // cond: !isARM64bitcon(uint64(c))
+ // result: (TST x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64bitcon(uint64(c))) {
+ break
+ }
+ v.reset(OpARM64TST)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64latelower_OpARM64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORconst [c] x)
+ // cond: !isARM64bitcon(uint64(c))
+ // result: (XOR x (MOVDconst [c]))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if !(!isARM64bitcon(uint64(c))) {
+ break
+ }
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteBlockARM64latelower(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteCond_test.go b/src/cmd/compile/internal/ssa/rewriteCond_test.go
new file mode 100644
index 0000000..eb5c1de
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteCond_test.go
@@ -0,0 +1,635 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math"
+ "math/rand"
+ "testing"
+)
+
+var (
+ x64 int64 = math.MaxInt64 - 2
+ x64b int64 = math.MaxInt64 - 2
+ x64c int64 = math.MaxInt64 - 2
+ y64 int64 = math.MinInt64 + 1
+ x32 int32 = math.MaxInt32 - 2
+ x32b int32 = math.MaxInt32 - 2
+ x32c int32 = math.MaxInt32 - 2
+ y32 int32 = math.MinInt32 + 1
+ one64 int64 = 1
+ one32 int32 = 1
+ v64 int64 = 11 // ensure it's not 2**n +/- 1
+ v64_n int64 = -11
+ v32 int32 = 11
+ v32_n int32 = -11
+ uv32 uint32 = 19
+ uz uint8 = 1 // for lowering to SLL/SRL/SRA
+)
+
+var crTests = []struct {
+ name string
+ tf func(t *testing.T)
+}{
+ {"AddConst64", testAddConst64},
+ {"AddConst32", testAddConst32},
+ {"AddVar64", testAddVar64},
+ {"AddVar64Cset", testAddVar64Cset},
+ {"AddVar32", testAddVar32},
+ {"MAddVar64", testMAddVar64},
+ {"MAddVar32", testMAddVar32},
+ {"MSubVar64", testMSubVar64},
+ {"MSubVar32", testMSubVar32},
+ {"AddShift32", testAddShift32},
+ {"SubShift32", testSubShift32},
+}
+
+var crBenches = []struct {
+ name string
+ bf func(b *testing.B)
+}{
+ {"SoloJump", benchSoloJump},
+ {"CombJump", benchCombJump},
+}
+
+// Test int32/int64's add/sub/madd/msub operations with boundary values to
+// ensure the optimization to 'comparing to zero' expressions of if-statements
+// yield expected results.
+// 32 rewriting rules are covered. At least two scenarios for "Canonicalize
+// the order of arguments to comparisons", which helps with CSE, are covered.
+// The tedious if-else structures are necessary to ensure all concerned rules
+// and machine code sequences are covered.
+// It's for arm64 initially, please see https://github.com/golang/go/issues/38740
+func TestCondRewrite(t *testing.T) {
+ for _, test := range crTests {
+ t.Run(test.name, test.tf)
+ }
+}
+
+// Profile the aforementioned optimization from two angles:
+//
+// SoloJump: generated branching code has one 'jump', for '<' and '>='
+// CombJump: generated branching code has two consecutive 'jump', for '<=' and '>'
+//
+// We expect that 'CombJump' is generally on par with the non-optimized code, and
+// 'SoloJump' demonstrates some improvement.
+// It's for arm64 initially, please see https://github.com/golang/go/issues/38740
+func BenchmarkCondRewrite(b *testing.B) {
+ for _, bench := range crBenches {
+ b.Run(bench.name, bench.bf)
+ }
+}
+
+// var +/- const
+func testAddConst64(t *testing.T) {
+ if x64+11 < 0 {
+ } else {
+ t.Errorf("'%#x + 11 < 0' failed", x64)
+ }
+
+ if x64+13 <= 0 {
+ } else {
+ t.Errorf("'%#x + 13 <= 0' failed", x64)
+ }
+
+ if y64-11 > 0 {
+ } else {
+ t.Errorf("'%#x - 11 > 0' failed", y64)
+ }
+
+ if y64-13 >= 0 {
+ } else {
+ t.Errorf("'%#x - 13 >= 0' failed", y64)
+ }
+
+ if x64+19 > 0 {
+ t.Errorf("'%#x + 19 > 0' failed", x64)
+ }
+
+ if x64+23 >= 0 {
+ t.Errorf("'%#x + 23 >= 0' failed", x64)
+ }
+
+ if y64-19 < 0 {
+ t.Errorf("'%#x - 19 < 0' failed", y64)
+ }
+
+ if y64-23 <= 0 {
+ t.Errorf("'%#x - 23 <= 0' failed", y64)
+ }
+}
+
+// 32-bit var +/- const
+func testAddConst32(t *testing.T) {
+ if x32+11 < 0 {
+ } else {
+ t.Errorf("'%#x + 11 < 0' failed", x32)
+ }
+
+ if x32+13 <= 0 {
+ } else {
+ t.Errorf("'%#x + 13 <= 0' failed", x32)
+ }
+
+ if y32-11 > 0 {
+ } else {
+ t.Errorf("'%#x - 11 > 0' failed", y32)
+ }
+
+ if y32-13 >= 0 {
+ } else {
+ t.Errorf("'%#x - 13 >= 0' failed", y32)
+ }
+
+ if x32+19 > 0 {
+ t.Errorf("'%#x + 19 > 0' failed", x32)
+ }
+
+ if x32+23 >= 0 {
+ t.Errorf("'%#x + 23 >= 0' failed", x32)
+ }
+
+ if y32-19 < 0 {
+ t.Errorf("'%#x - 19 < 0' failed", y32)
+ }
+
+ if y32-23 <= 0 {
+ t.Errorf("'%#x - 23 <= 0' failed", y32)
+ }
+}
+
+// var + var
+func testAddVar64(t *testing.T) {
+ if x64+v64 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x < 0' failed", x64, v64)
+ }
+
+ if x64+v64 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x <= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n > 0 {
+ } else {
+ t.Errorf("'%#x + %#x > 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x >= 0' failed", y64, v64_n)
+ }
+
+ if x64+v64 > 0 {
+ t.Errorf("'%#x + %#x > 0' failed", x64, v64)
+ }
+
+ if x64+v64 >= 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n < 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n <= 0 {
+ t.Errorf("'%#x + %#x <= 0' failed", y64, v64_n)
+ }
+}
+
+// var + var, cset
+func testAddVar64Cset(t *testing.T) {
+ var a int
+ if x64+v64 < 0 {
+ a = 1
+ }
+ if a != 1 {
+ t.Errorf("'%#x + %#x < 0' failed", x64, v64)
+ }
+
+ a = 0
+ if y64+v64_n >= 0 {
+ a = 1
+ }
+ if a != 1 {
+ t.Errorf("'%#x + %#x >= 0' failed", y64, v64_n)
+ }
+
+ a = 1
+ if x64+v64 >= 0 {
+ a = 0
+ }
+ if a == 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x64, v64)
+ }
+
+ a = 1
+ if y64+v64_n < 0 {
+ a = 0
+ }
+ if a == 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y64, v64_n)
+ }
+}
+
+// 32-bit var+var
+func testAddVar32(t *testing.T) {
+ if x32+v32 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x < 0' failed", x32, v32)
+ }
+
+ if x32+v32 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x <= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n > 0 {
+ } else {
+ t.Errorf("'%#x + %#x > 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x >= 0' failed", y32, v32_n)
+ }
+
+ if x32+v32 > 0 {
+ t.Errorf("'%#x + %#x > 0' failed", x32, v32)
+ }
+
+ if x32+v32 >= 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n < 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n <= 0 {
+ t.Errorf("'%#x + %#x <= 0' failed", y32, v32_n)
+ }
+}
+
+// multiply-add
+func testMAddVar64(t *testing.T) {
+ if x64+v64*one64 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 < 0' failed", x64, v64)
+ }
+
+ if x64+v64*one64 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n*one64 > 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 > 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", y64, v64_n)
+ }
+
+ if x64+v64*one64 > 0 {
+ t.Errorf("'%#x + %#x*1 > 0' failed", x64, v64)
+ }
+
+ if x64+v64*one64 >= 0 {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n*one64 < 0 {
+ t.Errorf("'%#x + %#x*1 < 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n*one64 <= 0 {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", y64, v64_n)
+ }
+}
+
+// 32-bit multiply-add
+func testMAddVar32(t *testing.T) {
+ if x32+v32*one32 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 < 0' failed", x32, v32)
+ }
+
+ if x32+v32*one32 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n*one32 > 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 > 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", y32, v32_n)
+ }
+
+ if x32+v32*one32 > 0 {
+ t.Errorf("'%#x + %#x*1 > 0' failed", x32, v32)
+ }
+
+ if x32+v32*one32 >= 0 {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n*one32 < 0 {
+ t.Errorf("'%#x + %#x*1 < 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n*one32 <= 0 {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", y32, v32_n)
+ }
+}
+
+// multiply-sub
+func testMSubVar64(t *testing.T) {
+ if x64-v64_n*one64 < 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x64, v64_n)
+ }
+
+ if x64-v64_n*one64 <= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", x64, v64_n)
+ }
+
+ if y64-v64*one64 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 > 0' failed", y64, v64)
+ }
+
+ if y64-v64*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", y64, v64)
+ }
+
+ if x64-v64_n*one64 > 0 {
+ t.Errorf("'%#x - %#x*1 > 0' failed", x64, v64_n)
+ }
+
+ if x64-v64_n*one64 >= 0 {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x64, v64_n)
+ }
+
+ if y64-v64*one64 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", y64, v64)
+ }
+
+ if y64-v64*one64 <= 0 {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", y64, v64)
+ }
+
+ if x64-x64b*one64 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x64, x64b)
+ }
+
+ if x64-x64b*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x64, x64b)
+ }
+}
+
+// 32-bit multiply-sub
+func testMSubVar32(t *testing.T) {
+ if x32-v32_n*one32 < 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x32, v32_n)
+ }
+
+ if x32-v32_n*one32 <= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", x32, v32_n)
+ }
+
+ if y32-v32*one32 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 > 0' failed", y32, v32)
+ }
+
+ if y32-v32*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", y32, v32)
+ }
+
+ if x32-v32_n*one32 > 0 {
+ t.Errorf("'%#x - %#x*1 > 0' failed", x32, v32_n)
+ }
+
+ if x32-v32_n*one32 >= 0 {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x32, v32_n)
+ }
+
+ if y32-v32*one32 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", y32, v32)
+ }
+
+ if y32-v32*one32 <= 0 {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", y32, v32)
+ }
+
+ if x32-x32b*one32 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x32, x32b)
+ }
+
+ if x32-x32b*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x32, x32b)
+ }
+}
+
+// 32-bit ADDshift, pick up 1~2 scenarios randomly for each condition
+func testAddShift32(t *testing.T) {
+ if x32+v32<<1 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x<<%#x < 0' failed", x32, v32, 1)
+ }
+
+ if x32+v32>>1 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x>>%#x <= 0' failed", x32, v32, 1)
+ }
+
+ if x32+int32(uv32>>1) > 0 {
+ t.Errorf("'%#x + int32(%#x>>%#x) > 0' failed", x32, uv32, 1)
+ }
+
+ if x32+v32<<uz >= 0 {
+ t.Errorf("'%#x + %#x<<%#x >= 0' failed", x32, v32, uz)
+ }
+
+ if x32+v32>>uz > 0 {
+ t.Errorf("'%#x + %#x>>%#x > 0' failed", x32, v32, uz)
+ }
+
+ if x32+int32(uv32>>uz) < 0 {
+ } else {
+ t.Errorf("'%#x + int32(%#x>>%#x) < 0' failed", x32, uv32, uz)
+ }
+}
+
+// 32-bit SUBshift, pick up 1~2 scenarios randomly for each condition
+func testSubShift32(t *testing.T) {
+ if y32-v32<<1 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x<<%#x > 0' failed", y32, v32, 1)
+ }
+
+ if y32-v32>>1 < 0 {
+ t.Errorf("'%#x - %#x>>%#x < 0' failed", y32, v32, 1)
+ }
+
+ if y32-int32(uv32>>1) >= 0 {
+ } else {
+ t.Errorf("'%#x - int32(%#x>>%#x) >= 0' failed", y32, uv32, 1)
+ }
+
+ if y32-v32<<uz < 0 {
+ t.Errorf("'%#x - %#x<<%#x < 0' failed", y32, v32, uz)
+ }
+
+ if y32-v32>>uz >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x>>%#x >= 0' failed", y32, v32, uz)
+ }
+
+ if y32-int32(uv32>>uz) <= 0 {
+ t.Errorf("'%#x - int32(%#x>>%#x) <= 0' failed", y32, uv32, uz)
+ }
+}
+
+var rnd = rand.New(rand.NewSource(0))
+var sink int64
+
+func benchSoloJump(b *testing.B) {
+ r1 := x64
+ r2 := x64b
+ r3 := x64c
+ r4 := y64
+ d := rnd.Int63n(10)
+
+ // 6 out 10 conditions evaluate to true
+ for i := 0; i < b.N; i++ {
+ if r1+r2 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r3 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r2*one64 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r2+r3*one64 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1-r2*v64 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r3-r4*v64 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+11 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+13 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-17 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-19 >= 0 {
+ d *= 2
+ d /= 2
+ }
+ }
+ sink = d
+}
+
+func benchCombJump(b *testing.B) {
+ r1 := x64
+ r2 := x64b
+ r3 := x64c
+ r4 := y64
+ d := rnd.Int63n(10)
+
+ // 6 out 10 conditions evaluate to true
+ for i := 0; i < b.N; i++ {
+ if r1+r2 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r3 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r2*one64 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r2+r3*one64 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1-r2*v64 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r3-r4*v64 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+11 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+13 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-17 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-19 > 0 {
+ d *= 2
+ d /= 2
+ }
+ }
+ sink = d
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
new file mode 100644
index 0000000..edd3ffe
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -0,0 +1,8037 @@
+// Code generated from _gen/LOONG64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueLOONG64(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpLOONG64ADDV
+ return true
+ case OpAdd32:
+ v.Op = OpLOONG64ADDV
+ return true
+ case OpAdd32F:
+ v.Op = OpLOONG64ADDF
+ return true
+ case OpAdd64:
+ v.Op = OpLOONG64ADDV
+ return true
+ case OpAdd64F:
+ v.Op = OpLOONG64ADDD
+ return true
+ case OpAdd8:
+ v.Op = OpLOONG64ADDV
+ return true
+ case OpAddPtr:
+ v.Op = OpLOONG64ADDV
+ return true
+ case OpAddr:
+ return rewriteValueLOONG64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpLOONG64AND
+ return true
+ case OpAnd32:
+ v.Op = OpLOONG64AND
+ return true
+ case OpAnd64:
+ v.Op = OpLOONG64AND
+ return true
+ case OpAnd8:
+ v.Op = OpLOONG64AND
+ return true
+ case OpAndB:
+ v.Op = OpLOONG64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpLOONG64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpLOONG64LoweredAtomicAdd64
+ return true
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueLOONG64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpLOONG64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpLOONG64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpLOONG64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpLOONG64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpLOONG64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpLOONG64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpLOONG64LoweredAtomicLoad64
+ return true
+ case OpAtomicStore32:
+ v.Op = OpLOONG64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpLOONG64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpLOONG64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpLOONG64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueLOONG64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpLOONG64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueLOONG64_OpCom16(v)
+ case OpCom32:
+ return rewriteValueLOONG64_OpCom32(v)
+ case OpCom64:
+ return rewriteValueLOONG64_OpCom64(v)
+ case OpCom8:
+ return rewriteValueLOONG64_OpCom8(v)
+ case OpCondSelect:
+ return rewriteValueLOONG64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueLOONG64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueLOONG64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueLOONG64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueLOONG64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueLOONG64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueLOONG64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueLOONG64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueLOONG64_OpConstNil(v)
+ case OpCvt32Fto32:
+ v.Op = OpLOONG64TRUNCFW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpLOONG64TRUNCFV
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpLOONG64MOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpLOONG64MOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpLOONG64MOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpLOONG64TRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpLOONG64MOVDF
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpLOONG64TRUNCDV
+ return true
+ case OpCvt64to32F:
+ v.Op = OpLOONG64MOVVF
+ return true
+ case OpCvt64to64F:
+ v.Op = OpLOONG64MOVVD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueLOONG64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueLOONG64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueLOONG64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpLOONG64DIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueLOONG64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueLOONG64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpLOONG64DIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpLOONG64DIVVU
+ return true
+ case OpDiv8:
+ return rewriteValueLOONG64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueLOONG64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueLOONG64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueLOONG64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueLOONG64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueLOONG64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueLOONG64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueLOONG64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueLOONG64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueLOONG64_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpLOONG64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpLOONG64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpLOONG64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueLOONG64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueLOONG64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpLOONG64MULHV
+ return true
+ case OpHmul64u:
+ v.Op = OpLOONG64MULHVU
+ return true
+ case OpInterCall:
+ v.Op = OpLOONG64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueLOONG64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueLOONG64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueLOONG64_OpIsSliceInBounds(v)
+ case OpLOONG64ADDV:
+ return rewriteValueLOONG64_OpLOONG64ADDV(v)
+ case OpLOONG64ADDVconst:
+ return rewriteValueLOONG64_OpLOONG64ADDVconst(v)
+ case OpLOONG64AND:
+ return rewriteValueLOONG64_OpLOONG64AND(v)
+ case OpLOONG64ANDconst:
+ return rewriteValueLOONG64_OpLOONG64ANDconst(v)
+ case OpLOONG64DIVV:
+ return rewriteValueLOONG64_OpLOONG64DIVV(v)
+ case OpLOONG64DIVVU:
+ return rewriteValueLOONG64_OpLOONG64DIVVU(v)
+ case OpLOONG64LoweredAtomicAdd32:
+ return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v)
+ case OpLOONG64LoweredAtomicAdd64:
+ return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v)
+ case OpLOONG64LoweredAtomicStore32:
+ return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v)
+ case OpLOONG64LoweredAtomicStore64:
+ return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v)
+ case OpLOONG64MASKEQZ:
+ return rewriteValueLOONG64_OpLOONG64MASKEQZ(v)
+ case OpLOONG64MASKNEZ:
+ return rewriteValueLOONG64_OpLOONG64MASKNEZ(v)
+ case OpLOONG64MOVBUload:
+ return rewriteValueLOONG64_OpLOONG64MOVBUload(v)
+ case OpLOONG64MOVBUreg:
+ return rewriteValueLOONG64_OpLOONG64MOVBUreg(v)
+ case OpLOONG64MOVBload:
+ return rewriteValueLOONG64_OpLOONG64MOVBload(v)
+ case OpLOONG64MOVBreg:
+ return rewriteValueLOONG64_OpLOONG64MOVBreg(v)
+ case OpLOONG64MOVBstore:
+ return rewriteValueLOONG64_OpLOONG64MOVBstore(v)
+ case OpLOONG64MOVBstorezero:
+ return rewriteValueLOONG64_OpLOONG64MOVBstorezero(v)
+ case OpLOONG64MOVDload:
+ return rewriteValueLOONG64_OpLOONG64MOVDload(v)
+ case OpLOONG64MOVDstore:
+ return rewriteValueLOONG64_OpLOONG64MOVDstore(v)
+ case OpLOONG64MOVFload:
+ return rewriteValueLOONG64_OpLOONG64MOVFload(v)
+ case OpLOONG64MOVFstore:
+ return rewriteValueLOONG64_OpLOONG64MOVFstore(v)
+ case OpLOONG64MOVHUload:
+ return rewriteValueLOONG64_OpLOONG64MOVHUload(v)
+ case OpLOONG64MOVHUreg:
+ return rewriteValueLOONG64_OpLOONG64MOVHUreg(v)
+ case OpLOONG64MOVHload:
+ return rewriteValueLOONG64_OpLOONG64MOVHload(v)
+ case OpLOONG64MOVHreg:
+ return rewriteValueLOONG64_OpLOONG64MOVHreg(v)
+ case OpLOONG64MOVHstore:
+ return rewriteValueLOONG64_OpLOONG64MOVHstore(v)
+ case OpLOONG64MOVHstorezero:
+ return rewriteValueLOONG64_OpLOONG64MOVHstorezero(v)
+ case OpLOONG64MOVVload:
+ return rewriteValueLOONG64_OpLOONG64MOVVload(v)
+ case OpLOONG64MOVVreg:
+ return rewriteValueLOONG64_OpLOONG64MOVVreg(v)
+ case OpLOONG64MOVVstore:
+ return rewriteValueLOONG64_OpLOONG64MOVVstore(v)
+ case OpLOONG64MOVVstorezero:
+ return rewriteValueLOONG64_OpLOONG64MOVVstorezero(v)
+ case OpLOONG64MOVWUload:
+ return rewriteValueLOONG64_OpLOONG64MOVWUload(v)
+ case OpLOONG64MOVWUreg:
+ return rewriteValueLOONG64_OpLOONG64MOVWUreg(v)
+ case OpLOONG64MOVWload:
+ return rewriteValueLOONG64_OpLOONG64MOVWload(v)
+ case OpLOONG64MOVWreg:
+ return rewriteValueLOONG64_OpLOONG64MOVWreg(v)
+ case OpLOONG64MOVWstore:
+ return rewriteValueLOONG64_OpLOONG64MOVWstore(v)
+ case OpLOONG64MOVWstorezero:
+ return rewriteValueLOONG64_OpLOONG64MOVWstorezero(v)
+ case OpLOONG64MULV:
+ return rewriteValueLOONG64_OpLOONG64MULV(v)
+ case OpLOONG64NEGV:
+ return rewriteValueLOONG64_OpLOONG64NEGV(v)
+ case OpLOONG64NOR:
+ return rewriteValueLOONG64_OpLOONG64NOR(v)
+ case OpLOONG64NORconst:
+ return rewriteValueLOONG64_OpLOONG64NORconst(v)
+ case OpLOONG64OR:
+ return rewriteValueLOONG64_OpLOONG64OR(v)
+ case OpLOONG64ORconst:
+ return rewriteValueLOONG64_OpLOONG64ORconst(v)
+ case OpLOONG64REMV:
+ return rewriteValueLOONG64_OpLOONG64REMV(v)
+ case OpLOONG64REMVU:
+ return rewriteValueLOONG64_OpLOONG64REMVU(v)
+ case OpLOONG64ROTR:
+ return rewriteValueLOONG64_OpLOONG64ROTR(v)
+ case OpLOONG64ROTRV:
+ return rewriteValueLOONG64_OpLOONG64ROTRV(v)
+ case OpLOONG64SGT:
+ return rewriteValueLOONG64_OpLOONG64SGT(v)
+ case OpLOONG64SGTU:
+ return rewriteValueLOONG64_OpLOONG64SGTU(v)
+ case OpLOONG64SGTUconst:
+ return rewriteValueLOONG64_OpLOONG64SGTUconst(v)
+ case OpLOONG64SGTconst:
+ return rewriteValueLOONG64_OpLOONG64SGTconst(v)
+ case OpLOONG64SLLV:
+ return rewriteValueLOONG64_OpLOONG64SLLV(v)
+ case OpLOONG64SLLVconst:
+ return rewriteValueLOONG64_OpLOONG64SLLVconst(v)
+ case OpLOONG64SRAV:
+ return rewriteValueLOONG64_OpLOONG64SRAV(v)
+ case OpLOONG64SRAVconst:
+ return rewriteValueLOONG64_OpLOONG64SRAVconst(v)
+ case OpLOONG64SRLV:
+ return rewriteValueLOONG64_OpLOONG64SRLV(v)
+ case OpLOONG64SRLVconst:
+ return rewriteValueLOONG64_OpLOONG64SRLVconst(v)
+ case OpLOONG64SUBV:
+ return rewriteValueLOONG64_OpLOONG64SUBV(v)
+ case OpLOONG64SUBVconst:
+ return rewriteValueLOONG64_OpLOONG64SUBVconst(v)
+ case OpLOONG64XOR:
+ return rewriteValueLOONG64_OpLOONG64XOR(v)
+ case OpLOONG64XORconst:
+ return rewriteValueLOONG64_OpLOONG64XORconst(v)
+ case OpLeq16:
+ return rewriteValueLOONG64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueLOONG64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueLOONG64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueLOONG64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueLOONG64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueLOONG64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueLOONG64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueLOONG64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueLOONG64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueLOONG64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueLOONG64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueLOONG64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueLOONG64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueLOONG64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueLOONG64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueLOONG64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueLOONG64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueLOONG64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueLOONG64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueLOONG64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueLOONG64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueLOONG64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueLOONG64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueLOONG64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueLOONG64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueLOONG64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueLOONG64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueLOONG64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueLOONG64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueLOONG64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueLOONG64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueLOONG64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueLOONG64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueLOONG64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueLOONG64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueLOONG64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueLOONG64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueLOONG64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueLOONG64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueLOONG64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueLOONG64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueLOONG64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueLOONG64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpLOONG64REMVU
+ return true
+ case OpMod8:
+ return rewriteValueLOONG64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueLOONG64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueLOONG64_OpMove(v)
+ case OpMul16:
+ v.Op = OpLOONG64MULV
+ return true
+ case OpMul32:
+ v.Op = OpLOONG64MULV
+ return true
+ case OpMul32F:
+ v.Op = OpLOONG64MULF
+ return true
+ case OpMul64:
+ v.Op = OpLOONG64MULV
+ return true
+ case OpMul64F:
+ v.Op = OpLOONG64MULD
+ return true
+ case OpMul8:
+ v.Op = OpLOONG64MULV
+ return true
+ case OpNeg16:
+ v.Op = OpLOONG64NEGV
+ return true
+ case OpNeg32:
+ v.Op = OpLOONG64NEGV
+ return true
+ case OpNeg32F:
+ v.Op = OpLOONG64NEGF
+ return true
+ case OpNeg64:
+ v.Op = OpLOONG64NEGV
+ return true
+ case OpNeg64F:
+ v.Op = OpLOONG64NEGD
+ return true
+ case OpNeg8:
+ v.Op = OpLOONG64NEGV
+ return true
+ case OpNeq16:
+ return rewriteValueLOONG64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueLOONG64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueLOONG64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueLOONG64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueLOONG64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueLOONG64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpLOONG64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueLOONG64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpLOONG64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueLOONG64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueLOONG64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpLOONG64OR
+ return true
+ case OpOr32:
+ v.Op = OpLOONG64OR
+ return true
+ case OpOr64:
+ v.Op = OpLOONG64OR
+ return true
+ case OpOr8:
+ v.Op = OpLOONG64OR
+ return true
+ case OpOrB:
+ v.Op = OpLOONG64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueLOONG64_OpPanicBounds(v)
+ case OpRotateLeft16:
+ return rewriteValueLOONG64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueLOONG64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueLOONG64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueLOONG64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueLOONG64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueLOONG64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueLOONG64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueLOONG64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueLOONG64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueLOONG64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueLOONG64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueLOONG64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueLOONG64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueLOONG64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueLOONG64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueLOONG64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueLOONG64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueLOONG64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueLOONG64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueLOONG64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueLOONG64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueLOONG64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueLOONG64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueLOONG64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueLOONG64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueLOONG64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueLOONG64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueLOONG64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueLOONG64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueLOONG64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueLOONG64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueLOONG64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueLOONG64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueLOONG64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueLOONG64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueLOONG64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueLOONG64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueLOONG64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpLOONG64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpLOONG64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpLOONG64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpLOONG64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpLOONG64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpLOONG64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueLOONG64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpLOONG64SQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpLOONG64SQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpLOONG64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueLOONG64_OpStore(v)
+ case OpSub16:
+ v.Op = OpLOONG64SUBV
+ return true
+ case OpSub32:
+ v.Op = OpLOONG64SUBV
+ return true
+ case OpSub32F:
+ v.Op = OpLOONG64SUBF
+ return true
+ case OpSub64:
+ v.Op = OpLOONG64SUBV
+ return true
+ case OpSub64F:
+ v.Op = OpLOONG64SUBD
+ return true
+ case OpSub8:
+ v.Op = OpLOONG64SUBV
+ return true
+ case OpSubPtr:
+ v.Op = OpLOONG64SUBV
+ return true
+ case OpTailCall:
+ v.Op = OpLOONG64CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpLOONG64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpLOONG64XOR
+ return true
+ case OpXor32:
+ v.Op = OpLOONG64XOR
+ return true
+ case OpXor64:
+ v.Op = OpLOONG64XOR
+ return true
+ case OpXor8:
+ v.Op = OpLOONG64XOR
+ return true
+ case OpZero:
+ return rewriteValueLOONG64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpLOONG64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpLOONG64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpLOONG64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpLOONG64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpLOONG64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpLOONG64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpLOONG64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpLOONG64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64ADDV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLVconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SUBV, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpLOONG64NOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpLOONG64NOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpLOONG64NOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpLOONG64NOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CondSelect <t> x y cond)
+ // result: (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ cond := v_2
+ v.reset(OpLOONG64OR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MASKEQZ, t)
+ v0.AddArg2(x, cond)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MASKNEZ, t)
+ v1.AddArg2(y, cond)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpLOONG64MOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpLOONG64MOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVVconst [int64(b2i(t))])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(b2i(t)))
+ return true
+ }
+}
+func rewriteValueLOONG64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVVconst [0])
+ for {
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (DIVV (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVVU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (DIVV (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVVU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y)
+ // result: (DIVV x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVV)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVV (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64DIVVU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRLVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MULV, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpLOONG64SGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVVconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XOR (MOVVconst [1]) (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v1.AddArg2(idx, len)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDV x (MOVVconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
+ // result: (ADDVconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && !t.IsPtr()) {
+ continue
+ }
+ v.reset(OpLOONG64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDV x (NEGV y))
+ // result: (SUBV x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64NEGV {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpLOONG64SUBV)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpLOONG64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDVconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpLOONG64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDVconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpLOONG64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpLOONG64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVVconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpLOONG64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64DIVV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVV (MOVVconst [c]) (MOVVconst [d]))
+ // cond: d != 0
+ // result: (MOVVconst [c/d])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64DIVVU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVVU x (MOVVconst [1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (DIVVU x (MOVVconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLVconst [log64(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpLOONG64SRLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (DIVVU (MOVVconst [c]) (MOVVconst [d]))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpLOONG64LoweredAtomicAddconst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst64 [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpLOONG64LoweredAtomicAddconst64)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero32 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpLOONG64LoweredAtomicStorezero32)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero64 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpLOONG64LoweredAtomicStorezero64)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MASKEQZ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MASKEQZ (MOVVconst [0]) cond)
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MASKEQZ x (MOVVconst [c]))
+ // cond: c == 0
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MASKEQZ x (MOVVconst [c]))
+ // cond: c != 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MASKNEZ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MASKNEZ (MOVVconst [0]) cond)
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(SGT _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64SGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SGTU _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpLOONG64SGTU {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVFload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVVreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVVreg x)
+ // cond: x.Uses == 1
+ // result: (MOVVnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpLOONG64MOVVnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVVreg (MOVVconst [c]))
+ // result: (MOVVconst [c])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVWUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVWUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHUload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVWload {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVBUreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVHreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpLOONG64MOVWreg {
+ break
+ }
+ v.reset(OpLOONG64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpLOONG64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpLOONG64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64MULV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULV x (MOVVconst [-1]))
+ // result: (NEGV x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpLOONG64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULV _ (MOVVconst [0]))
+ // result: (MOVVconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MULV x (MOVVconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MULV x (MOVVconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLVconst [log64(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpLOONG64SLLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULV (MOVVconst [c]) (MOVVconst [d]))
+ // result: (MOVVconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64NEGV(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGV (MOVVconst [c]))
+ // result: (MOVVconst [-c])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpLOONG64NORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64NORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [^(c|d)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpLOONG64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVVconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond: is32Bit(c|d)
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c | d)) {
+ break
+ }
+ v.reset(OpLOONG64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64REMV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (REMV (MOVVconst [c]) (MOVVconst [d]))
+ // cond: d != 0
+ // result: (MOVVconst [c%d])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64REMVU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (REMVU _ (MOVVconst [1]))
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (REMVU x (MOVVconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpLOONG64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (REMVU (MOVVconst [c]) (MOVVconst [d]))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64ROTR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTR x (MOVVconst [c]))
+ // result: (ROTRconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64ROTRconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64ROTRV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTRV x (MOVVconst [c]))
+ // result: (ROTRVconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64ROTRVconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpLOONG64SGTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGT x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpLOONG64SGTUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGTU x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)>uint64(d)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) > uint64(d)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)<=uint64(d)
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) <= uint64(d)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVBUreg || !(0xff < uint64(c)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < uint64(c)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint64(m) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(m) < uint64(c)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLVconst _ [d]))
+ // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c>d
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c<=d
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVWUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLVconst _ [d]))
+ // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLLV x (MOVVconst [c]))
+ // result: (SLLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64SLLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SLLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SRAV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAV x (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (SRAVconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpLOONG64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAV x (MOVVconst [c]))
+ // result: (SRAVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64SRAVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SRAVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SRLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLV x (MOVVconst [c]))
+ // result: (SRLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpLOONG64SRLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SRLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SUBV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBV x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBVconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpLOONG64SUBVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBV x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBV (MOVVconst [0]) x)
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpLOONG64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64SUBVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(-c-d)
+ // result: (ADDVconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c - d)) {
+ break
+ }
+ v.reset(OpLOONG64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(-c+d)
+ // result: (ADDVconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c + d)) {
+ break
+ }
+ v.reset(OpLOONG64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpLOONG64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLOONG64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpLOONG64NORconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpLOONG64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond: is32Bit(c^d)
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpLOONG64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c ^ d)) {
+ break
+ }
+ v.reset(OpLOONG64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (XOR (MOVVconst [1]) (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64XOR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to64 y) (SignExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SGT (SignExt32to64 y) (SignExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to64 y) (SignExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpLOONG64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && t.IsSigned())
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpLOONG64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !t.IsSigned())
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpLOONG64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && t.IsSigned())
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpLOONG64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !t.IsSigned())
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpLOONG64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && t.IsSigned())
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpLOONG64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !t.IsSigned())
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpLOONG64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVVload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpLOONG64MOVVload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpLOONG64MOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpLOONG64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVVaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpLOONG64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVVaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpLOONG64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SLLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (REMV (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMVU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (REMV (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMVU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (REMV x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMV)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (REMV (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64REMVU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore dst (MOVVload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpLOONG64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0
+ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpLOONG64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SGTU)
+ v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpLOONG64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpLOONG64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDVconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpLOONG64ADDVconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpLOONG64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpLOONG64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpLOONG64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVVconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x y)
+ // result: (ROTR x (NEGV <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64ROTR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft64 x y)
+ // result: (ROTRV x (NEGV <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64ROTRV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVVconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpLOONG64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt32to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(y, v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64MASKEQZ)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SRLV, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v3.AddArg2(v4, v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpLOONG64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpLOONG64OR, t)
+ v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Select0 (Mul64uhilo x y))
+ // result: (MULHVU x y)
+ for {
+ if v_0.Op != OpMul64uhilo {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLOONG64MULHVU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (Mul64uover x y))
+ // result: (MULV x y)
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLOONG64MULV)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 <t> (Add64carry x y c))
+ // result: (ADDV (ADDV <t> x y) c)
+ for {
+ t := v.Type
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpLOONG64ADDV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64ADDV, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ // match: (Select0 <t> (Sub64borrow x y c))
+ // result: (SUBV (SUBV <t> x y) c)
+ for {
+ t := v.Type
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpLOONG64SUBV)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SUBV, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uhilo x y))
+ // result: (MULV x y)
+ for {
+ if v_0.Op != OpMul64uhilo {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLOONG64MULV)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select1 (Mul64uover x y))
+ // result: (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLOONG64SGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpLOONG64MULHVU, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Select1 <t> (Add64carry x y c))
+ // result: (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+ for {
+ t := v.Type
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpLOONG64OR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SGTU, t)
+ s := b.NewValue0(v.Pos, OpLOONG64ADDV, t)
+ s.AddArg2(x, y)
+ v0.AddArg2(x, s)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64ADDV, t)
+ v3.AddArg2(s, c)
+ v2.AddArg2(s, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 <t> (Sub64borrow x y c))
+ // result: (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+ for {
+ t := v.Type
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpLOONG64OR)
+ v0 := b.NewValue0(v.Pos, OpLOONG64SGTU, t)
+ s := b.NewValue0(v.Pos, OpLOONG64SUBV, t)
+ s.AddArg2(x, y)
+ v0.AddArg2(s, x)
+ v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, t)
+ v3 := b.NewValue0(v.Pos, OpLOONG64SUBV, t)
+ v3.AddArg2(s, c)
+ v2.AddArg2(v3, s)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAVconst (NEGV <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpLOONG64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueLOONG64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpLOONG64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVVstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpLOONG64MOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpLOONG64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueLOONG64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpLOONG64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpLOONG64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpLOONG64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpLOONG64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpLOONG64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockLOONG64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockLOONG64EQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpLOONG64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64FPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpLOONG64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64FPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGT {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGTU {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpLOONG64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64NE, x)
+ return true
+ }
+ // match: (EQ (SGTU x (MOVVconst [0])) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpLOONG64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpLOONG64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64GEZ, x)
+ return true
+ }
+ // match: (EQ (SGT x (MOVVconst [0])) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpLOONG64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64LEZ, x)
+ return true
+ }
+ // match: (EQ (MOVVconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockLOONG64GEZ:
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockLOONG64GTZ:
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE (MOVBUreg <typ.UInt64> cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockLOONG64NE, v0)
+ return true
+ }
+ case BlockLOONG64LEZ:
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockLOONG64LTZ:
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockLOONG64NE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpLOONG64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64FPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpLOONG64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64FPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGT {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGTU {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpLOONG64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpLOONG64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockLOONG64EQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpLOONG64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64EQ, x)
+ return true
+ }
+ // match: (NE (SGTU x (MOVVconst [0])) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpLOONG64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64NE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpLOONG64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockLOONG64LTZ, x)
+ return true
+ }
+ // match: (NE (SGT x (MOVVconst [0])) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpLOONG64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockLOONG64GTZ, x)
+ return true
+ }
+ // match: (NE (MOVVconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpLOONG64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
new file mode 100644
index 0000000..6a259f5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -0,0 +1,7660 @@
+// Code generated from _gen/MIPS.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueMIPS(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpMIPSABSD
+ return true
+ case OpAdd16:
+ v.Op = OpMIPSADD
+ return true
+ case OpAdd32:
+ v.Op = OpMIPSADD
+ return true
+ case OpAdd32F:
+ v.Op = OpMIPSADDF
+ return true
+ case OpAdd32withcarry:
+ return rewriteValueMIPS_OpAdd32withcarry(v)
+ case OpAdd64F:
+ v.Op = OpMIPSADDD
+ return true
+ case OpAdd8:
+ v.Op = OpMIPSADD
+ return true
+ case OpAddPtr:
+ v.Op = OpMIPSADD
+ return true
+ case OpAddr:
+ return rewriteValueMIPS_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpMIPSAND
+ return true
+ case OpAnd32:
+ v.Op = OpMIPSAND
+ return true
+ case OpAnd8:
+ v.Op = OpMIPSAND
+ return true
+ case OpAndB:
+ v.Op = OpMIPSAND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpMIPSLoweredAtomicAdd
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpMIPSLoweredAtomicAnd
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueMIPS_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpMIPSLoweredAtomicCas
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpMIPSLoweredAtomicExchange
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpMIPSLoweredAtomicLoad32
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpMIPSLoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpMIPSLoweredAtomicLoad32
+ return true
+ case OpAtomicOr32:
+ v.Op = OpMIPSLoweredAtomicOr
+ return true
+ case OpAtomicOr8:
+ return rewriteValueMIPS_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ v.Op = OpMIPSLoweredAtomicStore32
+ return true
+ case OpAtomicStore8:
+ v.Op = OpMIPSLoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpMIPSLoweredAtomicStore32
+ return true
+ case OpAvg32u:
+ return rewriteValueMIPS_OpAvg32u(v)
+ case OpBitLen32:
+ return rewriteValueMIPS_OpBitLen32(v)
+ case OpClosureCall:
+ v.Op = OpMIPSCALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueMIPS_OpCom16(v)
+ case OpCom32:
+ return rewriteValueMIPS_OpCom32(v)
+ case OpCom8:
+ return rewriteValueMIPS_OpCom8(v)
+ case OpConst16:
+ return rewriteValueMIPS_OpConst16(v)
+ case OpConst32:
+ return rewriteValueMIPS_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpMIPSMOVFconst
+ return true
+ case OpConst64F:
+ v.Op = OpMIPSMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValueMIPS_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueMIPS_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueMIPS_OpConstNil(v)
+ case OpCtz32:
+ return rewriteValueMIPS_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpMIPSTRUNCFW
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpMIPSMOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpMIPSMOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpMIPSMOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpMIPSTRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpMIPSMOVDF
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueMIPS_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueMIPS_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueMIPS_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpMIPSDIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueMIPS_OpDiv32u(v)
+ case OpDiv64F:
+ v.Op = OpMIPSDIVD
+ return true
+ case OpDiv8:
+ return rewriteValueMIPS_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueMIPS_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueMIPS_OpEq16(v)
+ case OpEq32:
+ return rewriteValueMIPS_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueMIPS_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValueMIPS_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueMIPS_OpEq8(v)
+ case OpEqB:
+ return rewriteValueMIPS_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueMIPS_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpMIPSLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpMIPSLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpMIPSLoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueMIPS_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueMIPS_OpHmul32u(v)
+ case OpInterCall:
+ v.Op = OpMIPSCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueMIPS_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueMIPS_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueMIPS_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueMIPS_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueMIPS_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueMIPS_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueMIPS_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueMIPS_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValueMIPS_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValueMIPS_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueMIPS_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueMIPS_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueMIPS_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueMIPS_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueMIPS_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueMIPS_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValueMIPS_OpLess64F(v)
+ case OpLess8:
+ return rewriteValueMIPS_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueMIPS_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueMIPS_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueMIPS_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueMIPS_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueMIPS_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueMIPS_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueMIPS_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueMIPS_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueMIPS_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueMIPS_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueMIPS_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValueMIPS_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueMIPS_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueMIPS_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueMIPS_OpLsh8x8(v)
+ case OpMIPSADD:
+ return rewriteValueMIPS_OpMIPSADD(v)
+ case OpMIPSADDconst:
+ return rewriteValueMIPS_OpMIPSADDconst(v)
+ case OpMIPSAND:
+ return rewriteValueMIPS_OpMIPSAND(v)
+ case OpMIPSANDconst:
+ return rewriteValueMIPS_OpMIPSANDconst(v)
+ case OpMIPSCMOVZ:
+ return rewriteValueMIPS_OpMIPSCMOVZ(v)
+ case OpMIPSCMOVZzero:
+ return rewriteValueMIPS_OpMIPSCMOVZzero(v)
+ case OpMIPSLoweredAtomicAdd:
+ return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v)
+ case OpMIPSLoweredAtomicStore32:
+ return rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v)
+ case OpMIPSMOVBUload:
+ return rewriteValueMIPS_OpMIPSMOVBUload(v)
+ case OpMIPSMOVBUreg:
+ return rewriteValueMIPS_OpMIPSMOVBUreg(v)
+ case OpMIPSMOVBload:
+ return rewriteValueMIPS_OpMIPSMOVBload(v)
+ case OpMIPSMOVBreg:
+ return rewriteValueMIPS_OpMIPSMOVBreg(v)
+ case OpMIPSMOVBstore:
+ return rewriteValueMIPS_OpMIPSMOVBstore(v)
+ case OpMIPSMOVBstorezero:
+ return rewriteValueMIPS_OpMIPSMOVBstorezero(v)
+ case OpMIPSMOVDload:
+ return rewriteValueMIPS_OpMIPSMOVDload(v)
+ case OpMIPSMOVDstore:
+ return rewriteValueMIPS_OpMIPSMOVDstore(v)
+ case OpMIPSMOVFload:
+ return rewriteValueMIPS_OpMIPSMOVFload(v)
+ case OpMIPSMOVFstore:
+ return rewriteValueMIPS_OpMIPSMOVFstore(v)
+ case OpMIPSMOVHUload:
+ return rewriteValueMIPS_OpMIPSMOVHUload(v)
+ case OpMIPSMOVHUreg:
+ return rewriteValueMIPS_OpMIPSMOVHUreg(v)
+ case OpMIPSMOVHload:
+ return rewriteValueMIPS_OpMIPSMOVHload(v)
+ case OpMIPSMOVHreg:
+ return rewriteValueMIPS_OpMIPSMOVHreg(v)
+ case OpMIPSMOVHstore:
+ return rewriteValueMIPS_OpMIPSMOVHstore(v)
+ case OpMIPSMOVHstorezero:
+ return rewriteValueMIPS_OpMIPSMOVHstorezero(v)
+ case OpMIPSMOVWload:
+ return rewriteValueMIPS_OpMIPSMOVWload(v)
+ case OpMIPSMOVWnop:
+ return rewriteValueMIPS_OpMIPSMOVWnop(v)
+ case OpMIPSMOVWreg:
+ return rewriteValueMIPS_OpMIPSMOVWreg(v)
+ case OpMIPSMOVWstore:
+ return rewriteValueMIPS_OpMIPSMOVWstore(v)
+ case OpMIPSMOVWstorezero:
+ return rewriteValueMIPS_OpMIPSMOVWstorezero(v)
+ case OpMIPSMUL:
+ return rewriteValueMIPS_OpMIPSMUL(v)
+ case OpMIPSNEG:
+ return rewriteValueMIPS_OpMIPSNEG(v)
+ case OpMIPSNOR:
+ return rewriteValueMIPS_OpMIPSNOR(v)
+ case OpMIPSNORconst:
+ return rewriteValueMIPS_OpMIPSNORconst(v)
+ case OpMIPSOR:
+ return rewriteValueMIPS_OpMIPSOR(v)
+ case OpMIPSORconst:
+ return rewriteValueMIPS_OpMIPSORconst(v)
+ case OpMIPSSGT:
+ return rewriteValueMIPS_OpMIPSSGT(v)
+ case OpMIPSSGTU:
+ return rewriteValueMIPS_OpMIPSSGTU(v)
+ case OpMIPSSGTUconst:
+ return rewriteValueMIPS_OpMIPSSGTUconst(v)
+ case OpMIPSSGTUzero:
+ return rewriteValueMIPS_OpMIPSSGTUzero(v)
+ case OpMIPSSGTconst:
+ return rewriteValueMIPS_OpMIPSSGTconst(v)
+ case OpMIPSSGTzero:
+ return rewriteValueMIPS_OpMIPSSGTzero(v)
+ case OpMIPSSLL:
+ return rewriteValueMIPS_OpMIPSSLL(v)
+ case OpMIPSSLLconst:
+ return rewriteValueMIPS_OpMIPSSLLconst(v)
+ case OpMIPSSRA:
+ return rewriteValueMIPS_OpMIPSSRA(v)
+ case OpMIPSSRAconst:
+ return rewriteValueMIPS_OpMIPSSRAconst(v)
+ case OpMIPSSRL:
+ return rewriteValueMIPS_OpMIPSSRL(v)
+ case OpMIPSSRLconst:
+ return rewriteValueMIPS_OpMIPSSRLconst(v)
+ case OpMIPSSUB:
+ return rewriteValueMIPS_OpMIPSSUB(v)
+ case OpMIPSSUBconst:
+ return rewriteValueMIPS_OpMIPSSUBconst(v)
+ case OpMIPSXOR:
+ return rewriteValueMIPS_OpMIPSXOR(v)
+ case OpMIPSXORconst:
+ return rewriteValueMIPS_OpMIPSXORconst(v)
+ case OpMod16:
+ return rewriteValueMIPS_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueMIPS_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueMIPS_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueMIPS_OpMod32u(v)
+ case OpMod8:
+ return rewriteValueMIPS_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueMIPS_OpMod8u(v)
+ case OpMove:
+ return rewriteValueMIPS_OpMove(v)
+ case OpMul16:
+ v.Op = OpMIPSMUL
+ return true
+ case OpMul32:
+ v.Op = OpMIPSMUL
+ return true
+ case OpMul32F:
+ v.Op = OpMIPSMULF
+ return true
+ case OpMul32uhilo:
+ v.Op = OpMIPSMULTU
+ return true
+ case OpMul64F:
+ v.Op = OpMIPSMULD
+ return true
+ case OpMul8:
+ v.Op = OpMIPSMUL
+ return true
+ case OpNeg16:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeg32:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeg32F:
+ v.Op = OpMIPSNEGF
+ return true
+ case OpNeg64F:
+ v.Op = OpMIPSNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeq16:
+ return rewriteValueMIPS_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueMIPS_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueMIPS_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValueMIPS_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueMIPS_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpMIPSXOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueMIPS_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpMIPSLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueMIPS_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueMIPS_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpMIPSOR
+ return true
+ case OpOr32:
+ v.Op = OpMIPSOR
+ return true
+ case OpOr8:
+ v.Op = OpMIPSOR
+ return true
+ case OpOrB:
+ v.Op = OpMIPSOR
+ return true
+ case OpPanicBounds:
+ return rewriteValueMIPS_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValueMIPS_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValueMIPS_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueMIPS_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueMIPS_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueMIPS_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueMIPS_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueMIPS_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueMIPS_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueMIPS_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueMIPS_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueMIPS_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueMIPS_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueMIPS_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueMIPS_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueMIPS_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueMIPS_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueMIPS_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueMIPS_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueMIPS_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueMIPS_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueMIPS_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueMIPS_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueMIPS_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueMIPS_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueMIPS_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueMIPS_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueMIPS_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueMIPS_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueMIPS_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueMIPS_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueMIPS_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpMIPSMOVHreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpMIPSMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpMIPSMOVBreg
+ return true
+ case OpSignmask:
+ return rewriteValueMIPS_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValueMIPS_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpMIPSSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpMIPSSQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpMIPSCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueMIPS_OpStore(v)
+ case OpSub16:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSub32:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSub32F:
+ v.Op = OpMIPSSUBF
+ return true
+ case OpSub32withcarry:
+ return rewriteValueMIPS_OpSub32withcarry(v)
+ case OpSub64F:
+ v.Op = OpMIPSSUBD
+ return true
+ case OpSub8:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSubPtr:
+ v.Op = OpMIPSSUB
+ return true
+ case OpTailCall:
+ v.Op = OpMIPSCALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpMIPSLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpMIPSXOR
+ return true
+ case OpXor32:
+ v.Op = OpMIPSXOR
+ return true
+ case OpXor8:
+ v.Op = OpMIPSXOR
+ return true
+ case OpZero:
+ return rewriteValueMIPS_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpMIPSMOVHUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpMIPSMOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpMIPSMOVBUreg
+ return true
+ case OpZeromask:
+ return rewriteValueMIPS_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add32withcarry <t> x y c)
+ // result: (ADD c (ADD <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ c := v_2
+ v.reset(OpMIPSADD)
+ v0 := b.NewValue0(v.Pos, OpMIPSADD, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(c, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAnd)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(0)
+ v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v9.AuxInt = int32ToAuxInt(0xff)
+ v8.AddArg2(v9, v5)
+ v7.AddArg(v8)
+ v2.AddArg2(v3, v7)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAnd)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(3)
+ v7.AddArg(ptr)
+ v6.AddArg(v7)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(0)
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v10.AuxInt = int32ToAuxInt(0xff)
+ v9.AddArg2(v10, v5)
+ v8.AddArg(v9)
+ v2.AddArg2(v3, v8)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicOr)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v5.AddArg(ptr)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicOr8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicOr)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpAvg32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg32u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSADD)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPSSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueMIPS_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 <t> x)
+ // result: (SUB (MOVWconst [32]) (CLZ <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVWconst [b2i32(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(t))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 <t> x)
+ // result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
+ v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPSAND, t)
+ v4 := b.NewValue0(v.Pos, OpMIPSNEG, t)
+ v4.AddArg(x)
+ v3.AddArg2(x, v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (Select1 (DIV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select1 (DIVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTUconst [1] (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTUconst [1] (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (Select0 (MULT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (Select0 (MULTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPSSGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVWconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XORconst [1] (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XORconst [1] (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XORconst [1] (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to32 y) (SignExt16to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to32 y) (SignExt8to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && t.IsSigned())
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !t.IsSigned())
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && t.IsSigned())
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !t.IsSigned())
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVWaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVWaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVWconst <t> [c]))
+ // cond: !t.IsPtr()
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ continue
+ }
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSNEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpMIPSMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(c+d)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(c + d))
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AND x (MOVWconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND (SGTUconst [1] x) (SGTUconst [1] y))
+ // result: (SGTUconst [1] (OR <x.Type> x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSSGTUconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpMIPSSGTUconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVWconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVZ _ f (MOVWconst [0]))
+ // result: f
+ for {
+ f := v_1
+ if v_2.Op != OpMIPSMOVWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(f)
+ return true
+ }
+ // match: (CMOVZ a _ (MOVWconst [c]))
+ // cond: c!=0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (CMOVZ a (MOVWconst [0]) c)
+ // result: (CMOVZzero a c)
+ for {
+ a := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ c := v_2
+ v.reset(OpMIPSCMOVZzero)
+ v.AddArg2(a, c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVZzero _ (MOVWconst [0]))
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (CMOVZzero a (MOVWconst [c]))
+ // cond: c!=0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem)
+ // cond: is16Bit(int64(c))
+ // result: (LoweredAtomicAddconst [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(int64(c))) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAddconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVWconst [0]) mem)
+ // result: (LoweredAtomicStorezero ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSLoweredAtomicStorezero)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint8(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int8(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVDstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _))
+ // result: (MOVWgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpMIPSMOVWgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVFstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off] {sym} ptr (MOVWgpfp val) mem)
+ // result: (MOVWstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint16(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int16(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _))
+ // result: (MOVWfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVFstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpMIPSMOVWfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWnop (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off] {sym} ptr (MOVWfpgp val) mem)
+ // result: (MOVFstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MUL (MOVWconst [0]) _ )
+ // result: (MOVWconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [1]) x )
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [-1]) x )
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpMIPSNEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) x )
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // result: (MOVWconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVWconst [c]))
+ // result: (MOVWconst [-c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVWconst [c]))
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [^(c|d)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OR x (MOVWconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR (SGTUzero x) (SGTUzero y))
+ // result: (SGTUzero (OR <x.Type> x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSSGTUzero {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpMIPSSGTUzero {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSGTUzero)
+ v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVWconst [-1])
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSORconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVWconst [c]) x)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpMIPSSGTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGT x (MOVWconst [0]))
+ // result: (SGTzero x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSSGTzero)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVWconst [c]) x)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGTU x (MOVWconst [0]))
+ // result: (SGTUzero x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSSGTUzero)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVWconst [d]))
+ // cond: uint32(c) > uint32(d)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(c) > uint32(d)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVWconst [d]))
+ // cond: uint32(c) <= uint32(d)
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(c) <= uint32(d)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint32(m) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(m) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLconst _ [d]))
+ // cond: uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSRLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTUzero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUzero (MOVWconst [d]))
+ // cond: d != 0
+ // result: (MOVWconst [1])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUzero (MOVWconst [d]))
+ // cond: d == 0
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVWconst [d]))
+ // cond: c > d
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWconst [d]))
+ // cond: c <= d
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLconst _ [d]))
+ // cond: 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSRLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTzero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTzero (MOVWconst [d]))
+ // cond: d > 0
+ // result: (MOVWconst [1])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d > 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTzero (MOVWconst [d]))
+ // cond: d <= 0
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d <= 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVWconst [c]))
+ // result: (SLLconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<uint32(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVWconst [c]))
+ // result: (SRAconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d>>uint32(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d >> uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVWconst [c]))
+ // result: (SRLconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)>>uint32(c))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVWconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUB (MOVWconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpMIPSNEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d-c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVWconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSXORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (Select0 (DIV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select0 (DIVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(12)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(4)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(4)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0)
+ // result: (LoweredMove [int32(t.Alignment())] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredMove)
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR x y) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVWaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpMIPSMOVWaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueMIPS_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVWconst [c]))
+ // result: (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVWconst [c]))
+ // result: (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVWconst [c]))
+ // result: (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVWconst [c]))
+ // result: (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(31)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SRAconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add32carry <t> x y))
+ // result: (ADD <t.FieldType(0)> x y)
+ for {
+ if v_0.Op != OpAdd32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSADD)
+ v.Type = t.FieldType(0)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (Sub32carry <t> x y))
+ // result: (SUB <t.FieldType(0)> x y)
+ for {
+ if v_0.Op != OpSub32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSUB)
+ v.Type = t.FieldType(0)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (MULTU (MOVWconst [0]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [1]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [-1]) x ))
+ // result: (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(-1)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(v0, v1, x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [c]) x ))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(32 - log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d])))
+ // result: (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32((int64(uint32(c)) * int64(uint32(d))) >> 32))
+ return true
+ }
+ break
+ }
+ // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [c%d])
+ for {
+ if v_0.Op != OpMIPSDIV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c % d)
+ return true
+ }
+ // match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpMIPSDIVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add32carry <t> x y))
+ // result: (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+ for {
+ if v_0.Op != OpAdd32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0))
+ v0.AddArg2(x, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Select1 (Sub32carry <t> x y))
+ // result: (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+ for {
+ if v_0.Op != OpSub32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0))
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Select1 (MULTU (MOVWconst [0]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [1]) x ))
+ // result: x
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [-1]) x ))
+ // result: (NEG <x.Type> x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpMIPSNEG)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [c]) x ))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d])))
+ // result: (MOVWconst [int32(uint32(c)*uint32(d))])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) * uint32(d)))
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [c/d])
+ for {
+ if v_0.Op != OpMIPSDIV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpMIPSDIVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (NEG <t> x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSNEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSub32withcarry(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub32withcarry <t> x y c)
+ // result: (SUB (SUB <t> x y) c)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ c := v_2
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSSUB, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+}
+func rewriteValueMIPS_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(4)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 16 || t.Alignment()%4 != 0)
+ // result: (LoweredZero [int32(t.Alignment())] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s > 16 || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredZero)
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zeromask x)
+ // result: (NEG (SGTU x (MOVWconst [0])))
+ for {
+ x := v_0
+ v.reset(OpMIPSNEG)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockMIPS(b *Block) bool {
+ switch b.Kind {
+ case BlockMIPSEQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGT {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSNE, x)
+ return true
+ }
+ // match: (EQ (SGTUzero x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSEQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSGEZ, x)
+ return true
+ }
+ // match: (EQ (SGTzero x) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSLEZ, x)
+ return true
+ }
+ // match: (EQ (MOVWconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVWconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSGEZ:
+ // match: (GEZ (MOVWconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVWconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSGTZ:
+ // match: (GTZ (MOVWconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVWconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockMIPSNE, cond)
+ return true
+ }
+ case BlockMIPSLEZ:
+ // match: (LEZ (MOVWconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVWconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSLTZ:
+ // match: (LTZ (MOVWconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVWconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSNE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGT {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTzero _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSEQ, x)
+ return true
+ }
+ // match: (NE (SGTUzero x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSNE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSLTZ, x)
+ return true
+ }
+ // match: (NE (SGTzero x) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSGTZ, x)
+ return true
+ }
+ // match: (NE (MOVWconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVWconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
new file mode 100644
index 0000000..764465d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -0,0 +1,8604 @@
+// Code generated from _gen/MIPS64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueMIPS64(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpMIPS64ABSD
+ return true
+ case OpAdd16:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd32:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd32F:
+ v.Op = OpMIPS64ADDF
+ return true
+ case OpAdd64:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd64F:
+ v.Op = OpMIPS64ADDD
+ return true
+ case OpAdd8:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAddPtr:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAddr:
+ return rewriteValueMIPS64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd32:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd64:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd8:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAndB:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpMIPS64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpMIPS64LoweredAtomicAdd64
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpMIPS64LoweredAtomicAnd32
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueMIPS64_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueMIPS64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpMIPS64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpMIPS64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpMIPS64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpMIPS64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpMIPS64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpMIPS64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpMIPS64LoweredAtomicLoad64
+ return true
+ case OpAtomicOr32:
+ v.Op = OpMIPS64LoweredAtomicOr32
+ return true
+ case OpAtomicOr8:
+ return rewriteValueMIPS64_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ v.Op = OpMIPS64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpMIPS64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpMIPS64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpMIPS64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueMIPS64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpMIPS64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueMIPS64_OpCom16(v)
+ case OpCom32:
+ return rewriteValueMIPS64_OpCom32(v)
+ case OpCom64:
+ return rewriteValueMIPS64_OpCom64(v)
+ case OpCom8:
+ return rewriteValueMIPS64_OpCom8(v)
+ case OpConst16:
+ return rewriteValueMIPS64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueMIPS64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueMIPS64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueMIPS64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueMIPS64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueMIPS64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueMIPS64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueMIPS64_OpConstNil(v)
+ case OpCvt32Fto32:
+ v.Op = OpMIPS64TRUNCFW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpMIPS64TRUNCFV
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpMIPS64MOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpMIPS64MOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpMIPS64MOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpMIPS64TRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpMIPS64MOVDF
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpMIPS64TRUNCDV
+ return true
+ case OpCvt64to32F:
+ v.Op = OpMIPS64MOVVF
+ return true
+ case OpCvt64to64F:
+ v.Op = OpMIPS64MOVVD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueMIPS64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueMIPS64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueMIPS64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpMIPS64DIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueMIPS64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueMIPS64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpMIPS64DIVD
+ return true
+ case OpDiv64u:
+ return rewriteValueMIPS64_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValueMIPS64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueMIPS64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueMIPS64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueMIPS64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueMIPS64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueMIPS64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueMIPS64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueMIPS64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueMIPS64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueMIPS64_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpMIPS64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpMIPS64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpMIPS64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueMIPS64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueMIPS64_OpHmul32u(v)
+ case OpHmul64:
+ return rewriteValueMIPS64_OpHmul64(v)
+ case OpHmul64u:
+ return rewriteValueMIPS64_OpHmul64u(v)
+ case OpInterCall:
+ v.Op = OpMIPS64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueMIPS64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueMIPS64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueMIPS64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueMIPS64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueMIPS64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueMIPS64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueMIPS64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueMIPS64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueMIPS64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueMIPS64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueMIPS64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueMIPS64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueMIPS64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueMIPS64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueMIPS64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueMIPS64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueMIPS64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueMIPS64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueMIPS64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueMIPS64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueMIPS64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueMIPS64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueMIPS64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueMIPS64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueMIPS64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueMIPS64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueMIPS64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueMIPS64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueMIPS64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueMIPS64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueMIPS64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueMIPS64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueMIPS64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueMIPS64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueMIPS64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueMIPS64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueMIPS64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueMIPS64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueMIPS64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueMIPS64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueMIPS64_OpLsh8x8(v)
+ case OpMIPS64ADDV:
+ return rewriteValueMIPS64_OpMIPS64ADDV(v)
+ case OpMIPS64ADDVconst:
+ return rewriteValueMIPS64_OpMIPS64ADDVconst(v)
+ case OpMIPS64AND:
+ return rewriteValueMIPS64_OpMIPS64AND(v)
+ case OpMIPS64ANDconst:
+ return rewriteValueMIPS64_OpMIPS64ANDconst(v)
+ case OpMIPS64LoweredAtomicAdd32:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v)
+ case OpMIPS64LoweredAtomicAdd64:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v)
+ case OpMIPS64LoweredAtomicStore32:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v)
+ case OpMIPS64LoweredAtomicStore64:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v)
+ case OpMIPS64MOVBUload:
+ return rewriteValueMIPS64_OpMIPS64MOVBUload(v)
+ case OpMIPS64MOVBUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVBUreg(v)
+ case OpMIPS64MOVBload:
+ return rewriteValueMIPS64_OpMIPS64MOVBload(v)
+ case OpMIPS64MOVBreg:
+ return rewriteValueMIPS64_OpMIPS64MOVBreg(v)
+ case OpMIPS64MOVBstore:
+ return rewriteValueMIPS64_OpMIPS64MOVBstore(v)
+ case OpMIPS64MOVBstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v)
+ case OpMIPS64MOVDload:
+ return rewriteValueMIPS64_OpMIPS64MOVDload(v)
+ case OpMIPS64MOVDstore:
+ return rewriteValueMIPS64_OpMIPS64MOVDstore(v)
+ case OpMIPS64MOVFload:
+ return rewriteValueMIPS64_OpMIPS64MOVFload(v)
+ case OpMIPS64MOVFstore:
+ return rewriteValueMIPS64_OpMIPS64MOVFstore(v)
+ case OpMIPS64MOVHUload:
+ return rewriteValueMIPS64_OpMIPS64MOVHUload(v)
+ case OpMIPS64MOVHUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVHUreg(v)
+ case OpMIPS64MOVHload:
+ return rewriteValueMIPS64_OpMIPS64MOVHload(v)
+ case OpMIPS64MOVHreg:
+ return rewriteValueMIPS64_OpMIPS64MOVHreg(v)
+ case OpMIPS64MOVHstore:
+ return rewriteValueMIPS64_OpMIPS64MOVHstore(v)
+ case OpMIPS64MOVHstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v)
+ case OpMIPS64MOVVload:
+ return rewriteValueMIPS64_OpMIPS64MOVVload(v)
+ case OpMIPS64MOVVnop:
+ return rewriteValueMIPS64_OpMIPS64MOVVnop(v)
+ case OpMIPS64MOVVreg:
+ return rewriteValueMIPS64_OpMIPS64MOVVreg(v)
+ case OpMIPS64MOVVstore:
+ return rewriteValueMIPS64_OpMIPS64MOVVstore(v)
+ case OpMIPS64MOVVstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v)
+ case OpMIPS64MOVWUload:
+ return rewriteValueMIPS64_OpMIPS64MOVWUload(v)
+ case OpMIPS64MOVWUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVWUreg(v)
+ case OpMIPS64MOVWload:
+ return rewriteValueMIPS64_OpMIPS64MOVWload(v)
+ case OpMIPS64MOVWreg:
+ return rewriteValueMIPS64_OpMIPS64MOVWreg(v)
+ case OpMIPS64MOVWstore:
+ return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
+ case OpMIPS64MOVWstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v)
+ case OpMIPS64NEGV:
+ return rewriteValueMIPS64_OpMIPS64NEGV(v)
+ case OpMIPS64NOR:
+ return rewriteValueMIPS64_OpMIPS64NOR(v)
+ case OpMIPS64NORconst:
+ return rewriteValueMIPS64_OpMIPS64NORconst(v)
+ case OpMIPS64OR:
+ return rewriteValueMIPS64_OpMIPS64OR(v)
+ case OpMIPS64ORconst:
+ return rewriteValueMIPS64_OpMIPS64ORconst(v)
+ case OpMIPS64SGT:
+ return rewriteValueMIPS64_OpMIPS64SGT(v)
+ case OpMIPS64SGTU:
+ return rewriteValueMIPS64_OpMIPS64SGTU(v)
+ case OpMIPS64SGTUconst:
+ return rewriteValueMIPS64_OpMIPS64SGTUconst(v)
+ case OpMIPS64SGTconst:
+ return rewriteValueMIPS64_OpMIPS64SGTconst(v)
+ case OpMIPS64SLLV:
+ return rewriteValueMIPS64_OpMIPS64SLLV(v)
+ case OpMIPS64SLLVconst:
+ return rewriteValueMIPS64_OpMIPS64SLLVconst(v)
+ case OpMIPS64SRAV:
+ return rewriteValueMIPS64_OpMIPS64SRAV(v)
+ case OpMIPS64SRAVconst:
+ return rewriteValueMIPS64_OpMIPS64SRAVconst(v)
+ case OpMIPS64SRLV:
+ return rewriteValueMIPS64_OpMIPS64SRLV(v)
+ case OpMIPS64SRLVconst:
+ return rewriteValueMIPS64_OpMIPS64SRLVconst(v)
+ case OpMIPS64SUBV:
+ return rewriteValueMIPS64_OpMIPS64SUBV(v)
+ case OpMIPS64SUBVconst:
+ return rewriteValueMIPS64_OpMIPS64SUBVconst(v)
+ case OpMIPS64XOR:
+ return rewriteValueMIPS64_OpMIPS64XOR(v)
+ case OpMIPS64XORconst:
+ return rewriteValueMIPS64_OpMIPS64XORconst(v)
+ case OpMod16:
+ return rewriteValueMIPS64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueMIPS64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueMIPS64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueMIPS64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueMIPS64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValueMIPS64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValueMIPS64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueMIPS64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueMIPS64_OpMove(v)
+ case OpMul16:
+ return rewriteValueMIPS64_OpMul16(v)
+ case OpMul32:
+ return rewriteValueMIPS64_OpMul32(v)
+ case OpMul32F:
+ v.Op = OpMIPS64MULF
+ return true
+ case OpMul64:
+ return rewriteValueMIPS64_OpMul64(v)
+ case OpMul64F:
+ v.Op = OpMIPS64MULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpMIPS64MULVU
+ return true
+ case OpMul8:
+ return rewriteValueMIPS64_OpMul8(v)
+ case OpNeg16:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg32:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg32F:
+ v.Op = OpMIPS64NEGF
+ return true
+ case OpNeg64:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg64F:
+ v.Op = OpMIPS64NEGD
+ return true
+ case OpNeg8:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeq16:
+ return rewriteValueMIPS64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueMIPS64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueMIPS64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueMIPS64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueMIPS64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueMIPS64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueMIPS64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpMIPS64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueMIPS64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueMIPS64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr32:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr64:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr8:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOrB:
+ v.Op = OpMIPS64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueMIPS64_OpPanicBounds(v)
+ case OpRotateLeft16:
+ return rewriteValueMIPS64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueMIPS64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueMIPS64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueMIPS64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueMIPS64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueMIPS64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueMIPS64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueMIPS64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueMIPS64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueMIPS64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueMIPS64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueMIPS64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueMIPS64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueMIPS64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueMIPS64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueMIPS64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueMIPS64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueMIPS64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueMIPS64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueMIPS64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueMIPS64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueMIPS64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueMIPS64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueMIPS64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueMIPS64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueMIPS64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueMIPS64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueMIPS64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueMIPS64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueMIPS64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueMIPS64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueMIPS64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueMIPS64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueMIPS64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueMIPS64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueMIPS64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueMIPS64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueMIPS64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpMIPS64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpMIPS64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpMIPS64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueMIPS64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpMIPS64SQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpMIPS64SQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpMIPS64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueMIPS64_OpStore(v)
+ case OpSub16:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub32:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub32F:
+ v.Op = OpMIPS64SUBF
+ return true
+ case OpSub64:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub64F:
+ v.Op = OpMIPS64SUBD
+ return true
+ case OpSub8:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSubPtr:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpTailCall:
+ v.Op = OpMIPS64CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpMIPS64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor32:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor64:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor8:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpZero:
+ return rewriteValueMIPS64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpMIPS64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpMIPS64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpMIPS64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAnd32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPS64OR, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v7 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(0)
+ v8 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
+ v9 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v9.AuxInt = int64ToAuxInt(0xff)
+ v8.AddArg2(v9, v5)
+ v7.AddArg(v8)
+ v2.AddArg2(v3, v7)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))) (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAnd32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPS64OR, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(3)
+ v7 := b.NewValue0(v.Pos, OpMIPS64XORconst, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(3)
+ v7.AddArg(ptr)
+ v6.AddArg(v7)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v8 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64)
+ v8.AuxInt = int64ToAuxInt(0)
+ v9 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
+ v10 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v10.AuxInt = int64ToAuxInt(0xff)
+ v9.AddArg2(v10, v5)
+ v8.AddArg(v9)
+ v2.AddArg2(v3, v8)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpMIPS64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicOr32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicOr32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(3)
+ v5.AddArg(ptr)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicOr8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicOr32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicOr32)
+ v0 := b.NewValue0(v.Pos, OpMIPS64AND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPS64ANDconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPS64XORconst, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64ADDV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpMIPS64MOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpMIPS64MOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVVconst [int64(b2i(t))])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(b2i(t)))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVVconst [0])
+ for {
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64 x y)
+ // result: (Select1 (DIVV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64u x y)
+ // result: (Select1 (DIVVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul64 x y)
+ // result: (Select0 (MULV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul64u x y)
+ // result: (Select0 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPS64SGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVVconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XOR (MOVVconst [1]) (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v1.AddArg2(idx, len)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (XOR (MOVVconst [1]) (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to64 y) (SignExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SGT (SignExt32to64 y) (SignExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to64 y) (SignExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && t.IsSigned())
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !t.IsSigned())
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && t.IsSigned())
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !t.IsSigned())
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && t.IsSigned())
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !t.IsSigned())
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVVload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVVaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVVaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDV x (MOVVconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
+ // result: (ADDVconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && !t.IsPtr()) {
+ continue
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDV x (NEGV y))
+ // result: (SUBV x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64NEGV {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPS64SUBV)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDVconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDVconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVVconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAddconst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst64 [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAddconst64)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero32 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64LoweredAtomicStorezero32)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero64 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64LoweredAtomicStorezero64)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _))
+ // result: (MOVVgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpMIPS64MOVVgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstore [off] {sym} ptr (MOVVgpfp val) mem)
+ // result: (MOVVstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _))
+ // result: (MOVWgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpMIPS64MOVWgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVFload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVFstore [off] {sym} ptr (MOVWgpfp val) mem)
+ // result: (MOVWstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _))
+ // result: (MOVVfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpMIPS64MOVVfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVVnop (MOVVconst [c]))
+ // result: (MOVVconst [c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVVreg x)
+ // cond: x.Uses == 1
+ // result: (MOVVnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPS64MOVVnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVVreg (MOVVconst [c]))
+ // result: (MOVVconst [c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVstore [off] {sym} ptr (MOVVfpgp val) mem)
+ // result: (MOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVVstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _))
+ // result: (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVFstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v_1.Pos, OpMIPS64MOVWfpgp, typ.Float32)
+ v0.AddArg(val)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (MOVWfpgp val) mem)
+ // result: (MOVFstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGV (MOVVconst [c]))
+ // result: (MOVVconst [-c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64NORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [^(c|d)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVVconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond: is32Bit(c|d)
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c | d)) {
+ break
+ }
+ v.reset(OpMIPS64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SGTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGT x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SGTUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGTU x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)>uint64(d)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) > uint64(d)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)<=uint64(d)
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) <= uint64(d)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint64(m) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(m) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLVconst _ [d]))
+ // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c>d
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c<=d
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLVconst _ [d]))
+ // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLLV x (MOVVconst [c]))
+ // result: (SLLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SLLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAV x (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (SRAVconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAV x (MOVVconst [c]))
+ // result: (SRAVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLV x (MOVVconst [c]))
+ // result: (SRLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBV x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBVconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SUBVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBV x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBV (MOVVconst [0]) x)
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpMIPS64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(-c-d)
+ // result: (ADDVconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c - d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(-c+d)
+ // result: (ADDVconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c + d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpMIPS64NORconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond: is32Bit(c^d)
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c ^ d)) {
+ break
+ }
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 x y)
+ // result: (Select0 (DIVV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // result: (Select0 (DIVVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore dst (MOVVload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpMIPS64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0
+ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul32 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDVconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVVconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVVconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVVconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVVconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(y, v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uover x y))
+ // result: (Select1 <typ.UInt64> (MULVU x y))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect1)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (Add64carry x y c))
+ // result: (ADDV (ADDV <t> x y) c)
+ for {
+ t := v.Type
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64ADDV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ // match: (Select0 <t> (Sub64borrow x y c))
+ // result: (SUBV (SUBV <t> x y) c)
+ for {
+ t := v.Type
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64SUBV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ // match: (Select0 (DIVVU _ (MOVVconst [1])))
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Select0 (DIVVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [c%d])
+ for {
+ if v_0.Op != OpMIPS64DIVV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uover x y))
+ // result: (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPS64SGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 <t> (Add64carry x y c))
+ // result: (OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
+ for {
+ t := v.Type
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64OR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ s := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+ s.AddArg2(x, y)
+ v0.AddArg2(x, s)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64ADDV, t)
+ v3.AddArg2(s, c)
+ v2.AddArg2(s, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 <t> (Sub64borrow x y c))
+ // result: (OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
+ for {
+ t := v.Type
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpMIPS64OR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ s := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ s.AddArg2(x, y)
+ v0.AddArg2(s, x)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v3.AddArg2(s, c)
+ v2.AddArg2(v3, s)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 (MULVU x (MOVVconst [-1])))
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpMIPS64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU _ (MOVVconst [0])))
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU x (MOVVconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLVconst [log64(c)] x)
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpMIPS64SLLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIVVU x (MOVVconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (DIVVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLVconst [log64(c)] x)
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d])))
+ // result: (MOVVconst [c*d])
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [c/d])
+ for {
+ if v_0.Op != OpMIPS64DIVV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAVconst (NEGV <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVVstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpMIPS64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockMIPS64(b *Block) bool {
+ switch b.Kind {
+ case BlockMIPS64EQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGT {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64NE, x)
+ return true
+ }
+ // match: (EQ (SGTU x (MOVVconst [0])) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64GEZ, x)
+ return true
+ }
+ // match: (EQ (SGT x (MOVVconst [0])) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64LEZ, x)
+ return true
+ }
+ // match: (EQ (MOVVconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64GEZ:
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64GTZ:
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockMIPS64NE, cond)
+ return true
+ }
+ case BlockMIPS64LEZ:
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64LTZ:
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64NE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGT {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64EQ, x)
+ return true
+ }
+ // match: (NE (SGTU x (MOVVconst [0])) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64LTZ, x)
+ return true
+ }
+ // match: (NE (SGT x (MOVVconst [0])) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64GTZ, x)
+ return true
+ }
+ // match: (NE (MOVVconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
new file mode 100644
index 0000000..473a8ff
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -0,0 +1,16564 @@
+// Code generated from _gen/PPC64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "internal/buildcfg"
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValuePPC64(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpPPC64FABS
+ return true
+ case OpAdd16:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpPPC64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpPPC64FADD
+ return true
+ case OpAdd8:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAddr:
+ return rewriteValuePPC64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd32:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd64:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd8:
+ v.Op = OpPPC64AND
+ return true
+ case OpAndB:
+ v.Op = OpPPC64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpPPC64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpPPC64LoweredAtomicAdd64
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpPPC64LoweredAtomicAnd32
+ return true
+ case OpAtomicAnd8:
+ v.Op = OpPPC64LoweredAtomicAnd8
+ return true
+ case OpAtomicCompareAndSwap32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValuePPC64_OpAtomicCompareAndSwap64(v)
+ case OpAtomicCompareAndSwapRel32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v)
+ case OpAtomicExchange32:
+ v.Op = OpPPC64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpPPC64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ return rewriteValuePPC64_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValuePPC64_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValuePPC64_OpAtomicLoad8(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValuePPC64_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadAcq64:
+ return rewriteValuePPC64_OpAtomicLoadAcq64(v)
+ case OpAtomicLoadPtr:
+ return rewriteValuePPC64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpPPC64LoweredAtomicOr32
+ return true
+ case OpAtomicOr8:
+ v.Op = OpPPC64LoweredAtomicOr8
+ return true
+ case OpAtomicStore32:
+ return rewriteValuePPC64_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValuePPC64_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValuePPC64_OpAtomicStore8(v)
+ case OpAtomicStoreRel32:
+ return rewriteValuePPC64_OpAtomicStoreRel32(v)
+ case OpAtomicStoreRel64:
+ return rewriteValuePPC64_OpAtomicStoreRel64(v)
+ case OpAvg64u:
+ return rewriteValuePPC64_OpAvg64u(v)
+ case OpBitLen32:
+ return rewriteValuePPC64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValuePPC64_OpBitLen64(v)
+ case OpBswap16:
+ return rewriteValuePPC64_OpBswap16(v)
+ case OpBswap32:
+ return rewriteValuePPC64_OpBswap32(v)
+ case OpBswap64:
+ return rewriteValuePPC64_OpBswap64(v)
+ case OpCeil:
+ v.Op = OpPPC64FCEIL
+ return true
+ case OpClosureCall:
+ v.Op = OpPPC64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValuePPC64_OpCom16(v)
+ case OpCom32:
+ return rewriteValuePPC64_OpCom32(v)
+ case OpCom64:
+ return rewriteValuePPC64_OpCom64(v)
+ case OpCom8:
+ return rewriteValuePPC64_OpCom8(v)
+ case OpCondSelect:
+ return rewriteValuePPC64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValuePPC64_OpConst16(v)
+ case OpConst32:
+ return rewriteValuePPC64_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpPPC64FMOVSconst
+ return true
+ case OpConst64:
+ return rewriteValuePPC64_OpConst64(v)
+ case OpConst64F:
+ v.Op = OpPPC64FMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValuePPC64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValuePPC64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValuePPC64_OpConstNil(v)
+ case OpCopysign:
+ return rewriteValuePPC64_OpCopysign(v)
+ case OpCtz16:
+ return rewriteValuePPC64_OpCtz16(v)
+ case OpCtz32:
+ return rewriteValuePPC64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValuePPC64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCtz8:
+ return rewriteValuePPC64_OpCtz8(v)
+ case OpCvt32Fto32:
+ return rewriteValuePPC64_OpCvt32Fto32(v)
+ case OpCvt32Fto64:
+ return rewriteValuePPC64_OpCvt32Fto64(v)
+ case OpCvt32Fto64F:
+ v.Op = OpCopy
+ return true
+ case OpCvt32to32F:
+ return rewriteValuePPC64_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValuePPC64_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ return rewriteValuePPC64_OpCvt64Fto32(v)
+ case OpCvt64Fto32F:
+ v.Op = OpPPC64FRSP
+ return true
+ case OpCvt64Fto64:
+ return rewriteValuePPC64_OpCvt64Fto64(v)
+ case OpCvt64to32F:
+ return rewriteValuePPC64_OpCvt64to32F(v)
+ case OpCvt64to64F:
+ return rewriteValuePPC64_OpCvt64to64F(v)
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValuePPC64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValuePPC64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValuePPC64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpPPC64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpPPC64DIVWU
+ return true
+ case OpDiv64:
+ return rewriteValuePPC64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpPPC64FDIV
+ return true
+ case OpDiv64u:
+ v.Op = OpPPC64DIVDU
+ return true
+ case OpDiv8:
+ return rewriteValuePPC64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValuePPC64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValuePPC64_OpEq16(v)
+ case OpEq32:
+ return rewriteValuePPC64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValuePPC64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValuePPC64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValuePPC64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValuePPC64_OpEq8(v)
+ case OpEqB:
+ return rewriteValuePPC64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValuePPC64_OpEqPtr(v)
+ case OpFMA:
+ v.Op = OpPPC64FMADD
+ return true
+ case OpFloor:
+ v.Op = OpPPC64FFLOOR
+ return true
+ case OpGetCallerPC:
+ v.Op = OpPPC64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpPPC64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpPPC64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ v.Op = OpPPC64MULHW
+ return true
+ case OpHmul32u:
+ v.Op = OpPPC64MULHWU
+ return true
+ case OpHmul64:
+ v.Op = OpPPC64MULHD
+ return true
+ case OpHmul64u:
+ v.Op = OpPPC64MULHDU
+ return true
+ case OpInterCall:
+ v.Op = OpPPC64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValuePPC64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValuePPC64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValuePPC64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValuePPC64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValuePPC64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValuePPC64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValuePPC64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValuePPC64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValuePPC64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValuePPC64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValuePPC64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValuePPC64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValuePPC64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValuePPC64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValuePPC64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValuePPC64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValuePPC64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValuePPC64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValuePPC64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValuePPC64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValuePPC64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValuePPC64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValuePPC64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValuePPC64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValuePPC64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValuePPC64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValuePPC64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValuePPC64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValuePPC64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValuePPC64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValuePPC64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValuePPC64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValuePPC64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValuePPC64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuePPC64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuePPC64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuePPC64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValuePPC64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValuePPC64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValuePPC64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValuePPC64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValuePPC64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValuePPC64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValuePPC64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValuePPC64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValuePPC64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValuePPC64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValuePPC64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValuePPC64_OpMod8u(v)
+ case OpMove:
+ return rewriteValuePPC64_OpMove(v)
+ case OpMul16:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpMul32:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpMul32F:
+ v.Op = OpPPC64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpPPC64MULLD
+ return true
+ case OpMul64F:
+ v.Op = OpPPC64FMUL
+ return true
+ case OpMul8:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpNeg16:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpPPC64FNEG
+ return true
+ case OpNeg64:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpPPC64FNEG
+ return true
+ case OpNeg8:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeq16:
+ return rewriteValuePPC64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValuePPC64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValuePPC64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValuePPC64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValuePPC64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValuePPC64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpPPC64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValuePPC64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpPPC64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValuePPC64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValuePPC64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr32:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr64:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr8:
+ v.Op = OpPPC64OR
+ return true
+ case OpOrB:
+ v.Op = OpPPC64OR
+ return true
+ case OpPPC64ADD:
+ return rewriteValuePPC64_OpPPC64ADD(v)
+ case OpPPC64ADDE:
+ return rewriteValuePPC64_OpPPC64ADDE(v)
+ case OpPPC64ADDconst:
+ return rewriteValuePPC64_OpPPC64ADDconst(v)
+ case OpPPC64AND:
+ return rewriteValuePPC64_OpPPC64AND(v)
+ case OpPPC64ANDCCconst:
+ return rewriteValuePPC64_OpPPC64ANDCCconst(v)
+ case OpPPC64ANDN:
+ return rewriteValuePPC64_OpPPC64ANDN(v)
+ case OpPPC64BRD:
+ return rewriteValuePPC64_OpPPC64BRD(v)
+ case OpPPC64BRH:
+ return rewriteValuePPC64_OpPPC64BRH(v)
+ case OpPPC64BRW:
+ return rewriteValuePPC64_OpPPC64BRW(v)
+ case OpPPC64CLRLSLDI:
+ return rewriteValuePPC64_OpPPC64CLRLSLDI(v)
+ case OpPPC64CMP:
+ return rewriteValuePPC64_OpPPC64CMP(v)
+ case OpPPC64CMPU:
+ return rewriteValuePPC64_OpPPC64CMPU(v)
+ case OpPPC64CMPUconst:
+ return rewriteValuePPC64_OpPPC64CMPUconst(v)
+ case OpPPC64CMPW:
+ return rewriteValuePPC64_OpPPC64CMPW(v)
+ case OpPPC64CMPWU:
+ return rewriteValuePPC64_OpPPC64CMPWU(v)
+ case OpPPC64CMPWUconst:
+ return rewriteValuePPC64_OpPPC64CMPWUconst(v)
+ case OpPPC64CMPWconst:
+ return rewriteValuePPC64_OpPPC64CMPWconst(v)
+ case OpPPC64CMPconst:
+ return rewriteValuePPC64_OpPPC64CMPconst(v)
+ case OpPPC64Equal:
+ return rewriteValuePPC64_OpPPC64Equal(v)
+ case OpPPC64FABS:
+ return rewriteValuePPC64_OpPPC64FABS(v)
+ case OpPPC64FADD:
+ return rewriteValuePPC64_OpPPC64FADD(v)
+ case OpPPC64FADDS:
+ return rewriteValuePPC64_OpPPC64FADDS(v)
+ case OpPPC64FCEIL:
+ return rewriteValuePPC64_OpPPC64FCEIL(v)
+ case OpPPC64FFLOOR:
+ return rewriteValuePPC64_OpPPC64FFLOOR(v)
+ case OpPPC64FGreaterEqual:
+ return rewriteValuePPC64_OpPPC64FGreaterEqual(v)
+ case OpPPC64FGreaterThan:
+ return rewriteValuePPC64_OpPPC64FGreaterThan(v)
+ case OpPPC64FLessEqual:
+ return rewriteValuePPC64_OpPPC64FLessEqual(v)
+ case OpPPC64FLessThan:
+ return rewriteValuePPC64_OpPPC64FLessThan(v)
+ case OpPPC64FMOVDload:
+ return rewriteValuePPC64_OpPPC64FMOVDload(v)
+ case OpPPC64FMOVDstore:
+ return rewriteValuePPC64_OpPPC64FMOVDstore(v)
+ case OpPPC64FMOVSload:
+ return rewriteValuePPC64_OpPPC64FMOVSload(v)
+ case OpPPC64FMOVSstore:
+ return rewriteValuePPC64_OpPPC64FMOVSstore(v)
+ case OpPPC64FNEG:
+ return rewriteValuePPC64_OpPPC64FNEG(v)
+ case OpPPC64FSQRT:
+ return rewriteValuePPC64_OpPPC64FSQRT(v)
+ case OpPPC64FSUB:
+ return rewriteValuePPC64_OpPPC64FSUB(v)
+ case OpPPC64FSUBS:
+ return rewriteValuePPC64_OpPPC64FSUBS(v)
+ case OpPPC64FTRUNC:
+ return rewriteValuePPC64_OpPPC64FTRUNC(v)
+ case OpPPC64GreaterEqual:
+ return rewriteValuePPC64_OpPPC64GreaterEqual(v)
+ case OpPPC64GreaterThan:
+ return rewriteValuePPC64_OpPPC64GreaterThan(v)
+ case OpPPC64ISEL:
+ return rewriteValuePPC64_OpPPC64ISEL(v)
+ case OpPPC64LessEqual:
+ return rewriteValuePPC64_OpPPC64LessEqual(v)
+ case OpPPC64LessThan:
+ return rewriteValuePPC64_OpPPC64LessThan(v)
+ case OpPPC64MFVSRD:
+ return rewriteValuePPC64_OpPPC64MFVSRD(v)
+ case OpPPC64MOVBZload:
+ return rewriteValuePPC64_OpPPC64MOVBZload(v)
+ case OpPPC64MOVBZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVBZloadidx(v)
+ case OpPPC64MOVBZreg:
+ return rewriteValuePPC64_OpPPC64MOVBZreg(v)
+ case OpPPC64MOVBreg:
+ return rewriteValuePPC64_OpPPC64MOVBreg(v)
+ case OpPPC64MOVBstore:
+ return rewriteValuePPC64_OpPPC64MOVBstore(v)
+ case OpPPC64MOVBstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVBstoreidx(v)
+ case OpPPC64MOVBstorezero:
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v)
+ case OpPPC64MOVDaddr:
+ return rewriteValuePPC64_OpPPC64MOVDaddr(v)
+ case OpPPC64MOVDload:
+ return rewriteValuePPC64_OpPPC64MOVDload(v)
+ case OpPPC64MOVDloadidx:
+ return rewriteValuePPC64_OpPPC64MOVDloadidx(v)
+ case OpPPC64MOVDstore:
+ return rewriteValuePPC64_OpPPC64MOVDstore(v)
+ case OpPPC64MOVDstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVDstoreidx(v)
+ case OpPPC64MOVDstorezero:
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v)
+ case OpPPC64MOVHBRstore:
+ return rewriteValuePPC64_OpPPC64MOVHBRstore(v)
+ case OpPPC64MOVHZload:
+ return rewriteValuePPC64_OpPPC64MOVHZload(v)
+ case OpPPC64MOVHZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHZloadidx(v)
+ case OpPPC64MOVHZreg:
+ return rewriteValuePPC64_OpPPC64MOVHZreg(v)
+ case OpPPC64MOVHload:
+ return rewriteValuePPC64_OpPPC64MOVHload(v)
+ case OpPPC64MOVHloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHloadidx(v)
+ case OpPPC64MOVHreg:
+ return rewriteValuePPC64_OpPPC64MOVHreg(v)
+ case OpPPC64MOVHstore:
+ return rewriteValuePPC64_OpPPC64MOVHstore(v)
+ case OpPPC64MOVHstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVHstoreidx(v)
+ case OpPPC64MOVHstorezero:
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v)
+ case OpPPC64MOVWBRstore:
+ return rewriteValuePPC64_OpPPC64MOVWBRstore(v)
+ case OpPPC64MOVWZload:
+ return rewriteValuePPC64_OpPPC64MOVWZload(v)
+ case OpPPC64MOVWZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWZloadidx(v)
+ case OpPPC64MOVWZreg:
+ return rewriteValuePPC64_OpPPC64MOVWZreg(v)
+ case OpPPC64MOVWload:
+ return rewriteValuePPC64_OpPPC64MOVWload(v)
+ case OpPPC64MOVWloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWloadidx(v)
+ case OpPPC64MOVWreg:
+ return rewriteValuePPC64_OpPPC64MOVWreg(v)
+ case OpPPC64MOVWstore:
+ return rewriteValuePPC64_OpPPC64MOVWstore(v)
+ case OpPPC64MOVWstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVWstoreidx(v)
+ case OpPPC64MOVWstorezero:
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v)
+ case OpPPC64MTVSRD:
+ return rewriteValuePPC64_OpPPC64MTVSRD(v)
+ case OpPPC64MULLD:
+ return rewriteValuePPC64_OpPPC64MULLD(v)
+ case OpPPC64MULLW:
+ return rewriteValuePPC64_OpPPC64MULLW(v)
+ case OpPPC64NEG:
+ return rewriteValuePPC64_OpPPC64NEG(v)
+ case OpPPC64NOR:
+ return rewriteValuePPC64_OpPPC64NOR(v)
+ case OpPPC64NotEqual:
+ return rewriteValuePPC64_OpPPC64NotEqual(v)
+ case OpPPC64OR:
+ return rewriteValuePPC64_OpPPC64OR(v)
+ case OpPPC64ORN:
+ return rewriteValuePPC64_OpPPC64ORN(v)
+ case OpPPC64ORconst:
+ return rewriteValuePPC64_OpPPC64ORconst(v)
+ case OpPPC64ROTL:
+ return rewriteValuePPC64_OpPPC64ROTL(v)
+ case OpPPC64ROTLW:
+ return rewriteValuePPC64_OpPPC64ROTLW(v)
+ case OpPPC64ROTLWconst:
+ return rewriteValuePPC64_OpPPC64ROTLWconst(v)
+ case OpPPC64SETBC:
+ return rewriteValuePPC64_OpPPC64SETBC(v)
+ case OpPPC64SETBCR:
+ return rewriteValuePPC64_OpPPC64SETBCR(v)
+ case OpPPC64SLD:
+ return rewriteValuePPC64_OpPPC64SLD(v)
+ case OpPPC64SLDconst:
+ return rewriteValuePPC64_OpPPC64SLDconst(v)
+ case OpPPC64SLW:
+ return rewriteValuePPC64_OpPPC64SLW(v)
+ case OpPPC64SLWconst:
+ return rewriteValuePPC64_OpPPC64SLWconst(v)
+ case OpPPC64SRAD:
+ return rewriteValuePPC64_OpPPC64SRAD(v)
+ case OpPPC64SRAW:
+ return rewriteValuePPC64_OpPPC64SRAW(v)
+ case OpPPC64SRD:
+ return rewriteValuePPC64_OpPPC64SRD(v)
+ case OpPPC64SRW:
+ return rewriteValuePPC64_OpPPC64SRW(v)
+ case OpPPC64SRWconst:
+ return rewriteValuePPC64_OpPPC64SRWconst(v)
+ case OpPPC64SUB:
+ return rewriteValuePPC64_OpPPC64SUB(v)
+ case OpPPC64SUBE:
+ return rewriteValuePPC64_OpPPC64SUBE(v)
+ case OpPPC64SUBFCconst:
+ return rewriteValuePPC64_OpPPC64SUBFCconst(v)
+ case OpPPC64XOR:
+ return rewriteValuePPC64_OpPPC64XOR(v)
+ case OpPPC64XORconst:
+ return rewriteValuePPC64_OpPPC64XORconst(v)
+ case OpPanicBounds:
+ return rewriteValuePPC64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValuePPC64_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValuePPC64_OpPopCount32(v)
+ case OpPopCount64:
+ v.Op = OpPPC64POPCNTD
+ return true
+ case OpPopCount8:
+ return rewriteValuePPC64_OpPopCount8(v)
+ case OpPrefetchCache:
+ return rewriteValuePPC64_OpPrefetchCache(v)
+ case OpPrefetchCacheStreamed:
+ return rewriteValuePPC64_OpPrefetchCacheStreamed(v)
+ case OpPubBarrier:
+ v.Op = OpPPC64LoweredPubBarrier
+ return true
+ case OpRotateLeft16:
+ return rewriteValuePPC64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpPPC64ROTLW
+ return true
+ case OpRotateLeft64:
+ v.Op = OpPPC64ROTL
+ return true
+ case OpRotateLeft8:
+ return rewriteValuePPC64_OpRotateLeft8(v)
+ case OpRound:
+ v.Op = OpPPC64FROUND
+ return true
+ case OpRound32F:
+ v.Op = OpPPC64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpPPC64LoweredRound64F
+ return true
+ case OpRsh16Ux16:
+ return rewriteValuePPC64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValuePPC64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValuePPC64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValuePPC64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValuePPC64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValuePPC64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValuePPC64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValuePPC64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValuePPC64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValuePPC64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValuePPC64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValuePPC64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValuePPC64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValuePPC64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValuePPC64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValuePPC64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValuePPC64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuePPC64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuePPC64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuePPC64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuePPC64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuePPC64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuePPC64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuePPC64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValuePPC64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValuePPC64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValuePPC64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValuePPC64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValuePPC64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValuePPC64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValuePPC64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValuePPC64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValuePPC64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValuePPC64_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValuePPC64_OpSelectN(v)
+ case OpSignExt16to32:
+ v.Op = OpPPC64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpPPC64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpPPC64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValuePPC64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpPPC64FSQRT
+ return true
+ case OpSqrt32:
+ v.Op = OpPPC64FSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpPPC64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValuePPC64_OpStore(v)
+ case OpSub16:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub32:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpPPC64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpPPC64FSUB
+ return true
+ case OpSub8:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpPPC64SUB
+ return true
+ case OpTailCall:
+ v.Op = OpPPC64CALLtail
+ return true
+ case OpTrunc:
+ v.Op = OpPPC64FTRUNC
+ return true
+ case OpTrunc16to8:
+ return rewriteValuePPC64_OpTrunc16to8(v)
+ case OpTrunc32to16:
+ return rewriteValuePPC64_OpTrunc32to16(v)
+ case OpTrunc32to8:
+ return rewriteValuePPC64_OpTrunc32to8(v)
+ case OpTrunc64to16:
+ return rewriteValuePPC64_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuePPC64_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuePPC64_OpTrunc64to8(v)
+ case OpWB:
+ v.Op = OpPPC64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor32:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor64:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor8:
+ v.Op = OpPPC64XOR
+ return true
+ case OpZero:
+ return rewriteValuePPC64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpPPC64MOVHZreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpPPC64MOVHZreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpPPC64MOVWZreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [1] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 [1] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwapRel32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [0] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (LoweredAtomicLoad32 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (LoweredAtomicLoad64 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (LoweredAtomicLoad8 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad8)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq32 ptr mem)
+ // result: (LoweredAtomicLoad32 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq64 ptr mem)
+ // result: (LoweredAtomicLoad64 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (LoweredAtomicLoadPtr [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoadPtr)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore32 ptr val mem)
+ // result: (LoweredAtomicStore32 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore64 ptr val mem)
+ // result: (LoweredAtomicStore64 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore8 ptr val mem)
+ // result: (LoweredAtomicStore8 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore8)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel32 ptr val mem)
+ // result: (LoweredAtomicStore32 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel64 ptr val mem)
+ // result: (LoweredAtomicStore64 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64SUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (SUBFCconst [32] (CNTLZW <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUBFCconst [64] (CNTLZD <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(64)
+ v0 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBswap16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap16 x)
+ // cond: buildcfg.GOPPC64>=10
+ // result: (BRH x)
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 >= 10) {
+ break
+ }
+ v.reset(OpPPC64BRH)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Bswap16 x:(MOVHZload [off] {sym} ptr mem))
+ // result: @x.Block (MOVHBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVHBRload, typ.UInt16)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Bswap16 x:(MOVHZloadidx ptr idx mem))
+ // result: @x.Block (MOVHBRloadidx ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpBswap32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap32 x)
+ // cond: buildcfg.GOPPC64>=10
+ // result: (BRW x)
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 >= 10) {
+ break
+ }
+ v.reset(OpPPC64BRW)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Bswap32 x:(MOVWZload [off] {sym} ptr mem))
+ // result: @x.Block (MOVWBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVWBRload, typ.UInt32)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Bswap32 x:(MOVWZloadidx ptr idx mem))
+ // result: @x.Block (MOVWBRloadidx ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZloadidx {
+ break
+ }
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpBswap64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap64 x)
+ // cond: buildcfg.GOPPC64>=10
+ // result: (BRD x)
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 >= 10) {
+ break
+ }
+ v.reset(OpPPC64BRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Bswap64 x:(MOVDload [off] {sym} ptr mem))
+ // result: @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVDBRload, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Bswap64 x:(MOVDloadidx ptr idx mem))
+ // result: @x.Block (MOVDBRloadidx ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDloadidx {
+ break
+ }
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRloadidx, typ.Int64)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com64 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CondSelect x y (SETBC [a] cmp))
+ // result: (ISEL [a] x y cmp)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64SETBC {
+ break
+ }
+ a := auxIntToInt32(v_2.AuxInt)
+ cmp := v_2.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(a)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CondSelect x y (SETBCR [a] cmp))
+ // result: (ISEL [a+4] x y cmp)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64SETBCR {
+ break
+ }
+ a := auxIntToInt32(v_2.AuxInt)
+ cmp := v_2.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(a + 4)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CondSelect x y bool)
+ // cond: flagArg(bool) == nil
+ // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+ for {
+ x := v_0
+ y := v_1
+ bool := v_2
+ if !(flagArg(bool) == nil) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVDconst [b2i(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(t))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCopysign(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Copysign x y)
+ // result: (FCPSGN y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FCPSGN)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // cond: buildcfg.GOPPC64<=8
+ // result: (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz32 x)
+ // result: (CNTTZW (MOVWZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64CNTTZW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // cond: buildcfg.GOPPC64<=8
+ // result: (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64POPCNTD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz64 x)
+ // result: (CNTTZD x)
+ for {
+ x := v_0
+ v.reset(OpPPC64CNTTZD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto32 x)
+ // result: (MFVSRD (FCTIWZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto64 x)
+ // result: (MFVSRD (FCTIDZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to32F x)
+ // result: (FCFIDS (MTVSRD (SignExt32to64 x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFIDS)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to64F x)
+ // result: (FCFID (MTVSRD (SignExt32to64 x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto32 x)
+ // result: (MFVSRD (FCTIWZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto64 x)
+ // result: (MFVSRD (FCTIDZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64to32F x)
+ // result: (FCFIDS (MTVSRD x))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFIDS)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64to64F x)
+ // result: (FCFID (MTVSRD x))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIVD x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // cond: x.Type.IsSigned() && y.Type.IsSigned()
+ // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(x.Type.IsSigned() && y.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 x y)
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // cond: x.Type.IsSigned() && y.Type.IsSigned()
+ // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(x.Type.IsSigned() && y.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 x y)
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (Select0 <typ.Int> (ANDCCconst [1] (EQV x y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v.Type = typ.Int
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThan (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqual (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (LessEqual (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U x y)
+ // result: (LessEqual (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (LessThan (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U x y)
+ // result: (LessThan (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && t.IsSigned()
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !t.IsSigned()
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && t.IsSigned()
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !t.IsSigned()
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && t.IsSigned()
+ // result: (MOVBreg (MOVBZload ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && !t.IsSigned()
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVDaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVDaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0xFFF0)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // result: (ISEL [0] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 <t> x y)
+ // result: (ISEL [0] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(16)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // result: (ISEL [2] (SLD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0x00F0)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0xFFE0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // result: (ISEL [0] (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [32]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 <t> x y)
+ // result: (ISEL [0] (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [32]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // result: (ISEL [2] (SLW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0x00E0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 <t> x y)
+ // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0xFFC0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 <t> x y)
+ // result: (ISEL [0] (SLD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SLDconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 <t> x y)
+ // result: (ISEL [0] (SLD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 <t> x y)
+ // result: (ISEL [2] (SLD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0x00C0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0xFFF8)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // result: (ISEL [0] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 <t> x y)
+ // result: (ISEL [0] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(8)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // result: (ISEL [2] (SLD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SLD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0x00F8)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (MODSW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODSW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod32 x y)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLW y (DIVW x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (MODUW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODUW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod32u x y)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLW y (DIVWU x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 x y)
+ // cond: buildcfg.GOPPC64 >=9
+ // result: (MODSD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODSD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod64 x y)
+ // cond: buildcfg.GOPPC64 <=8
+ // result: (SUB x (MULLD y (DIVD x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (MODUD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODUD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod64u x y)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLD y (DIVDU x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s)
+ // result: (LoweredMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpPPC64LoweredMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9
+ // result: (LoweredQuadMoveShort [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadMoveShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s)
+ // result: (LoweredQuadMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // cond: x.Type.IsSigned() && y.Type.IsSigned()
+ // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(x.Type.IsSigned() && y.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // cond: x.Type.IsSigned() && y.Type.IsSigned()
+ // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(x.Type.IsSigned() && y.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst <typ.Int64> [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD l:(MULLD x y) z)
+ // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)
+ // result: (MADDLD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpPPC64MULLD {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ z := v_1
+ if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpPPC64MADDLD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (MOVDconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && !t.IsPtr()) {
+ continue
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ADDE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDE x y (Select1 <typ.UInt64> (ADDCconst (MOVDconst [0]) [-1])))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != typ.UInt64 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ADDCconst || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64ADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {sym} x))
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVDaddr [int32(c+int64(d))] {sym} x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(c + int64(d)))
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] x:(SP))
+ // cond: is32Bit(c)
+ // result: (MOVDaddr [int32(c)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP || !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBFCconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (SUBFCconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLWconst {
+ continue
+ }
+ r := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLW {
+ continue
+ }
+ r := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (NOR y y))
+ // result: (ANDN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64NOR {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ANDN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [-1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (Select0 (ANDCCconst [c] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ continue
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) y:(MOVWZreg _))
+ // cond: c&0xFFFFFFFF == 0xFFFFFFFF
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x))
+ // result: (MOVWZreg x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0xFFFFFFFF {
+ continue
+ }
+ y := v_1
+ if y.Op != OpPPC64MOVWreg {
+ continue
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
+ // result: (Select0 (ANDCCconst [c&0xFF] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if x.Op != OpPPC64MOVBZload {
+ continue
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(x.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(c & 0xFF)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ANDCCconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDCCconst [c] (Select0 (ANDCCconst [d] x)))
+ // result: (ANDCCconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ d := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64ANDCCconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&^d])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c &^ d)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64BRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BRD x:(MOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVDBRload, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (BRD x:(MOVDloadidx ptr idx mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVDBRloadidx ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDloadidx {
+ break
+ }
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDBRloadidx, typ.Int64)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64BRH(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BRH x:(MOVHZload [off] {sym} ptr mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVHBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVHBRload, typ.UInt16)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (BRH x:(MOVHZloadidx ptr idx mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVHBRloadidx ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHBRloadidx, typ.Int16)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64BRW(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BRW x:(MOVWZload [off] {sym} ptr mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVWBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVWBRload, typ.UInt32)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (BRW x:(MOVWZloadidx ptr idx mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVWBRloadidx ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZloadidx {
+ break
+ }
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWBRloadidx, typ.Int32)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CLRLSLDI [c] (SRWconst [s] x))
+ // cond: mergePPC64ClrlsldiSrw(int64(c),s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64ClrlsldiSrw(int64(c), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrw(int64(c), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CLRLSLDI [c] i:(RLWINM [s] x))
+ // cond: mergePPC64ClrlsldiRlwinm(c,s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ i := v_0
+ if i.Op != OpPPC64RLWINM {
+ break
+ }
+ s := auxIntToInt64(i.AuxInt)
+ x := i.Args[0]
+ if !(mergePPC64ClrlsldiRlwinm(c, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiRlwinm(c, s))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (CMPconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) y)
+ // cond: is16Bit(c)
+ // result: (InvertFlags (CMPconst y [c]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (CMPUconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) y)
+ // cond: isU16Bit(c)
+ // result: (InvertFlags (CMPUconst y [c]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPUconst [d] (Select0 (ANDCCconst z [c])))
+ // cond: uint64(d) > uint64(c)
+ // result: (FlagLT)
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(d) > uint64(c)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVWreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64CMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpPPC64CMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (CMPWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) y)
+ // cond: is16Bit(c)
+ // result: (InvertFlags (CMPWconst y [int32(c)]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWU x (MOVWZreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64CMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWZreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpPPC64CMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (CMPWUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPWUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) y)
+ // cond: isU16Bit(c)
+ // result: (InvertFlags (CMPWUconst y [int32(c)]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPWU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWUconst [d] (Select0 (ANDCCconst z [c])))
+ // cond: uint64(d) > uint64(c)
+ // result: (FlagLT)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(d) > uint64(c)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<y
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < y) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>y
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > y) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64Equal(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Equal (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (Equal (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Equal (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64Equal)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Equal cmp)
+ // result: (SETBC [2] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(2)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FABS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FABS (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Abs(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Abs(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADD (FMUL x y) z)
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMADD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64FMUL {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ z := v_1
+ if !(x.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpPPC64FMADD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS (FMULS x y) z)
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64FMULS {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ z := v_1
+ if !(x.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpPPC64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FCEIL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FCEIL (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Ceil(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Ceil(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FFLOOR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FFLOOR (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Floor(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Floor(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FGreaterEqual cmp)
+ // result: (OR (SETBC [2] cmp) (SETBC [1] cmp))
+ for {
+ cmp := v_0
+ v.reset(OpPPC64OR)
+ v0 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg(cmp)
+ v1 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg(cmp)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FGreaterThan cmp)
+ // result: (SETBC [1] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FLessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLessEqual cmp)
+ // result: (OR (SETBC [2] cmp) (SETBC [0] cmp))
+ for {
+ cmp := v_0
+ v.reset(OpPPC64OR)
+ v0 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg(cmp)
+ v1 := b.NewValue0(v.Pos, OpPPC64SETBC, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg(cmp)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FLessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FLessThan cmp)
+ // result: (SETBC [0] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _))
+ // result: (MTVSRD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpPPC64MTVSRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem)
+ // result: (MOVDstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MTVSRD {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEG (FABS x))
+ // result: (FNABS x)
+ for {
+ if v_0.Op != OpPPC64FABS {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FNABS)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEG (FNABS x))
+ // result: (FABS x)
+ for {
+ if v_0.Op != OpPPC64FNABS {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FABS)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FSQRT (FMOVDconst [x]))
+ // cond: x >= 0
+ // result: (FMOVDconst [math.Sqrt(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if !(x >= 0) {
+ break
+ }
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Sqrt(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUB (FMUL x y) z)
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMSUB x y z)
+ for {
+ if v_0.Op != OpPPC64FMUL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ z := v_1
+ if !(x.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpPPC64FMSUB)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS (FMULS x y) z)
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMSUBS x y z)
+ for {
+ if v_0.Op != OpPPC64FMULS {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ z := v_1
+ if !(x.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpPPC64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FTRUNC(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FTRUNC (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Trunc(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Trunc(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqual (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterEqual (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterEqual (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (GreaterEqual cmp)
+ // result: (SETBCR [0] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThan (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterThan (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterThan (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ // match: (GreaterThan cmp)
+ // result: (SETBC [1] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp))))
+ // result: (ISEL [c] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64SETBC {
+ break
+ }
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ cmp := v_2_0_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (ISEL [6] x y (CMPconst [0] (SETBC [c] cmp)))
+ // result: (ISEL [c] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64SETBC {
+ break
+ }
+ c := auxIntToInt32(v_2_0.AuxInt)
+ cmp := v_2_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (ISEL [6] x y (CMPWconst [0] (SETBC [c] cmp)))
+ // result: (ISEL [c] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64SETBC {
+ break
+ }
+ c := auxIntToInt32(v_2_0.AuxInt)
+ cmp := v_2_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (ISEL [6] x y (CMPconst [0] (SETBCR [c] cmp)))
+ // result: (ISEL [c+4] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64SETBCR {
+ break
+ }
+ c := auxIntToInt32(v_2_0.AuxInt)
+ cmp := v_2_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c + 4)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (ISEL [6] x y (CMPWconst [0] (SETBCR [c] cmp)))
+ // result: (ISEL [c+4] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64SETBCR {
+ break
+ }
+ c := auxIntToInt32(v_2_0.AuxInt)
+ cmp := v_2_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c + 4)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (ISEL [2] x _ (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [2] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [2] _ y (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [6] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [6] x _ (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [6] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [0] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [0] _ y (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [0] x _ (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] _ x (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] _ x (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] y _ (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ y := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] x _ (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [n] z))))
+ // result: (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ n := auxIntToInt64(v_2_0_0.AuxInt)
+ z := v_2_0_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
+ // result: (ISEL [2] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ n := auxIntToInt64(v_2_0_0.AuxInt)
+ z := v_2_0_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [n] z))))
+ // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ n := auxIntToInt64(v_2_0_0.AuxInt)
+ z := v_2_0_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
+ // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ n := auxIntToInt64(v_2_0_0.AuxInt)
+ z := v_2_0_0.Args[0]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 0
+ // result: (ISEL [n+1] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n + 1)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 1
+ // result: (ISEL [n-1] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 1) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n - 1)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 2
+ // result: (ISEL [n] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 2) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqual (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessEqual (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessEqual (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LessEqual cmp)
+ // result: (SETBCR [1] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThan (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessThan (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessThan (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LessThan cmp)
+ // result: (SETBC [0] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MFVSRD (FMOVDconst [c]))
+ // result: (MOVDconst [int64(math.Float64bits(c))])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(c)))
+ return true
+ }
+ // match: (MFVSRD x:(FMOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVDload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64FMOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVBZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVBZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVBZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBZreg y:(Select0 (ANDCCconst [c] _)))
+ // cond: uint64(c) <= 0xFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpSelect0 {
+ break
+ }
+ y_0 := y.Args[0]
+ if y_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRDconst [c] x))
+ // cond: c>=56
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: c>=24
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBZreg (MOVBreg x))
+ // result: (MOVBZreg x)
+ for {
+ if v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (OR <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (OR <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ z_0_0 := z_0.Args[0]
+ if z_0_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVBZreg z:(AND y (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(Select0 (LoweredAtomicLoad8 _ _)))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpPPC64LoweredAtomicLoad8 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: is8BitInt(t) && !t.IsSigned()
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBreg y:(Select0 (ANDCCconst [c] _)))
+ // cond: uint64(c) <= 0x7F
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpSelect0 {
+ break
+ }
+ y_0 := y.Args[0]
+ if y_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0x7F) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c>56
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c==56
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRADconst [c] x))
+ // cond: c>=56
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c>24
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c==24
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: c>=24
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBreg (MOVBZreg x))
+ // result: (MOVBreg x)
+ for {
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: is8BitInt(t) && t.IsSigned()
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVBstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDaddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDaddr {sym} [n] p:(ADD x y))
+ // cond: sym == nil && n == 0
+ // result: p
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ if !(sym == nil && n == 0) {
+ break
+ }
+ v.copyOf(p)
+ return true
+ }
+ // match: (MOVDaddr {sym} [n] ptr)
+ // cond: sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)
+ // result: ptr
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if !(sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi)) {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _))
+ // result: (MFVSRD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpPPC64MFVSRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVDload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem)
+ // result: (FMOVDstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MFVSRD {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVDstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr r:(BRD val) mem)
+ // cond: r.Uses == 1
+ // result: (MOVDBRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ r := v_1
+ if r.Op != OpPPC64BRD {
+ break
+ }
+ val := r.Args[0]
+ mem := v_2
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDBRstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg(ptr)
+ v.AddArg3(v0, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (Bswap64 val) mem)
+ // result: (MOVDBRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpBswap64 {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVDBRstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg(ptr)
+ v.AddArg3(v0, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr idx r:(BRD val) mem)
+ // cond: r.Uses == 1
+ // result: (MOVDBRstoreidx ptr idx val mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ r := v_2
+ if r.Op != OpPPC64BRD {
+ break
+ }
+ val := r.Args[0]
+ mem := v_3
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDBRstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr idx (Bswap64 val) mem)
+ // result: (MOVDBRstoreidx ptr idx val mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpBswap64 {
+ break
+ }
+ val := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVDBRstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHBRstore ptr (MOVHreg x) mem)
+ // result: (MOVHBRstore ptr x mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore ptr (MOVHZreg x) mem)
+ // result: (MOVHBRstore ptr x mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore ptr (MOVWreg x) mem)
+ // result: (MOVHBRstore ptr x mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore ptr (MOVWZreg x) mem)
+ // result: (MOVHBRstore ptr x mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVHZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVHZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVHZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHZreg y:(Select0 (ANDCCconst [c] _)))
+ // cond: uint64(c) <= 0xFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpSelect0 {
+ break
+ }
+ y_0 := y.Args[0]
+ if y_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVHZreg x)))
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRDconst [c] x))
+ // cond: c>=48
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: c>=16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHreg x))
+ // result: (MOVHZreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (OR <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (XOR <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (AND <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ z_0_0 := z_0.Args[0]
+ if z_0_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVHZreg z:(AND y (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVHZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x))))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ z_0_0 := z_0.Args[0]
+ if z_0_0.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && !t.IsSigned()
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && !t.IsSigned()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVHload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHreg y:(Select0 (ANDCCconst [c] _)))
+ // cond: uint64(c) <= 0x7FFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpSelect0 {
+ break
+ }
+ y_0 := y.Args[0]
+ if y_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0x7FFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVHreg x)))
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c>48
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c==48
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRADconst [c] x))
+ // cond: c>=48
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c>16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: c>=16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c==16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg y:(MOVHreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg y:(MOVHZreg x))
+ // result: (MOVHreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && t.IsSigned()
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && t.IsSigned()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVHstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr r:(BRH val) mem)
+ // cond: r.Uses == 1
+ // result: (MOVHBRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ r := v_1
+ if r.Op != OpPPC64BRH {
+ break
+ }
+ val := r.Args[0]
+ mem := v_2
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHBRstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg(ptr)
+ v.AddArg3(v0, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (Bswap16 val) mem)
+ // result: (MOVHBRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpBswap16 {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg(ptr)
+ v.AddArg3(v0, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHZreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx r:(BRH val) mem)
+ // cond: r.Uses == 1
+ // result: (MOVHBRstoreidx ptr idx val mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ r := v_2
+ if r.Op != OpPPC64BRH {
+ break
+ }
+ val := r.Args[0]
+ mem := v_3
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHBRstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (Bswap16 val) mem)
+ // result: (MOVHBRstoreidx ptr idx val mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpBswap16 {
+ break
+ }
+ val := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHBRstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWBRstore ptr (MOVWreg x) mem)
+ // result: (MOVWBRstore ptr x mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWBRstore ptr (MOVWZreg x) mem)
+ // result: (MOVWBRstore ptr x mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVWZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVWZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVWZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWZreg y:(Select0 (ANDCCconst [c] _)))
+ // cond: uint64(c) <= 0xFFFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpSelect0 {
+ break
+ }
+ y_0 := y.Args[0]
+ if y_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFFFFFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(AND (MOVDconst [c]) _))
+ // cond: uint64(c) <= 0xFFFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64AND {
+ break
+ }
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ if y_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFFFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVHZreg x)))
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVWZreg x)))
+ // result: (SRWconst [c] (MOVWZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (SRDconst [c] x))
+ // cond: c>=32
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVHZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVHBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWreg x))
+ // result: (MOVWZreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x))))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ z_0_0 := z_0.Args[0]
+ if z_0_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(AND y (MOVWZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVWZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x))))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ z_0_0 := z_0.Args[0]
+ if z_0_0.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x))))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ z_0_0 := z_0.Args[0]
+ if z_0_0.Op != OpPPC64MOVWZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(Select0 (LoweredAtomicLoad32 _ _)))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpPPC64LoweredAtomicLoad32 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned()
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVWload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWreg y:(Select0 (ANDCCconst [c] _)))
+ // cond: uint64(c) <= 0xFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpSelect0 {
+ break
+ }
+ y_0 := y.Args[0]
+ if y_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(AND (MOVDconst [c]) _))
+ // cond: uint64(c) <= 0x7FFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64AND {
+ break
+ }
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ if y_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0x7FFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVHreg x)))
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVWreg x)))
+ // result: (SRAWconst [c] (MOVWreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c>32
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRADconst [c] x))
+ // cond: c>=32
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c==32
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg y:(MOVWreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVHreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVWZreg x))
+ // result: (MOVWreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned()
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2)))
+ // result: (MOVWstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr r:(BRW val) mem)
+ // cond: r.Uses == 1
+ // result: (MOVWBRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ r := v_1
+ if r.Op != OpPPC64BRW {
+ break
+ }
+ val := r.Args[0]
+ mem := v_2
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWBRstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg(ptr)
+ v.AddArg3(v0, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (Bswap32 val) mem)
+ // result: (MOVWBRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpBswap32 {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDaddr, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg(ptr)
+ v.AddArg3(v0, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c)))
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx r:(BRW val) mem)
+ // cond: r.Uses == 1
+ // result: (MOVWBRstoreidx ptr idx val mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ r := v_2
+ if r.Op != OpPPC64BRW {
+ break
+ }
+ val := r.Args[0]
+ mem := v_3
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWBRstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (Bswap32 val) mem)
+ // result: (MOVWBRstoreidx ptr idx val mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpBswap32 {
+ break
+ }
+ val := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWBRstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2)))
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1) + off2))) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2))))) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MTVSRD (MOVDconst [c]))
+ // cond: !math.IsNaN(math.Float64frombits(uint64(c)))
+ // result: (FMOVDconst [math.Float64frombits(uint64(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(!math.IsNaN(math.Float64frombits(uint64(c)))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
+ return true
+ }
+ // match: (MTVSRD x:(MOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (FMOVDload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (ADDconst [c] x))
+ // cond: is32Bit(-c)
+ // result: (SUBFCconst [-c] x)
+ for {
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NEG (SUBFCconst [c] x))
+ // cond: is32Bit(-c)
+ // result: (ADDconst [-c] x)
+ for {
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NEG (SUB x y))
+ // result: (SUB y x)
+ for {
+ if v_0.Op != OpPPC64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpPPC64SUB)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpPPC64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [^(c|d)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NotEqual (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (NotEqual (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (NotEqual (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NotEqual cmp)
+ // result: (SETBCR [2] cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(2)
+ v.AddArg(cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (NOR y y))
+ // result: (ORN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64NOR {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ORN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (OR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ORN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORN x (MOVDconst [-1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|^d])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | ^d)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTL x (MOVDconst [c]))
+ // result: (ROTLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTLW x (MOVDconst [c]))
+ // result: (ROTLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROTLWconst [r] (AND (MOVDconst [m]) x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ROTLWconst [r] (Select0 (ANDCCconst [m] x)))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SETBC(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBC [0] (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBC [0] (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBC [0] (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBC [1] (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBC [1] (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBC [1] (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBC [2] (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBC [2] (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBC [2] (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBC [0] (InvertFlags bool))
+ // result: (SETBC [1] bool)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBC [1] (InvertFlags bool))
+ // result: (SETBC [0] bool)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBC [2] (InvertFlags bool))
+ // result: (SETBC [2] bool)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(2)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBC [n] (InvertFlags bool))
+ // result: (SETBCR [n] bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBC [2] (CMPconst [0] (Select0 (ANDCCconst [1] z))))
+ // result: (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ z := v_0_0_0.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [1] z))))
+ // result: (XORconst [1] (Select0 <typ.UInt64> (ANDCCconst [1] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ z := v_0_0_0.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
+ // result: (SETBC [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ n := auxIntToInt64(v_0_0_0.AuxInt)
+ z := v_0_0_0.Args[0]
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBC [2] (CMPconst [0] a:(AND y z)))
+ // cond: a.Uses == 1
+ // result: (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_0.Args[0]
+ if a.Op != OpPPC64AND {
+ break
+ }
+ z := a.Args[1]
+ y := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(y, z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBC [2] (CMPconst [0] o:(OR y z)))
+ // cond: o.Uses == 1
+ // result: (SETBC [2] (Select1 <types.TypeFlags> (ORCC y z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ o := v_0.Args[0]
+ if o.Op != OpPPC64OR {
+ break
+ }
+ z := o.Args[1]
+ y := o.Args[0]
+ if !(o.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(y, z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBC [2] (CMPconst [0] a:(XOR y z)))
+ // cond: a.Uses == 1
+ // result: (SETBC [2] (Select1 <types.TypeFlags> (XORCC y z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_0.Args[0]
+ if a.Op != OpPPC64XOR {
+ break
+ }
+ z := a.Args[1]
+ y := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(y, z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SETBCR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBCR [0] (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBCR [0] (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBCR [0] (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBCR [1] (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBCR [1] (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBCR [1] (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBCR [2] (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SETBCR [2] (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBCR [2] (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SETBCR [0] (InvertFlags bool))
+ // result: (SETBCR [1] bool)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBCR [1] (InvertFlags bool))
+ // result: (SETBCR [0] bool)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBCR [2] (InvertFlags bool))
+ // result: (SETBCR [2] bool)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(2)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBCR [n] (InvertFlags bool))
+ // result: (SETBC [n] bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_0.Args[0]
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(bool)
+ return true
+ }
+ // match: (SETBCR [2] (CMPconst [0] (Select0 (ANDCCconst [1] z))))
+ // result: (Select0 <typ.UInt64> (ANDCCconst [1] z ))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ z := v_0_0_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [1] z))))
+ // result: (Select0 <typ.UInt64> (ANDCCconst [1] z ))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ z := v_0_0_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z))))
+ // result: (SETBCR [2] (Select1 <types.TypeFlags> (ANDCCconst [n] z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ n := auxIntToInt64(v_0_0_0.AuxInt)
+ z := v_0_0_0.Args[0]
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.AddArg(z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBCR [2] (CMPconst [0] a:(AND y z)))
+ // cond: a.Uses == 1
+ // result: (SETBCR [2] (Select1 <types.TypeFlags> (ANDCC y z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_0.Args[0]
+ if a.Op != OpPPC64AND {
+ break
+ }
+ z := a.Args[1]
+ y := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(y, z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBCR [2] (CMPconst [0] o:(OR y z)))
+ // cond: o.Uses == 1
+ // result: (SETBCR [2] (Select1 <types.TypeFlags> (ORCC y z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ o := v_0.Args[0]
+ if o.Op != OpPPC64OR {
+ break
+ }
+ z := o.Args[1]
+ y := o.Args[0]
+ if !(o.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(y, z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETBCR [2] (CMPconst [0] a:(XOR y z)))
+ // cond: a.Uses == 1
+ // result: (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_0.Op != OpPPC64CMPconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_0.Args[0]
+ if a.Op != OpPPC64XOR {
+ break
+ }
+ z := a.Args[1]
+ y := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(y, z)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLD x (MOVDconst [c]))
+ // result: (SLDconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLDconst [l] (SRWconst [r] x))
+ // cond: mergePPC64SldiSrw(l,r) != 0
+ // result: (RLWINM [mergePPC64SldiSrw(l,r)] x)
+ for {
+ l := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64SldiSrw(l, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64SldiSrw(l, r))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVBZreg x))
+ // cond: c < 8 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 8 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 56, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVHZreg x))
+ // cond: c < 16 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 16 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 48, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVWZreg x))
+ // cond: c < 32 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(Select0 (ANDCCconst [d] x)))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_0.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(AND (MOVDconst [d]) x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_1
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
+ continue
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (SLDconst [c] z:(MOVWreg x))
+ // cond: c < 32 && buildcfg.GOPPC64 >= 9
+ // result: (EXTSWSLconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64EXTSWSLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLW x (MOVDconst [c]))
+ // result: (SLWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLWconst [c] z:(MOVBZreg x))
+ // cond: z.Uses == 1 && c < 8
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1 && c < 8) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 24, 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(MOVHZreg x))
+ // cond: z.Uses == 1 && c < 16
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1 && c < 16) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 16, 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(Select0 (ANDCCconst [d] x)))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpSelect0 {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_0.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(AND (MOVDconst [d]) x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_1
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
+ continue
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (SLWconst [c] z:(MOVWreg x))
+ // cond: c < 32 && buildcfg.GOPPC64 >= 9
+ // result: (EXTSWSLconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64EXTSWSLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAD x (MOVDconst [c]))
+ // result: (SRADconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAW x (MOVDconst [c]))
+ // result: (SRAWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRD x (MOVDconst [c]))
+ // result: (SRDconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRW x (MOVDconst [c]))
+ // result: (SRWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVDconst [c]))
+ // cond: is32Bit(-c)
+ // result: (ADDconst [-c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SUBFCconst [c] x)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SUBE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBE x y (Select1 <typ.UInt64> (SUBCconst (MOVDconst [0]) [0])))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != typ.UInt64 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64SUBCconst || auxIntToInt64(v_2_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64SUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBFCconst [c] (NEG x))
+ // result: (ADDconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBFCconst [c] (SUBFCconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBFCconst [0] x)
+ // result: (NEG x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [1] (SETBCR [n] cmp))
+ // result: (SETBC [n] cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64SETBCR {
+ break
+ }
+ n := auxIntToInt32(v_0.AuxInt)
+ cmp := v_0.Args[0]
+ v.reset(OpPPC64SETBC)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(cmp)
+ return true
+ }
+ // match: (XORconst [1] (SETBC [n] cmp))
+ // result: (SETBCR [n] cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64SETBC {
+ break
+ }
+ n := auxIntToInt32(v_0.AuxInt)
+ cmp := v_0.Args[0]
+ v.reset(OpPPC64SETBCR)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(cmp)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (POPCNTW (MOVHZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (POPCNTW (MOVWZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNTB (MOVBZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPrefetchCache(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCache ptr mem)
+ // result: (DCBT ptr mem [0])
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64DCBT)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPrefetchCacheStreamed(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCacheStreamed ptr mem)
+ // result: (DCBT ptr mem [16])
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64DCBT)
+ v.AuxInt = int64ToAuxInt(16)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0xFFF0)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (ISEL [0] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRWconst (ZeroExt16to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (ISEL [0] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(16)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (ISEL [2] (SRD <t> (MOVHZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0x00F0)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 <t> x y)
+ // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(15)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0xFFF0)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 <t> x y)
+ // result: (ISEL [0] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPWUconst y [16]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(15)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAWconst (SignExt16to32 x) [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAWconst (SignExt16to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 <t> x y)
+ // result: (ISEL [0] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (CMPUconst y [16]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(15)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(16)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 <t> x y)
+ // result: (ISEL [2] (SRAD <t> (MOVHreg x) y) (SRADconst <t> (MOVHreg x) [15]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(15)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0x00F0)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0xFFE0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (ISEL [0] (SRW <t> x y) (MOVDconst [0]) (CMPWUconst y [32]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (ISEL [0] (SRW <t> x y) (MOVDconst [0]) (CMPUconst y [32]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (ISEL [2] (SRW <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0x00E0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x16 <t> x y)
+ // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFE0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0xFFE0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x32 <t> x y)
+ // result: (ISEL [0] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPWUconst y [32]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAWconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x64 <t> x y)
+ // result: (ISEL [0] (SRAW <t> x y) (SRAWconst <t> x [31]) (CMPUconst y [32]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x8 <t> x y)
+ // result: (ISEL [2] (SRAW <t> x y) (SRAWconst <t> x [31]) (Select1 <types.TypeFlags> (ANDCCconst [0x00E0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRAWconst, t)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0x00E0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0xFFC0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (ISEL [0] (SRD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRDconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (ISEL [0] (SRD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (ISEL [2] (SRD <t> x y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0x00C0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 <t> x y)
+ // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFC0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0xFFC0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 <t> x y)
+ // result: (ISEL [0] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (SRADconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRADconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 <t> x y)
+ // result: (ISEL [0] (SRAD <t> x y) (SRADconst <t> x [63]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 <t> x y)
+ // result: (ISEL [2] (SRAD <t> x y) (SRADconst <t> x [63]) (Select1 <types.TypeFlags> (ANDCCconst [0x00C0] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(0x00C0)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0xFFF8)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (ISEL [0] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRWconst (ZeroExt8to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (ISEL [0] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(8)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (ISEL [2] (SRD <t> (MOVBZreg x) y) (MOVDconst [0]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0x00F8)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 <t> x y)
+ // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0xFFF8] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(7)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0xFFF8)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 <t> x y)
+ // result: (ISEL [0] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPWUconst y [8]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(7)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAWconst (SignExt8to32 x) [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAWconst (SignExt8to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 <t> x y)
+ // result: (ISEL [0] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (CMPUconst y [8]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(7)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(8)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 <t> x y)
+ // result: (ISEL [2] (SRAD <t> (MOVBreg x) y) (SRADconst <t> (MOVBreg x) [7]) (Select1 <types.TypeFlags> (ANDCCconst [0x00F8] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRAD, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
+ v2.AuxInt = int64ToAuxInt(7)
+ v2.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v4.AuxInt = int64ToAuxInt(0x00F8)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uhilo x y))
+ // result: (MULHDU x y)
+ for {
+ if v_0.Op != OpMul64uhilo {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpPPC64MULHDU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1]))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpPPC64ADDE, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDCconst, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBE, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpPPC64SUBCconst, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [m] (ROTLWconst [r] x)))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ROTLWconst {
+ break
+ }
+ r := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [m] (ROTLW x r)))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ROTLW {
+ break
+ }
+ r := v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [m] (SRWconst x [s])))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0_0.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [m] (SRWconst x [s])))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [-1] x))
+ // result: x
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != -1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [0] _))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [c] y:(MOVBZreg _)))
+ // cond: c&0xFF == 0xFF
+ // result: y
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [0xFF] (MOVBreg x)))
+ // result: (MOVBZreg x)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [c] y:(MOVHZreg _)))
+ // cond: c&0xFFFF == 0xFFFF
+ // result: y
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [0xFFFF] (MOVHreg x)))
+ // result: (MOVHZreg x)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [c] (MOVBZreg x)))
+ // result: (Select0 (ANDCCconst [c&0xFF] x))
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(c & 0xFF)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [c] (MOVHZreg x)))
+ // result: (Select0 (ANDCCconst [c&0xFFFF] x))
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [c] (MOVWZreg x)))
+ // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x))
+ for {
+ if v_0.Op != OpPPC64ANDCCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (ANDCCconst [1] z:(SRADconst [63] x)))
+ // cond: z.Uses == 1
+ // result: (SRDconst [63] x)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64SRADconst || auxIntToInt64(z.AuxInt) != 63 {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uhilo x y))
+ // result: (MULLD x y)
+ for {
+ if v_0.Op != OpMul64uhilo {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpPPC64MULLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select1 (Add64carry x y c))
+ // result: (ADDZEzero (Select1 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1])))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpPPC64ADDZEzero)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ADDE, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpPPC64ADDCconst, types.NewTuple(typ.UInt64, typ.UInt64))
+ v3.AuxInt = int64ToAuxInt(-1)
+ v3.AddArg(c)
+ v2.AddArg(v3)
+ v1.AddArg3(x, y, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (ADDCconst n:(ADDZEzero x) [-1]))
+ // cond: n.Uses <= 2
+ // result: x
+ for {
+ if v_0.Op != OpPPC64ADDCconst || auxIntToInt64(v_0.AuxInt) != -1 {
+ break
+ }
+ n := v_0.Args[0]
+ if n.Op != OpPPC64ADDZEzero {
+ break
+ }
+ x := n.Args[0]
+ if !(n.Uses <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEG (SUBZEzero (Select1 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpPPC64NEG)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBZEzero, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpPPC64SUBE, types.NewTuple(typ.UInt64, typ.UInt64))
+ v3 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpPPC64SUBCconst, types.NewTuple(typ.UInt64, typ.UInt64))
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0]))
+ // cond: n.Uses <= 2
+ // result: x
+ for {
+ if v_0.Op != OpPPC64SUBCconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ n := v_0.Args[0]
+ if n.Op != OpPPC64NEG {
+ break
+ }
+ n_0 := n.Args[0]
+ if n_0.Op != OpPPC64SUBZEzero {
+ break
+ }
+ x := n_0.Args[0]
+ if !(n.Uses <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (ANDCCconst [0] _))
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpPPC64CALLstatic || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpPPC64MOVDstore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpPPC64MOVDstore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpPPC64MOVDstore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpPPC64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpPPC64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRADconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpPPC64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpTrunc16to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc16to8 <t> x)
+ // cond: t.IsSigned()
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc16to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to16 <t> x)
+ // cond: t.IsSigned()
+ // result: (MOVHreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 x)
+ // result: (MOVHZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 <t> x)
+ // cond: t.IsSigned()
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 <t> x)
+ // cond: t.IsSigned()
+ // result: (MOVHreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 x)
+ // result: (MOVHZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 <t> x)
+ // cond: t.IsSigned()
+ // result: (MOVWreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 x)
+ // result: (MOVWZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to8 <t> x)
+ // cond: t.IsSigned()
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVHstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVHstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstorezero [2] destptr (MOVHstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVWstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVWstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstorezero [4] destptr (MOVWstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(4)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [8] {t} destptr mem)
+ // result: (MOVDstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [12] {t} destptr mem)
+ // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [16] {t} destptr mem)
+ // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [24] {t} destptr mem)
+ // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(8)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [32] {t} destptr mem)
+ // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(16)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: buildcfg.GOPPC64 <= 8 && s < 64
+ // result: (LoweredZeroShort [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(buildcfg.GOPPC64 <= 8 && s < 64) {
+ break
+ }
+ v.reset(OpPPC64LoweredZeroShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (LoweredZero [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64LoweredZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s < 128 && buildcfg.GOPPC64 >= 9
+ // result: (LoweredQuadZeroShort [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s < 128 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadZeroShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (LoweredQuadZero [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockPPC64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockPPC64EQ:
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cmp)
+ return true
+ }
+ // match: (EQ (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (EQ (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (EQ (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64GE:
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cmp)
+ return true
+ }
+ // match: (GE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GE (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GE (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64GT:
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cmp)
+ return true
+ }
+ // match: (GT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GT (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (GT (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpPPC64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpPPC64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpPPC64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpPPC64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpPPC64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpPPC64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cc)
+ return true
+ }
+ // match: (If (FLessThan cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpPPC64FLessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FLT, cc)
+ return true
+ }
+ // match: (If (FLessEqual cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpPPC64FLessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FLE, cc)
+ return true
+ }
+ // match: (If (FGreaterThan cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpPPC64FGreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FGT, cc)
+ return true
+ }
+ // match: (If (FGreaterEqual cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpPPC64FGreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (CMPWconst [0] (Select0 <typ.UInt32> (ANDCCconst [1] cond))) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(cond.Pos, OpSelect0, typ.UInt32)
+ v2 := b.NewValue0(cond.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v2.AuxInt = int64ToAuxInt(1)
+ v2.AddArg(cond)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ case BlockPPC64LE:
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cmp)
+ return true
+ }
+ // match: (LE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LE (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LE (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64LT:
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cmp)
+ return true
+ }
+ // match: (LT (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LT (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (LT (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64NE:
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (Equal cc)))) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64Equal {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (NotEqual cc)))) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64NotEqual {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessThan cc)))) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64LessThan {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessEqual cc)))) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64LessEqual {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterThan cc)))) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64GreaterThan {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterEqual cc)))) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64GreaterEqual {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessThan cc)))) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64FLessThan {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FLT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessEqual cc)))) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64FLessEqual {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FLE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterThan cc)))) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64FGreaterThan {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FGT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterEqual cc)))) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpPPC64FGreaterEqual {
+ break
+ }
+ cc := v_0_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FGE, cc)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cmp)
+ return true
+ }
+ // match: (NE (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (NE (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no)
+ // result: (NE (Select1 <types.TypeFlags> z) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0_0.Args[0]
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v0.AddArg(z)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.NewTuple(typ.Int64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (Select1 <types.TypeFlags> (ORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (Select1 <types.TypeFlags> (XORCC x y)) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64latelower.go b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
new file mode 100644
index 0000000..771dd6a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritePPC64latelower.go
@@ -0,0 +1,705 @@
+// Code generated from _gen/PPC64latelower.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "internal/buildcfg"
+import "cmd/compile/internal/types"
+
+func rewriteValuePPC64latelower(v *Value) bool {
+ switch v.Op {
+ case OpPPC64ADD:
+ return rewriteValuePPC64latelower_OpPPC64ADD(v)
+ case OpPPC64AND:
+ return rewriteValuePPC64latelower_OpPPC64AND(v)
+ case OpPPC64CMPconst:
+ return rewriteValuePPC64latelower_OpPPC64CMPconst(v)
+ case OpPPC64ISEL:
+ return rewriteValuePPC64latelower_OpPPC64ISEL(v)
+ case OpPPC64RLDICL:
+ return rewriteValuePPC64latelower_OpPPC64RLDICL(v)
+ case OpPPC64SETBC:
+ return rewriteValuePPC64latelower_OpPPC64SETBC(v)
+ case OpPPC64SETBCR:
+ return rewriteValuePPC64latelower_OpPPC64SETBCR(v)
+ case OpSelect0:
+ return rewriteValuePPC64latelower_OpSelect0(v)
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD (MOVDconst [m]) x)
+ // cond: supportsPPC64PCRel() && (m<<30)>>30 == m
+ // result: (ADDconst [m] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(supportsPPC64PCRel() && (m<<30)>>30 == m) {
+ continue
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(m)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AND <t> x:(MOVDconst [m]) n)
+ // cond: t.Size() <= 2
+ // result: (Select0 (ANDCCconst [int64(int16(m))] n))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(t.Size() <= 2) {
+ continue
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v0.AuxInt = int64ToAuxInt(int64(int16(m)))
+ v0.AddArg(n)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (AND x:(MOVDconst [m]) n)
+ // cond: isPPC64ValidShiftMask(m)
+ // result: (RLDICL [encodePPC64RotateMask(0,m,64)] n)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(isPPC64ValidShiftMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(n)
+ return true
+ }
+ break
+ }
+ // match: (AND x:(MOVDconst [m]) n)
+ // cond: m != 0 && isPPC64ValidShiftMask(^m)
+ // result: (RLDICR [encodePPC64RotateMask(0,m,64)] n)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(m != 0 && isPPC64ValidShiftMask(^m)) {
+ continue
+ }
+ v.reset(OpPPC64RLDICR)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(n)
+ return true
+ }
+ break
+ }
+ // match: (AND <t> x:(MOVDconst [m]) n)
+ // cond: t.Size() == 4 && isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(0,m,32)] n)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if x.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(x.AuxInt)
+ n := v_1
+ if !(t.Size() == 4 && isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg(n)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst [0] z:(ADD x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ADD {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(AND x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(ANDN x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ANDN {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(OR x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64OR {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(SUB x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64SUB {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(NOR x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64NOR {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(XOR x y))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(NEG x))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64NEG {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(CNTLZD x))
+ // cond: v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64CNTLZD {
+ break
+ }
+ if !(v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst [0] z:(ADDconst [c] x))
+ // cond: int64(int16(c)) == c && v.Block == z.Block
+ // result: (CMPconst [0] convertPPC64OpToOpCC(z))
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ z := v_0
+ if z.Op != OpPPC64ADDconst {
+ break
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if !(int64(int16(c)) == c && v.Block == z.Block) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(convertPPC64OpToOpCC(z))
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ADDCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ADDCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ANDCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ANDCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ANDNCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ANDNCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ORCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ORCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(SUBCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64SUBCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(NORCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64NORCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(XORCC x y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XORCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(ADDCCconst y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64ADDCCconst {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(NEGCC y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64NEGCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ // match: (CMPconst <t> [0] (Select0 z:(CNTLZDCC y)))
+ // result: (Select1 <t> z)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpSelect0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64CNTLZDCC {
+ break
+ }
+ v.reset(OpSelect1)
+ v.Type = t
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64ISEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ISEL [a] x (MOVDconst [0]) z)
+ // result: (ISELZ [a] x z)
+ for {
+ a := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ z := v_2
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(a)
+ v.AddArg2(x, z)
+ return true
+ }
+ // match: (ISEL [a] (MOVDconst [0]) y z)
+ // result: (ISELZ [a^0x4] y z)
+ for {
+ a := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ z := v_2
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(a ^ 0x4)
+ v.AddArg2(y, z)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64RLDICL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RLDICL [em] x:(SRDconst [s] a))
+ // cond: (em&0xFF0000) == 0
+ // result: (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a)
+ for {
+ em := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpPPC64SRDconst {
+ break
+ }
+ s := auxIntToInt64(x.AuxInt)
+ a := x.Args[0]
+ if !((em & 0xFF0000) == 0) {
+ break
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(mergePPC64RLDICLandSRDconst(em, s))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64SETBC(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBC [2] cmp)
+ // cond: buildcfg.GOPPC64 <= 9
+ // result: (ISELZ [2] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ cmp := v_0
+ if !(buildcfg.GOPPC64 <= 9) {
+ break
+ }
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (SETBC [0] cmp)
+ // cond: buildcfg.GOPPC64 <= 9
+ // result: (ISELZ [0] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ cmp := v_0
+ if !(buildcfg.GOPPC64 <= 9) {
+ break
+ }
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (SETBC [1] cmp)
+ // cond: buildcfg.GOPPC64 <= 9
+ // result: (ISELZ [1] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0
+ if !(buildcfg.GOPPC64 <= 9) {
+ break
+ }
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpPPC64SETBCR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBCR [2] cmp)
+ // cond: buildcfg.GOPPC64 <= 9
+ // result: (ISELZ [6] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ cmp := v_0
+ if !(buildcfg.GOPPC64 <= 9) {
+ break
+ }
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (SETBCR [0] cmp)
+ // cond: buildcfg.GOPPC64 <= 9
+ // result: (ISELZ [4] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ cmp := v_0
+ if !(buildcfg.GOPPC64 <= 9) {
+ break
+ }
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (SETBCR [1] cmp)
+ // cond: buildcfg.GOPPC64 <= 9
+ // result: (ISELZ [5] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0
+ if !(buildcfg.GOPPC64 <= 9) {
+ break
+ }
+ v.reset(OpPPC64ISELZ)
+ v.AuxInt = int32ToAuxInt(5)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64latelower_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 z:(ANDCCconst [m] x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(m)
+ // result: (RLDICL [encodePPC64RotateMask(0,m,64)] x)
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDCCconst {
+ break
+ }
+ m := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLDICL)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 64))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteBlockPPC64latelower(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
new file mode 100644
index 0000000..52ddca1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -0,0 +1,9124 @@
+// Code generated from _gen/RISCV64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValueRISCV64(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpRISCV64FABSD
+ return true
+ case OpAdd16:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpRISCV64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpRISCV64FADDD
+ return true
+ case OpAdd8:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAddr:
+ return rewriteValueRISCV64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd32:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd64:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd8:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAndB:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpRISCV64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpRISCV64LoweredAtomicAdd64
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpRISCV64LoweredAtomicAnd32
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueRISCV64_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueRISCV64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpRISCV64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpRISCV64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpRISCV64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpRISCV64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpRISCV64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicOr32:
+ v.Op = OpRISCV64LoweredAtomicOr32
+ return true
+ case OpAtomicOr8:
+ return rewriteValueRISCV64_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ v.Op = OpRISCV64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpRISCV64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueRISCV64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpRISCV64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom32:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom64:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom8:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpConst16:
+ return rewriteValueRISCV64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueRISCV64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueRISCV64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueRISCV64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueRISCV64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueRISCV64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueRISCV64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueRISCV64_OpConstNil(v)
+ case OpCopysign:
+ v.Op = OpRISCV64FSGNJD
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpRISCV64FCVTWS
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpRISCV64FCVTLS
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpRISCV64FCVTDS
+ return true
+ case OpCvt32to32F:
+ v.Op = OpRISCV64FCVTSW
+ return true
+ case OpCvt32to64F:
+ v.Op = OpRISCV64FCVTDW
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpRISCV64FCVTWD
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpRISCV64FCVTSD
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpRISCV64FCVTLD
+ return true
+ case OpCvt64to32F:
+ v.Op = OpRISCV64FCVTSL
+ return true
+ case OpCvt64to64F:
+ v.Op = OpRISCV64FCVTDL
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueRISCV64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueRISCV64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueRISCV64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpRISCV64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpRISCV64DIVUW
+ return true
+ case OpDiv64:
+ return rewriteValueRISCV64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpRISCV64FDIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpRISCV64DIVU
+ return true
+ case OpDiv8:
+ return rewriteValueRISCV64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueRISCV64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueRISCV64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueRISCV64_OpEq32(v)
+ case OpEq32F:
+ v.Op = OpRISCV64FEQS
+ return true
+ case OpEq64:
+ return rewriteValueRISCV64_OpEq64(v)
+ case OpEq64F:
+ v.Op = OpRISCV64FEQD
+ return true
+ case OpEq8:
+ return rewriteValueRISCV64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueRISCV64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueRISCV64_OpEqPtr(v)
+ case OpFMA:
+ v.Op = OpRISCV64FMADDD
+ return true
+ case OpGetCallerPC:
+ v.Op = OpRISCV64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpRISCV64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpRISCV64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueRISCV64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueRISCV64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpRISCV64MULH
+ return true
+ case OpHmul64u:
+ v.Op = OpRISCV64MULHU
+ return true
+ case OpInterCall:
+ v.Op = OpRISCV64CALLinter
+ return true
+ case OpIsInBounds:
+ v.Op = OpLess64U
+ return true
+ case OpIsNonNil:
+ v.Op = OpRISCV64SNEZ
+ return true
+ case OpIsSliceInBounds:
+ v.Op = OpLeq64U
+ return true
+ case OpLeq16:
+ return rewriteValueRISCV64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueRISCV64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueRISCV64_OpLeq32(v)
+ case OpLeq32F:
+ v.Op = OpRISCV64FLES
+ return true
+ case OpLeq32U:
+ return rewriteValueRISCV64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueRISCV64_OpLeq64(v)
+ case OpLeq64F:
+ v.Op = OpRISCV64FLED
+ return true
+ case OpLeq64U:
+ return rewriteValueRISCV64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueRISCV64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueRISCV64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueRISCV64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueRISCV64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueRISCV64_OpLess32(v)
+ case OpLess32F:
+ v.Op = OpRISCV64FLTS
+ return true
+ case OpLess32U:
+ return rewriteValueRISCV64_OpLess32U(v)
+ case OpLess64:
+ v.Op = OpRISCV64SLT
+ return true
+ case OpLess64F:
+ v.Op = OpRISCV64FLTD
+ return true
+ case OpLess64U:
+ v.Op = OpRISCV64SLTU
+ return true
+ case OpLess8:
+ return rewriteValueRISCV64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueRISCV64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueRISCV64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueRISCV64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueRISCV64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueRISCV64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueRISCV64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueRISCV64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueRISCV64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueRISCV64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueRISCV64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueRISCV64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueRISCV64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueRISCV64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueRISCV64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueRISCV64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueRISCV64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueRISCV64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueRISCV64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueRISCV64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueRISCV64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueRISCV64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueRISCV64_OpMod32(v)
+ case OpMod32u:
+ v.Op = OpRISCV64REMUW
+ return true
+ case OpMod64:
+ return rewriteValueRISCV64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpRISCV64REMU
+ return true
+ case OpMod8:
+ return rewriteValueRISCV64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueRISCV64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueRISCV64_OpMove(v)
+ case OpMul16:
+ return rewriteValueRISCV64_OpMul16(v)
+ case OpMul32:
+ v.Op = OpRISCV64MULW
+ return true
+ case OpMul32F:
+ v.Op = OpRISCV64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpRISCV64MUL
+ return true
+ case OpMul64F:
+ v.Op = OpRISCV64FMULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpRISCV64LoweredMuluhilo
+ return true
+ case OpMul64uover:
+ v.Op = OpRISCV64LoweredMuluover
+ return true
+ case OpMul8:
+ return rewriteValueRISCV64_OpMul8(v)
+ case OpNeg16:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpRISCV64FNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpRISCV64FNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeq16:
+ return rewriteValueRISCV64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueRISCV64_OpNeq32(v)
+ case OpNeq32F:
+ v.Op = OpRISCV64FNES
+ return true
+ case OpNeq64:
+ return rewriteValueRISCV64_OpNeq64(v)
+ case OpNeq64F:
+ v.Op = OpRISCV64FNED
+ return true
+ case OpNeq8:
+ return rewriteValueRISCV64_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueRISCV64_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueRISCV64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpRISCV64LoweredNilCheck
+ return true
+ case OpNot:
+ v.Op = OpRISCV64SEQZ
+ return true
+ case OpOffPtr:
+ return rewriteValueRISCV64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr32:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr64:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr8:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOrB:
+ v.Op = OpRISCV64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueRISCV64_OpPanicBounds(v)
+ case OpPubBarrier:
+ v.Op = OpRISCV64LoweredPubBarrier
+ return true
+ case OpRISCV64ADD:
+ return rewriteValueRISCV64_OpRISCV64ADD(v)
+ case OpRISCV64ADDI:
+ return rewriteValueRISCV64_OpRISCV64ADDI(v)
+ case OpRISCV64AND:
+ return rewriteValueRISCV64_OpRISCV64AND(v)
+ case OpRISCV64ANDI:
+ return rewriteValueRISCV64_OpRISCV64ANDI(v)
+ case OpRISCV64FADDD:
+ return rewriteValueRISCV64_OpRISCV64FADDD(v)
+ case OpRISCV64FADDS:
+ return rewriteValueRISCV64_OpRISCV64FADDS(v)
+ case OpRISCV64FMADDD:
+ return rewriteValueRISCV64_OpRISCV64FMADDD(v)
+ case OpRISCV64FMADDS:
+ return rewriteValueRISCV64_OpRISCV64FMADDS(v)
+ case OpRISCV64FMSUBD:
+ return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
+ case OpRISCV64FMSUBS:
+ return rewriteValueRISCV64_OpRISCV64FMSUBS(v)
+ case OpRISCV64FNMADDD:
+ return rewriteValueRISCV64_OpRISCV64FNMADDD(v)
+ case OpRISCV64FNMADDS:
+ return rewriteValueRISCV64_OpRISCV64FNMADDS(v)
+ case OpRISCV64FNMSUBD:
+ return rewriteValueRISCV64_OpRISCV64FNMSUBD(v)
+ case OpRISCV64FNMSUBS:
+ return rewriteValueRISCV64_OpRISCV64FNMSUBS(v)
+ case OpRISCV64FSUBD:
+ return rewriteValueRISCV64_OpRISCV64FSUBD(v)
+ case OpRISCV64FSUBS:
+ return rewriteValueRISCV64_OpRISCV64FSUBS(v)
+ case OpRISCV64MOVBUload:
+ return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
+ case OpRISCV64MOVBUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBUreg(v)
+ case OpRISCV64MOVBload:
+ return rewriteValueRISCV64_OpRISCV64MOVBload(v)
+ case OpRISCV64MOVBreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBreg(v)
+ case OpRISCV64MOVBstore:
+ return rewriteValueRISCV64_OpRISCV64MOVBstore(v)
+ case OpRISCV64MOVBstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v)
+ case OpRISCV64MOVDload:
+ return rewriteValueRISCV64_OpRISCV64MOVDload(v)
+ case OpRISCV64MOVDnop:
+ return rewriteValueRISCV64_OpRISCV64MOVDnop(v)
+ case OpRISCV64MOVDreg:
+ return rewriteValueRISCV64_OpRISCV64MOVDreg(v)
+ case OpRISCV64MOVDstore:
+ return rewriteValueRISCV64_OpRISCV64MOVDstore(v)
+ case OpRISCV64MOVDstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v)
+ case OpRISCV64MOVHUload:
+ return rewriteValueRISCV64_OpRISCV64MOVHUload(v)
+ case OpRISCV64MOVHUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHUreg(v)
+ case OpRISCV64MOVHload:
+ return rewriteValueRISCV64_OpRISCV64MOVHload(v)
+ case OpRISCV64MOVHreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHreg(v)
+ case OpRISCV64MOVHstore:
+ return rewriteValueRISCV64_OpRISCV64MOVHstore(v)
+ case OpRISCV64MOVHstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v)
+ case OpRISCV64MOVWUload:
+ return rewriteValueRISCV64_OpRISCV64MOVWUload(v)
+ case OpRISCV64MOVWUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWUreg(v)
+ case OpRISCV64MOVWload:
+ return rewriteValueRISCV64_OpRISCV64MOVWload(v)
+ case OpRISCV64MOVWreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWreg(v)
+ case OpRISCV64MOVWstore:
+ return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
+ case OpRISCV64MOVWstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v)
+ case OpRISCV64NEG:
+ return rewriteValueRISCV64_OpRISCV64NEG(v)
+ case OpRISCV64NEGW:
+ return rewriteValueRISCV64_OpRISCV64NEGW(v)
+ case OpRISCV64OR:
+ return rewriteValueRISCV64_OpRISCV64OR(v)
+ case OpRISCV64ORI:
+ return rewriteValueRISCV64_OpRISCV64ORI(v)
+ case OpRISCV64SEQZ:
+ return rewriteValueRISCV64_OpRISCV64SEQZ(v)
+ case OpRISCV64SLL:
+ return rewriteValueRISCV64_OpRISCV64SLL(v)
+ case OpRISCV64SLLI:
+ return rewriteValueRISCV64_OpRISCV64SLLI(v)
+ case OpRISCV64SLT:
+ return rewriteValueRISCV64_OpRISCV64SLT(v)
+ case OpRISCV64SLTI:
+ return rewriteValueRISCV64_OpRISCV64SLTI(v)
+ case OpRISCV64SLTIU:
+ return rewriteValueRISCV64_OpRISCV64SLTIU(v)
+ case OpRISCV64SLTU:
+ return rewriteValueRISCV64_OpRISCV64SLTU(v)
+ case OpRISCV64SNEZ:
+ return rewriteValueRISCV64_OpRISCV64SNEZ(v)
+ case OpRISCV64SRA:
+ return rewriteValueRISCV64_OpRISCV64SRA(v)
+ case OpRISCV64SRAI:
+ return rewriteValueRISCV64_OpRISCV64SRAI(v)
+ case OpRISCV64SRAW:
+ return rewriteValueRISCV64_OpRISCV64SRAW(v)
+ case OpRISCV64SRL:
+ return rewriteValueRISCV64_OpRISCV64SRL(v)
+ case OpRISCV64SRLI:
+ return rewriteValueRISCV64_OpRISCV64SRLI(v)
+ case OpRISCV64SRLW:
+ return rewriteValueRISCV64_OpRISCV64SRLW(v)
+ case OpRISCV64SUB:
+ return rewriteValueRISCV64_OpRISCV64SUB(v)
+ case OpRISCV64SUBW:
+ return rewriteValueRISCV64_OpRISCV64SUBW(v)
+ case OpRISCV64XOR:
+ return rewriteValueRISCV64_OpRISCV64XOR(v)
+ case OpRotateLeft16:
+ return rewriteValueRISCV64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueRISCV64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueRISCV64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueRISCV64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpRISCV64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpRISCV64LoweredRound64F
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueRISCV64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueRISCV64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueRISCV64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueRISCV64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueRISCV64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueRISCV64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueRISCV64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueRISCV64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueRISCV64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueRISCV64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueRISCV64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueRISCV64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueRISCV64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueRISCV64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueRISCV64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueRISCV64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueRISCV64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueRISCV64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueRISCV64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueRISCV64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueRISCV64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueRISCV64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueRISCV64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueRISCV64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueRISCV64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueRISCV64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueRISCV64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueRISCV64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueRISCV64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueRISCV64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueRISCV64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueRISCV64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueRISCV64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueRISCV64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpRISCV64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpRISCV64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpRISCV64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueRISCV64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpRISCV64FSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpRISCV64FSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpRISCV64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueRISCV64_OpStore(v)
+ case OpSub16:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub32:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpRISCV64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpRISCV64FSUBD
+ return true
+ case OpSub8:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpTailCall:
+ v.Op = OpRISCV64CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpRISCV64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor32:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor64:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor8:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpZero:
+ return rewriteValueRISCV64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpRISCV64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpRISCV64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpRISCV64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr) (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val)) (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredAtomicAnd32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr)
+ v0.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpRISCV64NOT, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRISCV64XORI, typ.UInt32)
+ v3.AuxInt = int64ToAuxInt(0xff)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v2.AddArg2(v3, v5)
+ v1.AddArg(v2)
+ v.AddArg3(v0, v1, mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpRISCV64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredAtomicOr32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr)
+ v0.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(val)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3)
+ v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(3)
+ v4.AddArg(ptr)
+ v3.AddArg(v4)
+ v1.AddArg2(v2, v3)
+ v.AddArg3(v0, v1, mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v2.AuxInt = int64ToAuxInt(1)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t)
+ v3.AuxInt = int64ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpRISCV64AND, t)
+ v4.AddArg2(x, y)
+ v3.AddArg(v4)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst32F(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const32F [val])
+ // result: (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpRISCV64FMVSX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(math.Float32bits(val)))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64F(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64F [val])
+ // result: (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpRISCV64FMVDX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(val)))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [val])
+ // result: (MOVDconst [int64(b2i(val))])
+ for {
+ val := auxIntToBool(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(b2i(val)))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y [false])
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 x y [false])
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y [false])
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // cond: x.Type.IsSigned()
+ // result: (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(x.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 x y)
+ // cond: !x.Type.IsSigned()
+ // result: (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(!x.Type.IsSigned()) {
+ continue
+ }
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SEQZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (SEQZ (SUB <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SEQZ (SUB <typ.Uintptr> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (Not (Less16 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (Not (Less16U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (Not (Less32 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (Not (Less32U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (Not (Less64 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (Not (Less64U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (Not (Less8 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (Not (Less8U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SLT (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SLT (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SLT (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: ( is8BitInt(t) && t.IsSigned())
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: ( is8BitInt(t) && !t.IsSigned())
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && t.IsSigned())
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !t.IsSigned())
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && t.IsSigned())
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !t.IsSigned())
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y [false])
+ // result: (REMW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y [false])
+ // result: (REMW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y [false])
+ // result: (REM x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REM)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (REMW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [32] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [24] dst (MOVDload [24] src mem) (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(8)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v4.AuxInt = int32ToAuxInt(8)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: (s <= 16 || logLargeCopy(v, s))
+ // result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s <= 16 || logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 x y)
+ // result: (MULW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (MULW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (Not (Eq16 x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (Not (Eq32 x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (Not (Eq64 x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (Not (Eq8 x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqB x y)
+ // result: (SNEZ (SUB <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (Not (EqPtr x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpEqPtr, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDI [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD (MOVDconst <t> [val]) x)
+ // cond: is32Bit(val) && !t.IsPtr()
+ // result: (ADDI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ t := v_0.Type
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val) && !t.IsPtr()) {
+ continue
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDI [c] (MOVaddr [d] {s} x))
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVaddr [int32(c)+d] {s} x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(int32(c) + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDI [x] (MOVDconst [y]))
+ // cond: is32Bit(x + y)
+ // result: (MOVDconst [x + y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(is32Bit(x + y)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(x + y)
+ return true
+ }
+ // match: (ADDI [x] (ADDI [y] z))
+ // cond: is32Bit(x + y)
+ // result: (ADDI [x + y] z)
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ z := v_0.Args[0]
+ if !(is32Bit(x + y)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(x + y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ANDI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ANDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDI [0] x)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDI [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDI [x] (MOVDconst [y]))
+ // result: (MOVDconst [x & y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(x & y)
+ return true
+ }
+ // match: (ANDI [x] (ANDI [y] z))
+ // result: (ANDI [x & y] z)
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ANDI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ z := v_0.Args[0]
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(x & y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDD a (FMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDD x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDD)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS a (FMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMADDS x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMADDD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMSUBD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMADDD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FMSUBD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMADDS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMADDS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMSUBS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMADDS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FMSUBS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMSUBD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMADDD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMSUBD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FMADDD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMSUBS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMSUBS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMSUBS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FMADDS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMADDD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMSUBD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMADDD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FNMSUBD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FNMADDS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMADDS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMSUBS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMADDS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FNMSUBS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FNMSUBD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMSUBD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMADDD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMSUBD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FNMADDD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FNMSUBS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMSUBS neg:(FNEGS x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGS {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMSUBS x y neg:(FNEGS z))
+ // cond: neg.Uses == 1
+ // result: (FNMADDS x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGS {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBD a (FMULD x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMSUBD x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBD)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ // match: (FSUBD (FMULD x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBD x y a)
+ for {
+ if v_0.Op != OpRISCV64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBD)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS a (FMULS x y))
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FNMSUBS x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpRISCV64FMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ // match: (FSUBS (FMULS x y) a)
+ // cond: a.Block.Func.useFMA(v)
+ // result: (FMSUBS x y a)
+ for {
+ if v_0.Op != OpRISCV64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(a.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBUreg x:(FLES _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FLES {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FLTS _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FLTS {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FEQS _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FEQS {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FNES _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FNES {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FLED _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FLED {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FLTD _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FLTD {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FEQD _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FEQD {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(FNED _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64FNED {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SEQZ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SEQZ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SNEZ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SNEZ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SLT _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SLTU _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SLTU {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(ANDI [c] y))
+ // cond: c >= 0 && int64(uint8(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(uint8(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDI [c] x))
+ // cond: c < 0
+ // result: (ANDI [int64(uint8(c))] x)
+ for {
+ if v_0.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c < 0) {
+ break
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _)))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpRISCV64LoweredAtomicLoad8 {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _)))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpRISCV64LoweredAtomicCas32 {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _)))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpRISCV64LoweredAtomicCas64 {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg x:(ANDI [c] y))
+ // cond: c >= 0 && int64(int8(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(int8(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDnop (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg x:(ANDI [c] y))
+ // cond: c >= 0 && int64(uint16(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(uint16(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDI [c] x))
+ // cond: c < 0
+ // result: (ANDI [int64(uint16(c))] x)
+ for {
+ if v_0.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c < 0) {
+ break
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg x:(ANDI [c] y))
+ // cond: c >= 0 && int64(int16(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(int16(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWUreg x:(ANDI [c] y))
+ // cond: c >= 0 && int64(uint32(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(uint32(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWUreg (ANDI [c] x))
+ // cond: c < 0
+ // result: (AND (MOVDconst [int64(uint32(c))]) x)
+ for {
+ if v_0.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c < 0) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MOVWUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg x:(ANDI [c] y))
+ // cond: c >= 0 && int64(int32(c)) == c
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ANDI {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ if !(c >= 0 && int64(int32(c)) == c) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(ADDIW _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64ADDIW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(SUBW _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SUBW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(NEGW _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64NEGW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MULW _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MULW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(DIVW _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64DIVW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(DIVUW _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64DIVUW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(REMW _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64REMW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(REMUW _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64REMUW {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NEG (SUB x y))
+ // result: (SUB y x)
+ for {
+ if v_0.Op != OpRISCV64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SUB)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEG <t> s:(ADDI [val] (SUB x y)))
+ // cond: s.Uses == 1 && is32Bit(-val)
+ // result: (ADDI [-val] (SUB <t> y x))
+ for {
+ t := v.Type
+ s := v_0
+ if s.Op != OpRISCV64ADDI {
+ break
+ }
+ val := auxIntToInt64(s.AuxInt)
+ s_0 := s.Args[0]
+ if s_0.Op != OpRISCV64SUB {
+ break
+ }
+ y := s_0.Args[1]
+ x := s_0.Args[0]
+ if !(s.Uses == 1 && is32Bit(-val)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-val)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, t)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpRISCV64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEG (MOVDconst [x]))
+ // result: (MOVDconst [-x])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64NEGW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGW (MOVDconst [x]))
+ // result: (MOVDconst [int64(int32(-x))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(-x)))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ORI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ORI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORI [-1] x)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORI [x] (MOVDconst [y]))
+ // result: (MOVDconst [x | y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(x | y)
+ return true
+ }
+ // match: (ORI [x] (ORI [y] z))
+ // result: (ORI [x | y] z)
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ORI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ z := v_0.Args[0]
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(x | y)
+ v.AddArg(z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SEQZ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SEQZ (NEG x))
+ // result: (SEQZ x)
+ for {
+ if v_0.Op != OpRISCV64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SEQZ (SEQZ x))
+ // result: (SNEZ x)
+ for {
+ if v_0.Op != OpRISCV64SEQZ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SEQZ (SNEZ x))
+ // result: (SEQZ x)
+ for {
+ if v_0.Op != OpRISCV64SNEZ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVDconst [val]))
+ // result: (SLLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SLLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLI [x] (MOVDconst [y]))
+ // cond: is32Bit(y << uint32(x))
+ // result: (MOVDconst [y << uint32(x)])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(is32Bit(y << uint32(x))) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(y << uint32(x))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLT x (MOVDconst [val]))
+ // cond: val >= -2048 && val <= 2047
+ // result: (SLTI [val] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ if !(val >= -2048 && val <= 2047) {
+ break
+ }
+ v.reset(OpRISCV64SLTI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLT x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLTI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLTI [x] (MOVDconst [y]))
+ // result: (MOVDconst [b2i(int64(y) < int64(x))])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(int64(y) < int64(x)))
+ return true
+ }
+ // match: (SLTI [x] (ANDI [y] _))
+ // cond: y >= 0 && int64(y) < int64(x)
+ // result: (MOVDconst [1])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ANDI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(y >= 0 && int64(y) < int64(x)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SLTI [x] (ORI [y] _))
+ // cond: y >= 0 && int64(y) >= int64(x)
+ // result: (MOVDconst [0])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ORI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(y >= 0 && int64(y) >= int64(x)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLTIU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLTIU [x] (MOVDconst [y]))
+ // result: (MOVDconst [b2i(uint64(y) < uint64(x))])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(uint64(y) < uint64(x)))
+ return true
+ }
+ // match: (SLTIU [x] (ANDI [y] _))
+ // cond: y >= 0 && uint64(y) < uint64(x)
+ // result: (MOVDconst [1])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ANDI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(y >= 0 && uint64(y) < uint64(x)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SLTIU [x] (ORI [y] _))
+ // cond: y >= 0 && uint64(y) >= uint64(x)
+ // result: (MOVDconst [0])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64ORI {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(y >= 0 && uint64(y) >= uint64(x)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLTU x (MOVDconst [val]))
+ // cond: val >= -2048 && val <= 2047
+ // result: (SLTIU [val] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ if !(val >= -2048 && val <= 2047) {
+ break
+ }
+ v.reset(OpRISCV64SLTIU)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLTU x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SNEZ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SNEZ (NEG x))
+ // result: (SNEZ x)
+ for {
+ if v_0.Op != OpRISCV64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SNEZ (SEQZ x))
+ // result: (SEQZ x)
+ for {
+ if v_0.Op != OpRISCV64SEQZ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SEQZ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SNEZ (SNEZ x))
+ // result: (SNEZ x)
+ for {
+ if v_0.Op != OpRISCV64SNEZ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpRISCV64SNEZ)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVDconst [val]))
+ // result: (SRAI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SRAI <t> [x] (MOVWreg y))
+ // cond: x >= 0 && x <= 31
+ // result: (SRAIW <t> [int64(x)] y)
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 0 && x <= 31) {
+ break
+ }
+ v.reset(OpRISCV64SRAIW)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(x))
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVBreg y))
+ // cond: x >= 8
+ // result: (SRAI [63] (SLLI <t> [56] y))
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 8) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(56)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVHreg y))
+ // cond: x >= 16
+ // result: (SRAI [63] (SLLI <t> [48] y))
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 16) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI <t> [x] (MOVWreg y))
+ // cond: x >= 32
+ // result: (SRAIW [31] y)
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 32) {
+ break
+ }
+ v.reset(OpRISCV64SRAIW)
+ v.AuxInt = int64ToAuxInt(31)
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRAI [x] (MOVDconst [y]))
+ // result: (MOVDconst [int64(y) >> uint32(x)])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(y) >> uint32(x))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAW x (MOVDconst [val]))
+ // result: (SRAIW [int64(val&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRAIW)
+ v.AuxInt = int64ToAuxInt(int64(val & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVDconst [val]))
+ // result: (SRLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLI <t> [x] (MOVWUreg y))
+ // cond: x >= 0 && x <= 31
+ // result: (SRLIW <t> [int64(x)] y)
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ y := v_0.Args[0]
+ if !(x >= 0 && x <= 31) {
+ break
+ }
+ v.reset(OpRISCV64SRLIW)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(x))
+ v.AddArg(y)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVBUreg y))
+ // cond: x >= 8
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ if !(x >= 8) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVHUreg y))
+ // cond: x >= 16
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ if !(x >= 16) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI <t> [x] (MOVWUreg y))
+ // cond: x >= 32
+ // result: (MOVDconst <t> [0])
+ for {
+ t := v.Type
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ if !(x >= 32) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLI [x] (MOVDconst [y]))
+ // result: (MOVDconst [int64(uint64(y) >> uint32(x))])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(y) >> uint32(x)))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRLW x (MOVDconst [val]))
+ // result: (SRLIW [int64(val&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRLIW)
+ v.AuxInt = int64ToAuxInt(int64(val & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [val]))
+ // cond: is32Bit(-val)
+ // result: (ADDI [-val] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(-val)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-val)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB <t> (MOVDconst [val]) y)
+ // cond: is32Bit(-val)
+ // result: (NEG (ADDI <t> [-val] y))
+ for {
+ t := v.Type
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is32Bit(-val)) {
+ break
+ }
+ v.reset(OpRISCV64NEG)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, t)
+ v0.AuxInt = int64ToAuxInt(-val)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUB x (MOVDconst [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBW x (MOVDconst [0]))
+ // result: (ADDIW [0] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRISCV64ADDIW)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBW (MOVDconst [0]) x)
+ // result: (NEGW x)
+ for {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEGW)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (XORI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVDconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVDconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt16to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRL (ZeroExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRA (SignExt8to64 x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpRISCV64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add64carry x y c))
+ // result: (ADD (ADD <typ.UInt64> x y) c)
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (SUB (SUB <typ.UInt64> x y) c)
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpRISCV64SUB)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+ // match: (Select0 m:(LoweredMuluhilo x y))
+ // cond: m.Uses == 1
+ // result: (MULHU x y)
+ for {
+ m := v_0
+ if m.Op != OpRISCV64LoweredMuluhilo {
+ break
+ }
+ y := m.Args[1]
+ x := m.Args[0]
+ if !(m.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64MULHU)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add64carry x y c))
+ // result: (OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpRISCV64OR)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64)
+ s := b.NewValue0(v.Pos, OpRISCV64ADD, typ.UInt64)
+ s.AddArg2(x, y)
+ v0.AddArg2(s, x)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpRISCV64ADD, typ.UInt64)
+ v3.AddArg2(s, c)
+ v2.AddArg2(v3, s)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpRISCV64OR)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64)
+ s := b.NewValue0(v.Pos, OpRISCV64SUB, typ.UInt64)
+ s.AddArg2(x, y)
+ v0.AddArg2(x, s)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTU, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.UInt64)
+ v3.AddArg2(s, c)
+ v2.AddArg2(s, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 m:(LoweredMuluhilo x y))
+ // cond: m.Uses == 1
+ // result: (MUL x y)
+ for {
+ m := v_0
+ if m.Op != OpRISCV64LoweredMuluhilo {
+ break
+ }
+ y := m.Args[1]
+ x := m.Args[0]
+ if !(m.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64MUL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAI [63] (NEG <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpRISCV64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (FMOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVDconst [0]) (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [32] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [24] ptr (MOVDconst [0]) (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpRISCV64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg2(ptr, v1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
+func rewriteBlockRISCV64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockRISCV64BEQ:
+ // match: (BEQ (MOVDconst [0]) cond yes no)
+ // result: (BEQZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BEQZ, cond)
+ return true
+ }
+ // match: (BEQ cond (MOVDconst [0]) yes no)
+ // result: (BEQZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BEQZ, cond)
+ return true
+ }
+ case BlockRISCV64BEQZ:
+ // match: (BEQZ (SEQZ x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SEQZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BEQZ (SNEZ x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SNEZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BEQZ (NEG x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64NEG {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BEQZ (FNES <t> x y) yes no)
+ // result: (BNEZ (FEQS <t> x y) yes no)
+ for b.Controls[0].Op == OpRISCV64FNES {
+ v_0 := b.Controls[0]
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQS, t)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockRISCV64BNEZ, v0)
+ return true
+ }
+ }
+ // match: (BEQZ (FNED <t> x y) yes no)
+ // result: (BNEZ (FEQD <t> x y) yes no)
+ for b.Controls[0].Op == OpRISCV64FNED {
+ v_0 := b.Controls[0]
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQD, t)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockRISCV64BNEZ, v0)
+ return true
+ }
+ }
+ // match: (BEQZ (SUB x y) yes no)
+ // result: (BEQ x y yes no)
+ for b.Controls[0].Op == OpRISCV64SUB {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BEQ, x, y)
+ return true
+ }
+ // match: (BEQZ (SLT x y) yes no)
+ // result: (BGE x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLT {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BGE, x, y)
+ return true
+ }
+ // match: (BEQZ (SLTU x y) yes no)
+ // result: (BGEU x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLTU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BGEU, x, y)
+ return true
+ }
+ // match: (BEQZ (SLTI [x] y) yes no)
+ // result: (BGE y (MOVDconst [x]) yes no)
+ for b.Controls[0].Op == OpRISCV64SLTI {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(x)
+ b.resetWithControl2(BlockRISCV64BGE, y, v0)
+ return true
+ }
+ // match: (BEQZ (SLTIU [x] y) yes no)
+ // result: (BGEU y (MOVDconst [x]) yes no)
+ for b.Controls[0].Op == OpRISCV64SLTIU {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(x)
+ b.resetWithControl2(BlockRISCV64BGEU, y, v0)
+ return true
+ }
+ case BlockRISCV64BGE:
+ // match: (BGE (MOVDconst [0]) cond yes no)
+ // result: (BLEZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BLEZ, cond)
+ return true
+ }
+ // match: (BGE cond (MOVDconst [0]) yes no)
+ // result: (BGEZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BGEZ, cond)
+ return true
+ }
+ case BlockRISCV64BLT:
+ // match: (BLT (MOVDconst [0]) cond yes no)
+ // result: (BGTZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BGTZ, cond)
+ return true
+ }
+ // match: (BLT cond (MOVDconst [0]) yes no)
+ // result: (BLTZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BLTZ, cond)
+ return true
+ }
+ case BlockRISCV64BNE:
+ // match: (BNE (MOVDconst [0]) cond yes no)
+ // result: (BNEZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ // match: (BNE cond (MOVDconst [0]) yes no)
+ // result: (BNEZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ case BlockRISCV64BNEZ:
+ // match: (BNEZ (SEQZ x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SEQZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BNEZ (SNEZ x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SNEZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BNEZ (NEG x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64NEG {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BNEZ (FNES <t> x y) yes no)
+ // result: (BEQZ (FEQS <t> x y) yes no)
+ for b.Controls[0].Op == OpRISCV64FNES {
+ v_0 := b.Controls[0]
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQS, t)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockRISCV64BEQZ, v0)
+ return true
+ }
+ }
+ // match: (BNEZ (FNED <t> x y) yes no)
+ // result: (BEQZ (FEQD <t> x y) yes no)
+ for b.Controls[0].Op == OpRISCV64FNED {
+ v_0 := b.Controls[0]
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpRISCV64FEQD, t)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockRISCV64BEQZ, v0)
+ return true
+ }
+ }
+ // match: (BNEZ (SUB x y) yes no)
+ // result: (BNE x y yes no)
+ for b.Controls[0].Op == OpRISCV64SUB {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BNE, x, y)
+ return true
+ }
+ // match: (BNEZ (SLT x y) yes no)
+ // result: (BLT x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLT {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BLT, x, y)
+ return true
+ }
+ // match: (BNEZ (SLTU x y) yes no)
+ // result: (BLTU x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLTU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BLTU, x, y)
+ return true
+ }
+ // match: (BNEZ (SLTI [x] y) yes no)
+ // result: (BLT y (MOVDconst [x]) yes no)
+ for b.Controls[0].Op == OpRISCV64SLTI {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(x)
+ b.resetWithControl2(BlockRISCV64BLT, y, v0)
+ return true
+ }
+ // match: (BNEZ (SLTIU [x] y) yes no)
+ // result: (BLTU y (MOVDconst [x]) yes no)
+ for b.Controls[0].Op == OpRISCV64SLTIU {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ v0 := b.NewValue0(b.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(x)
+ b.resetWithControl2(BlockRISCV64BLTU, y, v0)
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpRISCV64MOVBUreg, typ.UInt64)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockRISCV64BNEZ, v0)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go b/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go
new file mode 100644
index 0000000..6dd97d6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go
@@ -0,0 +1,246 @@
+// Code generated from _gen/RISCV64latelower.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+func rewriteValueRISCV64latelower(v *Value) bool {
+ switch v.Op {
+ case OpRISCV64SLLI:
+ return rewriteValueRISCV64latelower_OpRISCV64SLLI(v)
+ case OpRISCV64SRAI:
+ return rewriteValueRISCV64latelower_OpRISCV64SRAI(v)
+ case OpRISCV64SRLI:
+ return rewriteValueRISCV64latelower_OpRISCV64SRLI(v)
+ }
+ return false
+}
+func rewriteValueRISCV64latelower_OpRISCV64SLLI(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLLI [c] (MOVBUreg x))
+ // cond: c <= 56
+ // result: (SRLI [56-c] (SLLI <typ.UInt64> [56] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c <= 56) {
+ break
+ }
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(56 - c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(56)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SLLI [c] (MOVHUreg x))
+ // cond: c <= 48
+ // result: (SRLI [48-c] (SLLI <typ.UInt64> [48] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c <= 48) {
+ break
+ }
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(48 - c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SLLI [c] (MOVWUreg x))
+ // cond: c <= 32
+ // result: (SRLI [32-c] (SLLI <typ.UInt64> [32] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c <= 32) {
+ break
+ }
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SLLI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64latelower_OpRISCV64SRAI(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAI [c] (MOVBreg x))
+ // cond: c < 8
+ // result: (SRAI [56+c] (SLLI <typ.Int64> [56] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c < 8) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(56 + c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(56)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI [c] (MOVHreg x))
+ // cond: c < 16
+ // result: (SRAI [48+c] (SLLI <typ.Int64> [48] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c < 16) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(48 + c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI [c] (MOVWreg x))
+ // cond: c < 32
+ // result: (SRAI [32+c] (SLLI <typ.Int64> [32] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c < 32) {
+ break
+ }
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(32 + c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRAI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64latelower_OpRISCV64SRLI(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRLI [c] (MOVBUreg x))
+ // cond: c < 8
+ // result: (SRLI [56+c] (SLLI <typ.UInt64> [56] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c < 8) {
+ break
+ }
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(56 + c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(56)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRLI [c] (MOVHUreg x))
+ // cond: c < 16
+ // result: (SRLI [48+c] (SLLI <typ.UInt64> [48] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c < 16) {
+ break
+ }
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(48 + c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(48)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRLI [c] (MOVWUreg x))
+ // cond: c < 32
+ // result: (SRLI [32+c] (SLLI <typ.UInt64> [32] x))
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c < 32) {
+ break
+ }
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(32 + c)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SRLI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteBlockRISCV64latelower(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
new file mode 100644
index 0000000..c2342c9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -0,0 +1,16638 @@
+// Code generated from _gen/S390X.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+import "cmd/internal/obj/s390x"
+
+func rewriteValueS390X(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpS390XADDW
+ return true
+ case OpAdd32:
+ v.Op = OpS390XADDW
+ return true
+ case OpAdd32F:
+ return rewriteValueS390X_OpAdd32F(v)
+ case OpAdd64:
+ v.Op = OpS390XADD
+ return true
+ case OpAdd64F:
+ return rewriteValueS390X_OpAdd64F(v)
+ case OpAdd8:
+ v.Op = OpS390XADDW
+ return true
+ case OpAddPtr:
+ v.Op = OpS390XADD
+ return true
+ case OpAddr:
+ return rewriteValueS390X_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpS390XANDW
+ return true
+ case OpAnd32:
+ v.Op = OpS390XANDW
+ return true
+ case OpAnd64:
+ v.Op = OpS390XAND
+ return true
+ case OpAnd8:
+ v.Op = OpS390XANDW
+ return true
+ case OpAndB:
+ v.Op = OpS390XANDW
+ return true
+ case OpAtomicAdd32:
+ return rewriteValueS390X_OpAtomicAdd32(v)
+ case OpAtomicAdd64:
+ return rewriteValueS390X_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ v.Op = OpS390XLAN
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueS390X_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueS390X_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValueS390X_OpAtomicCompareAndSwap64(v)
+ case OpAtomicExchange32:
+ return rewriteValueS390X_OpAtomicExchange32(v)
+ case OpAtomicExchange64:
+ return rewriteValueS390X_OpAtomicExchange64(v)
+ case OpAtomicLoad32:
+ return rewriteValueS390X_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValueS390X_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValueS390X_OpAtomicLoad8(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValueS390X_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadPtr:
+ return rewriteValueS390X_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpS390XLAO
+ return true
+ case OpAtomicOr8:
+ return rewriteValueS390X_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ return rewriteValueS390X_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValueS390X_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValueS390X_OpAtomicStore8(v)
+ case OpAtomicStorePtrNoWB:
+ return rewriteValueS390X_OpAtomicStorePtrNoWB(v)
+ case OpAtomicStoreRel32:
+ return rewriteValueS390X_OpAtomicStoreRel32(v)
+ case OpAvg64u:
+ return rewriteValueS390X_OpAvg64u(v)
+ case OpBitLen64:
+ return rewriteValueS390X_OpBitLen64(v)
+ case OpBswap16:
+ return rewriteValueS390X_OpBswap16(v)
+ case OpBswap32:
+ v.Op = OpS390XMOVWBR
+ return true
+ case OpBswap64:
+ v.Op = OpS390XMOVDBR
+ return true
+ case OpCeil:
+ return rewriteValueS390X_OpCeil(v)
+ case OpClosureCall:
+ v.Op = OpS390XCALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpS390XNOTW
+ return true
+ case OpCom32:
+ v.Op = OpS390XNOTW
+ return true
+ case OpCom64:
+ v.Op = OpS390XNOT
+ return true
+ case OpCom8:
+ v.Op = OpS390XNOTW
+ return true
+ case OpConst16:
+ return rewriteValueS390X_OpConst16(v)
+ case OpConst32:
+ return rewriteValueS390X_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpS390XFMOVSconst
+ return true
+ case OpConst64:
+ return rewriteValueS390X_OpConst64(v)
+ case OpConst64F:
+ v.Op = OpS390XFMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValueS390X_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueS390X_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueS390X_OpConstNil(v)
+ case OpCtz32:
+ return rewriteValueS390X_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValueS390X_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpS390XCFEBRA
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpS390XCLFEBR
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpS390XCGEBRA
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpS390XLDEBR
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpS390XCLGEBR
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpS390XCELFBR
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpS390XCDLFBR
+ return true
+ case OpCvt32to32F:
+ v.Op = OpS390XCEFBRA
+ return true
+ case OpCvt32to64F:
+ v.Op = OpS390XCDFBRA
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpS390XCFDBRA
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpS390XLEDBR
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpS390XCLFDBR
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpS390XCGDBRA
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpS390XCLGDBR
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpS390XCELGBR
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpS390XCDLGBR
+ return true
+ case OpCvt64to32F:
+ v.Op = OpS390XCEGBRA
+ return true
+ case OpCvt64to64F:
+ v.Op = OpS390XCDGBRA
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueS390X_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueS390X_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueS390X_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpS390XFDIVS
+ return true
+ case OpDiv32u:
+ return rewriteValueS390X_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueS390X_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpS390XFDIV
+ return true
+ case OpDiv64u:
+ v.Op = OpS390XDIVDU
+ return true
+ case OpDiv8:
+ return rewriteValueS390X_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueS390X_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueS390X_OpEq16(v)
+ case OpEq32:
+ return rewriteValueS390X_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueS390X_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueS390X_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueS390X_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueS390X_OpEq8(v)
+ case OpEqB:
+ return rewriteValueS390X_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueS390X_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueS390X_OpFMA(v)
+ case OpFloor:
+ return rewriteValueS390X_OpFloor(v)
+ case OpGetCallerPC:
+ v.Op = OpS390XLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpS390XLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpS390XLoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = OpS390XLoweredGetG
+ return true
+ case OpHmul32:
+ return rewriteValueS390X_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueS390X_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpS390XMULHD
+ return true
+ case OpHmul64u:
+ v.Op = OpS390XMULHDU
+ return true
+ case OpITab:
+ return rewriteValueS390X_OpITab(v)
+ case OpInterCall:
+ v.Op = OpS390XCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueS390X_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueS390X_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueS390X_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueS390X_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueS390X_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueS390X_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueS390X_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueS390X_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueS390X_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueS390X_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueS390X_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueS390X_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueS390X_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueS390X_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueS390X_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueS390X_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueS390X_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueS390X_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueS390X_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueS390X_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueS390X_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueS390X_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueS390X_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueS390X_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueS390X_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueS390X_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueS390X_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueS390X_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueS390X_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueS390X_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueS390X_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueS390X_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueS390X_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueS390X_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueS390X_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueS390X_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueS390X_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueS390X_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueS390X_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueS390X_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueS390X_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueS390X_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueS390X_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueS390X_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueS390X_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueS390X_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpS390XMODDU
+ return true
+ case OpMod8:
+ return rewriteValueS390X_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueS390X_OpMod8u(v)
+ case OpMove:
+ return rewriteValueS390X_OpMove(v)
+ case OpMul16:
+ v.Op = OpS390XMULLW
+ return true
+ case OpMul32:
+ v.Op = OpS390XMULLW
+ return true
+ case OpMul32F:
+ v.Op = OpS390XFMULS
+ return true
+ case OpMul64:
+ v.Op = OpS390XMULLD
+ return true
+ case OpMul64F:
+ v.Op = OpS390XFMUL
+ return true
+ case OpMul64uhilo:
+ v.Op = OpS390XMLGR
+ return true
+ case OpMul8:
+ v.Op = OpS390XMULLW
+ return true
+ case OpNeg16:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeg32:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeg32F:
+ v.Op = OpS390XFNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpS390XNEG
+ return true
+ case OpNeg64F:
+ v.Op = OpS390XFNEG
+ return true
+ case OpNeg8:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeq16:
+ return rewriteValueS390X_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueS390X_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueS390X_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueS390X_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueS390X_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueS390X_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueS390X_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueS390X_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpS390XLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueS390X_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueS390X_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpS390XORW
+ return true
+ case OpOr32:
+ v.Op = OpS390XORW
+ return true
+ case OpOr64:
+ v.Op = OpS390XOR
+ return true
+ case OpOr8:
+ v.Op = OpS390XORW
+ return true
+ case OpOrB:
+ v.Op = OpS390XORW
+ return true
+ case OpPanicBounds:
+ return rewriteValueS390X_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueS390X_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueS390X_OpPopCount32(v)
+ case OpPopCount64:
+ return rewriteValueS390X_OpPopCount64(v)
+ case OpPopCount8:
+ return rewriteValueS390X_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValueS390X_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpS390XRLL
+ return true
+ case OpRotateLeft64:
+ v.Op = OpS390XRLLG
+ return true
+ case OpRotateLeft8:
+ return rewriteValueS390X_OpRotateLeft8(v)
+ case OpRound:
+ return rewriteValueS390X_OpRound(v)
+ case OpRound32F:
+ v.Op = OpS390XLoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpS390XLoweredRound64F
+ return true
+ case OpRoundToEven:
+ return rewriteValueS390X_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValueS390X_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueS390X_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueS390X_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueS390X_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueS390X_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueS390X_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueS390X_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueS390X_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueS390X_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueS390X_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueS390X_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueS390X_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueS390X_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueS390X_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueS390X_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueS390X_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueS390X_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueS390X_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueS390X_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueS390X_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueS390X_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueS390X_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueS390X_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueS390X_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueS390X_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueS390X_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueS390X_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueS390X_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueS390X_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueS390X_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueS390X_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueS390X_OpRsh8x8(v)
+ case OpS390XADD:
+ return rewriteValueS390X_OpS390XADD(v)
+ case OpS390XADDC:
+ return rewriteValueS390X_OpS390XADDC(v)
+ case OpS390XADDE:
+ return rewriteValueS390X_OpS390XADDE(v)
+ case OpS390XADDW:
+ return rewriteValueS390X_OpS390XADDW(v)
+ case OpS390XADDWconst:
+ return rewriteValueS390X_OpS390XADDWconst(v)
+ case OpS390XADDWload:
+ return rewriteValueS390X_OpS390XADDWload(v)
+ case OpS390XADDconst:
+ return rewriteValueS390X_OpS390XADDconst(v)
+ case OpS390XADDload:
+ return rewriteValueS390X_OpS390XADDload(v)
+ case OpS390XAND:
+ return rewriteValueS390X_OpS390XAND(v)
+ case OpS390XANDW:
+ return rewriteValueS390X_OpS390XANDW(v)
+ case OpS390XANDWconst:
+ return rewriteValueS390X_OpS390XANDWconst(v)
+ case OpS390XANDWload:
+ return rewriteValueS390X_OpS390XANDWload(v)
+ case OpS390XANDconst:
+ return rewriteValueS390X_OpS390XANDconst(v)
+ case OpS390XANDload:
+ return rewriteValueS390X_OpS390XANDload(v)
+ case OpS390XCMP:
+ return rewriteValueS390X_OpS390XCMP(v)
+ case OpS390XCMPU:
+ return rewriteValueS390X_OpS390XCMPU(v)
+ case OpS390XCMPUconst:
+ return rewriteValueS390X_OpS390XCMPUconst(v)
+ case OpS390XCMPW:
+ return rewriteValueS390X_OpS390XCMPW(v)
+ case OpS390XCMPWU:
+ return rewriteValueS390X_OpS390XCMPWU(v)
+ case OpS390XCMPWUconst:
+ return rewriteValueS390X_OpS390XCMPWUconst(v)
+ case OpS390XCMPWconst:
+ return rewriteValueS390X_OpS390XCMPWconst(v)
+ case OpS390XCMPconst:
+ return rewriteValueS390X_OpS390XCMPconst(v)
+ case OpS390XCPSDR:
+ return rewriteValueS390X_OpS390XCPSDR(v)
+ case OpS390XFCMP:
+ return rewriteValueS390X_OpS390XFCMP(v)
+ case OpS390XFCMPS:
+ return rewriteValueS390X_OpS390XFCMPS(v)
+ case OpS390XFMOVDload:
+ return rewriteValueS390X_OpS390XFMOVDload(v)
+ case OpS390XFMOVDstore:
+ return rewriteValueS390X_OpS390XFMOVDstore(v)
+ case OpS390XFMOVSload:
+ return rewriteValueS390X_OpS390XFMOVSload(v)
+ case OpS390XFMOVSstore:
+ return rewriteValueS390X_OpS390XFMOVSstore(v)
+ case OpS390XFNEG:
+ return rewriteValueS390X_OpS390XFNEG(v)
+ case OpS390XFNEGS:
+ return rewriteValueS390X_OpS390XFNEGS(v)
+ case OpS390XLDGR:
+ return rewriteValueS390X_OpS390XLDGR(v)
+ case OpS390XLEDBR:
+ return rewriteValueS390X_OpS390XLEDBR(v)
+ case OpS390XLGDR:
+ return rewriteValueS390X_OpS390XLGDR(v)
+ case OpS390XLOCGR:
+ return rewriteValueS390X_OpS390XLOCGR(v)
+ case OpS390XLTDBR:
+ return rewriteValueS390X_OpS390XLTDBR(v)
+ case OpS390XLTEBR:
+ return rewriteValueS390X_OpS390XLTEBR(v)
+ case OpS390XLoweredRound32F:
+ return rewriteValueS390X_OpS390XLoweredRound32F(v)
+ case OpS390XLoweredRound64F:
+ return rewriteValueS390X_OpS390XLoweredRound64F(v)
+ case OpS390XMOVBZload:
+ return rewriteValueS390X_OpS390XMOVBZload(v)
+ case OpS390XMOVBZreg:
+ return rewriteValueS390X_OpS390XMOVBZreg(v)
+ case OpS390XMOVBload:
+ return rewriteValueS390X_OpS390XMOVBload(v)
+ case OpS390XMOVBreg:
+ return rewriteValueS390X_OpS390XMOVBreg(v)
+ case OpS390XMOVBstore:
+ return rewriteValueS390X_OpS390XMOVBstore(v)
+ case OpS390XMOVBstoreconst:
+ return rewriteValueS390X_OpS390XMOVBstoreconst(v)
+ case OpS390XMOVDBR:
+ return rewriteValueS390X_OpS390XMOVDBR(v)
+ case OpS390XMOVDaddridx:
+ return rewriteValueS390X_OpS390XMOVDaddridx(v)
+ case OpS390XMOVDload:
+ return rewriteValueS390X_OpS390XMOVDload(v)
+ case OpS390XMOVDstore:
+ return rewriteValueS390X_OpS390XMOVDstore(v)
+ case OpS390XMOVDstoreconst:
+ return rewriteValueS390X_OpS390XMOVDstoreconst(v)
+ case OpS390XMOVDstoreidx:
+ return rewriteValueS390X_OpS390XMOVDstoreidx(v)
+ case OpS390XMOVHZload:
+ return rewriteValueS390X_OpS390XMOVHZload(v)
+ case OpS390XMOVHZreg:
+ return rewriteValueS390X_OpS390XMOVHZreg(v)
+ case OpS390XMOVHload:
+ return rewriteValueS390X_OpS390XMOVHload(v)
+ case OpS390XMOVHreg:
+ return rewriteValueS390X_OpS390XMOVHreg(v)
+ case OpS390XMOVHstore:
+ return rewriteValueS390X_OpS390XMOVHstore(v)
+ case OpS390XMOVHstoreconst:
+ return rewriteValueS390X_OpS390XMOVHstoreconst(v)
+ case OpS390XMOVHstoreidx:
+ return rewriteValueS390X_OpS390XMOVHstoreidx(v)
+ case OpS390XMOVWBR:
+ return rewriteValueS390X_OpS390XMOVWBR(v)
+ case OpS390XMOVWZload:
+ return rewriteValueS390X_OpS390XMOVWZload(v)
+ case OpS390XMOVWZreg:
+ return rewriteValueS390X_OpS390XMOVWZreg(v)
+ case OpS390XMOVWload:
+ return rewriteValueS390X_OpS390XMOVWload(v)
+ case OpS390XMOVWreg:
+ return rewriteValueS390X_OpS390XMOVWreg(v)
+ case OpS390XMOVWstore:
+ return rewriteValueS390X_OpS390XMOVWstore(v)
+ case OpS390XMOVWstoreconst:
+ return rewriteValueS390X_OpS390XMOVWstoreconst(v)
+ case OpS390XMOVWstoreidx:
+ return rewriteValueS390X_OpS390XMOVWstoreidx(v)
+ case OpS390XMULLD:
+ return rewriteValueS390X_OpS390XMULLD(v)
+ case OpS390XMULLDconst:
+ return rewriteValueS390X_OpS390XMULLDconst(v)
+ case OpS390XMULLDload:
+ return rewriteValueS390X_OpS390XMULLDload(v)
+ case OpS390XMULLW:
+ return rewriteValueS390X_OpS390XMULLW(v)
+ case OpS390XMULLWconst:
+ return rewriteValueS390X_OpS390XMULLWconst(v)
+ case OpS390XMULLWload:
+ return rewriteValueS390X_OpS390XMULLWload(v)
+ case OpS390XNEG:
+ return rewriteValueS390X_OpS390XNEG(v)
+ case OpS390XNEGW:
+ return rewriteValueS390X_OpS390XNEGW(v)
+ case OpS390XNOT:
+ return rewriteValueS390X_OpS390XNOT(v)
+ case OpS390XNOTW:
+ return rewriteValueS390X_OpS390XNOTW(v)
+ case OpS390XOR:
+ return rewriteValueS390X_OpS390XOR(v)
+ case OpS390XORW:
+ return rewriteValueS390X_OpS390XORW(v)
+ case OpS390XORWconst:
+ return rewriteValueS390X_OpS390XORWconst(v)
+ case OpS390XORWload:
+ return rewriteValueS390X_OpS390XORWload(v)
+ case OpS390XORconst:
+ return rewriteValueS390X_OpS390XORconst(v)
+ case OpS390XORload:
+ return rewriteValueS390X_OpS390XORload(v)
+ case OpS390XRISBGZ:
+ return rewriteValueS390X_OpS390XRISBGZ(v)
+ case OpS390XRLL:
+ return rewriteValueS390X_OpS390XRLL(v)
+ case OpS390XRLLG:
+ return rewriteValueS390X_OpS390XRLLG(v)
+ case OpS390XSLD:
+ return rewriteValueS390X_OpS390XSLD(v)
+ case OpS390XSLDconst:
+ return rewriteValueS390X_OpS390XSLDconst(v)
+ case OpS390XSLW:
+ return rewriteValueS390X_OpS390XSLW(v)
+ case OpS390XSLWconst:
+ return rewriteValueS390X_OpS390XSLWconst(v)
+ case OpS390XSRAD:
+ return rewriteValueS390X_OpS390XSRAD(v)
+ case OpS390XSRADconst:
+ return rewriteValueS390X_OpS390XSRADconst(v)
+ case OpS390XSRAW:
+ return rewriteValueS390X_OpS390XSRAW(v)
+ case OpS390XSRAWconst:
+ return rewriteValueS390X_OpS390XSRAWconst(v)
+ case OpS390XSRD:
+ return rewriteValueS390X_OpS390XSRD(v)
+ case OpS390XSRDconst:
+ return rewriteValueS390X_OpS390XSRDconst(v)
+ case OpS390XSRW:
+ return rewriteValueS390X_OpS390XSRW(v)
+ case OpS390XSRWconst:
+ return rewriteValueS390X_OpS390XSRWconst(v)
+ case OpS390XSTM2:
+ return rewriteValueS390X_OpS390XSTM2(v)
+ case OpS390XSTMG2:
+ return rewriteValueS390X_OpS390XSTMG2(v)
+ case OpS390XSUB:
+ return rewriteValueS390X_OpS390XSUB(v)
+ case OpS390XSUBE:
+ return rewriteValueS390X_OpS390XSUBE(v)
+ case OpS390XSUBW:
+ return rewriteValueS390X_OpS390XSUBW(v)
+ case OpS390XSUBWconst:
+ return rewriteValueS390X_OpS390XSUBWconst(v)
+ case OpS390XSUBWload:
+ return rewriteValueS390X_OpS390XSUBWload(v)
+ case OpS390XSUBconst:
+ return rewriteValueS390X_OpS390XSUBconst(v)
+ case OpS390XSUBload:
+ return rewriteValueS390X_OpS390XSUBload(v)
+ case OpS390XSumBytes2:
+ return rewriteValueS390X_OpS390XSumBytes2(v)
+ case OpS390XSumBytes4:
+ return rewriteValueS390X_OpS390XSumBytes4(v)
+ case OpS390XSumBytes8:
+ return rewriteValueS390X_OpS390XSumBytes8(v)
+ case OpS390XXOR:
+ return rewriteValueS390X_OpS390XXOR(v)
+ case OpS390XXORW:
+ return rewriteValueS390X_OpS390XXORW(v)
+ case OpS390XXORWconst:
+ return rewriteValueS390X_OpS390XXORWconst(v)
+ case OpS390XXORWload:
+ return rewriteValueS390X_OpS390XXORWload(v)
+ case OpS390XXORconst:
+ return rewriteValueS390X_OpS390XXORconst(v)
+ case OpS390XXORload:
+ return rewriteValueS390X_OpS390XXORload(v)
+ case OpSelect0:
+ return rewriteValueS390X_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueS390X_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpS390XMOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpS390XMOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpS390XMOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueS390X_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpS390XFSQRT
+ return true
+ case OpSqrt32:
+ v.Op = OpS390XFSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpS390XCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueS390X_OpStore(v)
+ case OpSub16:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSub32:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSub32F:
+ return rewriteValueS390X_OpSub32F(v)
+ case OpSub64:
+ v.Op = OpS390XSUB
+ return true
+ case OpSub64F:
+ return rewriteValueS390X_OpSub64F(v)
+ case OpSub8:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSubPtr:
+ v.Op = OpS390XSUB
+ return true
+ case OpTailCall:
+ v.Op = OpS390XCALLtail
+ return true
+ case OpTrunc:
+ return rewriteValueS390X_OpTrunc(v)
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpS390XLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpS390XXORW
+ return true
+ case OpXor32:
+ v.Op = OpS390XXORW
+ return true
+ case OpXor64:
+ v.Op = OpS390XXOR
+ return true
+ case OpXor8:
+ v.Op = OpS390XXORW
+ return true
+ case OpZero:
+ return rewriteValueS390X_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpS390XMOVHZreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpS390XMOVHZreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpS390XMOVWZreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpS390XMOVBZreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpS390XMOVBZreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpS390XMOVBZreg
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpAdd32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add32F x y)
+ // result: (Select0 (FADDS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFADDS, types.NewTuple(typ.Float32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add64F x y)
+ // result: (Select0 (FADD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFADD, types.NewTuple(typ.Float64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd32 ptr val mem)
+ // result: (AddTupleFirst32 val (LAA ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XAddTupleFirst32)
+ v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd64 ptr val mem)
+ // result: (AddTupleFirst64 val (LAAG ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XAddTupleFirst64)
+ v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (LANfloor ptr (RLL <typ.UInt32> (ORWconst <typ.UInt32> val [-1<<8]) (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLANfloor)
+ v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XORWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(-1 << 8)
+ v1.AddArg(val)
+ v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32)
+ v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3 << 3)
+ v2.AddArg2(v3, ptr)
+ v0.AddArg2(v1, v2)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpS390XLoweredAtomicCas32)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpS390XLoweredAtomicCas64)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicExchange32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange32 ptr val mem)
+ // result: (LoweredAtomicExchange32 ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredAtomicExchange32)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicExchange64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange64 ptr val mem)
+ // result: (LoweredAtomicExchange64 ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredAtomicExchange64)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (MOVWZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (MOVDatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (MOVBZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoadAcq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq32 ptr mem)
+ // result: (MOVWZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (MOVDatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (LAOfloor ptr (SLW <typ.UInt32> (MOVBZreg <typ.UInt32> val) (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLAOfloor)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt32)
+ v1.AddArg(val)
+ v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32)
+ v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3 << 3)
+ v2.AddArg2(v3, ptr)
+ v0.AddArg2(v1, v2)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore32 ptr val mem)
+ // result: (SYNC (MOVWatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore64 ptr val mem)
+ // result: (SYNC (MOVDatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore8 ptr val mem)
+ // result: (SYNC (MOVBatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // result: (SYNC (MOVDatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStoreRel32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel32 ptr val mem)
+ // result: (MOVWatomicstore ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XMOVWatomicstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t)
+ v0.AuxInt = uint8ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpS390XSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUB (MOVDconst [64]) (FLOGR x))
+ for {
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpBswap16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap16 x:(MOVHZload [off] {sym} ptr mem))
+ // result: @x.Block (MOVHZreg (MOVHBRload [off] {sym} ptr mem))
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg2(ptr, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (Bswap16 x:(MOVHZloadidx [off] {sym} ptr idx mem))
+ // result: @x.Block (MOVHZreg (MOVHBRloadidx [off] {sym} ptr idx mem))
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZloadidx {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, typ.Int16)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil x)
+ // result: (FIDBR [6] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(6)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVDconst [b2i(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(t))
+ return true
+ }
+}
+func rewriteValueS390X_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueS390X_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 <t> x)
+ // result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XANDW, t)
+ v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpS390XNOTW, t)
+ v5.AddArg(x)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 <t> x)
+ // result: (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XAND, t)
+ v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t)
+ v3.AuxInt = int32ToAuxInt(1)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XNOT, t)
+ v4.AddArg(x)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (DIVW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (DIVW (MOVWreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (DIVWU (MOVWZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y)
+ // result: (DIVD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32F x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64F x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMADD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpS390XFMADD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor x)
+ // result: (FIDBR [7] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(7)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpITab(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ITab (Load ptr mem))
+ // result: (MOVDload ptr mem)
+ for {
+ if v_0.Op != OpLoad {
+ break
+ }
+ mem := v_0.Args[1]
+ ptr := v_0.Args[0]
+ v.reset(OpS390XMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsInBounds idx len)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(idx, len)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+ for {
+ p := v_0
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg(p)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(idx, len)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32F x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64F x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32F x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64F x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && t.IsSigned()
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !t.IsSigned()
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && t.IsSigned()
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !t.IsSigned()
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && t.IsSigned()
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && t.IsSigned()) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || (is8BitInt(t) && !t.IsSigned()))
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (MOVDaddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (MOVDaddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (MODW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (MODWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (MODW (MOVWreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (MODWU (MOVWZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (MODD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (MODWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] dst src mem)
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 0 && s <= 256 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s), 0)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 0 && s <= 256 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 256 && s <= 512 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 256 && s <= 512 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-256, 256))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0))
+ v0.AddArg3(dst, src, mem)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 512 && s <= 768 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 512 && s <= 768 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-512, 512))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256))
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0))
+ v1.AddArg3(dst, src, mem)
+ v0.AddArg3(dst, src, v1)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 768 && s <= 1024 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 768 && s <= 1024 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-768, 768))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 512))
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256))
+ v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0))
+ v2.AddArg3(dst, src, mem)
+ v1.AddArg3(dst, src, v2)
+ v0.AddArg3(dst, src, v1)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 1024 && logLargeCopy(v, s)
+ // result: (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 1024 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XLoweredMove)
+ v.AuxInt = int64ToAuxInt(s % 256)
+ v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt((s / 256) * 256)
+ v0.AddArg2(src, v1)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32F x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64F x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqB x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORWconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVDaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueS390X_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes2, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt16)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes4, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount64 x)
+ // result: (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNT (MOVBZreg x))
+ for {
+ x := v_0
+ v.reset(OpS390XPOPCNT)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpRound(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round x)
+ // result: (FIDBR [1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven x)
+ // result: (FIDBR [4] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(4)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVDconst <t> [c]))
+ // cond: is32Bit(c) && !t.IsPtr()
+ // result: (ADDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && !t.IsPtr()) {
+ continue
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD idx (MOVDaddr [c] {s} ptr))
+ // cond: ptr.Op != OpSB
+ // result: (MOVDaddridx [c] {s} ptr idx)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ idx := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ if !(ptr.Op != OpSB) {
+ continue
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, idx)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XNEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDC x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (ADDCconst x [int16(c)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XADDCconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDE x y (FlagEQ))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagEQ {
+ break
+ }
+ v.reset(OpS390XADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDE x y (FlagLT))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagLT {
+ break
+ }
+ v.reset(OpS390XADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
+ // result: (ADDE x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpS390XADDCconst || auxIntToInt16(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0_0 := v_2_0_0.Args[0]
+ if v_2_0_0_0.Op != OpS390XADDE {
+ break
+ }
+ c := v_2_0_0_0.Args[2]
+ v_2_0_0_0_0 := v_2_0_0_0.Args[0]
+ if v_2_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_0_0_1 := v_2_0_0_0.Args[1]
+ if v_2_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XADDE)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDW x (MOVDconst [c]))
+ // result: (ADDWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDW x (NEGW y))
+ // result: (SUBW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XNEGW {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUBW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ADDW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDWconst [c] (ADDWconst [d] x))
+ // result: (ADDWconst [int32(c+d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XADDWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ADDWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XADDWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XADDWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB)))
+ // cond: ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if x.Op != OpSB || !(((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x))
+ // cond: x.Op != OpSB && is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(x.Op != OpSB && is20Bit(int64(c)+int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddridx [d] {s} x y))
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (ADD x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ADDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XADDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XADDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AND x (MOVDconst [c]))
+ // cond: s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
+ // result: (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil) {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c) && c < 0
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && c < 0) {
+ continue
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c) && c >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && c >= 0) {
+ continue
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDW x (MOVDconst [c]))
+ // result: (ANDWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDW x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDWconst [c] (ANDWconst [d] x))
+ // result: (ANDWconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [0x00ff] x)
+ // result: (MOVBZreg x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0x00ff {
+ break
+ }
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [0xffff] x)
+ // result: (MOVHZreg x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xffff {
+ break
+ }
+ x := v_0
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [c] _)
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDWconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ANDWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XANDWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XANDWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (AND x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ANDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XANDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XANDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (CMPUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) x)
+ // cond: isU32Bit(c)
+ // result: (InvertFlags (CMPUconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isU32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)==uint64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) == uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPUconst (SRDconst _ [c]) [n])
+ // cond: c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (RISBGZ x {r}) [c])
+ // cond: r.OutMask() < uint64(uint32(c))
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ if !(r.OutMask() < uint64(uint32(c))) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVWZreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVHreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVHZreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHZreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVBreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVBZreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst (MOVWZreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst (MOVWreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVDconst [c]))
+ // result: (CMPWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x (MOVWreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW x (MOVWZreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWZreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWU x (MOVDconst [c]))
+ // result: (CMPWUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWUconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPWU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x (MOVWreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU x (MOVWZreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWZreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)==uint32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) == uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWUconst (MOVBZreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVBZreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVHZreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (SRWconst _ [c]) [n])
+ // cond: c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (ANDWconst _ [m]) [n])
+ // cond: uint32(m) < uint32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(m) < uint32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVWreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWUconst (MOVWZreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (MOVBZreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVBZreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVHZreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (SRWconst _ [c]) [n])
+ // cond: c > 0 && n < 0
+ // result: (FlagGT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && n < 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (ANDWconst _ [m]) [n])
+ // cond: int32(m) >= 0 && int32(m) < int32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(int32(m) >= 0 && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst x:(SRWconst _ [c]) [n])
+ // cond: c > 0 && n >= 0
+ // result: (CMPWUconst x [n])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(x.AuxInt)
+ if !(c > 0 && n >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst (MOVWreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst (MOVWZreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<int64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>int64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (SRDconst _ [c]) [n])
+ // cond: c > 0 && n < 0
+ // result: (FlagGT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && n < 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (RISBGZ x {r}) [c])
+ // cond: c > 0 && r.OutMask() < uint64(c)
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ if !(c > 0 && r.OutMask() < uint64(c)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVWreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVHreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVHZreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHZreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVBreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVBZreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst (MOVWZreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0 && c >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0 && c >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst (MOVWreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0 && c >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0 && c >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(SRDconst _ [c]) [n])
+ // cond: c > 0 && n >= 0
+ // result: (CMPUconst x [n])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(x.AuxInt)
+ if !(c > 0 && n >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCPSDR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CPSDR y (FMOVDconst [c]))
+ // cond: !math.Signbit(c)
+ // result: (LPDFR y)
+ for {
+ y := v_0
+ if v_1.Op != OpS390XFMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(!math.Signbit(c)) {
+ break
+ }
+ v.reset(OpS390XLPDFR)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CPSDR y (FMOVDconst [c]))
+ // cond: math.Signbit(c)
+ // result: (LNDFR y)
+ for {
+ y := v_0
+ if v_1.Op != OpS390XFMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(math.Signbit(c)) {
+ break
+ }
+ v.reset(OpS390XLNDFR)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMP x (FMOVDconst [0.0]))
+ // result: (LTDBR x)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XFMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0.0 {
+ break
+ }
+ v.reset(OpS390XLTDBR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMP (FMOVDconst [0.0]) x)
+ // result: (InvertFlags (LTDBR <v.Type> x))
+ for {
+ if v_0.Op != OpS390XFMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0.0 {
+ break
+ }
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XLTDBR, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFCMPS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPS x (FMOVSconst [0.0]))
+ // result: (LTEBR x)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XFMOVSconst || auxIntToFloat32(v_1.AuxInt) != 0.0 {
+ break
+ }
+ v.reset(OpS390XLTEBR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPS (FMOVSconst [0.0]) x)
+ // result: (InvertFlags (LTEBR <v.Type> x))
+ for {
+ if v_0.Op != OpS390XFMOVSconst || auxIntToFloat32(v_0.AuxInt) != 0.0 {
+ break
+ }
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XLTEBR, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (LDGR x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XLDGR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEG (LPDFR x))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEG (LNDFR x))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFNEGS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGS (LPDFR x))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEGS (LNDFR x))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLDGR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LDGR <t> (RISBGZ x {r}))
+ // cond: r == s390x.NewRotateParams(1, 63, 0)
+ // result: (LPDFR (LDGR <t> x))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r == s390x.NewRotateParams(1, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XLPDFR)
+ v0 := b.NewValue0(v.Pos, OpS390XLDGR, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LDGR <t> (OR (MOVDconst [-1<<63]) x))
+ // result: (LNDFR (LDGR <t> x))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XOR {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != -1<<63 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpS390XLNDFR)
+ v0 := b.NewValue0(v.Pos, OpS390XLDGR, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XORload {
+ break
+ }
+ t1 := x.Type
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[2]
+ x_0 := x.Args[0]
+ if x_0.Op != OpS390XMOVDconst || auxIntToInt64(x_0.AuxInt) != -1<<63 {
+ break
+ }
+ ptr := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpS390XLDGR, t)
+ v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1)
+ v2.AuxInt = int32ToAuxInt(off)
+ v2.Aux = symToAux(sym)
+ v2.AddArg2(ptr, mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (LDGR (LGDR x))
+ // result: x
+ for {
+ if v_0.Op != OpS390XLGDR {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLEDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEDBR (LPDFR (LDEBR x)))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLDEBR {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEDBR (LNDFR (LDEBR x)))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLDEBR {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLGDR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LGDR (LDGR x))
+ // result: x
+ for {
+ if v_0.Op != OpS390XLDGR {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLOCGR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LOCGR {c} x y (InvertFlags cmp))
+ // result: (LOCGR {c.ReverseComparison()} x y cmp)
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagEQ))
+ // cond: c&s390x.Equal != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagLT))
+ // cond: c&s390x.Less != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagLT || !(c&s390x.Less != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagGT))
+ // cond: c&s390x.Greater != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagOV))
+ // cond: c&s390x.Unordered != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagEQ))
+ // cond: c&s390x.Equal == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagLT))
+ // cond: c&s390x.Less == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagLT || !(c&s390x.Less == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagGT))
+ // cond: c&s390x.Greater == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagOV))
+ // cond: c&s390x.Unordered == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLTDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LTDBR (Select0 x:(FADD _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFADD || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LTDBR (Select0 x:(FSUB _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFSUB || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLTEBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LTEBR (Select0 x:(FADDS _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFADDS || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LTEBR (Select0 x:(FSUBS _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFSUBS || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LoweredRound32F x:(FMOVSconst))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XFMOVSconst {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LoweredRound64F x:(FMOVDconst))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XFMOVDconst {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVBZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBZreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg <t> x:(MOVBload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVBload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() == 1
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64( uint8(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
+ // cond: int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XLOCGR {
+ break
+ }
+ _ = x.Args[1]
+ x_0 := x.Args[0]
+ if x_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(x_0.AuxInt)
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(x_1.AuxInt)
+ if !(int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0x000000ff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0x000000ff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0x000000ff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x000000ff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (ANDWconst [m] x))
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint8(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVBZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() == 1
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64( int8(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg (ANDWconst [m] x))
+ // cond: int8(m) >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(int8(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint8(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is20Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is20Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: is20Bit(sc.Off64()+int64(off))
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVDBR x:(MOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVDBRload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVDBRload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDBR x:(MOVDloadidx [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVDBRloadidx [off] {sym} ptr idx mem)
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVDloadidx {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, typ.Int64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y)
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [c] {s} x (ADDconst [d] y))
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB
+ // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (LGDR x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XLGDR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
+ // result: (STMG2 [i-8] {s} p w0 w1 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w1 := v_1
+ x := v_2
+ if x.Op != OpS390XMOVDstore || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG2)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg4(p, w0, w1, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)
+ // result: (STMG3 [i-16] {s} p w0 w1 w2 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ x := v_2
+ if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG3)
+ v.AuxInt = int32ToAuxInt(i - 16)
+ v.Aux = symToAux(s)
+ v.AddArg5(p, w0, w1, w2, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)
+ // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w3 := v_1
+ x := v_2
+ if x.Op != OpS390XSTMG3 || auxIntToInt32(x.AuxInt) != i-24 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[4]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ w2 := x.Args[3]
+ if !(x.Uses == 1 && is20Bit(int64(i)-24) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG4)
+ v.AuxInt = int32ToAuxInt(i - 24)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr r:(MOVDBR x) mem)
+ // cond: r.Uses == 1
+ // result: (MOVDBRstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ r := v_1
+ if r.Op != OpS390XMOVDBR {
+ break
+ }
+ x := r.Args[0]
+ mem := v_2
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off64()+int64(off))
+ // result: (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx [off] {sym} ptr idx r:(MOVDBR x) mem)
+ // cond: r.Uses == 1
+ // result: (MOVDBRstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ r := v_2
+ if r.Op != OpS390XMOVDBR {
+ break
+ }
+ x := r.Args[0]
+ mem := v_3
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstoreidx)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVHZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg <t> x:(MOVHload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVHload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0x0000ffff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0x0000ffff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0x0000ffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x0000ffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (ANDWconst [m] x))
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint16(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVHZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg (ANDWconst [m] x))
+ // cond: int16(m) >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(int16(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint16(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (Bswap16 val) mem)
+ // result: (MOVHBRstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpBswap16 {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off64()+int64(off))
+ // result: (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx [off] {sym} ptr idx (Bswap16 val) mem)
+ // result: (MOVHBRstoreidx [off] {sym} ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpBswap16 {
+ break
+ }
+ val := v_2.Args[0]
+ mem := v_3
+ v.reset(OpS390XMOVHBRstoreidx)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWBR x:(MOVWZload [off] {sym} ptr mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVWZreg (MOVWBRload [off] {sym} ptr mem))
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWZload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg2(ptr, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (MOVWBR x:(MOVWZloadidx [off] {sym} ptr idx mem))
+ // cond: x.Uses == 1
+ // result: @x.Block (MOVWZreg (MOVWBRloadidx [off] {sym} ptr idx mem))
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWZloadidx {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[2]
+ ptr := x.Args[0]
+ idx := x.Args[1]
+ if !(x.Uses == 1) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(sym)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 4)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWZload || !(!x.Type.IsSigned() || x.Type.Size() > 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg <t> x:(MOVWload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVWload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0xffffffff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0xffffffff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0xffffffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0xffffffff))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVWZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)
+ // result: (STM2 [i-4] {s} p w0 w1 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w1 := v_1
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM2)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg4(p, w0, w1, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
+ // result: (STM3 [i-8] {s} p w0 w1 w2 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ x := v_2
+ if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM3)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg5(p, w0, w1, w2, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)
+ // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w3 := v_1
+ x := v_2
+ if x.Op != OpS390XSTM3 || auxIntToInt32(x.AuxInt) != i-12 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[4]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ w2 := x.Args[3]
+ if !(x.Uses == 1 && is20Bit(int64(i)-12) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM4)
+ v.AuxInt = int32ToAuxInt(i - 12)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr r:(MOVWBR x) mem)
+ // cond: r.Uses == 1
+ // result: (MOVWBRstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ r := v_1
+ if r.Op != OpS390XMOVWBR {
+ break
+ }
+ x := r.Args[0]
+ mem := v_2
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off64()+int64(off))
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx [off] {sym} ptr idx r:(MOVWBR x) mem)
+ // cond: r.Uses == 1
+ // result: (MOVWBRstoreidx [off] {sym} ptr idx x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ r := v_2
+ if r.Op != OpS390XMOVWBR {
+ break
+ }
+ x := r.Args[0]
+ mem := v_3
+ if !(r.Uses == 1) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstoreidx)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XMULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULLD <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(c&(c-1))
+ // result: (ADD (SLDconst <t> x [uint8(log32(c&(c-1)))]) (SLDconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c & (c - 1))) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(c+(c&^(c-1)))
+ // result: (SUB (SLDconst <t> x [uint8(log32(c+(c&^(c-1))))]) (SLDconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c + (c &^ (c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1)))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(-c+(-c&^(-c-1)))
+ // result: (SUB (SLDconst <t> x [uint8(log32(-c&^(-c-1)))]) (SLDconst <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1)))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MULLD x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMULLD)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (MULLDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMULLDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XMULLDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULLW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (MULLW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(c&(c-1))
+ // result: (ADDW (SLWconst <t> x [uint8(log32(c&(c-1)))]) (SLWconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c & (c - 1))) {
+ break
+ }
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(c+(c&^(c-1)))
+ // result: (SUBW (SLWconst <t> x [uint8(log32(c+(c&^(c-1))))]) (SLWconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c + (c &^ (c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1)))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(-c+(-c&^(-c-1)))
+ // result: (SUBW (SLWconst <t> x [uint8(log32(-c&^(-c-1)))]) (SLWconst <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1)))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c*int32(d))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c * int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (MULLWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMULLWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XMULLWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVDconst [c]))
+ // result: (MOVDconst [-c])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEG (ADDconst [c] (NEG x)))
+ // cond: c != -(1<<31)
+ // result: (ADDconst [-c] x)
+ for {
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XNEG {
+ break
+ }
+ x := v_0_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEGW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGW (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(-c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(-c)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNOT(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NOT x)
+ // result: (XOR (MOVDconst [-1]) x)
+ for {
+ x := v_0
+ v.reset(OpS390XXOR)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XNOTW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTW x)
+ // result: (XORWconst [-1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [-1<<63]) (LGDR <t> x))
+ // result: (LGDR <t> (LNDFR <x.Type> x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0.AuxInt) != -1<<63 || v_1.Op != OpS390XLGDR {
+ continue
+ }
+ t := v_1.Type
+ x := v_1.Args[0]
+ v.reset(OpS390XLGDR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
+ // cond: r == s390x.NewRotateParams(0, 0, 0)
+ // result: (LGDR (CPSDR <t> y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XRISBGZ {
+ continue
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLGDR {
+ continue
+ }
+ x := v_0_0.Args[0]
+ if v_1.Op != OpS390XLGDR {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpS390XLPDFR {
+ continue
+ }
+ t := v_1_0.Type
+ y := v_1_0.Args[0]
+ if !(r == s390x.NewRotateParams(0, 0, 0)) {
+ continue
+ }
+ v.reset(OpS390XLGDR)
+ v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
+ // cond: c >= 0 && r == s390x.NewRotateParams(0, 0, 0)
+ // result: (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XRISBGZ {
+ continue
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLGDR {
+ continue
+ }
+ x := v_0_0.Args[0]
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c >= 0 && r == s390x.NewRotateParams(0, 0, 0)) {
+ continue
+ }
+ v.reset(OpS390XLGDR)
+ v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type)
+ v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type)
+ v1.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORW x (MOVDconst [c]))
+ // result: (ORWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XORWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORW x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORWconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ORWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XORWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XORWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (OR x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XOR)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ORload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XORload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XORload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRISBGZ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RISBGZ (MOVWZreg x) {r})
+ // cond: r.InMerge(0xffffffff) != nil
+ // result: (RISBGZ x {*r.InMerge(0xffffffff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0xffffffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0xffffffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (MOVHZreg x) {r})
+ // cond: r.InMerge(0x0000ffff) != nil
+ // result: (RISBGZ x {*r.InMerge(0x0000ffff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0x0000ffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0x0000ffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (MOVBZreg x) {r})
+ // cond: r.InMerge(0x000000ff) != nil
+ // result: (RISBGZ x {*r.InMerge(0x000000ff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0x000000ff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0x000000ff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SLDconst x [c]) {r})
+ // cond: r.InMerge(^uint64(0)<<c) != nil
+ // result: (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSLDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.InMerge(^uint64(0)<<c) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) << c)).RotateLeft(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SRDconst x [c]) {r})
+ // cond: r.InMerge(^uint64(0)>>c) != nil
+ // result: (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.InMerge(^uint64(0)>>c) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) >> c)).RotateLeft(-c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (RISBGZ x {y}) {z})
+ // cond: z.InMerge(y.OutMask()) != nil
+ // result: (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
+ for {
+ z := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ y := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(z.InMerge(y.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*z.InMerge(y.OutMask())).RotateLeft(y.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r.End == 63 && r.Start == -r.Amount&63
+ // result: (SRDconst x [-r.Amount&63])
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r.End == 63 && r.Start == -r.Amount&63) {
+ break
+ }
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(-r.Amount & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r.Start == 0 && r.End == 63-r.Amount
+ // result: (SLDconst x [r.Amount])
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r.Start == 0 && r.End == 63-r.Amount) {
+ break
+ }
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = uint8ToAuxInt(r.Amount)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SRADconst x [c]) {r})
+ // cond: r.Start == r.End && (r.Start+r.Amount)&63 <= c
+ // result: (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSRADconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.Start == r.End && (r.Start+r.Amount)&63 <= c) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(r.Start, r.Start, -r.Start&63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(56, 63, 0)
+ // result: (MOVBZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(56, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(48, 63, 0)
+ // result: (MOVHZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(48, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(32, 63, 0)
+ // result: (MOVWZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(32, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (LGDR <t> x) {r})
+ // cond: r == s390x.NewRotateParams(1, 63, 0)
+ // result: (LGDR <t> (LPDFR <x.Type> x))
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XLGDR {
+ break
+ }
+ t := v_0.Type
+ x := v_0.Args[0]
+ if !(r == s390x.NewRotateParams(1, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XLGDR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RLL x (MOVDconst [c]))
+ // result: (RLLconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRLLG(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RLLG x (MOVDconst [c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, uint8(c&63)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLD x (MOVDconst [c]))
+ // result: (SLDconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (AND (MOVDconst [c]) y))
+ // result: (SLD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSLD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SLD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVWreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVHreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVBreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVWZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVHZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVBZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLDconst (SRDconst x [c]) [d])
+ // result: (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
+ for {
+ d := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst (RISBGZ x {r}) [c])
+ // cond: s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
+ // result: (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SLWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSLWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLW _ (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (AND (MOVDconst [c]) y))
+ // result: (SLW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSLW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SLW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVWreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVHreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVBreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVWZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVHZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVBZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAD x (MOVDconst [c]))
+ // result: (SRADconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (AND (MOVDconst [c]) y))
+ // result: (SRAD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRAD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVWreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVHreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVBreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVWZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVHZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVBZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRADconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRADconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SRADconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d>>uint64(c)])
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SRAWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSRAWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAW x (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (SRAWconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XSRAWconst)
+ v.AuxInt = uint8ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (AND (MOVDconst [c]) y))
+ // result: (SRAW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRAW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVWreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVHreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVBreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVWZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVHZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVBZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SRAWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(int32(d))>>uint64(c)])
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRD x (MOVDconst [c]))
+ // result: (SRDconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (AND (MOVDconst [c]) y))
+ // result: (SRD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVWreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVHreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVBreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVWZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVHZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVBZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRDconst (SLDconst x [c]) [d])
+ // result: (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
+ for {
+ d := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XSLDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRDconst (RISBGZ x {r}) [c])
+ // cond: s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
+ // result: (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRDconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SRWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSRWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRW _ (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (AND (MOVDconst [c]) y))
+ // result: (SRW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVWreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVHreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVBreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVWZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVHZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVBZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSTM2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)
+ // result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ w3 := v_2
+ x := v_3
+ if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM4)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ // match: (STM2 [i] {s} p (SRDconst [32] x) x mem)
+ // result: (MOVDstore [i] {s} p x mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ x := v_1.Args[0]
+ if x != v_2 {
+ break
+ }
+ mem := v_3
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSTMG2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)
+ // result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ w3 := v_2
+ x := v_3
+ if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && setPos(v, x.Pos) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG4)
+ v.AuxInt = int32ToAuxInt(i - 16)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XSUBconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEG (SUBconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XNEG)
+ v0 := b.NewValue0(v.Pos, OpS390XSUBconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUB <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBE x y (FlagGT))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagGT {
+ break
+ }
+ v.reset(OpS390XSUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBE x y (FlagOV))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagOV {
+ break
+ }
+ v.reset(OpS390XSUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
+ // result: (SUBE x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_2_0.Args[1]
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_1 := v_2_0.Args[1]
+ if v_2_0_1.Op != OpS390XNEG {
+ break
+ }
+ v_2_0_1_0 := v_2_0_1.Args[0]
+ if v_2_0_1_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_1_0_0 := v_2_0_1_0.Args[0]
+ if v_2_0_1_0_0.Op != OpS390XSUBE {
+ break
+ }
+ c := v_2_0_1_0_0.Args[2]
+ v_2_0_1_0_0_0 := v_2_0_1_0_0.Args[0]
+ if v_2_0_1_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_1_0_0_1 := v_2_0_1_0_0.Args[1]
+ if v_2_0_1_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XSUBE)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBW x (MOVDconst [c]))
+ // result: (SUBWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSUBWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBW (MOVDconst [c]) x)
+ // result: (NEGW (SUBWconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XNEGW)
+ v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBW x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBWconst [c] x)
+ // cond: int32(c) == 0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBWconst [c] x)
+ // result: (ADDWconst [-int32(c)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(-int32(c))
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSUBWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (SUBWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst (MOVDconst [d]) [c])
+ // result: (MOVDconst [d-int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(d - int64(c))
+ return true
+ }
+ // match: (SUBconst (SUBconst x [d]) [c])
+ // cond: is32Bit(-int64(c)-int64(d))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-int64(c) - int64(d))) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (SUB x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (SUBload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSumBytes2(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes2 x)
+ // result: (ADDW (SRWconst <typ.UInt8> x [8]) x)
+ for {
+ x := v_0
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8)
+ v0.AuxInt = uint8ToAuxInt(8)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSumBytes4(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes4 x)
+ // result: (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
+ for {
+ x := v_0
+ v.reset(OpS390XSumBytes2)
+ v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16)
+ v1.AuxInt = uint8ToAuxInt(16)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSumBytes8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes8 x)
+ // result: (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
+ for {
+ x := v_0
+ v.reset(OpS390XSumBytes4)
+ v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32)
+ v1.AuxInt = uint8ToAuxInt(32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XXORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XOR <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORW x (MOVDconst [c]))
+ // result: (XORWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORW x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (XORWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XXORWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XXORWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (XOR x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XXOR)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (XORload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XXORload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XXORload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AuxInt = int16ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v2.AddArg2(v3, c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst32 val tuple))
+ // result: (ADDW val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XAddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst64 val tuple))
+ // result: (ADD val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XAddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 (ADDCconst (MOVDconst [c]) [d]))
+ // result: (MOVDconst [c+int64(d)])
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c + int64(d))
+ return true
+ }
+ // match: (Select0 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // result: (MOVDconst [c-d])
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ return true
+ }
+ // match: (Select0 (FADD (FMUL y z) x))
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMADD x y z)
+ for {
+ if v_0.Op != OpS390XFADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XFMUL {
+ continue
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ x := v_0_1
+ if !(x.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpS390XFMADD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (FSUB (FMUL y z) x))
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMSUB x y z)
+ for {
+ if v_0.Op != OpS390XFSUB {
+ break
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XFMUL {
+ break
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ if !(x.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpS390XFMSUB)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (Select0 (FADDS (FMULS y z) x))
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMADDS x y z)
+ for {
+ if v_0.Op != OpS390XFADDS {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XFMULS {
+ continue
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ x := v_0_1
+ if !(x.Block.Func.useFMA(v)) {
+ continue
+ }
+ v.reset(OpS390XFMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (FSUBS (FMULS y z) x))
+ // cond: x.Block.Func.useFMA(v)
+ // result: (FMSUBS x y z)
+ for {
+ if v_0.Op != OpS390XFSUBS {
+ break
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XFMULS {
+ break
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ if !(x.Block.Func.useFMA(v)) {
+ break
+ }
+ v.reset(OpS390XFMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v5 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5.AuxInt = int16ToAuxInt(-1)
+ v5.AddArg(c)
+ v4.AddArg(v5)
+ v3.AddArg3(x, y, v4)
+ v2.AddArg(v3)
+ v0.AddArg3(v1, v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpS390XNEG)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v6 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v6.AddArg2(v2, c)
+ v5.AddArg(v6)
+ v4.AddArg3(x, y, v5)
+ v3.AddArg(v4)
+ v1.AddArg3(v2, v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst32 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpS390XAddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst64 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpS390XAddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (ADDCconst (MOVDconst [c]) [d]))
+ // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (Select1 (ADDCconst (MOVDconst [c]) [d]))
+ // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
+ // result: (FlagLT)
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // cond: uint64(d) <= uint64(c) && c-d == 0
+ // result: (FlagGT)
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(uint64(d) <= uint64(c) && c-d == 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // cond: uint64(d) <= uint64(c) && c-d != 0
+ // result: (FlagOV)
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(uint64(d) <= uint64(c) && c-d != 0) {
+ break
+ }
+ v.reset(OpS390XFlagOV)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRADconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = uint8ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpS390XNEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && t.IsFloat()
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && t.IsFloat()
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && t.IsFloat()) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !t.IsFloat()
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !t.IsFloat()
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !t.IsFloat()) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSub32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub32F x y)
+ // result: (Select0 (FSUBS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFSUBS, types.NewTuple(typ.Float32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpSub64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub64F x y)
+ // result: (Select0 (FSUB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFSUB, types.NewTuple(typ.Float64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc x)
+ // result: (FIDBR [5] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(5)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVHstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVDstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 0 && s <= 1024
+ // result: (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 0 && s <= 1024) {
+ break
+ }
+ v.reset(OpS390XCLEAR)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 1024
+ // result: (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 1024) {
+ break
+ }
+ v.reset(OpS390XLoweredZero)
+ v.AuxInt = int64ToAuxInt(s % 256)
+ v0 := b.NewValue0(v.Pos, OpS390XADDconst, destptr.Type)
+ v0.AuxInt = int32ToAuxInt((int32(s) / 256) * 256)
+ v0.AddArg(destptr)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockS390X(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockS390XBRC:
+ // match: (BRC {c} x:(CMP _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMP {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPW _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPW {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPU _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPU {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWU _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWU {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPUconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWUconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMP x y) yes no)
+ // result: (CGRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMP {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCGRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPW x y) yes no)
+ // result: (CRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPW {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPU x y) yes no)
+ // result: (CLGRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCLGRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWU x y) yes no)
+ // result: (CLRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPWU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCLRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPconst x [y]) yes no)
+ // cond: y == int32( int8(y))
+ // result: (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWconst x [y]) yes no)
+ // cond: y == int32( int8(y))
+ // result: (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPUconst x [y]) yes no)
+ // cond: y == int32(uint8(y))
+ // result: (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWUconst x [y]) yes no)
+ // cond: y == int32(uint8(y))
+ // result: (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPconst x [ 128]) yes no)
+ // result: (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPWconst x [ 128]) yes no)
+ // result: (CIJ {s390x.LessOrEqual} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no)
+ // result: (CGIJ {s390x.Less} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.Less)
+ return true
+ }
+ // match: (BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no)
+ // result: (CIJ {s390x.Less} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.Less)
+ return true
+ }
+ // match: (BRC {s390x.Greater} (CMPconst x [-129]) yes no)
+ // result: (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Greater} (CMPWconst x [-129]) yes no)
+ // result: (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no)
+ // result: (CGIJ {s390x.Greater} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no)
+ // result: (CIJ {s390x.Greater} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPWUconst x [256]) yes no)
+ // result: (CLIJ {s390x.LessOrEqual} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPUconst x [256]) yes no)
+ // result: (CLGIJ {s390x.LessOrEqual} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPWUconst x [256]) yes no)
+ // result: (CLIJ {s390x.Greater} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPUconst x [256]) yes no)
+ // result: (CLGIJ {s390x.Greater} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {c} (CMPconst x [y]) yes no)
+ // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CLGIJ {c} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPWconst x [y]) yes no)
+ // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CLIJ {c} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPUconst x [y]) yes no)
+ // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CGIJ {c} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPWUconst x [y]) yes no)
+ // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CIJ {c} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (InvertFlags cmp) yes no)
+ // result: (BRC {c.ReverseComparison()} cmp yes no)
+ for b.Controls[0].Op == OpS390XInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XBRC, cmp)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (BRC {c} (FlagEQ) yes no)
+ // cond: c&s390x.Equal != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagEQ {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagLT) yes no)
+ // cond: c&s390x.Less != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagLT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagGT) yes no)
+ // cond: c&s390x.Greater != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagGT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagOV) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagOV {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagEQ) yes no)
+ // cond: c&s390x.Equal == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagEQ {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagLT) yes no)
+ // cond: c&s390x.Less == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagLT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagGT) yes no)
+ // cond: c&s390x.Greater == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagGT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagOV) yes no)
+ // cond: c&s390x.Unordered == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagOV {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCGIJ:
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && int64(x) == int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && int64(x) == int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && int64(x) < int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && int64(x) < int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && int64(x) > int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && int64(x) > int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && int64(x) == int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && int64(x) == int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && int64(x) < int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && int64(x) < int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && int64(x) > int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && int64(x) > int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ case BlockS390XCGRJ:
+ // match: (CGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: is8Bit(y)
+ // result: (CGIJ {c} x [ int8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: is8Bit(x)
+ // result: (CGIJ {c.ReverseComparison()} y [ int8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, y)
+ b.AuxInt = int8ToAuxInt(int8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !is8Bit(y) && is32Bit(y)
+ // result: (BRC {c} (CMPconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(y) && is32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !is8Bit(x) && is32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(x) && is32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCIJ:
+ // match: (CIJ {c} (MOVWreg x) [y] yes no)
+ // result: (CIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CIJ {c} (MOVWZreg x) [y] yes no)
+ // result: (CIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWZreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && int32(x) == int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && int32(x) == int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && int32(x) < int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && int32(x) < int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && int32(x) > int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && int32(x) > int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && int32(x) == int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && int32(x) == int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && int32(x) < int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && int32(x) < int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && int32(x) > int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && int32(x) > int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLGIJ:
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && uint64(x) == uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && uint64(x) == uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && uint64(x) < uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && uint64(x) < uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && uint64(x) > uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && uint64(x) > uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && uint64(x) == uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && uint64(x) == uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && uint64(x) < uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && uint64(x) < uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && uint64(x) > uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && uint64(x) > uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {s390x.GreaterOrEqual} _ [0] yes no)
+ // result: (First yes no)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {s390x.Less} _ [0] yes no)
+ // result: (First no yes)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ case BlockS390XCLGRJ:
+ // match: (CLGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: isU8Bit(y)
+ // result: (CLGIJ {c} x [uint8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: isU8Bit(x)
+ // result: (CLGIJ {c.ReverseComparison()} y [uint8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, y)
+ b.AuxInt = uint8ToAuxInt(uint8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !isU8Bit(y) && isU32Bit(y)
+ // result: (BRC {c} (CMPUconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(y) && isU32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !isU8Bit(x) && isU32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(x) && isU32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLIJ:
+ // match: (CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
+ // cond: int32(x) != 0
+ // result: (BRC {d} cmp yes no)
+ for b.Controls[0].Op == OpS390XLOCGR {
+ v_0 := b.Controls[0]
+ d := auxToS390xCCMask(v_0.Aux)
+ cmp := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0_1.AuxInt)
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater || !(int32(x) != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, cmp)
+ b.Aux = s390xCCMaskToAux(d)
+ return true
+ }
+ // match: (CLIJ {c} (MOVWreg x) [y] yes no)
+ // result: (CLIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLIJ {c} (MOVWZreg x) [y] yes no)
+ // result: (CLIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWZreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && uint32(x) == uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && uint32(x) == uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && uint32(x) < uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && uint32(x) < uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && uint32(x) > uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && uint32(x) > uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && uint32(x) == uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && uint32(x) == uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && uint32(x) < uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && uint32(x) < uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && uint32(x) > uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && uint32(x) > uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {s390x.GreaterOrEqual} _ [0] yes no)
+ // result: (First yes no)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {s390x.Less} _ [0] yes no)
+ // result: (First no yes)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLRJ:
+ // match: (CLRJ {c} x (MOVDconst [y]) yes no)
+ // cond: isU8Bit(y)
+ // result: (CLIJ {c} x [uint8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLRJ {c} (MOVDconst [x]) y yes no)
+ // cond: isU8Bit(x)
+ // result: (CLIJ {c.ReverseComparison()} y [uint8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, y)
+ b.AuxInt = uint8ToAuxInt(uint8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !isU8Bit(y) && isU32Bit(y)
+ // result: (BRC {c} (CMPWUconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(y) && isU32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !isU8Bit(x) && isU32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(x) && isU32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCRJ:
+ // match: (CRJ {c} x (MOVDconst [y]) yes no)
+ // cond: is8Bit(y)
+ // result: (CIJ {c} x [ int8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CRJ {c} (MOVDconst [x]) y yes no)
+ // cond: is8Bit(x)
+ // result: (CIJ {c.ReverseComparison()} y [ int8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, y)
+ b.AuxInt = int8ToAuxInt(int8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !is8Bit(y) && is32Bit(y)
+ // result: (BRC {c} (CMPWconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(y) && is32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !is8Bit(x) && is32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(x) && is32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpS390XMOVBZreg, typ.Bool)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockS390XCLIJ, v0)
+ b.AuxInt = uint8ToAuxInt(0)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrGreater)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
new file mode 100644
index 0000000..6f83aea
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -0,0 +1,4877 @@
+// Code generated from _gen/Wasm.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "internal/buildcfg"
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValueWasm(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpWasmF64Abs
+ return true
+ case OpAdd16:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd32:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd32F:
+ v.Op = OpWasmF32Add
+ return true
+ case OpAdd64:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd64F:
+ v.Op = OpWasmF64Add
+ return true
+ case OpAdd8:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAddPtr:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAddr:
+ return rewriteValueWasm_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd32:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd64:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd8:
+ v.Op = OpWasmI64And
+ return true
+ case OpAndB:
+ v.Op = OpWasmI64And
+ return true
+ case OpBitLen64:
+ return rewriteValueWasm_OpBitLen64(v)
+ case OpCeil:
+ v.Op = OpWasmF64Ceil
+ return true
+ case OpClosureCall:
+ v.Op = OpWasmLoweredClosureCall
+ return true
+ case OpCom16:
+ return rewriteValueWasm_OpCom16(v)
+ case OpCom32:
+ return rewriteValueWasm_OpCom32(v)
+ case OpCom64:
+ return rewriteValueWasm_OpCom64(v)
+ case OpCom8:
+ return rewriteValueWasm_OpCom8(v)
+ case OpCondSelect:
+ v.Op = OpWasmSelect
+ return true
+ case OpConst16:
+ return rewriteValueWasm_OpConst16(v)
+ case OpConst32:
+ return rewriteValueWasm_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpWasmF32Const
+ return true
+ case OpConst64:
+ v.Op = OpWasmI64Const
+ return true
+ case OpConst64F:
+ v.Op = OpWasmF64Const
+ return true
+ case OpConst8:
+ return rewriteValueWasm_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueWasm_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueWasm_OpConstNil(v)
+ case OpConvert:
+ v.Op = OpWasmLoweredConvert
+ return true
+ case OpCopysign:
+ v.Op = OpWasmF64Copysign
+ return true
+ case OpCtz16:
+ return rewriteValueWasm_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz32:
+ return rewriteValueWasm_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz64:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz64NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz8:
+ return rewriteValueWasm_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpWasmI64TruncSatF32S
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpWasmI64TruncSatF32U
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpWasmI64TruncSatF32S
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpWasmF64PromoteF32
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpWasmI64TruncSatF32U
+ return true
+ case OpCvt32Uto32F:
+ return rewriteValueWasm_OpCvt32Uto32F(v)
+ case OpCvt32Uto64F:
+ return rewriteValueWasm_OpCvt32Uto64F(v)
+ case OpCvt32to32F:
+ return rewriteValueWasm_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValueWasm_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ v.Op = OpWasmI64TruncSatF64S
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpWasmF32DemoteF64
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpWasmI64TruncSatF64U
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpWasmI64TruncSatF64S
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpWasmI64TruncSatF64U
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpWasmF32ConvertI64U
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpWasmF64ConvertI64U
+ return true
+ case OpCvt64to32F:
+ v.Op = OpWasmF32ConvertI64S
+ return true
+ case OpCvt64to64F:
+ v.Op = OpWasmF64ConvertI64S
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueWasm_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueWasm_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueWasm_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpWasmF32Div
+ return true
+ case OpDiv32u:
+ return rewriteValueWasm_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueWasm_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpWasmF64Div
+ return true
+ case OpDiv64u:
+ v.Op = OpWasmI64DivU
+ return true
+ case OpDiv8:
+ return rewriteValueWasm_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueWasm_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueWasm_OpEq16(v)
+ case OpEq32:
+ return rewriteValueWasm_OpEq32(v)
+ case OpEq32F:
+ v.Op = OpWasmF32Eq
+ return true
+ case OpEq64:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpEq64F:
+ v.Op = OpWasmF64Eq
+ return true
+ case OpEq8:
+ return rewriteValueWasm_OpEq8(v)
+ case OpEqB:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpEqPtr:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpFloor:
+ v.Op = OpWasmF64Floor
+ return true
+ case OpGetCallerPC:
+ v.Op = OpWasmLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpWasmLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpWasmLoweredGetClosurePtr
+ return true
+ case OpInterCall:
+ v.Op = OpWasmLoweredInterCall
+ return true
+ case OpIsInBounds:
+ v.Op = OpWasmI64LtU
+ return true
+ case OpIsNonNil:
+ return rewriteValueWasm_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ v.Op = OpWasmI64LeU
+ return true
+ case OpLeq16:
+ return rewriteValueWasm_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueWasm_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueWasm_OpLeq32(v)
+ case OpLeq32F:
+ v.Op = OpWasmF32Le
+ return true
+ case OpLeq32U:
+ return rewriteValueWasm_OpLeq32U(v)
+ case OpLeq64:
+ v.Op = OpWasmI64LeS
+ return true
+ case OpLeq64F:
+ v.Op = OpWasmF64Le
+ return true
+ case OpLeq64U:
+ v.Op = OpWasmI64LeU
+ return true
+ case OpLeq8:
+ return rewriteValueWasm_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueWasm_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueWasm_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueWasm_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueWasm_OpLess32(v)
+ case OpLess32F:
+ v.Op = OpWasmF32Lt
+ return true
+ case OpLess32U:
+ return rewriteValueWasm_OpLess32U(v)
+ case OpLess64:
+ v.Op = OpWasmI64LtS
+ return true
+ case OpLess64F:
+ v.Op = OpWasmF64Lt
+ return true
+ case OpLess64U:
+ v.Op = OpWasmI64LtU
+ return true
+ case OpLess8:
+ return rewriteValueWasm_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueWasm_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueWasm_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueWasm_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueWasm_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueWasm_OpLsh16x32(v)
+ case OpLsh16x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh16x8:
+ return rewriteValueWasm_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueWasm_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueWasm_OpLsh32x32(v)
+ case OpLsh32x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh32x8:
+ return rewriteValueWasm_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueWasm_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueWasm_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueWasm_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueWasm_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueWasm_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueWasm_OpLsh8x32(v)
+ case OpLsh8x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh8x8:
+ return rewriteValueWasm_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueWasm_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueWasm_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueWasm_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueWasm_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueWasm_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpWasmI64RemU
+ return true
+ case OpMod8:
+ return rewriteValueWasm_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueWasm_OpMod8u(v)
+ case OpMove:
+ return rewriteValueWasm_OpMove(v)
+ case OpMul16:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul32:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul32F:
+ v.Op = OpWasmF32Mul
+ return true
+ case OpMul64:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul64F:
+ v.Op = OpWasmF64Mul
+ return true
+ case OpMul8:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpNeg16:
+ return rewriteValueWasm_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValueWasm_OpNeg32(v)
+ case OpNeg32F:
+ v.Op = OpWasmF32Neg
+ return true
+ case OpNeg64:
+ return rewriteValueWasm_OpNeg64(v)
+ case OpNeg64F:
+ v.Op = OpWasmF64Neg
+ return true
+ case OpNeg8:
+ return rewriteValueWasm_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValueWasm_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueWasm_OpNeq32(v)
+ case OpNeq32F:
+ v.Op = OpWasmF32Ne
+ return true
+ case OpNeq64:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNeq64F:
+ v.Op = OpWasmF64Ne
+ return true
+ case OpNeq8:
+ return rewriteValueWasm_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNeqPtr:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNilCheck:
+ v.Op = OpWasmLoweredNilCheck
+ return true
+ case OpNot:
+ v.Op = OpWasmI64Eqz
+ return true
+ case OpOffPtr:
+ v.Op = OpWasmI64AddConst
+ return true
+ case OpOr16:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr32:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr64:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr8:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOrB:
+ v.Op = OpWasmI64Or
+ return true
+ case OpPopCount16:
+ return rewriteValueWasm_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueWasm_OpPopCount32(v)
+ case OpPopCount64:
+ v.Op = OpWasmI64Popcnt
+ return true
+ case OpPopCount8:
+ return rewriteValueWasm_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValueWasm_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpWasmI32Rotl
+ return true
+ case OpRotateLeft64:
+ v.Op = OpWasmI64Rotl
+ return true
+ case OpRotateLeft8:
+ return rewriteValueWasm_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRoundToEven:
+ v.Op = OpWasmF64Nearest
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueWasm_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueWasm_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueWasm_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueWasm_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueWasm_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueWasm_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueWasm_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueWasm_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueWasm_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueWasm_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueWasm_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueWasm_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueWasm_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueWasm_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueWasm_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueWasm_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueWasm_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueWasm_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueWasm_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueWasm_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueWasm_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueWasm_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueWasm_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueWasm_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueWasm_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueWasm_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueWasm_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueWasm_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueWasm_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueWasm_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueWasm_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueWasm_OpRsh8x8(v)
+ case OpSignExt16to32:
+ return rewriteValueWasm_OpSignExt16to32(v)
+ case OpSignExt16to64:
+ return rewriteValueWasm_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValueWasm_OpSignExt32to64(v)
+ case OpSignExt8to16:
+ return rewriteValueWasm_OpSignExt8to16(v)
+ case OpSignExt8to32:
+ return rewriteValueWasm_OpSignExt8to32(v)
+ case OpSignExt8to64:
+ return rewriteValueWasm_OpSignExt8to64(v)
+ case OpSlicemask:
+ return rewriteValueWasm_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpWasmF64Sqrt
+ return true
+ case OpSqrt32:
+ v.Op = OpWasmF32Sqrt
+ return true
+ case OpStaticCall:
+ v.Op = OpWasmLoweredStaticCall
+ return true
+ case OpStore:
+ return rewriteValueWasm_OpStore(v)
+ case OpSub16:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub32:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub32F:
+ v.Op = OpWasmF32Sub
+ return true
+ case OpSub64:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub64F:
+ v.Op = OpWasmF64Sub
+ return true
+ case OpSub8:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSubPtr:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpTailCall:
+ v.Op = OpWasmLoweredTailCall
+ return true
+ case OpTrunc:
+ v.Op = OpWasmF64Trunc
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpWasmLoweredWB
+ return true
+ case OpWasmF64Add:
+ return rewriteValueWasm_OpWasmF64Add(v)
+ case OpWasmF64Mul:
+ return rewriteValueWasm_OpWasmF64Mul(v)
+ case OpWasmI64Add:
+ return rewriteValueWasm_OpWasmI64Add(v)
+ case OpWasmI64AddConst:
+ return rewriteValueWasm_OpWasmI64AddConst(v)
+ case OpWasmI64And:
+ return rewriteValueWasm_OpWasmI64And(v)
+ case OpWasmI64Eq:
+ return rewriteValueWasm_OpWasmI64Eq(v)
+ case OpWasmI64Eqz:
+ return rewriteValueWasm_OpWasmI64Eqz(v)
+ case OpWasmI64LeU:
+ return rewriteValueWasm_OpWasmI64LeU(v)
+ case OpWasmI64Load:
+ return rewriteValueWasm_OpWasmI64Load(v)
+ case OpWasmI64Load16S:
+ return rewriteValueWasm_OpWasmI64Load16S(v)
+ case OpWasmI64Load16U:
+ return rewriteValueWasm_OpWasmI64Load16U(v)
+ case OpWasmI64Load32S:
+ return rewriteValueWasm_OpWasmI64Load32S(v)
+ case OpWasmI64Load32U:
+ return rewriteValueWasm_OpWasmI64Load32U(v)
+ case OpWasmI64Load8S:
+ return rewriteValueWasm_OpWasmI64Load8S(v)
+ case OpWasmI64Load8U:
+ return rewriteValueWasm_OpWasmI64Load8U(v)
+ case OpWasmI64LtU:
+ return rewriteValueWasm_OpWasmI64LtU(v)
+ case OpWasmI64Mul:
+ return rewriteValueWasm_OpWasmI64Mul(v)
+ case OpWasmI64Ne:
+ return rewriteValueWasm_OpWasmI64Ne(v)
+ case OpWasmI64Or:
+ return rewriteValueWasm_OpWasmI64Or(v)
+ case OpWasmI64Shl:
+ return rewriteValueWasm_OpWasmI64Shl(v)
+ case OpWasmI64ShrS:
+ return rewriteValueWasm_OpWasmI64ShrS(v)
+ case OpWasmI64ShrU:
+ return rewriteValueWasm_OpWasmI64ShrU(v)
+ case OpWasmI64Store:
+ return rewriteValueWasm_OpWasmI64Store(v)
+ case OpWasmI64Store16:
+ return rewriteValueWasm_OpWasmI64Store16(v)
+ case OpWasmI64Store32:
+ return rewriteValueWasm_OpWasmI64Store32(v)
+ case OpWasmI64Store8:
+ return rewriteValueWasm_OpWasmI64Store8(v)
+ case OpWasmI64Xor:
+ return rewriteValueWasm_OpWasmI64Xor(v)
+ case OpXor16:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor32:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor64:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor8:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpZero:
+ return rewriteValueWasm_OpZero(v)
+ case OpZeroExt16to32:
+ return rewriteValueWasm_OpZeroExt16to32(v)
+ case OpZeroExt16to64:
+ return rewriteValueWasm_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValueWasm_OpZeroExt32to64(v)
+ case OpZeroExt8to16:
+ return rewriteValueWasm_OpZeroExt8to16(v)
+ case OpZeroExt8to32:
+ return rewriteValueWasm_OpZeroExt8to32(v)
+ case OpZeroExt8to64:
+ return rewriteValueWasm_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValueWasm_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LoweredAddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueWasm_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (I64Sub (I64Const [64]) (I64Clz x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Clz, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConst32(v *Value) bool {
+ // match: (Const32 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (I64Const [b2i(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(b2i(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (I64Const [0])
+ for {
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x10000])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x10000)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x100000000])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x100000000)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x100])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x100)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32Uto32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Uto32F x)
+ // result: (F32ConvertI64U (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF32ConvertI64U)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32Uto64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Uto64F x)
+ // result: (F64ConvertI64U (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF64ConvertI64U)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to32F x)
+ // result: (F32ConvertI64S (SignExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF32ConvertI64S)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to64F x)
+ // result: (F64ConvertI64S (SignExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF64ConvertI64S)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (I64DivS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 [false] x y)
+ // result: (I64DivS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (I64DivS x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (I64DivS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (I64Eqz (I64Eqz p))
+ for {
+ p := v_0
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (I64LeS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (I64LeS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (I64LeS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (I64LtS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (I64LtS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (I64LtS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (F32Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF32Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (F64Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF64Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 8
+ // result: (I64Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpWasmI64Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 4 && !t.IsSigned()
+ // result: (I64Load32U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 4 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load32U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 4 && t.IsSigned()
+ // result: (I64Load32S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 4 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load32S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 2 && !t.IsSigned()
+ // result: (I64Load16U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 2 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load16U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 2 && t.IsSigned()
+ // result: (I64Load16S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 2 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load16S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 1 && !t.IsSigned()
+ // result: (I64Load8U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 1 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load8U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 1 && t.IsSigned()
+ // result: (I64Load8S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 1 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load8S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpLocalAddr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LocalAddr <t> {sym} base mem)
+ // cond: t.Elem().HasPointers()
+ // result: (LoweredAddr {sym} (SPanchored base mem))
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ mem := v_1
+ if !(t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
+ v0.AddArg2(base, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LocalAddr <t> {sym} base _)
+ // cond: !t.Elem().HasPointers()
+ // result: (LoweredAddr {sym} base)
+ for {
+ t := v.Type
+ sym := auxToSym(v.Aux)
+ base := v_0
+ if !(!t.Elem().HasPointers()) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64Shl x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64Shl)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64Shl x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64Shl)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64Const [0])
+ for {
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmSelect)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 [false] x y)
+ // result: (I64RemS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 [false] x y)
+ // result: (I64RemS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 [false] x y)
+ // result: (I64RemS x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (I64RemS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (I64Store8 dst (I64Load8U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (I64Store16 dst (I64Load16U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (I64Store32 dst (I64Load32U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store32)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (I64Store dst (I64Load src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (I64Store8 [2] dst (I64Load8U [2] src mem) (I64Store16 dst (I64Load16U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (I64Store8 [4] dst (I64Load8U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (I64Store16 [4] dst (I64Load16U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (I64Store32 [3] dst (I64Load32U [3] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s < 16
+ // result: (I64Store [s-8] dst (I64Load [s-8] src mem) (I64Store dst (I64Load src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s < 16) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(s - 8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: logLargeCopy(v, s)
+ // result: (LoweredMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpWasmLoweredMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg16 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg8 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (I64Popcnt (ZeroExt16to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (I64Popcnt (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (I64Popcnt (ZeroExt8to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (I64Const [c]))
+ // result: (Or16 (Lsh16x64 <t> x (I64Const [c&15])) (Rsh16Ux64 <t> x (I64Const [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (I64Const [c]))
+ // result: (Or8 (Lsh8x64 <t> x (I64Const [c&7])) (Rsh8Ux64 <t> x (I64Const [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64ShrU x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64ShrU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64ShrU x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrU)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64Const [0])
+ for {
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmSelect)
+ v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64ShrS x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64ShrS x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64ShrS x (I64Const [63]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to32 x:(I64Load16S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt16to32 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend16S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend16S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt16to32 x)
+ // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(48)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to64 x:(I64Load16S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt16to64 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend16S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend16S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt16to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(48)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt32to64 x:(I64Load32S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load32S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt32to64 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend32S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend32S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt32to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to16 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to16 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to16 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to32 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to32 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to32 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to64 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to64 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Slicemask x)
+ // result: (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: is64BitFloat(t)
+ // result: (F64Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF64Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: is32BitFloat(t)
+ // result: (F32Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF32Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (I64Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (I64Store32 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpWasmI64Store32)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (I64Store16 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpWasmI64Store16)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (I64Store8 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpWasmI64Store8)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (F64Add (F64Const [x]) (F64Const [y]))
+ // result: (F64Const [x + y])
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpWasmF64Const {
+ break
+ }
+ y := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpWasmF64Const)
+ v.AuxInt = float64ToAuxInt(x + y)
+ return true
+ }
+ // match: (F64Add (F64Const [x]) y)
+ // cond: y.Op != OpWasmF64Const
+ // result: (F64Add y (F64Const [x]))
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmF64Const) {
+ break
+ }
+ v.reset(OpWasmF64Add)
+ v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (F64Mul (F64Const [x]) (F64Const [y]))
+ // cond: !math.IsNaN(x * y)
+ // result: (F64Const [x * y])
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpWasmF64Const {
+ break
+ }
+ y := auxIntToFloat64(v_1.AuxInt)
+ if !(!math.IsNaN(x * y)) {
+ break
+ }
+ v.reset(OpWasmF64Const)
+ v.AuxInt = float64ToAuxInt(x * y)
+ return true
+ }
+ // match: (F64Mul (F64Const [x]) y)
+ // cond: y.Op != OpWasmF64Const
+ // result: (F64Mul y (F64Const [x]))
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmF64Const) {
+ break
+ }
+ v.reset(OpWasmF64Mul)
+ v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Add (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x + y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x + y)
+ return true
+ }
+ // match: (I64Add (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Add y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Add)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Add x (I64Const <t> [y]))
+ // cond: !t.IsPtr()
+ // result: (I64AddConst [y] x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ t := v_1.Type
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(!t.IsPtr()) {
+ break
+ }
+ v.reset(OpWasmI64AddConst)
+ v.AuxInt = int64ToAuxInt(y)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64AddConst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (I64AddConst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (I64AddConst [off] (LoweredAddr {sym} [off2] base))
+ // cond: isU32Bit(off+int64(off2))
+ // result: (LoweredAddr {sym} [int32(off)+off2] base)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ if !(isU32Bit(off + int64(off2))) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(int32(off) + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ // match: (I64AddConst [off] x:(SP))
+ // cond: isU32Bit(off)
+ // result: (LoweredAddr [int32(off)] x)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP || !(isU32Bit(off)) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64And(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64And (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x & y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x & y)
+ return true
+ }
+ // match: (I64And (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64And y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Eq (I64Const [x]) (I64Const [y]))
+ // cond: x == y
+ // result: (I64Const [1])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (I64Eq (I64Const [x]) (I64Const [y]))
+ // cond: x != y
+ // result: (I64Const [0])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x != y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (I64Eq (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Eq y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Eq x (I64Const [0]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Eqz(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (I64Eqz (I64Eqz (I64Eqz x)))
+ // result: (I64Eqz x)
+ for {
+ if v_0.Op != OpWasmI64Eqz {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmI64Eqz {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64LeU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64LeU x (I64Const [0]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ // match: (I64LeU (I64Const [1]) x)
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load16S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load16S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load16S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load16U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load16U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load16U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load32S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load32S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load32S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load32S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load32U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load32U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load32U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load8S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load8S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load8S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load8S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load8U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load8U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load8U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read8(sym, off+int64(off2)))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, off+int64(off2))))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64LtU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64LtU (I64Const [0]) x)
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (I64LtU x (I64Const [1]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Mul (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x * y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x * y)
+ return true
+ }
+ // match: (I64Mul (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Mul y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Mul)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Ne (I64Const [x]) (I64Const [y]))
+ // cond: x == y
+ // result: (I64Const [0])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (I64Ne (I64Const [x]) (I64Const [y]))
+ // cond: x != y
+ // result: (I64Const [1])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x != y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (I64Ne (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Ne y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Ne x (I64Const [0]))
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Or (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x | y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x | y)
+ return true
+ }
+ // match: (I64Or (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Or y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Or)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Shl(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Shl (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x << uint64(y)])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x << uint64(y))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64ShrS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64ShrS (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x >> uint64(y)])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x >> uint64(y))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64ShrU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64ShrU (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [int64(uint64(x) >> uint64(y))])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(uint64(x) >> uint64(y)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store16(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store16 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store16 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store32 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store32 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store8 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store8 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Xor (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x ^ y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x ^ y)
+ return true
+ }
+ // match: (I64Xor (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Xor y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (I64Store8 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (I64Store16 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (I64Store32 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store32)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (I64Store destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (I64Store8 [2] destptr (I64Const [0]) (I64Store16 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (I64Store8 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (I64Store16 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (I64Store32 [3] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 != 0 && s > 8 && s < 32
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (I64Store destptr (I64Const [0]) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 != 0 && s > 8 && s < 32) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v1.AddArg3(destptr, v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // result: (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] destptr mem)
+ // result: (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AddArg3(destptr, v0, mem)
+ v1.AddArg3(destptr, v0, v2)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // result: (I64Store [24] destptr (I64Const [0]) (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v3.AddArg3(destptr, v0, mem)
+ v2.AddArg3(destptr, v0, v3)
+ v1.AddArg3(destptr, v0, v2)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // result: (LoweredZero [s] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmLoweredZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to32 x:(I64Load16U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt16to32 x)
+ // result: (I64And x (I64Const [0xffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to64 x:(I64Load16U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt16to64 x)
+ // result: (I64And x (I64Const [0xffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt32to64 x:(I64Load32U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load32U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt32to64 x)
+ // result: (I64And x (I64Const [0xffffffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to16 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to16 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to32 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to32 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to64 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to64 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteBlockWasm(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite_test.go b/src/cmd/compile/internal/ssa/rewrite_test.go
new file mode 100644
index 0000000..357fe11
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite_test.go
@@ -0,0 +1,220 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// We generate memmove for copy(x[1:], x[:]), however we may change it to OpMove,
+// because size is known. Check that OpMove is alias-safe, or we did call memmove.
+func TestMove(t *testing.T) {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}
+ copy(x[1:], x[:])
+ for i := 1; i < len(x); i++ {
+ if int(x[i]) != i {
+ t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1)
+ }
+ }
+}
+
+func TestMoveSmall(t *testing.T) {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7}
+ copy(x[1:], x[:])
+ for i := 1; i < len(x); i++ {
+ if int(x[i]) != i {
+ t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1)
+ }
+ }
+}
+
+func TestSubFlags(t *testing.T) {
+ if !subFlags32(0, 1).lt() {
+ t.Errorf("subFlags32(0,1).lt() returned false")
+ }
+ if !subFlags32(0, 1).ult() {
+ t.Errorf("subFlags32(0,1).ult() returned false")
+ }
+}
+
+func TestIsPPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ input int64
+ expected bool
+ }{
+ {0x00000001, true},
+ {0x80000001, true},
+ {0x80010001, false},
+ {0xFFFFFFFA, false},
+ {0xF0F0F0F0, false},
+ {0xFFFFFFFD, true},
+ {0x80000000, true},
+ {0x00000000, false},
+ {0xFFFFFFFF, true},
+ {0x0000FFFF, true},
+ {0xFF0000FF, true},
+ {0x00FFFF00, true},
+ }
+
+ for _, v := range tests {
+ if v.expected != isPPC64WordRotateMask(v.input) {
+ t.Errorf("isPPC64WordRotateMask(0x%x) failed", v.input)
+ }
+ }
+}
+
+func TestEncodeDecodePPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ rotate int64
+ mask uint64
+ nbits,
+ mb,
+ me,
+ encoded int64
+ }{
+ {1, 0x00000001, 32, 31, 31, 0x20011f20},
+ {2, 0x80000001, 32, 31, 0, 0x20021f01},
+ {3, 0xFFFFFFFD, 32, 31, 29, 0x20031f1e},
+ {4, 0x80000000, 32, 0, 0, 0x20040001},
+ {5, 0xFFFFFFFF, 32, 0, 31, 0x20050020},
+ {6, 0x0000FFFF, 32, 16, 31, 0x20061020},
+ {7, 0xFF0000FF, 32, 24, 7, 0x20071808},
+ {8, 0x00FFFF00, 32, 8, 23, 0x20080818},
+
+ {9, 0x0000000000FFFF00, 64, 40, 55, 0x40092838},
+ {10, 0xFFFF000000000000, 64, 0, 15, 0x400A0010},
+ {10, 0xFFFF000000000001, 64, 63, 15, 0x400A3f10},
+ }
+
+ for i, v := range tests {
+ result := encodePPC64RotateMask(v.rotate, int64(v.mask), v.nbits)
+ if result != v.encoded {
+ t.Errorf("encodePPC64RotateMask(%d,0x%x,%d) = 0x%x, expected 0x%x", v.rotate, v.mask, v.nbits, result, v.encoded)
+ }
+ rotate, mb, me, mask := DecodePPC64RotateMask(result)
+ if rotate != v.rotate || mb != v.mb || me != v.me || mask != v.mask {
+ t.Errorf("DecodePPC64Failure(Test %d) got (%d, %d, %d, %x) expected (%d, %d, %d, %x)", i, rotate, mb, me, mask, v.rotate, v.mb, v.me, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiSrw(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), 4, true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), 4, true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 4, false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), 4, true, 12, 0xFFFF0000},
+ // ((x>>32)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiSrw(int64(v.clrlsldi), v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiRlwinm(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ rlwinm int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x<<4)&0xFF00)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(4, 0xFF00, 32), false, 0, 0},
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(28, 0x0FFFFFFF, 32), true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 12, 0xFFFF0000},
+ // ((x>>4)&0xF000FFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xF000FFFF, 32), true, 12, 0xFFFF0000},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiRlwinm(v.clrlsldi, v.rlwinm)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64SldiSrw(t *testing.T) {
+ tests := []struct {
+ sld int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {4, 4, true, 0, 0xFFFFFFF0},
+ {4, 8, true, 28, 0x0FFFFFF0},
+ {0, 0, true, 0, 0xFFFFFFFF},
+ {8, 4, false, 0, 0},
+ {0, 32, false, 0, 0},
+ {0, 31, true, 1, 0x1},
+ {31, 31, true, 0, 0x80000000},
+ {32, 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64SldiSrw(v.sld, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64SldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64AndSrwi(t *testing.T) {
+ tests := []struct {
+ and int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {0x000000FF, 8, true, 24, 0xFF},
+ {0xF00000FF, 8, true, 24, 0xFF},
+ {0x0F0000FF, 4, false, 0, 0},
+ {0x00000000, 4, false, 0, 0},
+ {0xF0000000, 4, false, 0, 0},
+ {0xF0000000, 32, false, 0, 0},
+ {0xFFFFFFFF, 0, true, 0, 0xFFFFFFFF},
+ }
+ for i, v := range tests {
+ result := mergePPC64AndSrwi(v.and, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64AndSrwi(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
new file mode 100644
index 0000000..3c481ad
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -0,0 +1,1094 @@
+// Code generated from _gen/dec.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValuedec(v *Value) bool {
+ switch v.Op {
+ case OpArrayMake1:
+ return rewriteValuedec_OpArrayMake1(v)
+ case OpArraySelect:
+ return rewriteValuedec_OpArraySelect(v)
+ case OpComplexImag:
+ return rewriteValuedec_OpComplexImag(v)
+ case OpComplexReal:
+ return rewriteValuedec_OpComplexReal(v)
+ case OpIData:
+ return rewriteValuedec_OpIData(v)
+ case OpIMake:
+ return rewriteValuedec_OpIMake(v)
+ case OpITab:
+ return rewriteValuedec_OpITab(v)
+ case OpLoad:
+ return rewriteValuedec_OpLoad(v)
+ case OpSliceCap:
+ return rewriteValuedec_OpSliceCap(v)
+ case OpSliceLen:
+ return rewriteValuedec_OpSliceLen(v)
+ case OpSlicePtr:
+ return rewriteValuedec_OpSlicePtr(v)
+ case OpSlicePtrUnchecked:
+ return rewriteValuedec_OpSlicePtrUnchecked(v)
+ case OpStore:
+ return rewriteValuedec_OpStore(v)
+ case OpStringLen:
+ return rewriteValuedec_OpStringLen(v)
+ case OpStringPtr:
+ return rewriteValuedec_OpStringPtr(v)
+ case OpStructMake1:
+ return rewriteValuedec_OpStructMake1(v)
+ case OpStructSelect:
+ return rewriteValuedec_OpStructSelect(v)
+ }
+ return false
+}
+func rewriteValuedec_OpArrayMake1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ArrayMake1 x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpArraySelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ArraySelect [0] x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect (ArrayMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpArrayMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ArraySelect [i] x:(Load <t> ptr mem))
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.Elem().Size()*i] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.Elem().Size() * i)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpComplexImag(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ComplexImag (ComplexMake _ imag ))
+ // result: imag
+ for {
+ if v_0.Op != OpComplexMake {
+ break
+ }
+ imag := v_0.Args[1]
+ v.copyOf(imag)
+ return true
+ }
+ // match: (ComplexImag x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: @x.Block (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ComplexImag x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: @x.Block (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v1.AuxInt = int64ToAuxInt(8)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpComplexReal(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ComplexReal (ComplexMake real _ ))
+ // result: real
+ for {
+ if v_0.Op != OpComplexMake {
+ break
+ }
+ real := v_0.Args[0]
+ v.copyOf(real)
+ return true
+ }
+ // match: (ComplexReal x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: @x.Block (Load <typ.Float32> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (ComplexReal x:(Load <t> ptr mem))
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: @x.Block (Load <typ.Float64> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpIData(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (IData (IMake _ data))
+ // result: data
+ for {
+ if v_0.Op != OpIMake {
+ break
+ }
+ data := v_0.Args[1]
+ v.copyOf(data)
+ return true
+ }
+ // match: (IData x:(Load <t> ptr mem))
+ // cond: t.IsInterface()
+ // result: @x.Block (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsInterface()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v1.AuxInt = int64ToAuxInt(config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpIMake(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IMake _typ (StructMake1 val))
+ // result: (IMake _typ val)
+ for {
+ _typ := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(_typ, val)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpITab(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ITab (IMake itab _))
+ // result: itab
+ for {
+ if v_0.Op != OpIMake {
+ break
+ }
+ itab := v_0.Args[0]
+ v.copyOf(itab)
+ return true
+ }
+ // match: (ITab x:(Load <t> ptr mem))
+ // cond: t.IsInterface()
+ // result: @x.Block (Load <typ.Uintptr> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsInterface()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: (ComplexMake (Load <typ.Float32> ptr mem) (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem) )
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: (ComplexMake (Load <typ.Float64> ptr mem) (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem) )
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v2.AuxInt = int64ToAuxInt(8)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsString()
+ // result: (StringMake (Load <typ.BytePtr> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsString()) {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsSlice()
+ // result: (SliceMake (Load <t.Elem().PtrTo()> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsSlice()) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v3 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v4.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v4.AddArg(ptr)
+ v3.AddArg2(v4, mem)
+ v.AddArg3(v0, v1, v3)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsInterface()
+ // result: (IMake (Load <typ.Uintptr> ptr mem) (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsInterface()) {
+ break
+ }
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceCap(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (SliceCap (SliceMake _ _ cap))
+ // result: cap
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ cap := v_0.Args[2]
+ v.copyOf(cap)
+ return true
+ }
+ // match: (SliceCap x:(Load <t> ptr mem))
+ // cond: t.IsSlice()
+ // result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsSlice()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v1.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceLen(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (SliceLen (SliceMake _ len _))
+ // result: len
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ len := v_0.Args[1]
+ v.copyOf(len)
+ return true
+ }
+ // match: (SliceLen x:(Load <t> ptr mem))
+ // cond: t.IsSlice()
+ // result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsSlice()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v1.AuxInt = int64ToAuxInt(config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSlicePtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SlicePtr (SliceMake ptr _ _ ))
+ // result: ptr
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ // match: (SlicePtr x:(Load <t> ptr mem))
+ // cond: t.IsSlice()
+ // result: @x.Block (Load <t.Elem().PtrTo()> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsSlice()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSlicePtrUnchecked(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtrUnchecked (SliceMake ptr _ _ ))
+ // result: ptr
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Store {t} _ _ mem)
+ // cond: t.Size() == 0
+ // result: mem
+ for {
+ t := auxToType(v.Aux)
+ mem := v_2
+ if !(t.Size() == 0) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t} dst (ComplexMake real imag) mem)
+ // cond: t.Size() == 8
+ // result: (Store {typ.Float32} (OffPtr <typ.Float32Ptr> [4] dst) imag (Store {typ.Float32} dst real mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpComplexMake {
+ break
+ }
+ imag := v_1.Args[1]
+ real := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Float32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Float32)
+ v1.AddArg3(dst, real, mem)
+ v.AddArg3(v0, imag, v1)
+ return true
+ }
+ // match: (Store {t} dst (ComplexMake real imag) mem)
+ // cond: t.Size() == 16
+ // result: (Store {typ.Float64} (OffPtr <typ.Float64Ptr> [8] dst) imag (Store {typ.Float64} dst real mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpComplexMake {
+ break
+ }
+ imag := v_1.Args[1]
+ real := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 16) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Float64)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v0.AuxInt = int64ToAuxInt(8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Float64)
+ v1.AddArg3(dst, real, mem)
+ v.AddArg3(v0, imag, v1)
+ return true
+ }
+ // match: (Store dst (StringMake ptr len) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStringMake {
+ break
+ }
+ len := v_1.Args[1]
+ ptr := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Int)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v0.AuxInt = int64ToAuxInt(config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.BytePtr)
+ v1.AddArg3(dst, ptr, mem)
+ v.AddArg3(v0, len, v1)
+ return true
+ }
+ // match: (Store {t} dst (SliceMake ptr len cap) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem)))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpSliceMake {
+ break
+ }
+ cap := v_1.Args[2]
+ ptr := v_1.Args[0]
+ len := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Int)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v0.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.Elem().PtrTo())
+ v3.AddArg3(dst, ptr, mem)
+ v1.AddArg3(v2, len, v3)
+ v.AddArg3(v0, cap, v1)
+ return true
+ }
+ // match: (Store dst (IMake itab data) mem)
+ // result: (Store {typ.BytePtr} (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst) data (Store {typ.Uintptr} dst itab mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpIMake {
+ break
+ }
+ data := v_1.Args[1]
+ itab := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v0.AuxInt = int64ToAuxInt(config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Uintptr)
+ v1.AddArg3(dst, itab, mem)
+ v.AddArg3(v0, data, v1)
+ return true
+ }
+ // match: (Store dst (StructMake1 <t> f0) mem)
+ // result: (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ t := v_1.Type
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(0))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(dst)
+ v.AddArg3(v0, f0, mem)
+ return true
+ }
+ // match: (Store dst (StructMake2 <t> f0 f1) mem)
+ // result: (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake2 {
+ break
+ }
+ t := v_1.Type
+ f1 := v_1.Args[1]
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(1))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(0))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, f0, mem)
+ v.AddArg3(v0, f1, v1)
+ return true
+ }
+ // match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
+ // result: (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake3 {
+ break
+ }
+ t := v_1.Type
+ f2 := v_1.Args[2]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(2))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(1))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(0))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, f0, mem)
+ v1.AddArg3(v2, f1, v3)
+ v.AddArg3(v0, f2, v1)
+ return true
+ }
+ // match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
+ // result: (Store {t.FieldType(3)} (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake4 {
+ break
+ }
+ t := v_1.Type
+ f3 := v_1.Args[3]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ f2 := v_1.Args[2]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(3))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(2))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(1))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v4.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t.FieldType(0))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, f0, mem)
+ v3.AddArg3(v4, f1, v5)
+ v1.AddArg3(v2, f2, v3)
+ v.AddArg3(v0, f3, v1)
+ return true
+ }
+ // match: (Store dst (ArrayMake1 e) mem)
+ // result: (Store {e.Type} dst e mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ e := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(e.Type)
+ v.AddArg3(dst, e, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringLen(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (StringLen (StringMake _ len))
+ // result: len
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ len := v_0.Args[1]
+ v.copyOf(len)
+ return true
+ }
+ // match: (StringLen x:(Load <t> ptr mem))
+ // cond: t.IsString()
+ // result: @x.Block (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsString()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v1.AuxInt = int64ToAuxInt(config.PtrSize)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (StringPtr (StringMake ptr _))
+ // result: ptr
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ // match: (StringPtr x:(Load <t> ptr mem))
+ // cond: t.IsString()
+ // result: @x.Block (Load <typ.BytePtr> ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(t.IsString()) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v.copyOf(v0)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStructMake1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StructMake1 x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStructSelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (StructSelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ // match: (StructSelect (StructMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpStructMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake2 x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake2 _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake3 x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake3 _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake3 _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake4 x _ _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake4 _ x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake4 _ _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [3] (StructMake4 _ _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[3]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] x)
+ // cond: x.Type.IsPtrShaped()
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if !(x.Type.IsPtrShaped()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [i] x:(Load <t> ptr mem))
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i)))
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockdec(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
new file mode 100644
index 0000000..901dc75
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -0,0 +1,2537 @@
+// Code generated from _gen/dec64.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValuedec64(v *Value) bool {
+ switch v.Op {
+ case OpAdd64:
+ return rewriteValuedec64_OpAdd64(v)
+ case OpAnd64:
+ return rewriteValuedec64_OpAnd64(v)
+ case OpArg:
+ return rewriteValuedec64_OpArg(v)
+ case OpBitLen64:
+ return rewriteValuedec64_OpBitLen64(v)
+ case OpBswap64:
+ return rewriteValuedec64_OpBswap64(v)
+ case OpCom64:
+ return rewriteValuedec64_OpCom64(v)
+ case OpConst64:
+ return rewriteValuedec64_OpConst64(v)
+ case OpCtz64:
+ return rewriteValuedec64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpEq64:
+ return rewriteValuedec64_OpEq64(v)
+ case OpInt64Hi:
+ return rewriteValuedec64_OpInt64Hi(v)
+ case OpInt64Lo:
+ return rewriteValuedec64_OpInt64Lo(v)
+ case OpLeq64:
+ return rewriteValuedec64_OpLeq64(v)
+ case OpLeq64U:
+ return rewriteValuedec64_OpLeq64U(v)
+ case OpLess64:
+ return rewriteValuedec64_OpLess64(v)
+ case OpLess64U:
+ return rewriteValuedec64_OpLess64U(v)
+ case OpLoad:
+ return rewriteValuedec64_OpLoad(v)
+ case OpLsh16x64:
+ return rewriteValuedec64_OpLsh16x64(v)
+ case OpLsh32x64:
+ return rewriteValuedec64_OpLsh32x64(v)
+ case OpLsh64x16:
+ return rewriteValuedec64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuedec64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuedec64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuedec64_OpLsh64x8(v)
+ case OpLsh8x64:
+ return rewriteValuedec64_OpLsh8x64(v)
+ case OpMul64:
+ return rewriteValuedec64_OpMul64(v)
+ case OpNeg64:
+ return rewriteValuedec64_OpNeg64(v)
+ case OpNeq64:
+ return rewriteValuedec64_OpNeq64(v)
+ case OpOr32:
+ return rewriteValuedec64_OpOr32(v)
+ case OpOr64:
+ return rewriteValuedec64_OpOr64(v)
+ case OpRotateLeft16:
+ return rewriteValuedec64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValuedec64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValuedec64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValuedec64_OpRotateLeft8(v)
+ case OpRsh16Ux64:
+ return rewriteValuedec64_OpRsh16Ux64(v)
+ case OpRsh16x64:
+ return rewriteValuedec64_OpRsh16x64(v)
+ case OpRsh32Ux64:
+ return rewriteValuedec64_OpRsh32Ux64(v)
+ case OpRsh32x64:
+ return rewriteValuedec64_OpRsh32x64(v)
+ case OpRsh64Ux16:
+ return rewriteValuedec64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuedec64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuedec64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuedec64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuedec64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuedec64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuedec64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuedec64_OpRsh64x8(v)
+ case OpRsh8Ux64:
+ return rewriteValuedec64_OpRsh8Ux64(v)
+ case OpRsh8x64:
+ return rewriteValuedec64_OpRsh8x64(v)
+ case OpSignExt16to64:
+ return rewriteValuedec64_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValuedec64_OpSignExt32to64(v)
+ case OpSignExt8to64:
+ return rewriteValuedec64_OpSignExt8to64(v)
+ case OpStore:
+ return rewriteValuedec64_OpStore(v)
+ case OpSub64:
+ return rewriteValuedec64_OpSub64(v)
+ case OpTrunc64to16:
+ return rewriteValuedec64_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuedec64_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuedec64_OpTrunc64to8(v)
+ case OpXor64:
+ return rewriteValuedec64_OpXor64(v)
+ case OpZeroExt16to64:
+ return rewriteValuedec64_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValuedec64_OpZeroExt32to64(v)
+ case OpZeroExt8to64:
+ return rewriteValuedec64_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValuedec64_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add64 x y)
+ // result: (Int64Make (Add32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(y)
+ v4.AddArg2(v5, v6)
+ v3.AddArg(v4)
+ v0.AddArg3(v1, v2, v3)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v7.AddArg(v4)
+ v.AddArg2(v0, v7)
+ return true
+ }
+}
+func rewriteValuedec64_OpAnd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (And64 x y)
+ // result: (Int64Make (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpArg(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.Int32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(off + 4)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off + 4)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.Int32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (Add32 <typ.Int> (BitLen32 <typ.Int> (Int64Hi x)) (BitLen32 <typ.Int> (Or32 <typ.UInt32> (Int64Lo x) (Zeromask (Int64Hi x)))))
+ for {
+ x := v_0
+ v.reset(OpAdd32)
+ v.Type = typ.Int
+ v0 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v5.AddArg(v1)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpBswap64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap64 x)
+ // result: (Int64Make (Bswap32 <typ.UInt32> (Int64Lo x)) (Bswap32 <typ.UInt32> (Int64Hi x)))
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (Int64Make (Com32 <typ.UInt32> (Int64Hi x)) (Com32 <typ.UInt32> (Int64Lo x)))
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpConst64(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64 <t> [c])
+ // cond: t.IsSigned()
+ // result: (Int64Make (Const32 <typ.Int32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(int32(c >> 32))
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Const64 <t> [c])
+ // cond: !t.IsSigned()
+ // result: (Int64Make (Const32 <typ.UInt32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(!t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c >> 32))
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // result: (Add32 <typ.UInt32> (Ctz32 <typ.UInt32> (Int64Lo x)) (And32 <typ.UInt32> (Com32 <typ.UInt32> (Zeromask (Int64Lo x))) (Ctz32 <typ.UInt32> (Int64Hi x))))
+ for {
+ x := v_0
+ v.reset(OpAdd32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v4.AddArg(v1)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v6.AddArg(x)
+ v5.AddArg(v6)
+ v2.AddArg2(v3, v5)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAndB)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpInt64Hi(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Int64Hi (Int64Make hi _))
+ // result: hi
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ v.copyOf(hi)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpInt64Lo(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Int64Lo (Int64Make _ lo))
+ // result: lo
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.copyOf(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64 x y)
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U x y)
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
+ // result: (Int64Make (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2.AddArg2(ptr, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
+ // result: (Int64Make (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2.AddArg2(ptr, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
+ // result: (Int64Make (Load <typ.Int32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
+ // result: (Int64Make (Load <typ.UInt32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh16x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh32x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x16 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Lsh32x16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))) (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v7 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v7.AuxInt = int16ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x32 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Lsh32x32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))) (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh64x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x8 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Lsh32x8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))) (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v7 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v7.AuxInt = int8ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh8x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 x y)
+ // result: (Int64Make (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y)) (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y)) (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32))
+ v9.AddArg2(v2, v7)
+ v8.AddArg(v9)
+ v4.AddArg2(v5, v8)
+ v0.AddArg2(v1, v4)
+ v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v10.AddArg(v9)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg64 <t> x)
+ // result: (Sub64 (Const64 <t> [0]) x)
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c == 0
+ // result: y
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ y := v_1
+ if !(c == 0) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c != 0
+ // result: (Const32 <typ.UInt32> [-1])
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if !(c != 0) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.Type = typ.UInt32
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuedec64_OpOr64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or64 x y)
+ // result: (Int64Make (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft16 x (Int64Make hi lo))
+ // result: (RotateLeft16 x lo)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (Int64Make hi lo))
+ // result: (RotateLeft32 x lo)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft64 x (Int64Make hi lo))
+ // result: (RotateLeft64 x lo)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft8 x (Int64Make hi lo))
+ // result: (RotateLeft8 x lo)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh16Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh16Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt16to32 x))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh16x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh32Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh32Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask x)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh32x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x s)
+ // result: (Int64Make (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Rsh32Ux16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x s)
+ // result: (Int64Make (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Rsh32Ux32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh64Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh64Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x s)
+ // result: (Int64Make (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Rsh32Ux8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x s)
+ // result: (Int64Make (Rsh32x16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (And32 <typ.UInt32> (Rsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x s)
+ // result: (Int64Make (Rsh32x32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (And32 <typ.UInt32> (Rsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))) (Zeromask (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v14.AuxInt = int32ToAuxInt(5)
+ v13.AddArg2(s, v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (Rsh64x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh64x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x s)
+ // result: (Int64Make (Rsh32x8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (And32 <typ.UInt32> (Rsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh8Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh8Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt8to32 x))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh8x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to64 x)
+ // result: (SignExt32to64 (SignExt16to32 x))
+ for {
+ x := v_0
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt32to64 x)
+ // result: (Int64Make (Signmask x) x)
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to64 x)
+ // result: (SignExt32to64 (SignExt8to32 x))
+ for {
+ x := v_0
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Store {t} dst (Int64Make hi lo) mem)
+ // cond: t.Size() == 8 && !config.BigEndian
+ // result: (Store {hi.Type} (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store {lo.Type} dst lo mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8 && !config.BigEndian) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(hi.Type)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, hi.Type.PtrTo())
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(lo.Type)
+ v1.AddArg3(dst, lo, mem)
+ v.AddArg3(v0, hi, v1)
+ return true
+ }
+ // match: (Store {t} dst (Int64Make hi lo) mem)
+ // cond: t.Size() == 8 && config.BigEndian
+ // result: (Store {lo.Type} (OffPtr <lo.Type.PtrTo()> [4] dst) lo (Store {hi.Type} dst hi mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8 && config.BigEndian) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(lo.Type)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, lo.Type.PtrTo())
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(hi.Type)
+ v1.AddArg3(dst, hi, mem)
+ v.AddArg3(v0, lo, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpSub64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub64 x y)
+ // result: (Int64Make (Sub32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(y)
+ v4.AddArg2(v5, v6)
+ v3.AddArg(v4)
+ v0.AddArg3(v1, v2, v3)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v7.AddArg(v4)
+ v.AddArg2(v0, v7)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Trunc64to16 (Int64Make _ lo))
+ // result: (Trunc32to16 lo)
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to16)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Trunc64to16 x)
+ // result: (Trunc32to16 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Int64Make _ lo))
+ // result: lo
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.copyOf(lo)
+ return true
+ }
+ // match: (Trunc64to32 x)
+ // result: (Int64Lo x)
+ for {
+ x := v_0
+ v.reset(OpInt64Lo)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Trunc64to8 (Int64Make _ lo))
+ // result: (Trunc32to8 lo)
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to8)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Trunc64to8 x)
+ // result: (Trunc32to8 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpXor64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Xor64 x y)
+ // result: (Int64Make (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to64 x)
+ // result: (ZeroExt32to64 (ZeroExt16to32 x))
+ for {
+ x := v_0
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt32to64 x)
+ // result: (Int64Make (Const32 <typ.UInt32> [0]) x)
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to64 x)
+ // result: (ZeroExt32to64 (ZeroExt8to32 x))
+ for {
+ x := v_0
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockdec64(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
new file mode 100644
index 0000000..a018ca0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -0,0 +1,33938 @@
+// Code generated from _gen/generic.rules using 'go generate'; DO NOT EDIT.
+
+package ssa
+
+import "math"
+import "cmd/internal/obj"
+import "cmd/compile/internal/types"
+import "cmd/compile/internal/ir"
+
+func rewriteValuegeneric(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ return rewriteValuegeneric_OpAdd16(v)
+ case OpAdd32:
+ return rewriteValuegeneric_OpAdd32(v)
+ case OpAdd32F:
+ return rewriteValuegeneric_OpAdd32F(v)
+ case OpAdd64:
+ return rewriteValuegeneric_OpAdd64(v)
+ case OpAdd64F:
+ return rewriteValuegeneric_OpAdd64F(v)
+ case OpAdd8:
+ return rewriteValuegeneric_OpAdd8(v)
+ case OpAddPtr:
+ return rewriteValuegeneric_OpAddPtr(v)
+ case OpAnd16:
+ return rewriteValuegeneric_OpAnd16(v)
+ case OpAnd32:
+ return rewriteValuegeneric_OpAnd32(v)
+ case OpAnd64:
+ return rewriteValuegeneric_OpAnd64(v)
+ case OpAnd8:
+ return rewriteValuegeneric_OpAnd8(v)
+ case OpAndB:
+ return rewriteValuegeneric_OpAndB(v)
+ case OpArraySelect:
+ return rewriteValuegeneric_OpArraySelect(v)
+ case OpCeil:
+ return rewriteValuegeneric_OpCeil(v)
+ case OpCom16:
+ return rewriteValuegeneric_OpCom16(v)
+ case OpCom32:
+ return rewriteValuegeneric_OpCom32(v)
+ case OpCom64:
+ return rewriteValuegeneric_OpCom64(v)
+ case OpCom8:
+ return rewriteValuegeneric_OpCom8(v)
+ case OpConstInterface:
+ return rewriteValuegeneric_OpConstInterface(v)
+ case OpConstSlice:
+ return rewriteValuegeneric_OpConstSlice(v)
+ case OpConstString:
+ return rewriteValuegeneric_OpConstString(v)
+ case OpConvert:
+ return rewriteValuegeneric_OpConvert(v)
+ case OpCtz16:
+ return rewriteValuegeneric_OpCtz16(v)
+ case OpCtz32:
+ return rewriteValuegeneric_OpCtz32(v)
+ case OpCtz64:
+ return rewriteValuegeneric_OpCtz64(v)
+ case OpCtz8:
+ return rewriteValuegeneric_OpCtz8(v)
+ case OpCvt32Fto32:
+ return rewriteValuegeneric_OpCvt32Fto32(v)
+ case OpCvt32Fto64:
+ return rewriteValuegeneric_OpCvt32Fto64(v)
+ case OpCvt32Fto64F:
+ return rewriteValuegeneric_OpCvt32Fto64F(v)
+ case OpCvt32to32F:
+ return rewriteValuegeneric_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValuegeneric_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ return rewriteValuegeneric_OpCvt64Fto32(v)
+ case OpCvt64Fto32F:
+ return rewriteValuegeneric_OpCvt64Fto32F(v)
+ case OpCvt64Fto64:
+ return rewriteValuegeneric_OpCvt64Fto64(v)
+ case OpCvt64to32F:
+ return rewriteValuegeneric_OpCvt64to32F(v)
+ case OpCvt64to64F:
+ return rewriteValuegeneric_OpCvt64to64F(v)
+ case OpCvtBoolToUint8:
+ return rewriteValuegeneric_OpCvtBoolToUint8(v)
+ case OpDiv16:
+ return rewriteValuegeneric_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValuegeneric_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValuegeneric_OpDiv32(v)
+ case OpDiv32F:
+ return rewriteValuegeneric_OpDiv32F(v)
+ case OpDiv32u:
+ return rewriteValuegeneric_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValuegeneric_OpDiv64(v)
+ case OpDiv64F:
+ return rewriteValuegeneric_OpDiv64F(v)
+ case OpDiv64u:
+ return rewriteValuegeneric_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValuegeneric_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValuegeneric_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValuegeneric_OpEq16(v)
+ case OpEq32:
+ return rewriteValuegeneric_OpEq32(v)
+ case OpEq32F:
+ return rewriteValuegeneric_OpEq32F(v)
+ case OpEq64:
+ return rewriteValuegeneric_OpEq64(v)
+ case OpEq64F:
+ return rewriteValuegeneric_OpEq64F(v)
+ case OpEq8:
+ return rewriteValuegeneric_OpEq8(v)
+ case OpEqB:
+ return rewriteValuegeneric_OpEqB(v)
+ case OpEqInter:
+ return rewriteValuegeneric_OpEqInter(v)
+ case OpEqPtr:
+ return rewriteValuegeneric_OpEqPtr(v)
+ case OpEqSlice:
+ return rewriteValuegeneric_OpEqSlice(v)
+ case OpFloor:
+ return rewriteValuegeneric_OpFloor(v)
+ case OpIMake:
+ return rewriteValuegeneric_OpIMake(v)
+ case OpInterLECall:
+ return rewriteValuegeneric_OpInterLECall(v)
+ case OpIsInBounds:
+ return rewriteValuegeneric_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValuegeneric_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValuegeneric_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValuegeneric_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValuegeneric_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValuegeneric_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValuegeneric_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValuegeneric_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValuegeneric_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValuegeneric_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValuegeneric_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValuegeneric_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValuegeneric_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValuegeneric_OpLess16(v)
+ case OpLess16U:
+ return rewriteValuegeneric_OpLess16U(v)
+ case OpLess32:
+ return rewriteValuegeneric_OpLess32(v)
+ case OpLess32F:
+ return rewriteValuegeneric_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValuegeneric_OpLess32U(v)
+ case OpLess64:
+ return rewriteValuegeneric_OpLess64(v)
+ case OpLess64F:
+ return rewriteValuegeneric_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValuegeneric_OpLess64U(v)
+ case OpLess8:
+ return rewriteValuegeneric_OpLess8(v)
+ case OpLess8U:
+ return rewriteValuegeneric_OpLess8U(v)
+ case OpLoad:
+ return rewriteValuegeneric_OpLoad(v)
+ case OpLsh16x16:
+ return rewriteValuegeneric_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValuegeneric_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValuegeneric_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValuegeneric_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValuegeneric_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValuegeneric_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValuegeneric_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValuegeneric_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValuegeneric_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuegeneric_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuegeneric_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuegeneric_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValuegeneric_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValuegeneric_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValuegeneric_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValuegeneric_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValuegeneric_OpMod16(v)
+ case OpMod16u:
+ return rewriteValuegeneric_OpMod16u(v)
+ case OpMod32:
+ return rewriteValuegeneric_OpMod32(v)
+ case OpMod32u:
+ return rewriteValuegeneric_OpMod32u(v)
+ case OpMod64:
+ return rewriteValuegeneric_OpMod64(v)
+ case OpMod64u:
+ return rewriteValuegeneric_OpMod64u(v)
+ case OpMod8:
+ return rewriteValuegeneric_OpMod8(v)
+ case OpMod8u:
+ return rewriteValuegeneric_OpMod8u(v)
+ case OpMove:
+ return rewriteValuegeneric_OpMove(v)
+ case OpMul16:
+ return rewriteValuegeneric_OpMul16(v)
+ case OpMul32:
+ return rewriteValuegeneric_OpMul32(v)
+ case OpMul32F:
+ return rewriteValuegeneric_OpMul32F(v)
+ case OpMul64:
+ return rewriteValuegeneric_OpMul64(v)
+ case OpMul64F:
+ return rewriteValuegeneric_OpMul64F(v)
+ case OpMul8:
+ return rewriteValuegeneric_OpMul8(v)
+ case OpNeg16:
+ return rewriteValuegeneric_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValuegeneric_OpNeg32(v)
+ case OpNeg32F:
+ return rewriteValuegeneric_OpNeg32F(v)
+ case OpNeg64:
+ return rewriteValuegeneric_OpNeg64(v)
+ case OpNeg64F:
+ return rewriteValuegeneric_OpNeg64F(v)
+ case OpNeg8:
+ return rewriteValuegeneric_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValuegeneric_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValuegeneric_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValuegeneric_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValuegeneric_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValuegeneric_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValuegeneric_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValuegeneric_OpNeqB(v)
+ case OpNeqInter:
+ return rewriteValuegeneric_OpNeqInter(v)
+ case OpNeqPtr:
+ return rewriteValuegeneric_OpNeqPtr(v)
+ case OpNeqSlice:
+ return rewriteValuegeneric_OpNeqSlice(v)
+ case OpNilCheck:
+ return rewriteValuegeneric_OpNilCheck(v)
+ case OpNot:
+ return rewriteValuegeneric_OpNot(v)
+ case OpOffPtr:
+ return rewriteValuegeneric_OpOffPtr(v)
+ case OpOr16:
+ return rewriteValuegeneric_OpOr16(v)
+ case OpOr32:
+ return rewriteValuegeneric_OpOr32(v)
+ case OpOr64:
+ return rewriteValuegeneric_OpOr64(v)
+ case OpOr8:
+ return rewriteValuegeneric_OpOr8(v)
+ case OpOrB:
+ return rewriteValuegeneric_OpOrB(v)
+ case OpPhi:
+ return rewriteValuegeneric_OpPhi(v)
+ case OpPtrIndex:
+ return rewriteValuegeneric_OpPtrIndex(v)
+ case OpRotateLeft16:
+ return rewriteValuegeneric_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValuegeneric_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValuegeneric_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValuegeneric_OpRotateLeft8(v)
+ case OpRound32F:
+ return rewriteValuegeneric_OpRound32F(v)
+ case OpRound64F:
+ return rewriteValuegeneric_OpRound64F(v)
+ case OpRoundToEven:
+ return rewriteValuegeneric_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValuegeneric_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValuegeneric_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValuegeneric_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValuegeneric_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValuegeneric_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValuegeneric_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValuegeneric_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValuegeneric_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValuegeneric_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValuegeneric_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValuegeneric_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValuegeneric_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValuegeneric_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValuegeneric_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValuegeneric_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValuegeneric_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValuegeneric_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuegeneric_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuegeneric_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuegeneric_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuegeneric_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuegeneric_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuegeneric_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuegeneric_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValuegeneric_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValuegeneric_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValuegeneric_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValuegeneric_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValuegeneric_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValuegeneric_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValuegeneric_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValuegeneric_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValuegeneric_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValuegeneric_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValuegeneric_OpSelectN(v)
+ case OpSignExt16to32:
+ return rewriteValuegeneric_OpSignExt16to32(v)
+ case OpSignExt16to64:
+ return rewriteValuegeneric_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValuegeneric_OpSignExt32to64(v)
+ case OpSignExt8to16:
+ return rewriteValuegeneric_OpSignExt8to16(v)
+ case OpSignExt8to32:
+ return rewriteValuegeneric_OpSignExt8to32(v)
+ case OpSignExt8to64:
+ return rewriteValuegeneric_OpSignExt8to64(v)
+ case OpSliceCap:
+ return rewriteValuegeneric_OpSliceCap(v)
+ case OpSliceLen:
+ return rewriteValuegeneric_OpSliceLen(v)
+ case OpSlicePtr:
+ return rewriteValuegeneric_OpSlicePtr(v)
+ case OpSlicemask:
+ return rewriteValuegeneric_OpSlicemask(v)
+ case OpSqrt:
+ return rewriteValuegeneric_OpSqrt(v)
+ case OpStaticCall:
+ return rewriteValuegeneric_OpStaticCall(v)
+ case OpStaticLECall:
+ return rewriteValuegeneric_OpStaticLECall(v)
+ case OpStore:
+ return rewriteValuegeneric_OpStore(v)
+ case OpStringLen:
+ return rewriteValuegeneric_OpStringLen(v)
+ case OpStringPtr:
+ return rewriteValuegeneric_OpStringPtr(v)
+ case OpStructSelect:
+ return rewriteValuegeneric_OpStructSelect(v)
+ case OpSub16:
+ return rewriteValuegeneric_OpSub16(v)
+ case OpSub32:
+ return rewriteValuegeneric_OpSub32(v)
+ case OpSub32F:
+ return rewriteValuegeneric_OpSub32F(v)
+ case OpSub64:
+ return rewriteValuegeneric_OpSub64(v)
+ case OpSub64F:
+ return rewriteValuegeneric_OpSub64F(v)
+ case OpSub8:
+ return rewriteValuegeneric_OpSub8(v)
+ case OpTrunc:
+ return rewriteValuegeneric_OpTrunc(v)
+ case OpTrunc16to8:
+ return rewriteValuegeneric_OpTrunc16to8(v)
+ case OpTrunc32to16:
+ return rewriteValuegeneric_OpTrunc32to16(v)
+ case OpTrunc32to8:
+ return rewriteValuegeneric_OpTrunc32to8(v)
+ case OpTrunc64to16:
+ return rewriteValuegeneric_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuegeneric_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuegeneric_OpTrunc64to8(v)
+ case OpXor16:
+ return rewriteValuegeneric_OpXor16(v)
+ case OpXor32:
+ return rewriteValuegeneric_OpXor32(v)
+ case OpXor64:
+ return rewriteValuegeneric_OpXor64(v)
+ case OpXor8:
+ return rewriteValuegeneric_OpXor8(v)
+ case OpZero:
+ return rewriteValuegeneric_OpZero(v)
+ case OpZeroExt16to32:
+ return rewriteValuegeneric_OpZeroExt16to32(v)
+ case OpZeroExt16to64:
+ return rewriteValuegeneric_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValuegeneric_OpZeroExt32to64(v)
+ case OpZeroExt8to16:
+ return rewriteValuegeneric_OpZeroExt8to16(v)
+ case OpZeroExt8to32:
+ return rewriteValuegeneric_OpZeroExt8to32(v)
+ case OpZeroExt8to64:
+ return rewriteValuegeneric_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Add16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add16 <t> (Mul16 x y) (Mul16 x z))
+ // result: (Mul16 x (Add16 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 x (Neg16 y))
+ // result: (Sub16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Sub16 x t) (Add16 t y))
+ // result: (Add16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub16 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 [1]) (Com16 x))
+ // result: (Neg16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 || v_1.Op != OpCom16 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 x (Sub16 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 x (Add16 y (Sub16 z x)))
+ // result: (Add16 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd16)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Add16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Add16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Sub16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Sub16 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub16 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Add16 (Const16 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x))
+ // result: (Sub16 (Const16 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpSub16 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d])))
+ // cond: c < 16 && d == 16-c && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 16 && d == 16-c && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Add32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add32 <t> (Mul32 x y) (Mul32 x z))
+ // result: (Mul32 x (Add32 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 x (Neg32 y))
+ // result: (Sub32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Com32 x) x)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Sub32 x t) (Add32 t y))
+ // result: (Add32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub32 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 [1]) (Com32 x))
+ // result: (Neg32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 || v_1.Op != OpCom32 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 x (Sub32 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 x (Add32 y (Sub32 z x)))
+ // result: (Add32 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd32)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Add32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Add32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Sub32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Sub32 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub32 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x))
+ // result: (Sub32 (Const32 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpSub32 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d])))
+ // cond: c < 32 && d == 32-c && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 32 && d == 32-c && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Add32F (Const32F [c]) (Const32F [d]))
+ // cond: c+d == c+d
+ // result: (Const32F [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c+d == c+d) {
+ continue
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Add64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Add64 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 x (Neg64 y))
+ // result: (Sub64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Com64 x) x)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Sub64 x t) (Add64 t y))
+ // result: (Add64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub64 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 [1]) (Com64 x))
+ // result: (Neg64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpCom64 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 x (Sub64 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 x (Add64 y (Sub64 z x)))
+ // result: (Add64 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd64)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Add64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Add64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Sub64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Sub64 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub64 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x))
+ // result: (Sub64 (Const64 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpSub64 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d])))
+ // cond: c < 64 && d == 64-c && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 64 && d == 64-c && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Add64F (Const64F [c]) (Const64F [d]))
+ // cond: c+d == c+d
+ // result: (Const64F [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c+d == c+d) {
+ continue
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Add8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Add8 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 x (Neg8 y))
+ // result: (Sub8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpSub8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Com8 x) x)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Sub8 x t) (Add8 t y))
+ // result: (Add8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub8 {
+ continue
+ }
+ t := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAdd8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 [1]) (Com8 x))
+ // result: (Neg8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 || v_1.Op != OpCom8 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 x (Sub8 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 x (Add8 y (Sub8 z x)))
+ // result: (Add8 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd8)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Add8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Add8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Sub8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Sub8 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub8 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Add8 (Const8 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x))
+ // result: (Sub8 (Const8 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpSub8 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d])))
+ // cond: c < 8 && d == 8-c && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 8 && d == 8-c && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Add8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAddPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AddPtr <t> x (Const64 [c]))
+ // result: (OffPtr <t> x [c])
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOffPtr)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (AddPtr <t> x (Const32 [c]))
+ // result: (OffPtr <t> x [int64(c)])
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOffPtr)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(c))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And16 <t> (Com16 x) (Com16 y))
+ // result: (Com16 (Or16 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom16 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom16)
+ v0 := b.NewValue0(v.Pos, OpOr16, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c >= int64(16-ntz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-ntz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c])))
+ // cond: c >= int64(16-nlz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-nlz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And16 (Const16 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Com16 x) x)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 x (And16 x y))
+ // result: (And16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And16 (And16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (And16 i (And16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x))
+ // result: (And16 (Const16 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAnd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And32 <t> (Com32 x) (Com32 y))
+ // result: (Com32 (Or32 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom32 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom32)
+ v0 := b.NewValue0(v.Pos, OpOr32, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c])))
+ // cond: c >= int64(32-ntz32(m))
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(32-ntz32(m))) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c])))
+ // cond: c >= int64(32-nlz32(m))
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(32-nlz32(m))) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And32 (Const32 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Com32 x) x)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 x (And32 x y))
+ // result: (And32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And32 (And32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (And32 i (And32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x))
+ // result: (And32 (Const32 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAnd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And64 <t> (Com64 x) (Com64 y))
+ // result: (Com64 (Or64 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom64 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom64)
+ v0 := b.NewValue0(v.Pos, OpOr64, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c])))
+ // cond: c >= int64(64-ntz64(m))
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(64-ntz64(m))) {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c])))
+ // cond: c >= int64(64-nlz64(m))
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(64-nlz64(m))) {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And64 (Const64 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Com64 x) x)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 x (And64 x y))
+ // result: (And64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And64 (And64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (And64 i (And64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x))
+ // result: (And64 (Const64 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAnd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And8 <t> (Com8 x) (Com8 y))
+ // result: (Com8 (Or8 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom8 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom8)
+ v0 := b.NewValue0(v.Pos, OpOr8, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c])))
+ // cond: c >= int64(8-ntz8(m))
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ m := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(8-ntz8(m))) {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c])))
+ // cond: c >= int64(8-nlz8(m))
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ m := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(8-nlz8(m))) {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And8 (Const8 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Com8 x) x)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 x (And8 x y))
+ // result: (And8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And8 (And8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (And8 i (And8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x))
+ // result: (And8 (Const8 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAnd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAndB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AndB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: d >= c
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: d >= c
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: d >= c
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: d >= c
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: d >= c
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: d >= c
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: d >= c
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: d >= c
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c)
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c)
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c)
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c)
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c)
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c)
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c)
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c)
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpArraySelect(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ArraySelect (ArrayMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpArrayMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil (Const64F [c]))
+ // result: (Const64F [math.Ceil(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Ceil(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 (Com16 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com16 (Const16 [c]))
+ // result: (Const16 [^c])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(^c)
+ return true
+ }
+ // match: (Com16 (Add16 (Const16 [-1]) x))
+ // result: (Neg16 x)
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 || auxIntToInt16(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 (Com32 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com32 (Const32 [c]))
+ // result: (Const32 [^c])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ // match: (Com32 (Add32 (Const32 [-1]) x))
+ // result: (Neg32 x)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com64 (Com64 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com64 (Const64 [c]))
+ // result: (Const64 [^c])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ // match: (Com64 (Add64 (Const64 [-1]) x))
+ // result: (Neg64 x)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 (Com8 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com8 (Const8 [c]))
+ // result: (Const8 [^c])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(^c)
+ return true
+ }
+ // match: (Com8 (Add8 (Const8 [-1]) x))
+ // result: (Neg8 x)
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst8 || auxIntToInt8(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpConstInterface(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ConstInterface)
+ // result: (IMake (ConstNil <typ.Uintptr>) (ConstNil <typ.BytePtr>))
+ for {
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.Uintptr)
+ v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpConstSlice(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (ConstSlice)
+ // cond: config.PtrSize == 4
+ // result: (SliceMake (ConstNil <v.Type.Elem().PtrTo()>) (Const32 <typ.Int> [0]) (Const32 <typ.Int> [0]))
+ for {
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(v0, v1, v1)
+ return true
+ }
+ // match: (ConstSlice)
+ // cond: config.PtrSize == 8
+ // result: (SliceMake (ConstNil <v.Type.Elem().PtrTo()>) (Const64 <typ.Int> [0]) (Const64 <typ.Int> [0]))
+ for {
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(v0, v1, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpConstString(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ fe := b.Func.fe
+ typ := &b.Func.Config.Types
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 4 && str == ""
+ // result: (StringMake (ConstNil) (Const32 <typ.Int> [0]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 4 && str == "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 8 && str == ""
+ // result: (StringMake (ConstNil) (Const64 <typ.Int> [0]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 8 && str == "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 4 && str != ""
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(str)} (SB)) (Const32 <typ.Int> [int32(len(str))]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 4 && str != "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
+ v0.Aux = symToAux(fe.StringData(str))
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v2.AuxInt = int32ToAuxInt(int32(len(str)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 8 && str != ""
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(str)} (SB)) (Const64 <typ.Int> [int64(len(str))]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 8 && str != "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
+ v0.Aux = symToAux(fe.StringData(str))
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v2.AuxInt = int64ToAuxInt(int64(len(str)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpConvert(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Convert (Add64 (Convert ptr mem) off) mem)
+ // result: (AddPtr ptr off)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConvert {
+ continue
+ }
+ mem := v_0_0.Args[1]
+ ptr := v_0_0.Args[0]
+ off := v_0_1
+ if mem != v_1 {
+ continue
+ }
+ v.reset(OpAddPtr)
+ v.AddArg2(ptr, off)
+ return true
+ }
+ break
+ }
+ // match: (Convert (Add32 (Convert ptr mem) off) mem)
+ // result: (AddPtr ptr off)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConvert {
+ continue
+ }
+ mem := v_0_0.Args[1]
+ ptr := v_0_0.Args[0]
+ off := v_0_1
+ if mem != v_1 {
+ continue
+ }
+ v.reset(OpAddPtr)
+ v.AddArg2(ptr, off)
+ return true
+ }
+ break
+ }
+ // match: (Convert (Convert ptr mem) mem)
+ // result: ptr
+ for {
+ if v_0.Op != OpConvert {
+ break
+ }
+ mem := v_0.Args[1]
+ ptr := v_0.Args[0]
+ if mem != v_1 {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz16 (Const16 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz16(c)))
+ return true
+ }
+ // match: (Ctz16 (Const16 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz32 (Const32 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz32(c)))
+ return true
+ }
+ // match: (Ctz32 (Const32 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz64 (Const64 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz64(c))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz64(c)))
+ return true
+ }
+ // match: (Ctz64 (Const64 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz64(c))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz8 (Const8 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz8(c)))
+ return true
+ }
+ // match: (Ctz8 (Const8 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto32 (Const32F [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto64 (Const32F [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto64F (Const32F [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32to32F (Const32 [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32to64F (Const32 [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto32 (Const64F [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto32F (Const64F [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ // match: (Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x)))
+ // cond: sqrt0.Uses==1
+ // result: (Sqrt32 x)
+ for {
+ sqrt0 := v_0
+ if sqrt0.Op != OpSqrt {
+ break
+ }
+ sqrt0_0 := sqrt0.Args[0]
+ if sqrt0_0.Op != OpCvt32Fto64F {
+ break
+ }
+ x := sqrt0_0.Args[0]
+ if !(sqrt0.Uses == 1) {
+ break
+ }
+ v.reset(OpSqrt32)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto64 (Const64F [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64to32F (Const64 [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64to64F (Const64 [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvtBoolToUint8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CvtBoolToUint8 (ConstBool [false]))
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (CvtBoolToUint8 (ConstBool [true]))
+ // result: (Const8 [1])
+ for {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [c/d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div16 n (Const16 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo16(c)
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div16 <t> n (Const16 [c]))
+ // cond: c < 0 && c != -1<<15
+ // result: (Neg16 (Div16 <t> n (Const16 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c < 0 && c != -1<<15) {
+ break
+ }
+ v.reset(OpNeg16)
+ v0 := b.NewValue0(v.Pos, OpDiv16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16 <t> x (Const16 [-1<<15]))
+ // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != -1<<15 {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(15)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div16 <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))]))) (Const64 <typ.UInt64> [int64(log16(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh16x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(15)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(16 - log16(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log16(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div16 <t> x (Const16 [c]))
+ // cond: smagicOK16(c)
+ // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic16(c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(smagicOK16(c)) {
+ break
+ }
+ v.reset(OpSub16)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div16u (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [int16(uint16(c)/uint16(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) / uint16(d)))
+ return true
+ }
+ // match: (Div16u n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 8
+ // result: (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic16(c).s])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 8) {
+ break
+ }
+ v.reset(OpTrunc64to16)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m))
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2))
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2))
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(1)
+ v3.AddArg2(v4, v5)
+ v1.AddArg2(v2, v3)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2)
+ v0.AddArg2(v1, v6)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m))
+ v5.AddArg2(v6, v3)
+ v1.AddArg2(v2, v5)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
+ v0.AddArg2(v1, v7)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div32 (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [c/d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div32 n (Const32 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo32(c)
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div32 <t> n (Const32 [c]))
+ // cond: c < 0 && c != -1<<31
+ // result: (Neg32 (Div32 <t> n (Const32 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpNeg32)
+ v0 := b.NewValue0(v.Pos, OpDiv32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [-1<<31]))
+ // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != -1<<31 {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(31)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div32 <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))]))) (Const64 <typ.UInt64> [int64(log32(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(31)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(32 - log32(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log32(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 8
+ // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic32(c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 8) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(63)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m/2)]) x) (Const64 <typ.UInt64> [smagic32(c).s-1])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpHmul32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(31)
+ v4.AddArg2(x, v5)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m)]) x) x) (Const64 <typ.UInt64> [smagic32(c).s])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpAdd32, t)
+ v2 := b.NewValue0(v.Pos, OpHmul32, t)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m))
+ v2.AddArg2(v3, x)
+ v1.AddArg2(v2, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(smagic32(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(x, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Div32F (Const32F [c]) (Const32F [d]))
+ // cond: c/d == c/d
+ // result: (Const32F [c/d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c/d == c/d) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div32F x (Const32F <t> [c]))
+ // cond: reciprocalExact32(c)
+ // result: (Mul32F x (Const32F <t> [1/c]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32F {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToFloat32(v_1.AuxInt)
+ if !(reciprocalExact32(c)) {
+ break
+ }
+ v.reset(OpMul32F)
+ v0 := b.NewValue0(v.Pos, OpConst32F, t)
+ v0.AuxInt = float32ToAuxInt(1 / c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div32u (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ // match: (Div32u n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)]) x) (Const64 <typ.UInt64> [umagic32(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2))
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic32(c).s-2]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2))
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(1)
+ v2.AddArg2(x, v3)
+ v0.AddArg2(v1, v2)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic32(c).m)]) x)) (Const64 <typ.UInt64> [umagic32(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m))
+ v1.AddArg2(v2, x)
+ v0.AddArg2(x, v1)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2))
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2))
+ v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(1)
+ v3.AddArg2(v4, v5)
+ v1.AddArg2(v2, v3)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2)
+ v0.AddArg2(v1, v6)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32)
+ v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m))
+ v5.AddArg2(v6, v3)
+ v1.AddArg2(v2, v5)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
+ v0.AddArg2(v1, v7)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div64 (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [c/d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div64 n (Const64 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo64(c)
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64 n (Const64 [-1<<63]))
+ // cond: isNonNegative(n)
+ // result: (Const64 [0])
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Div64 <t> n (Const64 [c]))
+ // cond: c < 0 && c != -1<<63
+ // result: (Neg64 (Div64 <t> n (Const64 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpNeg64)
+ v0 := b.NewValue0(v.Pos, OpDiv64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [-1<<63]))
+ // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div64 <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))]))) (Const64 <typ.UInt64> [int64(log64(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(64 - log64(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log64(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [c]))
+ // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m/2)]) x) (Const64 <typ.UInt64> [smagic64(c).s-1])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpHmul64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v4.AddArg2(x, v5)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [c]))
+ // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m)]) x) x) (Const64 <typ.UInt64> [smagic64(c).s])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpAdd64, t)
+ v2 := b.NewValue0(v.Pos, OpHmul64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m))
+ v2.AddArg2(v3, x)
+ v1.AddArg2(v2, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(smagic64(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(63)
+ v5.AddArg2(x, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Div64F (Const64F [c]) (Const64F [d]))
+ // cond: c/d == c/d
+ // result: (Const64F [c/d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c/d == c/d) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div64F x (Const64F <t> [c]))
+ // cond: reciprocalExact64(c)
+ // result: (Mul64F x (Const64F <t> [1/c]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64F {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(reciprocalExact64(c)) {
+ break
+ }
+ v.reset(OpMul64F)
+ v0 := b.NewValue0(v.Pos, OpConst64F, t)
+ v0.AuxInt = float64ToAuxInt(1 / c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div64u (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ // match: (Div64u n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64u n (Const64 [-1<<63]))
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul
+ // result: (Add64 (Add64 <typ.UInt64> (Add64 <typ.UInt64> (Lsh64x64 <typ.UInt64> (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [32])) (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])))) (Mul64 <typ.UInt64> (ZeroExt32to64 <typ.UInt64> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [int64((1<<32)/c)]))) (ZeroExt32to64 (Div32u <typ.UInt32> (Add32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])) (Mul32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)])) (Const32 <typ.UInt32> [int32((1<<32)%c)]))) (Const32 <typ.UInt32> [int32(c)]))))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul) {
+ break
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(32)
+ v6.AddArg2(x, v7)
+ v5.AddArg(v6)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(int32(c))
+ v4.AddArg2(v5, v8)
+ v3.AddArg(v4)
+ v2.AddArg2(v3, v7)
+ v9 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v10 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v11.AddArg(x)
+ v10.AddArg2(v11, v8)
+ v9.AddArg(v10)
+ v1.AddArg2(v2, v9)
+ v12 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v13 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v14 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v14.AddArg2(v5, v8)
+ v13.AddArg(v14)
+ v15 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v15.AuxInt = int64ToAuxInt(int64((1 << 32) / c))
+ v12.AddArg2(v13, v15)
+ v0.AddArg2(v1, v12)
+ v16 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v17 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v18 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v19 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v19.AddArg2(v11, v8)
+ v20 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v21 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v21.AuxInt = int32ToAuxInt(int32((1 << 32) % c))
+ v20.AddArg2(v14, v21)
+ v18.AddArg2(v19, v20)
+ v17.AddArg2(v18, v8)
+ v16.AddArg(v17)
+ v.AddArg2(v0, v16)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)]) x) (Const64 <typ.UInt64> [umagic64(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2))
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic64(c).s-2]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2))
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(1)
+ v2.AddArg2(x, v3)
+ v0.AddArg2(v1, v2)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic64(c).m)]) x)) (Const64 <typ.UInt64> [umagic64(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m))
+ v1.AddArg2(v2, x)
+ v0.AddArg2(x, v1)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [c/d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div8 n (Const8 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo8(c)
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div8 <t> n (Const8 [c]))
+ // cond: c < 0 && c != -1<<7
+ // result: (Neg8 (Div8 <t> n (Const8 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c < 0 && c != -1<<7) {
+ break
+ }
+ v.reset(OpNeg8)
+ v0 := b.NewValue0(v.Pos, OpDiv8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div8 <t> x (Const8 [-1<<7 ]))
+ // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != -1<<7 {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(7)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div8 <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [int64( 8-log8(c))]))) (Const64 <typ.UInt64> [int64(log8(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh8x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(7)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(8 - log8(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log8(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div8 <t> x (Const8 [c]))
+ // cond: smagicOK8(c)
+ // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic8(c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(smagicOK8(c)) {
+ break
+ }
+ v.reset(OpSub8)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic8(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [int8(uint8(c)/uint8(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) / uint8(d)))
+ return true
+ }
+ // match: (Div8u n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div8u x (Const8 [c]))
+ // cond: umagicOK8(c)
+ // result: (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic8(c).s])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(umagicOK8(c)) {
+ break
+ }
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m))
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Eq16 (Const16 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (Mod16u x (Const16 [c])) (Const16 [0]))
+ // cond: x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod16u {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (Mod16 x (Const16 [c])) (Const16 [0]))
+ // cond: x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ mul_1_0 := mul_1.Args[0]
+ if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAvg32u {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1_1_0_0_0.Args[1]
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 16 {
+ continue
+ }
+ mul := v_1_1_0_0.Args[1]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Add16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(sdivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(sdivisible16(c).a)]) ) (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(sdivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 15 && kbar == 16 - k
+ // result: (Eq16 (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh16x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd16 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) {
+ continue
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq16 s:(Sub16 x y) (Const16 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub16 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y]))
+ // cond: oneBit16(y)
+ // result: (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Eq32 (Const32 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 <typ.UInt32> [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ if x != mul_1.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAvg32u {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ if x != v_1_1_0.Args[0] {
+ continue
+ }
+ mul := v_1_1_0.Args[1]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ mul_1_0 := mul_1.Args[0]
+ if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAvg64u {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1_1_0_0_0.Args[1]
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 32 {
+ continue
+ }
+ mul := v_1_1_0_0.Args[1]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpHmul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ v_1_1_0_0_1 := v_1_1_0_0.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 {
+ mul := v_1_1_0_0_0
+ if mul.Op != OpHmul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 || x != v_1_1_0_0_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 31 && kbar == 32 - k
+ // result: (Eq32 (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq32 s:(Sub32 x y) (Const32 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub32 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y]))
+ // cond: oneBit32(y)
+ // result: (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Eq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Eq64 (Const64 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ if x != mul_1.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAvg64u {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ if x != v_1_1_0.Args[0] {
+ continue
+ }
+ mul := v_1_1_0.Args[1]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpHmul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ v_1_1_0_0_1 := v_1_1_0_0.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 {
+ mul := v_1_1_0_0_0
+ if mul.Op != OpHmul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 || x != v_1_1_0_0_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 63 && kbar == 64 - k
+ // result: (Eq64 (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) {
+ continue
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq64 s:(Sub64 x y) (Const64 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub64 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y]))
+ // cond: oneBit64(y)
+ // result: (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Eq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Eq8 (Const8 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (Mod8u x (Const8 [c])) (Const8 [0]))
+ // cond: x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod8u {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (Mod8 x (Const8 [c])) (Const8 [0]))
+ // cond: x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(udivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(udivisible8(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to8 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Add8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(sdivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(sdivisible8(c).a)]) ) (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(sdivisible8(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
+ v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 7 && kbar == 8 - k
+ // result: (Eq8 (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh8x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd8 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) {
+ continue
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq8 s:(Sub8 x y) (Const8 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub8 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y]))
+ // cond: oneBit8(y)
+ // result: (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EqB (ConstBool [c]) (ConstBool [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool {
+ continue
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ if v_1.Op != OpConstBool {
+ continue
+ }
+ d := auxIntToBool(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqB (ConstBool [false]) x)
+ // result: (Not x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ continue
+ }
+ x := v_1
+ v.reset(OpNot)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (EqB (ConstBool [true]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqInter(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqInter x y)
+ // result: (EqPtr (ITab x) (ITab y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpEqPtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (EqPtr (Addr {x} _) (Addr {y} _))
+ // result: (ConstBool [x == y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _)))
+ // result: (ConstBool [x == y && o == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _)))
+ // result: (ConstBool [x == y && o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _))
+ // result: (ConstBool [x == y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x == y && o == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x == y && o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] p1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2))
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Convert (Addr {x} _) _) (Addr {y} _))
+ // result: (ConstBool [x==y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConvert {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr _ _) (Addr _))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr (LocalAddr _ _)) (Addr _))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr _ _) (OffPtr (Addr _)))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _)))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (AddPtr p1 o1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (Not (IsNonNil o1))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddPtr {
+ continue
+ }
+ o1 := v_0.Args[1]
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(o1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const32 [0]) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const64 [0]) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (ConstNil) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstNil {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqSlice(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqSlice x y)
+ // result: (EqPtr (SlicePtr x) (SlicePtr y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpEqPtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor (Const64F [c]))
+ // result: (Const64F [math.Floor(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Floor(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIMake(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IMake _typ (StructMake1 val))
+ // result: (IMake _typ val)
+ for {
+ _typ := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(_typ, val)
+ return true
+ }
+ // match: (IMake _typ (ArrayMake1 val))
+ // result: (IMake _typ val)
+ for {
+ _typ := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(_typ, val)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpInterLECall(v *Value) bool {
+ // match: (InterLECall [argsize] {auxCall} (Addr {fn} (SB)) ___)
+ // result: devirtLECall(v, fn.(*obj.LSym))
+ for {
+ if len(v.Args) < 1 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpAddr {
+ break
+ }
+ fn := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB {
+ break
+ }
+ v.copyOf(devirtLECall(v, fn.(*obj.LSym)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c]))
+ // cond: (1 << 8) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 || v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !((1 << 8) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to64 _) (Const64 [c]))
+ // cond: (1 << 8) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 || v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !((1 << 8) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to32 _) (Const32 [c]))
+ // cond: (1 << 16) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 || v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !((1 << 16) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to64 _) (Const64 [c]))
+ // cond: (1 << 16) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 || v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !((1 << 16) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (IsInBounds (And8 (Const8 [c]) _) (Const8 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d]))
+ // cond: 0 <= c && int16(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 <= c && int16(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d]))
+ // cond: 0 <= c && int32(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && int32(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d]))
+ // cond: 0 <= c && int32(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && int32(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [0 <= c && c < d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c < d)
+ return true
+ }
+ // match: (IsInBounds (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [0 <= c && c < d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c < d)
+ return true
+ }
+ // match: (IsInBounds (Mod32u _ y) y)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpMod32u {
+ break
+ }
+ y := v_0.Args[1]
+ if y != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Mod64u _ y) y)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpMod64u {
+ break
+ }
+ y := v_0.Args[1]
+ if y != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 32 && 1<<uint(32-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 32 && 1<<uint(32-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 32 && 1<<uint(32-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 32 && 1<<uint(32-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 64 && 1<<uint(64-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 64 && 1<<uint(64-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (IsNonNil (ConstNil))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpConstNil {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (IsNonNil (Const32 [c]))
+ // result: (ConstBool [c != 0])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != 0)
+ return true
+ }
+ // match: (IsNonNil (Const64 [c]))
+ // result: (ConstBool [c != 0])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != 0)
+ return true
+ }
+ // match: (IsNonNil (Addr _) )
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsNonNil (Convert (Addr _) _))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConvert {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsNonNil (LocalAddr _ _))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpLocalAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsSliceInBounds x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: 0 <= c && c <= d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c <= d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: 0 <= c && c <= d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && c <= d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsSliceInBounds (Const32 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (Const64 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [0 <= c && c <= d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c <= d)
+ return true
+ }
+ // match: (IsSliceInBounds (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [0 <= c && c <= d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c <= d)
+ return true
+ }
+ // match: (IsSliceInBounds (SliceLen x) (SliceCap x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpSliceLen {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpSliceCap || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq16 (Const16 [0]) (And16 _ (Const16 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Leq16 x (Const16 <t> [-1]))
+ // result: (Less16 x (Const16 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt16(v_1.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpLess16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Leq16 (Const16 <t> [1]) x)
+ // result: (Less16 (Const16 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt16(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLess16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [uint16(c) <= uint16(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint16(c) <= uint16(d))
+ return true
+ }
+ // match: (Leq16U (Const16 <t> [1]) x)
+ // result: (Neq16 (Const16 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt16(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq16U (Const16 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq32 (Const32 [0]) (And32 _ (Const32 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Leq32 x (Const32 <t> [-1]))
+ // result: (Less32 x (Const32 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt32(v_1.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpLess32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Leq32 (Const32 <t> [1]) x)
+ // result: (Less32 (Const32 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLess32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [uint32(c) <= uint32(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint32(c) <= uint32(d))
+ return true
+ }
+ // match: (Leq32U (Const32 <t> [1]) x)
+ // result: (Neq32 (Const32 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq32U (Const32 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq64 (Const64 [0]) (And64 _ (Const64 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Leq64 x (Const64 <t> [-1]))
+ // result: (Less64 x (Const64 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpLess64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Leq64 (Const64 <t> [1]) x)
+ // result: (Less64 (Const64 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLess64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [uint64(c) <= uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint64(c) <= uint64(d))
+ return true
+ }
+ // match: (Leq64U (Const64 <t> [1]) x)
+ // result: (Neq64 (Const64 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq64U (Const64 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq8 (Const8 [0]) (And8 _ (Const8 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Leq8 x (Const8 <t> [-1]))
+ // result: (Less8 x (Const8 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt8(v_1.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpLess8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Leq8 (Const8 <t> [1]) x)
+ // result: (Less8 (Const8 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLess8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [ uint8(c) <= uint8(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint8(c) <= uint8(d))
+ return true
+ }
+ // match: (Leq8U (Const8 <t> [1]) x)
+ // result: (Neq8 (Const8 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq8U (Const8 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less16 (Const16 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq16 (Const16 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less16 x (Const16 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq16 (Const16 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt16(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less16 x (Const16 <t> [1]))
+ // result: (Leq16 x (Const16 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt16(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpLeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less16 (Const16 <t> [-1]) x)
+ // result: (Leq16 (Const16 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt16(v_0.AuxInt) != -1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [uint16(c) < uint16(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint16(c) < uint16(d))
+ return true
+ }
+ // match: (Less16U x (Const16 <t> [1]))
+ // result: (Eq16 (Const16 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt16(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less16U _ (Const16 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less32 (Const32 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq32 (Const32 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less32 x (Const32 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq32 (Const32 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt32(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less32 x (Const32 <t> [1]))
+ // result: (Leq32 x (Const32 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpLeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less32 (Const32 <t> [-1]) x)
+ // result: (Leq32 (Const32 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != -1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [uint32(c) < uint32(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint32(c) < uint32(d))
+ return true
+ }
+ // match: (Less32U x (Const32 <t> [1]))
+ // result: (Eq32 (Const32 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less32U _ (Const32 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less64 (Const64 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq64 (Const64 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less64 x (Const64 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq64 (Const64 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less64 x (Const64 <t> [1]))
+ // result: (Leq64 x (Const64 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpLeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less64 (Const64 <t> [-1]) x)
+ // result: (Leq64 (Const64 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt64(v_0.AuxInt) != -1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [uint64(c) < uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint64(c) < uint64(d))
+ return true
+ }
+ // match: (Less64U x (Const64 <t> [1]))
+ // result: (Eq64 (Const64 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less64U _ (Const64 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less8 (Const8 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq8 (Const8 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less8 x (Const8 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq8 (Const8 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt8(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less8 x (Const8 <t> [1]))
+ // result: (Leq8 x (Const8 <t> [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpLeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less8 (Const8 <t> [-1]) x)
+ // result: (Leq8 (Const8 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt8(v_0.AuxInt) != -1 {
+ break
+ }
+ x := v_1
+ v.reset(OpLeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [ uint8(c) < uint8(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint8(c) < uint8(d))
+ return true
+ }
+ // match: (Less8U x (Const8 <t> [1]))
+ // result: (Eq8 (Const8 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less8U _ (Const8 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Load <t1> p1 (Store {t2} p2 x _))
+ // cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ x := v_1.Args[1]
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
+ // cond: isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ x := v_1_2.Args[1]
+ p3 := v_1_2.Args[0]
+ if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
+ // cond: isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ x := v_1_2_2.Args[1]
+ p4 := v_1_2_2.Args[0]
+ if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
+ // cond: isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ v_1_2_2_2 := v_1_2_2.Args[2]
+ if v_1_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(v_1_2_2_2.Aux)
+ x := v_1_2_2_2.Args[1]
+ p5 := v_1_2_2_2.Args[0]
+ if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
+ // result: (Const64F [math.Float64frombits(uint64(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ x := auxIntToInt64(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
+ // result: (Const32F [math.Float32frombits(uint32(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ break
+ }
+ x := auxIntToInt32(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(math.Float32frombits(uint32(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1)
+ // result: (Const64 [int64(math.Float64bits(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64F {
+ break
+ }
+ x := auxIntToFloat64(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitInt(t1)) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)
+ // result: (Const32 [int32(math.Float32bits(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32F {
+ break
+ }
+ x := auxIntToFloat32(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitInt(t1)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(math.Float32bits(x)))
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ mem := v_1.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p3 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p3)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ mem := v_1_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p4 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p4)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _)))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ mem := v_1_2_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p5 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p5)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _))))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ v_1_2_2_2 := v_1_2_2.Args[2]
+ if v_1_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(v_1_2_2_2.Aux)
+ _ = v_1_2_2_2.Args[2]
+ p5 := v_1_2_2_2.Args[0]
+ mem := v_1_2_2_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p6 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p6)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: t1.IsBoolean() && isSamePtr(p1, p2) && n >= o + 1
+ // result: (ConstBool [false])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is8BitInt(t1) && isSamePtr(p1, p2) && n >= o + 1
+ // result: (Const8 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is16BitInt(t1) && isSamePtr(p1, p2) && n >= o + 2
+ // result: (Const16 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is32BitInt(t1) && isSamePtr(p1, p2) && n >= o + 4
+ // result: (Const32 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is64BitInt(t1) && isSamePtr(p1, p2) && n >= o + 8
+ // result: (Const64 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 4
+ // result: (Const32F [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 8
+ // result: (Const64F [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t> _ _)
+ // cond: t.IsStruct() && t.NumFields() == 0 && CanSSA(t)
+ // result: (StructMake0)
+ for {
+ t := v.Type
+ if !(t.IsStruct() && t.NumFields() == 0 && CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 1 && CanSSA(t)
+ // result: (StructMake1 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 1 && CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake1)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 2 && CanSSA(t)
+ // result: (StructMake2 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 2 && CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake2)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 3 && CanSSA(t)
+ // result: (StructMake3 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 3 && CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake3)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2))
+ v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v5.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v5.AddArg(ptr)
+ v4.AddArg2(v5, mem)
+ v.AddArg3(v0, v2, v4)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 4 && CanSSA(t)
+ // result: (StructMake4 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem) (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 4 && CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake4)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2))
+ v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v5.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v5.AddArg(ptr)
+ v4.AddArg2(v5, mem)
+ v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3))
+ v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v7.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v7.AddArg(ptr)
+ v6.AddArg2(v7, mem)
+ v.AddArg4(v0, v2, v4, v6)
+ return true
+ }
+ // match: (Load <t> _ _)
+ // cond: t.IsArray() && t.NumElem() == 0
+ // result: (ArrayMake0)
+ for {
+ t := v.Type
+ if !(t.IsArray() && t.NumElem() == 0) {
+ break
+ }
+ v.reset(OpArrayMake0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsArray() && t.NumElem() == 1 && CanSSA(t)
+ // result: (ArrayMake1 (Load <t.Elem()> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsArray() && t.NumElem() == 1 && CanSSA(t)) {
+ break
+ }
+ v.reset(OpArrayMake1)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem())
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (Addr {s} sb) ) _)
+ // cond: t.IsUintptr() && isFixedSym(s, off)
+ // result: (Addr {fixedSym(b.Func, s, off)} sb)
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ break
+ }
+ s := auxToSym(v_0_0.Aux)
+ sb := v_0_0.Args[0]
+ if !(t.IsUintptr() && isFixedSym(s, off)) {
+ break
+ }
+ v.reset(OpAddr)
+ v.Aux = symToAux(fixedSym(b.Func, s, off))
+ v.AddArg(sb)
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (Convert (Addr {s} sb) _) ) _)
+ // cond: t.IsUintptr() && isFixedSym(s, off)
+ // result: (Addr {fixedSym(b.Func, s, off)} sb)
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConvert {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAddr {
+ break
+ }
+ s := auxToSym(v_0_0_0.Aux)
+ sb := v_0_0_0.Args[0]
+ if !(t.IsUintptr() && isFixedSym(s, off)) {
+ break
+ }
+ v.reset(OpAddr)
+ v.Aux = symToAux(fixedSym(b.Func, s, off))
+ v.AddArg(sb)
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _)
+ // cond: t.IsUintptr() && isFixedSym(s, off)
+ // result: (Addr {fixedSym(b.Func, s, off)} sb)
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpAddr {
+ break
+ }
+ s := auxToSym(v_0_0_0_0.Aux)
+ sb := v_0_0_0_0.Args[0]
+ if !(t.IsUintptr() && isFixedSym(s, off)) {
+ break
+ }
+ v.reset(OpAddr)
+ v.Aux = symToAux(fixedSym(b.Func, s, off))
+ v.AddArg(sb)
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _)
+ // cond: t.IsUintptr() && isFixedSym(s, off)
+ // result: (Addr {fixedSym(b.Func, s, off)} sb)
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpConvert {
+ break
+ }
+ v_0_0_0_0_0 := v_0_0_0_0.Args[0]
+ if v_0_0_0_0_0.Op != OpAddr {
+ break
+ }
+ s := auxToSym(v_0_0_0_0_0.Aux)
+ sb := v_0_0_0_0_0.Args[0]
+ if !(t.IsUintptr() && isFixedSym(s, off)) {
+ break
+ }
+ v.reset(OpAddr)
+ v.Aux = symToAux(fixedSym(b.Func, s, off))
+ v.AddArg(sb)
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (Addr {sym} _) ) _)
+ // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
+ // result: (Const32 [fixed32(config, sym, off)])
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_0_0.Aux)
+ if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (Convert (Addr {sym} _) _) ) _)
+ // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
+ // result: (Const32 [fixed32(config, sym, off)])
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConvert {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_0_0_0.Aux)
+ if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _)
+ // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
+ // result: (Const32 [fixed32(config, sym, off)])
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_0_0_0_0.Aux)
+ if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
+ return true
+ }
+ // match: (Load <t> (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _)
+ // cond: t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)
+ // result: (Const32 [fixed32(config, sym, off)])
+ for {
+ t := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpConvert {
+ break
+ }
+ v_0_0_0_0_0 := v_0_0_0_0.Args[0]
+ if v_0_0_0_0_0.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_0_0_0_0_0.Aux)
+ if !(t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(fixed32(config, sym, off))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x (Const16 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x (Const32 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh16x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh16x64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh16x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 i:(Rsh16x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 16 && i.Uses == 1
+ // result: (And16 x (Const16 <v.Type> [int16(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh16x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, v.Type)
+ v0.AuxInt = int16ToAuxInt(int16(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 i:(Rsh16Ux64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 16 && i.Uses == 1
+ // result: (And16 x (Const16 <v.Type> [int16(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, v.Type)
+ v0.AuxInt = int16ToAuxInt(int16(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh16x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x (Const8 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x (Const16 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x (Const32 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh32x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh32x64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh32x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 i:(Rsh32x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 32 && i.Uses == 1
+ // result: (And32 x (Const32 <v.Type> [int32(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh32x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 32 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 i:(Rsh32Ux64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 32 && i.Uses == 1
+ // result: (And32 x (Const32 <v.Type> [int32(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 32 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh32x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x (Const8 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x16 <t> x (Const16 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x32 <t> x (Const32 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh64x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh64x64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 64
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh64x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 i:(Rsh64x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 64 && i.Uses == 1
+ // result: (And64 x (Const64 <v.Type> [int64(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh64x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 64 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, v.Type)
+ v0.AuxInt = int64ToAuxInt(int64(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 i:(Rsh64Ux64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 64 && i.Uses == 1
+ // result: (And64 x (Const64 <v.Type> [int64(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 64 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, v.Type)
+ v0.AuxInt = int64ToAuxInt(int64(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh64x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x8 <t> x (Const8 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x (Const16 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x (Const32 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh8x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh8x64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh8x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 i:(Rsh8x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 8 && i.Uses == 1
+ // result: (And8 x (Const8 <v.Type> [int8(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh8x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 8 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 i:(Rsh8Ux64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 8 && i.Uses == 1
+ // result: (And8 x (Const8 <v.Type> [int8(-1) << c]))
+ for {
+ i := v_0
+ if i.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 8 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(-1) << c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh8x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x (Const8 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod16 (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [c % d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod16 <t> n (Const16 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo16(c)
+ // result: (And16 n (Const16 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16 <t> n (Const16 [c]))
+ // cond: c < 0 && c != -1<<15
+ // result: (Mod16 <t> n (Const16 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c < 0 && c != -1<<15) {
+ break
+ }
+ v.reset(OpMod16)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16 <t> x (Const16 [c]))
+ // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15)
+ // result: (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16 && (c > 0 || c == -1<<15)) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v1 := b.NewValue0(v.Pos, OpDiv16, t)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod16u (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [int16(uint16(c) % uint16(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) % uint16(d)))
+ return true
+ }
+ // match: (Mod16u <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (And16 n (Const16 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16u <t> x (Const16 [c]))
+ // cond: x.Op != OpConst16 && c > 0 && umagicOK16(c)
+ // result: (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16 && c > 0 && umagicOK16(c)) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v1 := b.NewValue0(v.Pos, OpDiv16u, t)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod32 (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [c % d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod32 <t> n (Const32 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo32(c)
+ // result: (And32 n (Const32 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32 <t> n (Const32 [c]))
+ // cond: c < 0 && c != -1<<31
+ // result: (Mod32 <t> n (Const32 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpMod32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32 <t> x (Const32 [c]))
+ // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31)
+ // result: (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32 && (c > 0 || c == -1<<31)) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v1 := b.NewValue0(v.Pos, OpDiv32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod32u (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [int32(uint32(c) % uint32(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ // match: (Mod32u <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (And32 n (Const32 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32u <t> x (Const32 [c]))
+ // cond: x.Op != OpConst32 && c > 0 && umagicOK32(c)
+ // result: (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32 && c > 0 && umagicOK32(c)) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v1 := b.NewValue0(v.Pos, OpDiv32u, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod64 (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [c % d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod64 <t> n (Const64 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo64(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64 n (Const64 [-1<<63]))
+ // cond: isNonNegative(n)
+ // result: n
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) {
+ break
+ }
+ v.copyOf(n)
+ return true
+ }
+ // match: (Mod64 <t> n (Const64 [c]))
+ // cond: c < 0 && c != -1<<63
+ // result: (Mod64 <t> n (Const64 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpMod64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64 <t> x (Const64 [c]))
+ // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63)
+ // result: (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64 && (c > 0 || c == -1<<63)) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v1 := b.NewValue0(v.Pos, OpDiv64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod64u (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [int64(uint64(c) % uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ // match: (Mod64u <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64u <t> n (Const64 [-1<<63]))
+ // result: (And64 n (Const64 <t> [1<<63-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(1<<63 - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64u <t> x (Const64 [c]))
+ // cond: x.Op != OpConst64 && c > 0 && umagicOK64(c)
+ // result: (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64 && c > 0 && umagicOK64(c)) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v1 := b.NewValue0(v.Pos, OpDiv64u, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod8 (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [c % d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod8 <t> n (Const8 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo8(c)
+ // result: (And8 n (Const8 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8 <t> n (Const8 [c]))
+ // cond: c < 0 && c != -1<<7
+ // result: (Mod8 <t> n (Const8 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c < 0 && c != -1<<7) {
+ break
+ }
+ v.reset(OpMod8)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8 <t> x (Const8 [c]))
+ // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7)
+ // result: (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8 && (c > 0 || c == -1<<7)) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v1 := b.NewValue0(v.Pos, OpDiv8, t)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod8u (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [int8(uint8(c) % uint8(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) % uint8(d)))
+ return true
+ }
+ // match: (Mod8u <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (And8 n (Const8 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8u <t> x (Const8 [c]))
+ // cond: x.Op != OpConst8 && c > 0 && umagicOK8( c)
+ // result: (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8 && c > 0 && umagicOK8(c)) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v1 := b.NewValue0(v.Pos, OpDiv8u, t)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _))
+ // cond: isSamePtr(src, dst2)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src := v_1
+ mem := v_2
+ if mem.Op != OpZero || auxIntToInt64(mem.AuxInt) != n || auxToType(mem.Aux) != t {
+ break
+ }
+ dst2 := mem.Args[0]
+ if !(isSamePtr(src, dst2)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _)))
+ // cond: isSamePtr(src, dst0)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpZero || auxIntToInt64(mem_0.AuxInt) != n || auxToType(mem_0.Aux) != t {
+ break
+ }
+ dst0 := mem_0.Args[0]
+ if !(isSamePtr(src, dst0)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst (Addr {sym} (SB)) mem)
+ // cond: symIsROZero(sym)
+ // result: (Zero {t} [n] dst mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ mem := v_2
+ if !(symIsROZero(sym)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst, mem)
+ return true
+ }
+ // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
+ // cond: isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2 + t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)
+ // result: (Move {t1} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ store := v_2
+ if store.Op != OpStore {
+ break
+ }
+ t2 := auxToType(store.Aux)
+ mem := store.Args[2]
+ op := store.Args[0]
+ if op.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op.AuxInt)
+ dst2 := op.Args[0]
+ if !(isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2+t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
+ // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)
+ // result: (Move {t} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ move := v_2
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)
+ // result: (Move {t} [n] dst1 src1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ vardef := v_2
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ move := vardef.Args[0]
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst1, src1, v0)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
+ // cond: zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)
+ // result: (Move {t} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ zero := v_2
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t {
+ break
+ }
+ mem := zero.Args[1]
+ dst2 := zero.Args[0]
+ if !(zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
+ // cond: zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)
+ // result: (Move {t} [n] dst1 src1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ vardef := v_2
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ zero := vardef.Args[0]
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t {
+ break
+ }
+ mem := zero.Args[1]
+ dst2 := zero.Args[0]
+ if !(zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst1, src1, v0)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ d2 := mem_2.Args[1]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ if auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, d2, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ d3 := mem_2_2.Args[1]
+ op4 := mem_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ if auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, d3, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ op4 := mem_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ o4 := auxIntToInt64(op4.AuxInt)
+ p4 := op4.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ d4 := mem_2_2_2.Args[1]
+ op5 := mem_2_2_2.Args[0]
+ if op5.Op != OpOffPtr {
+ break
+ }
+ tt5 := op5.Type
+ if auxIntToInt64(op5.AuxInt) != 0 {
+ break
+ }
+ p5 := op5.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, d4, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ d2 := mem_0_2.Args[1]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ if auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, d2, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ d3 := mem_0_2_2.Args[1]
+ op4 := mem_0_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ if auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, d3, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ op4 := mem_0_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ o4 := auxIntToInt64(op4.AuxInt)
+ p4 := op4.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ d4 := mem_0_2_2_2.Args[1]
+ op5 := mem_0_2_2_2.Args[0]
+ if op5.Op != OpOffPtr {
+ break
+ }
+ tt5 := op5.Type
+ if auxIntToInt64(op5.AuxInt) != 0 {
+ break
+ }
+ p5 := op5.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, d4, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _)))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpZero || auxIntToInt64(mem_2.AuxInt) != n {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ p3 := mem_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.Aux = typeToAux(t1)
+ v1.AddArg2(dst, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpZero || auxIntToInt64(mem_2_2.AuxInt) != n {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ p4 := mem_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v3.AuxInt = int64ToAuxInt(n)
+ v3.Aux = typeToAux(t1)
+ v3.AddArg2(dst, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ mem_2_2_0 := mem_2_2.Args[0]
+ if mem_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_2_2_0.Type
+ o4 := auxIntToInt64(mem_2_2_0.AuxInt)
+ p4 := mem_2_2_0.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2.AuxInt) != n {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ p5 := mem_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v5.AuxInt = int64ToAuxInt(n)
+ v5.Aux = typeToAux(t1)
+ v5.AddArg2(dst, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem)))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ mem_2_2_0 := mem_2_2.Args[0]
+ if mem_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_2_2_0.Type
+ o4 := auxIntToInt64(mem_2_2_0.AuxInt)
+ p4 := mem_2_2_0.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ _ = mem_2_2_2.Args[2]
+ mem_2_2_2_0 := mem_2_2_2.Args[0]
+ if mem_2_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt5 := mem_2_2_2_0.Type
+ o5 := auxIntToInt64(mem_2_2_2_0.AuxInt)
+ p5 := mem_2_2_2_0.Args[0]
+ d4 := mem_2_2_2.Args[1]
+ mem_2_2_2_2 := mem_2_2_2.Args[2]
+ if mem_2_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2_2.AuxInt) != n {
+ break
+ }
+ t6 := auxToType(mem_2_2_2_2.Aux)
+ p6 := mem_2_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(o5)
+ v6.AddArg(dst)
+ v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v7.AuxInt = int64ToAuxInt(n)
+ v7.Aux = typeToAux(t1)
+ v7.AddArg2(dst, mem)
+ v5.AddArg3(v6, d4, v7)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpZero || auxIntToInt64(mem_0_2.AuxInt) != n {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ p3 := mem_0_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.Aux = typeToAux(t1)
+ v1.AddArg2(dst, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2.AuxInt) != n {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ p4 := mem_0_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v3.AuxInt = int64ToAuxInt(n)
+ v3.Aux = typeToAux(t1)
+ v3.AddArg2(dst, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ mem_0_2_2_0 := mem_0_2_2.Args[0]
+ if mem_0_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_0_2_2_0.Type
+ o4 := auxIntToInt64(mem_0_2_2_0.AuxInt)
+ p4 := mem_0_2_2_0.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2.AuxInt) != n {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ p5 := mem_0_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v5.AuxInt = int64ToAuxInt(n)
+ v5.Aux = typeToAux(t1)
+ v5.AddArg2(dst, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _)))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem)))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ mem_0_2_2_0 := mem_0_2_2.Args[0]
+ if mem_0_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_0_2_2_0.Type
+ o4 := auxIntToInt64(mem_0_2_2_0.AuxInt)
+ p4 := mem_0_2_2_0.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ _ = mem_0_2_2_2.Args[2]
+ mem_0_2_2_2_0 := mem_0_2_2_2.Args[0]
+ if mem_0_2_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt5 := mem_0_2_2_2_0.Type
+ o5 := auxIntToInt64(mem_0_2_2_2_0.AuxInt)
+ p5 := mem_0_2_2_2_0.Args[0]
+ d4 := mem_0_2_2_2.Args[1]
+ mem_0_2_2_2_2 := mem_0_2_2_2.Args[2]
+ if mem_0_2_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2_2.AuxInt) != n {
+ break
+ }
+ t6 := auxToType(mem_0_2_2_2_2.Aux)
+ p6 := mem_0_2_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(o5)
+ v6.AddArg(dst)
+ v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v7.AuxInt = int64ToAuxInt(n)
+ v7.Aux = typeToAux(t1)
+ v7.AddArg2(dst, mem)
+ v5.AddArg3(v6, d4, v7)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // result: (Move {t1} [s] dst src midmem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ tmp1 := v_1
+ midmem := v_2
+ if midmem.Op != OpMove || auxIntToInt64(midmem.AuxInt) != s {
+ break
+ }
+ t2 := auxToType(midmem.Aux)
+ src := midmem.Args[1]
+ tmp2 := midmem.Args[0]
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst, src, midmem)
+ return true
+ }
+ // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // result: (Move {t1} [s] dst src midmem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ tmp1 := v_1
+ midmem := v_2
+ if midmem.Op != OpVarDef {
+ break
+ }
+ midmem_0 := midmem.Args[0]
+ if midmem_0.Op != OpMove || auxIntToInt64(midmem_0.AuxInt) != s {
+ break
+ }
+ t2 := auxToType(midmem_0.Aux)
+ src := midmem_0.Args[1]
+ tmp2 := midmem_0.Args[0]
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst, src, midmem)
+ return true
+ }
+ // match: (Move dst src mem)
+ // cond: isSamePtr(dst, src)
+ // result: mem
+ for {
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(isSamePtr(dst, src)) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [-1]) x)
+ // result: (Neg16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ continue
+ }
+ v.reset(OpLsh16x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 <t> n (Const16 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo16(-c)
+ // result: (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo16(-c)) {
+ continue
+ }
+ v.reset(OpNeg16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log16(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Mul16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Mul16 i (Mul16 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x))
+ // result: (Mul16 (Const16 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [-1]) x)
+ // result: (Neg32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ continue
+ }
+ v.reset(OpLsh32x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 <t> n (Const32 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo32(-c)
+ // result: (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo32(-c)) {
+ continue
+ }
+ v.reset(OpNeg32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log32(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 || v_1.Type != t {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c * d)
+ v1 := b.NewValue0(v.Pos, OpMul32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(v2, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Mul32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Mul32 i (Mul32 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x))
+ // result: (Mul32 (Const32 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mul32F (Const32F [c]) (Const32F [d]))
+ // cond: c*d == c*d
+ // result: (Const32F [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c*d == c*d) {
+ continue
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [-1]))
+ // result: (Neg32F x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpNeg32F)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [2]))
+ // result: (Add32F x x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 2 {
+ continue
+ }
+ v.reset(OpAdd32F)
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [-1]) x)
+ // result: (Neg64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpLsh64x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 <t> n (Const64 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo64(-c)
+ // result: (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo64(-c)) {
+ continue
+ }
+ v.reset(OpNeg64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log64(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 || v_1.Type != t {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v1 := b.NewValue0(v.Pos, OpMul64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(v2, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Mul64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Mul64 i (Mul64 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x))
+ // result: (Mul64 (Const64 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mul64F (Const64F [c]) (Const64F [d]))
+ // cond: c*d == c*d
+ // result: (Const64F [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c*d == c*d) {
+ continue
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [-1]))
+ // result: (Neg64F x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpNeg64F)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [2]))
+ // result: (Add64F x x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 2 {
+ continue
+ }
+ v.reset(OpAdd64F)
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [-1]) x)
+ // result: (Neg8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ continue
+ }
+ v.reset(OpLsh8x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 <t> n (Const8 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo8(-c)
+ // result: (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo8(-c)) {
+ continue
+ }
+ v.reset(OpNeg8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log8(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Mul8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Mul8 i (Mul8 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x))
+ // result: (Mul8 (Const8 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg16 (Const16 [c]))
+ // result: (Const16 [-c])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg16 (Sub16 x y))
+ // result: (Sub16 y x)
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg16 (Neg16 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg16 <t> (Com16 x))
+ // result: (Add16 (Const16 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg32 (Const32 [c]))
+ // result: (Const32 [-c])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg32 (Sub32 x y))
+ // result: (Sub32 y x)
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg32 (Neg32 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg32 <t> (Com32 x))
+ // result: (Add32 (Const32 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg32F (Const32F [c]))
+ // cond: c != 0
+ // result: (Const32F [-c])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg64 (Const64 [c]))
+ // result: (Const64 [-c])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg64 (Sub64 x y))
+ // result: (Sub64 y x)
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg64 (Neg64 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg64 <t> (Com64 x))
+ // result: (Add64 (Const64 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg64F (Const64F [c]))
+ // cond: c != 0
+ // result: (Const64F [-c])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg8 (Const8 [c]))
+ // result: (Const8 [-c])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg8 (Sub8 x y))
+ // result: (Sub8 y x)
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg8 (Neg8 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg8 <t> (Com8 x))
+ // result: (Add8 (Const8 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Neq16 (Const16 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 15 && kbar == 16 - k
+ // result: (Neq16 (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh16x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd16 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq16 s:(Sub16 x y) (Const16 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub16 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y]))
+ // cond: oneBit16(y)
+ // result: (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) {
+ continue
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Neq32 (Const32 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq32 n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 31 && kbar == 32 - k
+ // result: (Neq32 (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq32 s:(Sub32 x y) (Const32 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub32 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y]))
+ // cond: oneBit32(y)
+ // result: (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Neq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Neq64 (Const64 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq64 n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 63 && kbar == 64 - k
+ // result: (Neq64 (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq64 s:(Sub64 x y) (Const64 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub64 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y]))
+ // cond: oneBit64(y)
+ // result: (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) {
+ continue
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Neq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Neq8 (Const8 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 7 && kbar == 8 - k
+ // result: (Neq8 (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh8x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd8 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq8 s:(Sub8 x y) (Const8 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub8 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y]))
+ // cond: oneBit8(y)
+ // result: (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) {
+ continue
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NeqB (ConstBool [c]) (ConstBool [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool {
+ continue
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ if v_1.Op != OpConstBool {
+ continue
+ }
+ d := auxIntToBool(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (ConstBool [false]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (ConstBool [true]) x)
+ // result: (Not x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ continue
+ }
+ x := v_1
+ v.reset(OpNot)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (Not x) (Not y))
+ // result: (NeqB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNot {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNot {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpNeqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqInter(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqInter x y)
+ // result: (NeqPtr (ITab x) (ITab y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NeqPtr x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (NeqPtr (Addr {x} _) (Addr {y} _))
+ // result: (ConstBool [x != y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _)))
+ // result: (ConstBool [x != y || o != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _)))
+ // result: (ConstBool [x != y || o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _))
+ // result: (ConstBool [x != y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x != y || o != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x != y || o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] p1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2))
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Convert (Addr {x} _) _) (Addr {y} _))
+ // result: (ConstBool [x!=y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConvert {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr _ _) (Addr _))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr _ _) (OffPtr (Addr _)))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _)))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (AddPtr p1 o1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (IsNonNil o1)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddPtr {
+ continue
+ }
+ o1 := v_0.Args[1]
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpIsNonNil)
+ v.AddArg(o1)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const32 [0]) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const64 [0]) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (ConstNil) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstNil {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqSlice(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqSlice x y)
+ // result: (NeqPtr (SlicePtr x) (SlicePtr y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpNilCheck(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (NilCheck ptr:(GetG mem) mem)
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpGetG {
+ break
+ }
+ mem := ptr.Args[0]
+ if mem != v_1 {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ // match: (NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpSelectN || auxIntToInt64(ptr.AuxInt) != 0 {
+ break
+ }
+ call := ptr.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ // match: (NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpOffPtr {
+ break
+ }
+ ptr_0 := ptr.Args[0]
+ if ptr_0.Op != OpSelectN || auxIntToInt64(ptr_0.AuxInt) != 0 {
+ break
+ }
+ call := ptr_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ // match: (NilCheck ptr:(Addr {_} (SB)) _)
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpAddr {
+ break
+ }
+ ptr_0 := ptr.Args[0]
+ if ptr_0.Op != OpSB {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ // match: (NilCheck ptr:(Convert (Addr {_} (SB)) _) _)
+ // result: ptr
+ for {
+ ptr := v_0
+ if ptr.Op != OpConvert {
+ break
+ }
+ ptr_0 := ptr.Args[0]
+ if ptr_0.Op != OpAddr {
+ break
+ }
+ ptr_0_0 := ptr_0.Args[0]
+ if ptr_0_0.Op != OpSB {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not (ConstBool [c]))
+ // result: (ConstBool [!c])
+ for {
+ if v_0.Op != OpConstBool {
+ break
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(!c)
+ return true
+ }
+ // match: (Not (Eq64 x y))
+ // result: (Neq64 x y)
+ for {
+ if v_0.Op != OpEq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq32 x y))
+ // result: (Neq32 x y)
+ for {
+ if v_0.Op != OpEq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq16 x y))
+ // result: (Neq16 x y)
+ for {
+ if v_0.Op != OpEq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq8 x y))
+ // result: (Neq8 x y)
+ for {
+ if v_0.Op != OpEq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (EqB x y))
+ // result: (NeqB x y)
+ for {
+ if v_0.Op != OpEqB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (EqPtr x y))
+ // result: (NeqPtr x y)
+ for {
+ if v_0.Op != OpEqPtr {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeqPtr)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq64F x y))
+ // result: (Neq64F x y)
+ for {
+ if v_0.Op != OpEq64F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq64F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq32F x y))
+ // result: (Neq32F x y)
+ for {
+ if v_0.Op != OpEq32F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq32F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq64 x y))
+ // result: (Eq64 x y)
+ for {
+ if v_0.Op != OpNeq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq32 x y))
+ // result: (Eq32 x y)
+ for {
+ if v_0.Op != OpNeq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq16 x y))
+ // result: (Eq16 x y)
+ for {
+ if v_0.Op != OpNeq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq8 x y))
+ // result: (Eq8 x y)
+ for {
+ if v_0.Op != OpNeq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (NeqB x y))
+ // result: (EqB x y)
+ for {
+ if v_0.Op != OpNeqB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (NeqPtr x y))
+ // result: (EqPtr x y)
+ for {
+ if v_0.Op != OpNeqPtr {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEqPtr)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq64F x y))
+ // result: (Eq64F x y)
+ for {
+ if v_0.Op != OpNeq64F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq64F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq32F x y))
+ // result: (Eq32F x y)
+ for {
+ if v_0.Op != OpNeq32F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq32F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Less64 x y))
+ // result: (Leq64 y x)
+ for {
+ if v_0.Op != OpLess64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less32 x y))
+ // result: (Leq32 y x)
+ for {
+ if v_0.Op != OpLess32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less16 x y))
+ // result: (Leq16 y x)
+ for {
+ if v_0.Op != OpLess16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less8 x y))
+ // result: (Leq8 y x)
+ for {
+ if v_0.Op != OpLess8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less64U x y))
+ // result: (Leq64U y x)
+ for {
+ if v_0.Op != OpLess64U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq64U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less32U x y))
+ // result: (Leq32U y x)
+ for {
+ if v_0.Op != OpLess32U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq32U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less16U x y))
+ // result: (Leq16U y x)
+ for {
+ if v_0.Op != OpLess16U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq16U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less8U x y))
+ // result: (Leq8U y x)
+ for {
+ if v_0.Op != OpLess8U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq8U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq64 x y))
+ // result: (Less64 y x)
+ for {
+ if v_0.Op != OpLeq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq32 x y))
+ // result: (Less32 y x)
+ for {
+ if v_0.Op != OpLeq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq16 x y))
+ // result: (Less16 y x)
+ for {
+ if v_0.Op != OpLeq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq8 x y))
+ // result: (Less8 y x)
+ for {
+ if v_0.Op != OpLeq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq64U x y))
+ // result: (Less64U y x)
+ for {
+ if v_0.Op != OpLeq64U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess64U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq32U x y))
+ // result: (Less32U y x)
+ for {
+ if v_0.Op != OpLeq32U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess32U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq16U x y))
+ // result: (Less16U y x)
+ for {
+ if v_0.Op != OpLeq16U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess16U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq8U x y))
+ // result: (Less8U y x)
+ for {
+ if v_0.Op != OpLeq8U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess8U)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr (OffPtr p [y]) [x])
+ // result: (OffPtr p [x+y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ p := v_0.Args[0]
+ v.reset(OpOffPtr)
+ v.AuxInt = int64ToAuxInt(x + y)
+ v.AddArg(p)
+ return true
+ }
+ // match: (OffPtr p [0])
+ // cond: v.Type.Compare(p.Type) == types.CMPeq
+ // result: p
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ p := v_0
+ if !(v.Type.Compare(p.Type) == types.CMPeq) {
+ break
+ }
+ v.copyOf(p)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Or16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or16 <t> (Com16 x) (Com16 y))
+ // result: (Com16 (And16 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom16 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Or16 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or16 (Const16 [-1]) _)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or16 x (Or16 x y))
+ // result: (Or16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or16 (Const16 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c2 := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt16(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Or16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Or16 i (Or16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpOr16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x))
+ // result: (Or16 (Const16 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpOr16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d])))
+ // cond: c < 16 && d == 16-c && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 16 && d == 16-c && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Or32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or32 <t> (Com32 x) (Com32 y))
+ // result: (Com32 (And32 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom32 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or32 (Const32 [-1]) _)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or32 (Com32 x) x)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x (Or32 x y))
+ // result: (Or32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or32 (Const32 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 {
+ continue
+ }
+ c2 := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt32(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Or32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Or32 i (Or32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpOr32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x))
+ // result: (Or32 (Const32 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpOr32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d])))
+ // cond: c < 32 && d == 32-c && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 32 && d == 32-c && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Or64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or64 <t> (Com64 x) (Com64 y))
+ // result: (Com64 (And64 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom64 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Or64 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or64 (Const64 [-1]) _)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or64 (Com64 x) x)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or64 x (Or64 x y))
+ // result: (Or64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or64 (Const64 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 {
+ continue
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt64(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Or64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Or64 i (Or64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpOr64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x))
+ // result: (Or64 (Const64 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpOr64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d])))
+ // cond: c < 64 && d == 64-c && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 64 && d == 64-c && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Or8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or8 <t> (Com8 x) (Com8 y))
+ // result: (Com8 (And8 <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom8 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpCom8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or8 (Const8 [-1]) _)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or8 (Com8 x) x)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x (Or8 x y))
+ // result: (Or8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or8 (Const8 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c2 := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt8(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Or8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Or8 i (Or8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpOr8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x))
+ // result: (Or8 (Const8 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpOr8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d])))
+ // cond: c < 8 && d == 8-c && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 8 && d == 8-c && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Or8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOrB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpPhi(v *Value) bool {
+ b := v.Block
+ // match: (Phi (Const8 [c]) (Const8 [c]))
+ // result: (Const8 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const16 [c]) (Const16 [c]))
+ // result: (Const16 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const32 [c]) (Const32 [c]))
+ // result: (Const32 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const64 [c]) (Const64 [c]))
+ // result: (Const64 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (Phi <t> nx:(Not x) ny:(Not y))
+ // cond: nx.Uses == 1 && ny.Uses == 1
+ // result: (Not (Phi <t> x y))
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ t := v.Type
+ _ = v.Args[1]
+ nx := v.Args[0]
+ if nx.Op != OpNot {
+ break
+ }
+ x := nx.Args[0]
+ ny := v.Args[1]
+ if ny.Op != OpNot {
+ break
+ }
+ y := ny.Args[0]
+ if !(nx.Uses == 1 && ny.Uses == 1) {
+ break
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpPhi, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpPtrIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 4 && is32Bit(t.Elem().Size())
+ // result: (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size()))
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 8
+ // result: (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul64, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(t.Elem().Size())
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft16 x (Const16 [c]))
+ // cond: c%16 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c%16 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft16 x (And64 y (Const64 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (And32 y (Const32 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (And16 y (Const16 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (And8 y (Const8 [c])))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&15 == 15
+ // result: (RotateLeft16 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&15 == 15) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add64 y (Const64 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add32 y (Const32 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add16 y (Const16 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Add8 y (Const8 [c])))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&15 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft16 x (Sub64 (Const64 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Sub32 (Const32 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Sub16 (Const16 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Sub8 (Const8 [c]) y))
+ // cond: c&15 == 0
+ // result: (RotateLeft16 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&15 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft16 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 (RotateLeft16 x c) d)
+ // cond: c.Type.Size() == 8 && d.Type.Size() == 8
+ // result: (RotateLeft16 x (Add64 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft16 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 8 && d.Type.Size() == 8) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpAdd64, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 (RotateLeft16 x c) d)
+ // cond: c.Type.Size() == 4 && d.Type.Size() == 4
+ // result: (RotateLeft16 x (Add32 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft16 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 4 && d.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpAdd32, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 (RotateLeft16 x c) d)
+ // cond: c.Type.Size() == 2 && d.Type.Size() == 2
+ // result: (RotateLeft16 x (Add16 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft16 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 2 && d.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft16 (RotateLeft16 x c) d)
+ // cond: c.Type.Size() == 1 && d.Type.Size() == 1
+ // result: (RotateLeft16 x (Add8 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft16 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 1 && d.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpRotateLeft16)
+ v0 := b.NewValue0(v.Pos, OpAdd8, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft32 x (Const32 [c]))
+ // cond: c%32 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%32 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft32 x (And64 y (Const64 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (And32 y (Const32 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (And16 y (Const16 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (And8 y (Const8 [c])))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&31 == 31
+ // result: (RotateLeft32 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&31 == 31) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add64 y (Const64 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add32 y (Const32 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add16 y (Const16 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Add8 y (Const8 [c])))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&31 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft32 x (Sub64 (Const64 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Sub32 (Const32 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Sub16 (Const16 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Sub8 (Const8 [c]) y))
+ // cond: c&31 == 0
+ // result: (RotateLeft32 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft32 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 (RotateLeft32 x c) d)
+ // cond: c.Type.Size() == 8 && d.Type.Size() == 8
+ // result: (RotateLeft32 x (Add64 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft32 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 8 && d.Type.Size() == 8) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpAdd64, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 (RotateLeft32 x c) d)
+ // cond: c.Type.Size() == 4 && d.Type.Size() == 4
+ // result: (RotateLeft32 x (Add32 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft32 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 4 && d.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 (RotateLeft32 x c) d)
+ // cond: c.Type.Size() == 2 && d.Type.Size() == 2
+ // result: (RotateLeft32 x (Add16 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft32 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 2 && d.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpAdd16, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft32 (RotateLeft32 x c) d)
+ // cond: c.Type.Size() == 1 && d.Type.Size() == 1
+ // result: (RotateLeft32 x (Add8 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft32 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 1 && d.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpRotateLeft32)
+ v0 := b.NewValue0(v.Pos, OpAdd8, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft64 x (Const64 [c]))
+ // cond: c%64 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%64 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft64 x (And64 y (Const64 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (And32 y (Const32 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (And16 y (Const16 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (And8 y (Const8 [c])))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&63 == 63
+ // result: (RotateLeft64 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&63 == 63) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add64 y (Const64 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add32 y (Const32 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add16 y (Const16 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Add8 y (Const8 [c])))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&63 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft64 x (Sub64 (Const64 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Sub32 (Const32 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Sub16 (Const16 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Sub8 (Const8 [c]) y))
+ // cond: c&63 == 0
+ // result: (RotateLeft64 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft64 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 (RotateLeft64 x c) d)
+ // cond: c.Type.Size() == 8 && d.Type.Size() == 8
+ // result: (RotateLeft64 x (Add64 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft64 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 8 && d.Type.Size() == 8) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 (RotateLeft64 x c) d)
+ // cond: c.Type.Size() == 4 && d.Type.Size() == 4
+ // result: (RotateLeft64 x (Add32 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft64 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 4 && d.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpAdd32, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 (RotateLeft64 x c) d)
+ // cond: c.Type.Size() == 2 && d.Type.Size() == 2
+ // result: (RotateLeft64 x (Add16 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft64 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 2 && d.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpAdd16, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft64 (RotateLeft64 x c) d)
+ // cond: c.Type.Size() == 1 && d.Type.Size() == 1
+ // result: (RotateLeft64 x (Add8 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft64 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 1 && d.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpRotateLeft64)
+ v0 := b.NewValue0(v.Pos, OpAdd8, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (RotateLeft8 x (Const8 [c]))
+ // cond: c%8 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c%8 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (RotateLeft8 x (And64 y (Const64 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (And32 y (Const32 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (And16 y (Const16 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (And8 y (Const8 [c])))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg64 (And64 y (Const64 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg32 (And32 y (Const32 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg16 (And16 y (Const16 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Neg8 (And8 y (Const8 [c]))))
+ // cond: c&7 == 7
+ // result: (RotateLeft8 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpNeg8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAnd8 {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0_1.AuxInt)
+ if !(c&7 == 7) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add64 y (Const64 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add32 y (Const32 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add16 y (Const16 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Add8 y (Const8 [c])))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c&7 == 0) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (RotateLeft8 x (Sub64 (Const64 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg64 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 x (Sub32 (Const32 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg32 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 x (Sub16 (Const16 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg16 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 x (Sub8 (Const8 [c]) y))
+ // cond: c&7 == 0
+ // result: (RotateLeft8 x (Neg8 <y.Type> y))
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if !(c&7 == 0) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 x (Const64 <t> [c]))
+ // cond: config.PtrSize == 4
+ // result: (RotateLeft8 x (Const32 <t> [int32(c)]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 (RotateLeft8 x c) d)
+ // cond: c.Type.Size() == 8 && d.Type.Size() == 8
+ // result: (RotateLeft8 x (Add64 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft8 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 8 && d.Type.Size() == 8) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpAdd64, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 (RotateLeft8 x c) d)
+ // cond: c.Type.Size() == 4 && d.Type.Size() == 4
+ // result: (RotateLeft8 x (Add32 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft8 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 4 && d.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpAdd32, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 (RotateLeft8 x c) d)
+ // cond: c.Type.Size() == 2 && d.Type.Size() == 2
+ // result: (RotateLeft8 x (Add16 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft8 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 2 && d.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpAdd16, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (RotateLeft8 (RotateLeft8 x c) d)
+ // cond: c.Type.Size() == 1 && d.Type.Size() == 1
+ // result: (RotateLeft8 x (Add8 <c.Type> c d))
+ for {
+ if v_0.Op != OpRotateLeft8 {
+ break
+ }
+ c := v_0.Args[1]
+ x := v_0.Args[0]
+ d := v_1
+ if !(c.Type.Size() == 1 && d.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpRotateLeft8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, c.Type)
+ v0.AddArg2(c, d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRound32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round32F x:(Const32F))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpConst32F {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRound64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round64F x:(Const64F))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpConst64F {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven (Const64F [c]))
+ // result: (Const64F [math.RoundToEven(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.RoundToEven(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x (Const16 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x (Const32 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [int16(uint16(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh16Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh16Ux64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh16Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15]))
+ // result: (Rsh16Ux64 x (Const64 <t> [15]))
+ for {
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 15 {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(15)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 i:(Lsh16x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 16 && i.Uses == 1
+ // result: (And16 x (Const16 <v.Type> [int16(^uint16(0)>>c)]))
+ for {
+ i := v_0
+ if i.Op != OpLsh16x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, v.Type)
+ v0.AuxInt = int16ToAuxInt(int16(^uint16(0) >> c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh16Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
+ // result: (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v.reset(OpZeroExt8to16)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x (Const8 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x (Const16 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x (Const32 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh16x64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh16x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
+ // result: (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v.reset(OpSignExt8to16)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x (Const8 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x (Const16 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x (Const32 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [int32(uint32(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh32Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh32Ux64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh32Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31]))
+ // result: (Rsh32Ux64 x (Const64 <t> [31]))
+ for {
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(31)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 i:(Lsh32x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 32 && i.Uses == 1
+ // result: (And32 x (Const32 <v.Type> [int32(^uint32(0)>>c)]))
+ for {
+ i := v_0
+ if i.Op != OpLsh32x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 32 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(^uint32(0) >> c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh32Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
+ // result: (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ v.reset(OpZeroExt8to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
+ // result: (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v.reset(OpZeroExt16to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.UInt16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x (Const8 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x (Const16 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x (Const32 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh32x64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh32x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
+ // result: (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ v.reset(OpSignExt8to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
+ // result: (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v.reset(OpSignExt16to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.Int16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x (Const8 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux16 <t> x (Const16 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux32 <t> x (Const32 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [int64(uint64(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh64Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh64Ux64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 64
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh64Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63]))
+ // result: (Rsh64Ux64 x (Const64 <t> [63]))
+ for {
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 i:(Lsh64x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 64 && i.Uses == 1
+ // result: (And64 x (Const64 <v.Type> [int64(^uint64(0)>>c)]))
+ for {
+ i := v_0
+ if i.Op != OpLsh64x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 64 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, v.Type)
+ v0.AuxInt = int64ToAuxInt(int64(^uint64(0) >> c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh64Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
+ // result: (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ v.reset(OpZeroExt8to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
+ // result: (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 {
+ break
+ }
+ v.reset(OpZeroExt16to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.UInt16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
+ // result: (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux8 <t> x (Const8 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x16 <t> x (Const16 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 <t> x (Const32 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh64x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh64x64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh64x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
+ // result: (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ v.reset(OpSignExt8to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
+ // result: (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 {
+ break
+ }
+ v.reset(OpSignExt16to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.Int16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
+ // result: (SignExt32to64 (Trunc64to32 <typ.Int32> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x8 <t> x (Const8 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x (Const16 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x (Const32 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [int8(uint8(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh8Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh8Ux64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh8Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] ))
+ // result: (Rsh8Ux64 x (Const64 <t> [7] ))
+ for {
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 7 {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(7)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 i:(Lsh8x64 x (Const64 [c])) (Const64 [c]))
+ // cond: c >= 0 && c < 8 && i.Uses == 1
+ // result: (And8 x (Const8 <v.Type> [int8 (^uint8 (0)>>c)]))
+ for {
+ i := v_0
+ if i.Op != OpLsh8x64 {
+ break
+ }
+ _ = i.Args[1]
+ x := i.Args[0]
+ i_1 := i.Args[1]
+ if i_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(i_1.AuxInt)
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 8 && i.Uses == 1) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(^uint8(0) >> c))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh8Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x (Const8 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x (Const16 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x (Const32 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh8x64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh8x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x (Const8 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 (Div128u (Const64 [0]) lo y))
+ // result: (Div64u lo y)
+ for {
+ if v_0.Op != OpDiv128u {
+ break
+ }
+ y := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpDiv64u)
+ v.AddArg2(lo, y)
+ return true
+ }
+ // match: (Select0 (Mul32uover (Const32 [1]) x))
+ // result: x
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (Mul64uover (Const64 [1]) x))
+ // result: x
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (Mul64uover (Const64 [0]) x))
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (Mul32uover (Const32 [0]) x))
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select1 (Div128u (Const64 [0]) lo y))
+ // result: (Mod64u lo y)
+ for {
+ if v_0.Op != OpDiv128u {
+ break
+ }
+ y := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpMod64u)
+ v.AddArg2(lo, y)
+ return true
+ }
+ // match: (Select1 (Mul32uover (Const32 [1]) x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (Mul64uover (Const64 [1]) x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (Mul64uover (Const64 [0]) x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (Mul32uover (Const32 [0]) x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] (MakeResult x ___))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (SelectN [1] (MakeResult x y ___))
+ // result: y
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 {
+ break
+ }
+ y := v_0.Args[1]
+ v.copyOf(y)
+ return true
+ }
+ // match: (SelectN [2] (MakeResult x y z ___))
+ // result: z
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpMakeResult || len(v_0.Args) < 3 {
+ break
+ }
+ z := v_0.Args[2]
+ v.copyOf(z)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} sptr (Const64 [c]) mem))
+ // cond: isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call)
+ // result: (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 3 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[2]
+ sptr := call.Args[0]
+ call_1 := call.Args[1]
+ if call_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(call_1.AuxInt)
+ if !(isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg2(sptr, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} sptr (Const32 [c]) mem))
+ // cond: isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call)
+ // result: (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 3 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[2]
+ sptr := call.Args[0]
+ call_1 := call.Args[1]
+ if call_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(call_1.AuxInt)
+ if !(isInlinableMemclr(config, int64(c)) && isSameCall(sym, "runtime.memclrNoHeapPointers") && call.Uses == 1 && clobber(call)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg2(sptr, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpStore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpStore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpStore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpStore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpStore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpStore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} dst src (Const64 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} dst src (Const32 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const32 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} a x))
+ // cond: needRaceCleanup(sym, call) && clobber(call)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ x := call.Args[1]
+ if !(needRaceCleanup(sym, call) && clobber(call)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} x))
+ // cond: needRaceCleanup(sym, call) && clobber(call)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ x := call.Args[0]
+ if !(needRaceCleanup(sym, call) && clobber(call)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SelectN [1] (StaticCall {sym} _ newLen:(Const64) _ _ _ _))
+ // cond: v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")
+ // result: newLen
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStaticCall || len(v_0.Args) != 6 {
+ break
+ }
+ sym := auxToCall(v_0.Aux)
+ _ = v_0.Args[1]
+ newLen := v_0.Args[1]
+ if newLen.Op != OpConst64 || !(v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")) {
+ break
+ }
+ v.copyOf(newLen)
+ return true
+ }
+ // match: (SelectN [1] (StaticCall {sym} _ newLen:(Const32) _ _ _ _))
+ // cond: v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")
+ // result: newLen
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStaticCall || len(v_0.Args) != 6 {
+ break
+ }
+ sym := auxToCall(v_0.Aux)
+ _ = v_0.Args[1]
+ newLen := v_0.Args[1]
+ if newLen.Op != OpConst32 || !(v.Type.IsInteger() && isSameCall(sym, "runtime.growslice")) {
+ break
+ }
+ v.copyOf(newLen)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt16to32 (Const16 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s]))))
+ // cond: s >= 16
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 16) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt16to64 (Const16 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 48
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 48) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt32to64 (Const32 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 32
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to32 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 32) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to16 (Const8 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s]))))
+ // cond: s >= 8
+ // result: x
+ for {
+ if v_0.Op != OpTrunc16to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh16x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to32 (Const8 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s]))))
+ // cond: s >= 24
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 24) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to64 (Const8 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 56
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 56) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSliceCap(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceCap (SliceMake _ _ (Const64 <t> [c])))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpConst64 {
+ break
+ }
+ t := v_0_2.Type
+ c := auxIntToInt64(v_0_2.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (Const32 <t> [c])))
+ // result: (Const32 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpConst32 {
+ break
+ }
+ t := v_0_2.Type
+ c := auxIntToInt32(v_0_2.AuxInt)
+ v.reset(OpConst32)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (SliceCap x)))
+ // result: (SliceCap x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpSliceCap {
+ break
+ }
+ x := v_0_2.Args[0]
+ v.reset(OpSliceCap)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (SliceLen x)))
+ // result: (SliceLen x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpSliceLen {
+ break
+ }
+ x := v_0_2.Args[0]
+ v.reset(OpSliceLen)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSliceLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceLen (SliceMake _ (Const64 <t> [c]) _))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (SliceLen (SliceMake _ (Const32 <t> [c]) _))
+ // result: (Const32 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst32 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpConst32)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (SliceLen (SliceMake _ (SliceLen x) _))
+ // result: (SliceLen x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpSliceLen {
+ break
+ }
+ x := v_0_1.Args[0]
+ v.reset(OpSliceLen)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const64) _ _ _ _)))
+ // cond: isSameCall(sym, "runtime.growslice")
+ // result: newLen
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpStaticLECall || len(v_0_0.Args) != 6 {
+ break
+ }
+ sym := auxToCall(v_0_0.Aux)
+ _ = v_0_0.Args[1]
+ newLen := v_0_0.Args[1]
+ if newLen.Op != OpConst64 || !(isSameCall(sym, "runtime.growslice")) {
+ break
+ }
+ v.copyOf(newLen)
+ return true
+ }
+ // match: (SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const32) _ _ _ _)))
+ // cond: isSameCall(sym, "runtime.growslice")
+ // result: newLen
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpStaticLECall || len(v_0_0.Args) != 6 {
+ break
+ }
+ sym := auxToCall(v_0_0.Aux)
+ _ = v_0_0.Args[1]
+ newLen := v_0_0.Args[1]
+ if newLen.Op != OpConst32 || !(isSameCall(sym, "runtime.growslice")) {
+ break
+ }
+ v.copyOf(newLen)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSlicePtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtr (SliceMake (SlicePtr x) _ _))
+ // result: (SlicePtr x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSlicePtr {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpSlicePtr)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Slicemask (Const32 [x]))
+ // cond: x > 0
+ // result: (Const32 [-1])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (Slicemask (Const32 [0]))
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Slicemask (Const64 [x]))
+ // cond: x > 0
+ // result: (Const64 [-1])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (Slicemask (Const64 [0]))
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSqrt(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Sqrt (Const64F [c]))
+ // cond: !math.IsNaN(math.Sqrt(c))
+ // result: (Const64F [math.Sqrt(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if !(!math.IsNaN(math.Sqrt(c))) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Sqrt(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStaticCall(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (StaticCall {callAux} p q _ mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)
+ // result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ p := v.Args[0]
+ q := v.Args[1]
+ if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+ v0.AuxInt = boolToAuxInt(true)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon)
+ // result: (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8)
+ v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [1]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon)
+ // result: (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB {
+ break
+ }
+ sptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8)
+ v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
+ // result: (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 2 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.Int16)
+ v2.AuxInt = int16ToAuxInt(int16(read16(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [2]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
+ // result: (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB {
+ break
+ }
+ sptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 2 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.Int16)
+ v2.AuxInt = int16ToAuxInt(int16(read16(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
+ // result: (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 4 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(read32(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [4]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
+ // result: (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB {
+ break
+ }
+ sptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 4 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(read32(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
+ // result: (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 8 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int64)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(int64(read64(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [8]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
+ // result: (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB {
+ break
+ }
+ sptr := v.Args[1]
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 8 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int64)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(int64(read64(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} _ _ (Const64 [0]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal")
+ // result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 0 || !(isSameCall(callAux, "runtime.memequal")) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+ v0.AuxInt = boolToAuxInt(true)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} p q _ mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)
+ // result: (MakeResult (ConstBool <typ.Bool> [true]) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ p := v.Args[0]
+ q := v.Args[1]
+ if !(isSameCall(callAux, "runtime.memequal") && isSamePtr(p, q)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpConstBool, typ.Bool)
+ v0.AuxInt = boolToAuxInt(true)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} _ (Const64 [0]) (Const64 [0]) mem)
+ // cond: isSameCall(callAux, "runtime.makeslice")
+ // result: (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 0 || !(isSameCall(callAux, "runtime.makeslice")) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpAddr, v.Type.FieldType(0))
+ v0.Aux = symToAux(ir.Syms.Zerobase)
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} _ (Const32 [0]) (Const32 [0]) mem)
+ // cond: isSameCall(callAux, "runtime.makeslice")
+ // result: (MakeResult (Addr <v.Type.FieldType(0)> {ir.Syms.Zerobase} (SB)) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst32 || auxIntToInt32(v_2.AuxInt) != 0 || !(isSameCall(callAux, "runtime.makeslice")) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpAddr, v.Type.FieldType(0))
+ v0.Aux = symToAux(ir.Syms.Zerobase)
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Store {t1} p1 (Load <t2> p2 mem) mem)
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ mem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == t1.Size()) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ if oldmem != mem.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p4 := mem_2.Args[0]
+ if oldmem != mem_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p4 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ p5 := mem_2_2.Args[0]
+ if oldmem != mem_2_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
+ // cond: isConstZero(x) && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
+ // result: mem
+ for {
+ t := auxToType(v.Aux)
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p2 := mem.Args[0]
+ if !(isConstZero(x) && o >= 0 && t.Size()+o <= n && isSamePtr(p1, p2)) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2.AuxInt)
+ p3 := mem_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p3 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2_2.AuxInt)
+ p4 := mem_2_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p3 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ p4 := mem_2_2.Args[0]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2_2_2.AuxInt)
+ p5 := mem_2_2_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store _ (StructMake0) mem)
+ // result: mem
+ for {
+ if v_1.Op != OpStructMake0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store dst (StructMake1 <t> f0) mem)
+ // result: (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ t := v_1.Type
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(0))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(dst)
+ v.AddArg3(v0, f0, mem)
+ return true
+ }
+ // match: (Store dst (StructMake2 <t> f0 f1) mem)
+ // result: (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake2 {
+ break
+ }
+ t := v_1.Type
+ f1 := v_1.Args[1]
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(1))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(0))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, f0, mem)
+ v.AddArg3(v0, f1, v1)
+ return true
+ }
+ // match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
+ // result: (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake3 {
+ break
+ }
+ t := v_1.Type
+ f2 := v_1.Args[2]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(2))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(1))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(0))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, f0, mem)
+ v1.AddArg3(v2, f1, v3)
+ v.AddArg3(v0, f2, v1)
+ return true
+ }
+ // match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
+ // result: (Store {t.FieldType(3)} (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake4 {
+ break
+ }
+ t := v_1.Type
+ f3 := v_1.Args[3]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ f2 := v_1.Args[2]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(3))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(2))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(1))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v4.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t.FieldType(0))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, f0, mem)
+ v3.AddArg3(v4, f1, v5)
+ v1.AddArg3(v2, f2, v3)
+ v.AddArg3(v0, f3, v1)
+ return true
+ }
+ // match: (Store {t} dst (Load src mem) mem)
+ // cond: !CanSSA(t)
+ // result: (Move {t} [t.Size()] dst src mem)
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ mem := v_1.Args[1]
+ src := v_1.Args[0]
+ if mem != v_2 || !(!CanSSA(t)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(t.Size())
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Store {t} dst (Load src mem) (VarDef {x} mem))
+ // cond: !CanSSA(t)
+ // result: (Move {t} [t.Size()] dst src (VarDef {x} mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ mem := v_1.Args[1]
+ src := v_1.Args[0]
+ if v_2.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(v_2.Aux)
+ if mem != v_2.Args[0] || !(!CanSSA(t)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(t.Size())
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Store _ (ArrayMake0) mem)
+ // result: mem
+ for {
+ if v_1.Op != OpArrayMake0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store dst (ArrayMake1 e) mem)
+ // result: (Store {e.Type} dst e mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ e := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(e.Type)
+ v.AddArg3(dst, e, mem)
+ return true
+ }
+ // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem)))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 {
+ break
+ }
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m3.AuxInt)
+ mem := m3.Args[2]
+ p3 := m3.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v0.AddArg3(op2, d2, mem)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m4.AuxInt)
+ mem := m4.Args[2]
+ p4 := m4.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v1.AddArg3(op3, d3, mem)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem)))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpStore {
+ break
+ }
+ t4 := auxToType(m4.Aux)
+ _ = m4.Args[2]
+ op4 := m4.Args[0]
+ if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ d4 := m4.Args[1]
+ m5 := m4.Args[2]
+ if m5.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m5.AuxInt)
+ mem := m5.Args[2]
+ p5 := m5.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v2.Aux = typeToAux(t4)
+ v2.AddArg3(op4, d4, mem)
+ v1.AddArg3(op3, d3, v2)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem)))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 {
+ break
+ }
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m3.AuxInt)
+ mem := m3.Args[1]
+ p3 := m3.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v0.AddArg3(op2, d2, mem)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m4.AuxInt)
+ mem := m4.Args[1]
+ p4 := m4.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v1.AddArg3(op3, d3, mem)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem)))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpStore {
+ break
+ }
+ t4 := auxToType(m4.Aux)
+ _ = m4.Args[2]
+ op4 := m4.Args[0]
+ if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ d4 := m4.Args[1]
+ m5 := m4.Args[2]
+ if m5.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m5.AuxInt)
+ mem := m5.Args[1]
+ p5 := m5.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v2.Aux = typeToAux(t4)
+ v2.AddArg3(op4, d4, mem)
+ v1.AddArg3(op3, d3, v2)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStringLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringLen (StringMake _ (Const64 <t> [c])))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStringPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringPtr (StringMake (Addr <t> {s} base) _))
+ // result: (Addr <t> {s} base)
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ break
+ }
+ t := v_0_0.Type
+ s := auxToSym(v_0_0.Aux)
+ base := v_0_0.Args[0]
+ v.reset(OpAddr)
+ v.Type = t
+ v.Aux = symToAux(s)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStructSelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (StructSelect (StructMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpStructMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake2 x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake2 _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake3 x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake3 _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake3 _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake4 x _ _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake4 _ x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake4 _ _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [3] (StructMake4 _ _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[3]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [i] x:(Load <t> ptr mem))
+ // cond: !CanSSA(t)
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(!CanSSA(t)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i)))
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (StructSelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c-d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub16 x (Const16 <t> [c]))
+ // cond: x.Op != OpConst16
+ // result: (Add16 (Const16 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub16 <t> (Mul16 x y) (Mul16 x z))
+ // result: (Mul16 x (Sub16 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Sub16 (Neg16 x) (Com16 x))
+ // result: (Const16 [1])
+ for {
+ if v_0.Op != OpNeg16 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom16 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(1)
+ return true
+ }
+ // match: (Sub16 (Com16 x) (Neg16 x))
+ // result: (Const16 [-1])
+ for {
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg16 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ // match: (Sub16 (Add16 t x) (Add16 t y))
+ // result: (Sub16 x y)
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub16 (Add16 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Add16 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Sub16 x y) x)
+ // result: (Neg16 y)
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg16)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub16 x (Add16 x y))
+ // result: (Neg16 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg16)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 x (Sub16 i:(Const16 <t>) z))
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 (Add16 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst16 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub16 x (Add16 z i:(Const16 <t>)))
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 (Sub16 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Sub16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 i (Add16 <t> z x))
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst16 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub16 (Add16 z i:(Const16 <t>)) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Sub16 <t> z x))
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x))
+ // result: (Add16 (Const16 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpSub16 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Sub16 (Const16 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c-d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub32 x (Const32 <t> [c]))
+ // cond: x.Op != OpConst32
+ // result: (Add32 (Const32 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub32 <t> (Mul32 x y) (Mul32 x z))
+ // result: (Mul32 x (Sub32 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub32 x x)
+ // result: (Const32 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Sub32 (Neg32 x) (Com32 x))
+ // result: (Const32 [1])
+ for {
+ if v_0.Op != OpNeg32 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom32 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (Sub32 (Com32 x) (Neg32 x))
+ // result: (Const32 [-1])
+ for {
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg32 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (Sub32 (Add32 t x) (Add32 t y))
+ // result: (Sub32 x y)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub32 (Add32 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Add32 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Sub32 x y) x)
+ // result: (Neg32 y)
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg32)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub32 x (Add32 x y))
+ // result: (Neg32 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg32)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 x (Sub32 i:(Const32 <t>) z))
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 (Add32 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst32 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub32 x (Add32 z i:(Const32 <t>)))
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 (Sub32 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Sub32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 i (Add32 <t> z x))
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst32 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub32 (Add32 z i:(Const32 <t>)) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Sub32 <t> z x))
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpSub32 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Sub32 (Const32 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Sub32F (Const32F [c]) (Const32F [d]))
+ // cond: c-d == c-d
+ // result: (Const32F [c-d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c-d == c-d) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c - d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c-d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub64 x (Const64 <t> [c]))
+ // cond: x.Op != OpConst64
+ // result: (Add64 (Const64 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Sub64 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub64 x x)
+ // result: (Const64 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Sub64 (Neg64 x) (Com64 x))
+ // result: (Const64 [1])
+ for {
+ if v_0.Op != OpNeg64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom64 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (Sub64 (Com64 x) (Neg64 x))
+ // result: (Const64 [-1])
+ for {
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg64 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (Sub64 (Add64 t x) (Add64 t y))
+ // result: (Sub64 x y)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub64 (Add64 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Add64 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Sub64 x y) x)
+ // result: (Neg64 y)
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg64)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub64 x (Add64 x y))
+ // result: (Neg64 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg64)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 x (Sub64 i:(Const64 <t>) z))
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 (Add64 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst64 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub64 x (Add64 z i:(Const64 <t>)))
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 (Sub64 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Sub64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 i (Add64 <t> z x))
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst64 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub64 (Add64 z i:(Const64 <t>)) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Sub64 <t> z x))
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpSub64 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Sub64 (Const64 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Sub64F (Const64F [c]) (Const64F [d]))
+ // cond: c-d == c-d
+ // result: (Const64F [c-d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c-d == c-d) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c - d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c-d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub8 x (Const8 <t> [c]))
+ // cond: x.Op != OpConst8
+ // result: (Add8 (Const8 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Sub8 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub8 x x)
+ // result: (Const8 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Sub8 (Neg8 x) (Com8 x))
+ // result: (Const8 [1])
+ for {
+ if v_0.Op != OpNeg8 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpCom8 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(1)
+ return true
+ }
+ // match: (Sub8 (Com8 x) (Neg8 x))
+ // result: (Const8 [-1])
+ for {
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNeg8 || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ // match: (Sub8 (Add8 t x) (Add8 t y))
+ // result: (Sub8 x y)
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ t := v_0_0
+ x := v_0_1
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if t != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpSub8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub8 (Add8 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Add8 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Sub8 x y) x)
+ // result: (Neg8 y)
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg8)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub8 x (Add8 x y))
+ // result: (Neg8 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg8)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 x (Sub8 i:(Const8 <t>) z))
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 (Add8 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst8 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub8 x (Add8 z i:(Const8 <t>)))
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 (Sub8 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Sub8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 i (Add8 <t> z x))
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst8 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub8 (Add8 z i:(Const8 <t>)) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Sub8 <t> z x))
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x))
+ // result: (Add8 (Const8 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpSub8 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Sub8 (Const8 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc (Const64F [c]))
+ // result: (Const64F [math.Trunc(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Trunc(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc16to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc16to8 (Const16 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc16to8 (ZeroExt8to16 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc16to8 (SignExt8to16 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc16to8 (And16 (Const16 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc16to8 x)
+ for {
+ if v_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ y := auxIntToInt16(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc16to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to16 (Const32 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc32to16 (ZeroExt8to32 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (ZeroExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt8to32 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (And32 (Const32 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc32to16 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc32to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 (Const32 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc32to8 (ZeroExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (SignExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (And32 (Const32 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc32to8 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc32to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 (Const64 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt8to64 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt8to64 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc64to16 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Const64 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt8to64 x))
+ // result: (ZeroExt8to32 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt16to64 x))
+ // result: (ZeroExt16to32 x)
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt8to64 x))
+ // result: (SignExt8to32 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt16to64 x))
+ // result: (SignExt16to32 x)
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFFFFFF == 0xFFFFFFFF
+ // result: (Trunc64to32 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to8 (Const64 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc64to8 (ZeroExt8to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to8 (SignExt8to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to8 (And64 (Const64 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc64to8 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc64to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Xor16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Xor16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 (Com16 x) x)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom16 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 (Const16 [-1]) x)
+ // result: (Com16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x (Xor16 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Xor16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Xor16 i (Xor16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpXor16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x))
+ // result: (Xor16 (Const16 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Lsh16x64 x z:(Const64 <t> [c])) (Rsh16Ux64 x (Const64 [d])))
+ // cond: c < 16 && d == 16-c && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 16 && d == 16-c && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)
+ // result: (RotateLeft16 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh16Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh16x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) {
+ continue
+ }
+ v.reset(OpRotateLeft16)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Xor32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 x x)
+ // result: (Const32 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Xor32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 (Com32 x) x)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom32 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 (Const32 [-1]) x)
+ // result: (Com32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 x (Xor32 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Xor32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Xor32 i (Xor32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpXor32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x))
+ // result: (Xor32 (Const32 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Lsh32x64 x z:(Const64 <t> [c])) (Rsh32Ux64 x (Const64 [d])))
+ // cond: c < 32 && d == 32-c && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 32 && d == 32-c && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)
+ // result: (RotateLeft32 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh32Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh32x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) {
+ continue
+ }
+ v.reset(OpRotateLeft32)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Xor64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 x x)
+ // result: (Const64 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Xor64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 (Com64 x) x)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom64 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 (Const64 [-1]) x)
+ // result: (Com64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 x (Xor64 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Xor64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Xor64 i (Xor64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpXor64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x))
+ // result: (Xor64 (Const64 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpXor64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Lsh64x64 x z:(Const64 <t> [c])) (Rsh64Ux64 x (Const64 [d])))
+ // cond: c < 64 && d == 64-c && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 64 && d == 64-c && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)
+ // result: (RotateLeft64 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh64Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh64x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) {
+ continue
+ }
+ v.reset(OpRotateLeft64)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Xor8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 x x)
+ // result: (Const8 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Xor8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 (Com8 x) x)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpCom8 {
+ continue
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 (Const8 [-1]) x)
+ // result: (Com8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpCom8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 x (Xor8 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Xor8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Xor8 i (Xor8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpXor8)
+ v0 := b.NewValue0(v.Pos, OpXor8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x))
+ // result: (Xor8 (Const8 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpXor8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Lsh8x64 x z:(Const64 <t> [c])) (Rsh8Ux64 x (Const64 [d])))
+ // cond: c < 8 && d == 8-c && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(z.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c < 8 && d == 8-c && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub64 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub32 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub16 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ left := v_0
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ y := left.Args[1]
+ x := left.Args[0]
+ right := v_1
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ _ = right.Args[1]
+ if x != right.Args[0] {
+ continue
+ }
+ right_1 := right.Args[1]
+ if right_1.Op != OpSub8 {
+ continue
+ }
+ _ = right_1.Args[1]
+ right_1_0 := right_1.Args[0]
+ if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux64 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x64 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub64 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux32 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x32 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub32 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux16 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x16 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub16 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+ // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+ // result: (RotateLeft8 x z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ right := v_0
+ if right.Op != OpRsh8Ux8 {
+ continue
+ }
+ y := right.Args[1]
+ x := right.Args[0]
+ left := v_1
+ if left.Op != OpLsh8x8 {
+ continue
+ }
+ _ = left.Args[1]
+ if x != left.Args[0] {
+ continue
+ }
+ z := left.Args[1]
+ if z.Op != OpSub8 {
+ continue
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+ continue
+ }
+ v.reset(OpRotateLeft8)
+ v.AddArg2(x, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ mem := v_1
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
+ // cond: isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + t2.Size() && clobber(store)
+ // result: (Zero {t1} [n] p1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ store := v_1
+ if store.Op != OpStore {
+ break
+ }
+ t2 := auxToType(store.Aux)
+ mem := store.Args[2]
+ store_0 := store.Args[0]
+ if store_0.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(store_0.AuxInt)
+ p2 := store_0.Args[0]
+ if !(isSamePtr(p1, p2) && store.Uses == 1 && n >= o2+t2.Size() && clobber(store)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t1)
+ v.AddArg2(p1, mem)
+ return true
+ }
+ // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
+ // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ move := v_1
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef)
+ // result: (Zero {t} [n] dst1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ vardef := v_1
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ move := vardef.Args[0]
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg2(dst1, v0)
+ return true
+ }
+ // match: (Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _))
+ // cond: isSamePtr(dst1, dst2)
+ // result: zero
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ zero := v_1
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != s || auxToType(zero.Aux) != t {
+ break
+ }
+ dst2 := zero.Args[0]
+ if !(isSamePtr(dst1, dst2)) {
+ break
+ }
+ v.copyOf(zero)
+ return true
+ }
+ // match: (Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _)))
+ // cond: isSamePtr(dst1, dst2)
+ // result: vardef
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ vardef := v_1
+ if vardef.Op != OpVarDef {
+ break
+ }
+ vardef_0 := vardef.Args[0]
+ if vardef_0.Op != OpZero || auxIntToInt64(vardef_0.AuxInt) != s || auxToType(vardef_0.Aux) != t {
+ break
+ }
+ dst2 := vardef_0.Args[0]
+ if !(isSamePtr(dst1, dst2)) {
+ break
+ }
+ v.copyOf(vardef)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt16to32 (Const16 [c]))
+ // result: (Const32 [int32(uint16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ // match: (ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s]))))
+ // cond: s >= 16
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 16) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt16to64 (Const16 [c]))
+ // result: (Const64 [int64(uint16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 48
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 48) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt32to64 (Const32 [c]))
+ // result: (Const64 [int64(uint32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 32
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to32 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 32) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to16 (Const8 [c]))
+ // result: (Const16 [int16( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s]))))
+ // cond: s >= 8
+ // result: x
+ for {
+ if v_0.Op != OpTrunc16to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to32 (Const8 [c]))
+ // result: (Const32 [int32( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s]))))
+ // cond: s >= 24
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 24) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to64 (Const8 [c]))
+ // result: (Const64 [int64( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 56
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 56) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteBlockgeneric(b *Block) bool {
+ switch b.Kind {
+ case BlockIf:
+ // match: (If (Not cond) yes no)
+ // result: (If cond no yes)
+ for b.Controls[0].Op == OpNot {
+ v_0 := b.Controls[0]
+ cond := v_0.Args[0]
+ b.resetWithControl(BlockIf, cond)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (If (ConstBool [c]) yes no)
+ // cond: c
+ // result: (First yes no)
+ for b.Controls[0].Op == OpConstBool {
+ v_0 := b.Controls[0]
+ c := auxIntToBool(v_0.AuxInt)
+ if !(c) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (If (ConstBool [c]) yes no)
+ // cond: !c
+ // result: (First no yes)
+ for b.Controls[0].Op == OpConstBool {
+ v_0 := b.Controls[0]
+ c := auxIntToBool(v_0.AuxInt)
+ if !(!c) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/sccp.go b/src/cmd/compile/internal/ssa/sccp.go
new file mode 100644
index 0000000..77a6f50
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sccp.go
@@ -0,0 +1,585 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+)
+
+// ----------------------------------------------------------------------------
+// Sparse Conditional Constant Propagation
+//
+// Described in
+// Mark N. Wegman, F. Kenneth Zadeck: Constant Propagation with Conditional Branches.
+// TOPLAS 1991.
+//
+// This algorithm uses three level lattice for SSA value
+//
+// Top undefined
+// / | \
+// .. 1 2 3 .. constant
+// \ | /
+// Bottom not constant
+//
+// It starts with optimistically assuming that all SSA values are initially Top
+// and then propagates constant facts only along reachable control flow paths.
+// Since some basic blocks are not visited yet, corresponding inputs of phi become
+// Top, we use the meet(phi) to compute its lattice.
+//
+// Top ∩ any = any
+// Bottom ∩ any = Bottom
+// ConstantA ∩ ConstantA = ConstantA
+// ConstantA ∩ ConstantB = Bottom
+//
+// Each lattice value is lowered most twice(Top to Constant, Constant to Bottom)
+// due to lattice depth, resulting in a fast convergence speed of the algorithm.
+// In this way, sccp can discover optimization opportunities that cannot be found
+// by just combining constant folding and constant propagation and dead code
+// elimination separately.
+
+// Three level lattice holds compile time knowledge about SSA value
+const (
+ top int8 = iota // undefined
+ constant // constant
+ bottom // not a constant
+)
+
+type lattice struct {
+ tag int8 // lattice type
+ val *Value // constant value
+}
+
+type worklist struct {
+ f *Func // the target function to be optimized out
+ edges []Edge // propagate constant facts through edges
+ uses []*Value // re-visiting set
+ visited map[Edge]bool // visited edges
+ latticeCells map[*Value]lattice // constant lattices
+ defUse map[*Value][]*Value // def-use chains for some values
+ defBlock map[*Value][]*Block // use blocks of def
+ visitedBlock []bool // visited block
+}
+
+// sccp stands for sparse conditional constant propagation, it propagates constants
+// through CFG conditionally and applies constant folding, constant replacement and
+// dead code elimination all together.
+func sccp(f *Func) {
+ var t worklist
+ t.f = f
+ t.edges = make([]Edge, 0)
+ t.visited = make(map[Edge]bool)
+ t.edges = append(t.edges, Edge{f.Entry, 0})
+ t.defUse = make(map[*Value][]*Value)
+ t.defBlock = make(map[*Value][]*Block)
+ t.latticeCells = make(map[*Value]lattice)
+ t.visitedBlock = f.Cache.allocBoolSlice(f.NumBlocks())
+ defer f.Cache.freeBoolSlice(t.visitedBlock)
+
+ // build it early since we rely heavily on the def-use chain later
+ t.buildDefUses()
+
+ // pick up either an edge or SSA value from worklilst, process it
+ for {
+ if len(t.edges) > 0 {
+ edge := t.edges[0]
+ t.edges = t.edges[1:]
+ if _, exist := t.visited[edge]; !exist {
+ dest := edge.b
+ destVisited := t.visitedBlock[dest.ID]
+
+ // mark edge as visited
+ t.visited[edge] = true
+ t.visitedBlock[dest.ID] = true
+ for _, val := range dest.Values {
+ if val.Op == OpPhi || !destVisited {
+ t.visitValue(val)
+ }
+ }
+ // propagates constants facts through CFG, taking condition test
+ // into account
+ if !destVisited {
+ t.propagate(dest)
+ }
+ }
+ continue
+ }
+ if len(t.uses) > 0 {
+ use := t.uses[0]
+ t.uses = t.uses[1:]
+ t.visitValue(use)
+ continue
+ }
+ break
+ }
+
+ // apply optimizations based on discovered constants
+ constCnt, rewireCnt := t.replaceConst()
+ if f.pass.debug > 0 {
+ if constCnt > 0 || rewireCnt > 0 {
+ fmt.Printf("Phase SCCP for %v : %v constants, %v dce\n", f.Name, constCnt, rewireCnt)
+ }
+ }
+}
+
+func equals(a, b lattice) bool {
+ if a == b {
+ // fast path
+ return true
+ }
+ if a.tag != b.tag {
+ return false
+ }
+ if a.tag == constant {
+ // The same content of const value may be different, we should
+ // compare with auxInt instead
+ v1 := a.val
+ v2 := b.val
+ if v1.Op == v2.Op && v1.AuxInt == v2.AuxInt {
+ return true
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// possibleConst checks if Value can be fold to const. For those Values that can
+// never become constants(e.g. StaticCall), we don't make futile efforts.
+func possibleConst(val *Value) bool {
+ if isConst(val) {
+ return true
+ }
+ switch val.Op {
+ case OpCopy:
+ return true
+ case OpPhi:
+ return true
+ case
+ // negate
+ OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
+ OpCom8, OpCom16, OpCom32, OpCom64,
+ // math
+ OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
+ // conversion
+ OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
+ OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
+ OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
+ OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
+ OpCvtBoolToUint8,
+ OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
+ OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
+ OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
+ // bit
+ OpCtz8, OpCtz16, OpCtz32, OpCtz64,
+ // mask
+ OpSlicemask,
+ // safety check
+ OpIsNonNil,
+ // not
+ OpNot:
+ return true
+ case
+ // add
+ OpAdd64, OpAdd32, OpAdd16, OpAdd8,
+ OpAdd32F, OpAdd64F,
+ // sub
+ OpSub64, OpSub32, OpSub16, OpSub8,
+ OpSub32F, OpSub64F,
+ // mul
+ OpMul64, OpMul32, OpMul16, OpMul8,
+ OpMul32F, OpMul64F,
+ // div
+ OpDiv32F, OpDiv64F,
+ OpDiv8, OpDiv16, OpDiv32, OpDiv64,
+ OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u,
+ OpMod8, OpMod16, OpMod32, OpMod64,
+ OpMod8u, OpMod16u, OpMod32u, OpMod64u,
+ // compare
+ OpEq64, OpEq32, OpEq16, OpEq8,
+ OpEq32F, OpEq64F,
+ OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLess32F, OpLess64F,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
+ OpLeq32F, OpLeq64F,
+ OpEqB, OpNeqB,
+ // shift
+ OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
+ OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
+ OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
+ // safety check
+ OpIsInBounds, OpIsSliceInBounds,
+ // bit
+ OpAnd8, OpAnd16, OpAnd32, OpAnd64,
+ OpOr8, OpOr16, OpOr32, OpOr64,
+ OpXor8, OpXor16, OpXor32, OpXor64:
+ return true
+ default:
+ return false
+ }
+}
+
+func (t *worklist) getLatticeCell(val *Value) lattice {
+ if !possibleConst(val) {
+ // they are always worst
+ return lattice{bottom, nil}
+ }
+ lt, exist := t.latticeCells[val]
+ if !exist {
+ return lattice{top, nil} // optimistically for un-visited value
+ }
+ return lt
+}
+
+func isConst(val *Value) bool {
+ switch val.Op {
+ case OpConst64, OpConst32, OpConst16, OpConst8,
+ OpConstBool, OpConst32F, OpConst64F:
+ return true
+ default:
+ return false
+ }
+}
+
+// buildDefUses builds def-use chain for some values early, because once the
+// lattice of a value is changed, we need to update lattices of use. But we don't
+// need all uses of it, only uses that can become constants would be added into
+// re-visit worklist since no matter how many times they are revisited, uses which
+// can't become constants lattice remains unchanged, i.e. Bottom.
+func (t *worklist) buildDefUses() {
+ for _, block := range t.f.Blocks {
+ for _, val := range block.Values {
+ for _, arg := range val.Args {
+ // find its uses, only uses that can become constants take into account
+ if possibleConst(arg) && possibleConst(val) {
+ if _, exist := t.defUse[arg]; !exist {
+ t.defUse[arg] = make([]*Value, 0, arg.Uses)
+ }
+ t.defUse[arg] = append(t.defUse[arg], val)
+ }
+ }
+ }
+ for _, ctl := range block.ControlValues() {
+ // for control values that can become constants, find their use blocks
+ if possibleConst(ctl) {
+ t.defBlock[ctl] = append(t.defBlock[ctl], block)
+ }
+ }
+ }
+}
+
+// addUses finds all uses of value and appends them into work list for further process
+func (t *worklist) addUses(val *Value) {
+ for _, use := range t.defUse[val] {
+ if val == use {
+ // Phi may refer to itself as uses, ignore them to avoid re-visiting phi
+ // for performance reason
+ continue
+ }
+ t.uses = append(t.uses, use)
+ }
+ for _, block := range t.defBlock[val] {
+ if t.visitedBlock[block.ID] {
+ t.propagate(block)
+ }
+ }
+}
+
+// meet meets all of phi arguments and computes result lattice
+func (t *worklist) meet(val *Value) lattice {
+ optimisticLt := lattice{top, nil}
+ for i := 0; i < len(val.Args); i++ {
+ edge := Edge{val.Block, i}
+ // If incoming edge for phi is not visited, assume top optimistically.
+ // According to rules of meet:
+ // Top ∩ any = any
+ // Top participates in meet() but does not affect the result, so here
+ // we will ignore Top and only take other lattices into consideration.
+ if _, exist := t.visited[edge]; exist {
+ lt := t.getLatticeCell(val.Args[i])
+ if lt.tag == constant {
+ if optimisticLt.tag == top {
+ optimisticLt = lt
+ } else {
+ if !equals(optimisticLt, lt) {
+ // ConstantA ∩ ConstantB = Bottom
+ return lattice{bottom, nil}
+ }
+ }
+ } else if lt.tag == bottom {
+ // Bottom ∩ any = Bottom
+ return lattice{bottom, nil}
+ } else {
+ // Top ∩ any = any
+ }
+ } else {
+ // Top ∩ any = any
+ }
+ }
+
+ // ConstantA ∩ ConstantA = ConstantA or Top ∩ any = any
+ return optimisticLt
+}
+
+func computeLattice(f *Func, val *Value, args ...*Value) lattice {
+ // In general, we need to perform constant evaluation based on constant args:
+ //
+ // res := lattice{constant, nil}
+ // switch op {
+ // case OpAdd16:
+ // res.val = newConst(argLt1.val.AuxInt16() + argLt2.val.AuxInt16())
+ // case OpAdd32:
+ // res.val = newConst(argLt1.val.AuxInt32() + argLt2.val.AuxInt32())
+ // case OpDiv8:
+ // if !isDivideByZero(argLt2.val.AuxInt8()) {
+ // res.val = newConst(argLt1.val.AuxInt8() / argLt2.val.AuxInt8())
+ // }
+ // ...
+ // }
+ //
+ // However, this would create a huge switch for all opcodes that can be
+ // evaluated during compile time. Moreover, some operations can be evaluated
+ // only if its arguments satisfy additional conditions(e.g. divide by zero).
+ // It's fragile and error prone. We did a trick by reusing the existing rules
+ // in generic rules for compile-time evaluation. But generic rules rewrite
+ // original value, this behavior is undesired, because the lattice of values
+ // may change multiple times, once it was rewritten, we lose the opportunity
+ // to change it permanently, which can lead to errors. For example, We cannot
+ // change its value immediately after visiting Phi, because some of its input
+ // edges may still not be visited at this moment.
+ constValue := f.newValue(val.Op, val.Type, f.Entry, val.Pos)
+ constValue.AddArgs(args...)
+ matched := rewriteValuegeneric(constValue)
+ if matched {
+ if isConst(constValue) {
+ return lattice{constant, constValue}
+ }
+ }
+ // Either we can not match generic rules for given value or it does not
+ // satisfy additional constraints(e.g. divide by zero), in these cases, clean
+ // up temporary value immediately in case they are not dominated by their args.
+ constValue.reset(OpInvalid)
+ return lattice{bottom, nil}
+}
+
+func (t *worklist) visitValue(val *Value) {
+ if !possibleConst(val) {
+ // fast fail for always worst Values, i.e. there is no lowering happen
+ // on them, their lattices must be initially worse Bottom.
+ return
+ }
+
+ oldLt := t.getLatticeCell(val)
+ defer func() {
+ // re-visit all uses of value if its lattice is changed
+ newLt := t.getLatticeCell(val)
+ if !equals(newLt, oldLt) {
+ if int8(oldLt.tag) > int8(newLt.tag) {
+ t.f.Fatalf("Must lower lattice\n")
+ }
+ t.addUses(val)
+ }
+ }()
+
+ switch val.Op {
+ // they are constant values, aren't they?
+ case OpConst64, OpConst32, OpConst16, OpConst8,
+ OpConstBool, OpConst32F, OpConst64F: //TODO: support ConstNil ConstString etc
+ t.latticeCells[val] = lattice{constant, val}
+ // lattice value of copy(x) actually means lattice value of (x)
+ case OpCopy:
+ t.latticeCells[val] = t.getLatticeCell(val.Args[0])
+ // phi should be processed specially
+ case OpPhi:
+ t.latticeCells[val] = t.meet(val)
+ // fold 1-input operations:
+ case
+ // negate
+ OpNeg8, OpNeg16, OpNeg32, OpNeg64, OpNeg32F, OpNeg64F,
+ OpCom8, OpCom16, OpCom32, OpCom64,
+ // math
+ OpFloor, OpCeil, OpTrunc, OpRoundToEven, OpSqrt,
+ // conversion
+ OpTrunc16to8, OpTrunc32to8, OpTrunc32to16, OpTrunc64to8,
+ OpTrunc64to16, OpTrunc64to32, OpCvt32to32F, OpCvt32to64F,
+ OpCvt64to32F, OpCvt64to64F, OpCvt32Fto32, OpCvt32Fto64,
+ OpCvt64Fto32, OpCvt64Fto64, OpCvt32Fto64F, OpCvt64Fto32F,
+ OpCvtBoolToUint8,
+ OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64, OpZeroExt16to32,
+ OpZeroExt16to64, OpZeroExt32to64, OpSignExt8to16, OpSignExt8to32,
+ OpSignExt8to64, OpSignExt16to32, OpSignExt16to64, OpSignExt32to64,
+ // bit
+ OpCtz8, OpCtz16, OpCtz32, OpCtz64,
+ // mask
+ OpSlicemask,
+ // safety check
+ OpIsNonNil,
+ // not
+ OpNot:
+ lt1 := t.getLatticeCell(val.Args[0])
+
+ if lt1.tag == constant {
+ // here we take a shortcut by reusing generic rules to fold constants
+ t.latticeCells[val] = computeLattice(t.f, val, lt1.val)
+ } else {
+ t.latticeCells[val] = lattice{lt1.tag, nil}
+ }
+ // fold 2-input operations
+ case
+ // add
+ OpAdd64, OpAdd32, OpAdd16, OpAdd8,
+ OpAdd32F, OpAdd64F,
+ // sub
+ OpSub64, OpSub32, OpSub16, OpSub8,
+ OpSub32F, OpSub64F,
+ // mul
+ OpMul64, OpMul32, OpMul16, OpMul8,
+ OpMul32F, OpMul64F,
+ // div
+ OpDiv32F, OpDiv64F,
+ OpDiv8, OpDiv16, OpDiv32, OpDiv64,
+ OpDiv8u, OpDiv16u, OpDiv32u, OpDiv64u, //TODO: support div128u
+ // mod
+ OpMod8, OpMod16, OpMod32, OpMod64,
+ OpMod8u, OpMod16u, OpMod32u, OpMod64u,
+ // compare
+ OpEq64, OpEq32, OpEq16, OpEq8,
+ OpEq32F, OpEq64F,
+ OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLess32F, OpLess64F,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U,
+ OpLeq32F, OpLeq64F,
+ OpEqB, OpNeqB,
+ // shift
+ OpLsh64x64, OpRsh64x64, OpRsh64Ux64, OpLsh32x64,
+ OpRsh32x64, OpRsh32Ux64, OpLsh16x64, OpRsh16x64,
+ OpRsh16Ux64, OpLsh8x64, OpRsh8x64, OpRsh8Ux64,
+ // safety check
+ OpIsInBounds, OpIsSliceInBounds,
+ // bit
+ OpAnd8, OpAnd16, OpAnd32, OpAnd64,
+ OpOr8, OpOr16, OpOr32, OpOr64,
+ OpXor8, OpXor16, OpXor32, OpXor64:
+ lt1 := t.getLatticeCell(val.Args[0])
+ lt2 := t.getLatticeCell(val.Args[1])
+
+ if lt1.tag == constant && lt2.tag == constant {
+ // here we take a shortcut by reusing generic rules to fold constants
+ t.latticeCells[val] = computeLattice(t.f, val, lt1.val, lt2.val)
+ } else {
+ if lt1.tag == bottom || lt2.tag == bottom {
+ t.latticeCells[val] = lattice{bottom, nil}
+ } else {
+ t.latticeCells[val] = lattice{top, nil}
+ }
+ }
+ default:
+ // Any other type of value cannot be a constant, they are always worst(Bottom)
+ }
+}
+
+// propagate propagates constants facts through CFG. If the block has single successor,
+// add the successor anyway. If the block has multiple successors, only add the
+// branch destination corresponding to lattice value of condition value.
+func (t *worklist) propagate(block *Block) {
+ switch block.Kind {
+ case BlockExit, BlockRet, BlockRetJmp, BlockInvalid:
+ // control flow ends, do nothing then
+ break
+ case BlockDefer:
+ // we know nothing about control flow, add all branch destinations
+ t.edges = append(t.edges, block.Succs...)
+ case BlockFirst:
+ fallthrough // always takes the first branch
+ case BlockPlain:
+ t.edges = append(t.edges, block.Succs[0])
+ case BlockIf, BlockJumpTable:
+ cond := block.ControlValues()[0]
+ condLattice := t.getLatticeCell(cond)
+ if condLattice.tag == bottom {
+ // we know nothing about control flow, add all branch destinations
+ t.edges = append(t.edges, block.Succs...)
+ } else if condLattice.tag == constant {
+ // add branchIdx destinations depends on its condition
+ var branchIdx int64
+ if block.Kind == BlockIf {
+ branchIdx = 1 - condLattice.val.AuxInt
+ } else {
+ branchIdx = condLattice.val.AuxInt
+ }
+ t.edges = append(t.edges, block.Succs[branchIdx])
+ } else {
+ // condition value is not visited yet, don't propagate it now
+ }
+ default:
+ t.f.Fatalf("All kind of block should be processed above.")
+ }
+}
+
+// rewireSuccessor rewires corresponding successors according to constant value
+// discovered by previous analysis. As the result, some successors become unreachable
+// and thus can be removed in further deadcode phase
+func rewireSuccessor(block *Block, constVal *Value) bool {
+ switch block.Kind {
+ case BlockIf:
+ block.removeEdge(int(constVal.AuxInt))
+ block.Kind = BlockPlain
+ block.Likely = BranchUnknown
+ block.ResetControls()
+ return true
+ case BlockJumpTable:
+ // Remove everything but the known taken branch.
+ idx := int(constVal.AuxInt)
+ if idx < 0 || idx >= len(block.Succs) {
+ // This can only happen in unreachable code,
+ // as an invariant of jump tables is that their
+ // input index is in range.
+ // See issue 64826.
+ return false
+ }
+ block.swapSuccessorsByIdx(0, idx)
+ for len(block.Succs) > 1 {
+ block.removeEdge(1)
+ }
+ block.Kind = BlockPlain
+ block.Likely = BranchUnknown
+ block.ResetControls()
+ return true
+ default:
+ return false
+ }
+}
+
+// replaceConst will replace non-constant values that have been proven by sccp
+// to be constants.
+func (t *worklist) replaceConst() (int, int) {
+ constCnt, rewireCnt := 0, 0
+ for val, lt := range t.latticeCells {
+ if lt.tag == constant {
+ if !isConst(val) {
+ if t.f.pass.debug > 0 {
+ fmt.Printf("Replace %v with %v\n", val.LongString(), lt.val.LongString())
+ }
+ val.reset(lt.val.Op)
+ val.AuxInt = lt.val.AuxInt
+ constCnt++
+ }
+ // If const value controls this block, rewires successors according to its value
+ ctrlBlock := t.defBlock[val]
+ for _, block := range ctrlBlock {
+ if rewireSuccessor(block, lt.val) {
+ rewireCnt++
+ if t.f.pass.debug > 0 {
+ fmt.Printf("Rewire %v %v successors\n", block.Kind, block)
+ }
+ }
+ }
+ }
+ }
+ return constCnt, rewireCnt
+}
diff --git a/src/cmd/compile/internal/ssa/sccp_test.go b/src/cmd/compile/internal/ssa/sccp_test.go
new file mode 100644
index 0000000..70c23e7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sccp_test.go
@@ -0,0 +1,95 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "strings"
+ "testing"
+)
+
+func TestSCCPBasic(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v1", OpConst64, c.config.Types.Int64, 20, nil),
+ Valu("v2", OpConst64, c.config.Types.Int64, 21, nil),
+ Valu("v3", OpConst64F, c.config.Types.Float64, 21.0, nil),
+ Valu("v4", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("t1", OpAdd64, c.config.Types.Int64, 0, nil, "v1", "v2"),
+ Valu("t2", OpDiv64, c.config.Types.Int64, 0, nil, "t1", "v1"),
+ Valu("t3", OpAdd64, c.config.Types.Int64, 0, nil, "t1", "t2"),
+ Valu("t4", OpSub64, c.config.Types.Int64, 0, nil, "t3", "v2"),
+ Valu("t5", OpMul64, c.config.Types.Int64, 0, nil, "t4", "v2"),
+ Valu("t6", OpMod64, c.config.Types.Int64, 0, nil, "t5", "v2"),
+ Valu("t7", OpAnd64, c.config.Types.Int64, 0, nil, "t6", "v2"),
+ Valu("t8", OpOr64, c.config.Types.Int64, 0, nil, "t7", "v2"),
+ Valu("t9", OpXor64, c.config.Types.Int64, 0, nil, "t8", "v2"),
+ Valu("t10", OpNeg64, c.config.Types.Int64, 0, nil, "t9"),
+ Valu("t11", OpCom64, c.config.Types.Int64, 0, nil, "t10"),
+ Valu("t12", OpNeg64, c.config.Types.Int64, 0, nil, "t11"),
+ Valu("t13", OpFloor, c.config.Types.Float64, 0, nil, "v3"),
+ Valu("t14", OpSqrt, c.config.Types.Float64, 0, nil, "t13"),
+ Valu("t15", OpCeil, c.config.Types.Float64, 0, nil, "t14"),
+ Valu("t16", OpTrunc, c.config.Types.Float64, 0, nil, "t15"),
+ Valu("t17", OpRoundToEven, c.config.Types.Float64, 0, nil, "t16"),
+ Valu("t18", OpTrunc64to32, c.config.Types.Int64, 0, nil, "t12"),
+ Valu("t19", OpCvt64Fto64, c.config.Types.Float64, 0, nil, "t17"),
+ Valu("t20", OpCtz64, c.config.Types.Int64, 0, nil, "v2"),
+ Valu("t21", OpSlicemask, c.config.Types.Int64, 0, nil, "t20"),
+ Valu("t22", OpIsNonNil, c.config.Types.Int64, 0, nil, "v2"),
+ Valu("t23", OpNot, c.config.Types.Bool, 0, nil, "v4"),
+ Valu("t24", OpEq64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ Valu("t25", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ Valu("t26", OpLeq64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ Valu("t27", OpEqB, c.config.Types.Bool, 0, nil, "v4", "v4"),
+ Valu("t28", OpLsh64x64, c.config.Types.Int64, 0, nil, "v2", "v1"),
+ Valu("t29", OpIsInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"),
+ Valu("t30", OpIsSliceInBounds, c.config.Types.Int64, 0, nil, "v2", "v1"),
+ Goto("b2")),
+ Bloc("b2",
+ Exit("mem")))
+ sccp(fun.f)
+ CheckFunc(fun.f)
+ for name, value := range fun.values {
+ if strings.HasPrefix(name, "t") {
+ if !isConst(value) {
+ t.Errorf("Must be constant: %v", value.LongString())
+ }
+ }
+ }
+}
+
+func TestSCCPIf(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v1", OpConst64, c.config.Types.Int64, 0, nil),
+ Valu("v2", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("cmp", OpLess64, c.config.Types.Bool, 0, nil, "v1", "v2"),
+ If("cmp", "b2", "b3")),
+ Bloc("b2",
+ Valu("v3", OpConst64, c.config.Types.Int64, 3, nil),
+ Goto("b4")),
+ Bloc("b3",
+ Valu("v4", OpConst64, c.config.Types.Int64, 4, nil),
+ Goto("b4")),
+ Bloc("b4",
+ Valu("merge", OpPhi, c.config.Types.Int64, 0, nil, "v3", "v4"),
+ Exit("mem")))
+ sccp(fun.f)
+ CheckFunc(fun.f)
+ for _, b := range fun.blocks {
+ for _, v := range b.Values {
+ if v == fun.values["merge"] {
+ if !isConst(v) {
+ t.Errorf("Must be constant: %v", v.LongString())
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
new file mode 100644
index 0000000..fb38f40
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -0,0 +1,575 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "container/heap"
+ "sort"
+)
+
+const (
+ ScorePhi = iota // towards top of block
+ ScoreArg // must occur at the top of the entry block
+ ScoreInitMem // after the args - used as mark by debug info generation
+ ScoreReadTuple // must occur immediately after tuple-generating insn (or call)
+ ScoreNilCheck
+ ScoreMemory
+ ScoreReadFlags
+ ScoreDefault
+ ScoreFlags
+ ScoreControl // towards bottom of block
+)
+
+type ValHeap struct {
+ a []*Value
+ score []int8
+ inBlockUses []bool
+}
+
+func (h ValHeap) Len() int { return len(h.a) }
+func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *ValHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ v := x.(*Value)
+ h.a = append(h.a, v)
+}
+func (h *ValHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[0 : n-1]
+ return x
+}
+func (h ValHeap) Less(i, j int) bool {
+ x := h.a[i]
+ y := h.a[j]
+ sx := h.score[x.ID]
+ sy := h.score[y.ID]
+ if c := sx - sy; c != 0 {
+ return c < 0 // lower scores come earlier.
+ }
+ // Note: only scores are required for correct scheduling.
+ // Everything else is just heuristics.
+
+ ix := h.inBlockUses[x.ID]
+ iy := h.inBlockUses[y.ID]
+ if ix != iy {
+ return ix // values with in-block uses come earlier
+ }
+
+ if x.Pos != y.Pos { // Favor in-order line stepping
+ return x.Pos.Before(y.Pos)
+ }
+ if x.Op != OpPhi {
+ if c := len(x.Args) - len(y.Args); c != 0 {
+ return c > 0 // smaller args come later
+ }
+ }
+ if c := x.Uses - y.Uses; c != 0 {
+ return c > 0 // smaller uses come later
+ }
+ // These comparisons are fairly arbitrary.
+ // The goal here is stability in the face
+ // of unrelated changes elsewhere in the compiler.
+ if c := x.AuxInt - y.AuxInt; c != 0 {
+ return c < 0
+ }
+ if cmp := x.Type.Compare(y.Type); cmp != types.CMPeq {
+ return cmp == types.CMPlt
+ }
+ return x.ID < y.ID
+}
+
+func (op Op) isLoweredGetClosurePtr() bool {
+ switch op {
+ case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
+ Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpLOONG64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
+ OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr:
+ return true
+ }
+ return false
+}
+
+// Schedule the Values in each Block. After this phase returns, the
+// order of b.Values matters and is the order in which those values
+// will appear in the assembly output. For now it generates a
+// reasonable valid schedule using a priority queue. TODO(khr):
+// schedule smarter.
+func schedule(f *Func) {
+ // reusable priority queue
+ priq := new(ValHeap)
+
+ // "priority" for a value
+ score := f.Cache.allocInt8Slice(f.NumValues())
+ defer f.Cache.freeInt8Slice(score)
+
+ // maps mem values to the next live memory value
+ nextMem := f.Cache.allocValueSlice(f.NumValues())
+ defer f.Cache.freeValueSlice(nextMem)
+
+ // inBlockUses records whether a value is used in the block
+ // in which it lives. (block control values don't count as uses.)
+ inBlockUses := f.Cache.allocBoolSlice(f.NumValues())
+ defer f.Cache.freeBoolSlice(inBlockUses)
+ if f.Config.optimize {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ if a.Block == b {
+ inBlockUses[a.ID] = true
+ }
+ }
+ }
+ }
+ }
+ priq.inBlockUses = inBlockUses
+
+ for _, b := range f.Blocks {
+ // Compute score. Larger numbers are scheduled closer to the end of the block.
+ for _, v := range b.Values {
+ switch {
+ case v.Op.isLoweredGetClosurePtr():
+ // We also score GetLoweredClosurePtr as early as possible to ensure that the
+ // context register is not stomped. GetLoweredClosurePtr should only appear
+ // in the entry block where there are no phi functions, so there is no
+ // conflict or ambiguity here.
+ if b != f.Entry {
+ f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
+ }
+ score[v.ID] = ScorePhi
+ case opcodeTable[v.Op].nilCheck:
+ // Nil checks must come before loads from the same address.
+ score[v.ID] = ScoreNilCheck
+ case v.Op == OpPhi:
+ // We want all the phis first.
+ score[v.ID] = ScorePhi
+ case v.Op == OpArgIntReg || v.Op == OpArgFloatReg:
+ // In-register args must be scheduled as early as possible to ensure that they
+ // are not stomped (similar to the closure pointer above).
+ // In particular, they need to come before regular OpArg operations because
+ // of how regalloc places spill code (see regalloc.go:placeSpills:mustBeFirst).
+ if b != f.Entry {
+ f.Fatalf("%s appeared outside of entry block, b=%s", v.Op, b.String())
+ }
+ score[v.ID] = ScorePhi
+ case v.Op == OpArg || v.Op == OpSP || v.Op == OpSB:
+ // We want all the args as early as possible, for better debugging.
+ score[v.ID] = ScoreArg
+ case v.Op == OpInitMem:
+ // Early, but after args. See debug.go:buildLocationLists
+ score[v.ID] = ScoreInitMem
+ case v.Type.IsMemory():
+ // Schedule stores as early as possible. This tends to
+ // reduce register pressure.
+ score[v.ID] = ScoreMemory
+ case v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN:
+ // Tuple selectors need to appear immediately after the instruction
+ // that generates the tuple.
+ score[v.ID] = ScoreReadTuple
+ case v.hasFlagInput():
+ // Schedule flag-reading ops earlier, to minimize the lifetime
+ // of flag values.
+ score[v.ID] = ScoreReadFlags
+ case v.isFlagOp():
+ // Schedule flag register generation as late as possible.
+ // This makes sure that we only have one live flags
+ // value at a time.
+ // Note that this case is after the case above, so values
+ // which both read and generate flags are given ScoreReadFlags.
+ score[v.ID] = ScoreFlags
+ default:
+ score[v.ID] = ScoreDefault
+ // If we're reading flags, schedule earlier to keep flag lifetime short.
+ for _, a := range v.Args {
+ if a.isFlagOp() {
+ score[v.ID] = ScoreReadFlags
+ }
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ // Force the control values to be scheduled at the end,
+ // unless they have other special priority.
+ if c.Block != b || score[c.ID] < ScoreReadTuple {
+ continue
+ }
+ if score[c.ID] == ScoreReadTuple {
+ score[c.Args[0].ID] = ScoreControl
+ continue
+ }
+ score[c.ID] = ScoreControl
+ }
+ }
+ priq.score = score
+
+ // An edge represents a scheduling constraint that x must appear before y in the schedule.
+ type edge struct {
+ x, y *Value
+ }
+ edges := make([]edge, 0, 64)
+
+ // inEdges is the number of scheduling edges incoming from values that haven't been scheduled yet.
+ // i.e. inEdges[y.ID] = |e in edges where e.y == y and e.x is not in the schedule yet|.
+ inEdges := f.Cache.allocInt32Slice(f.NumValues())
+ defer f.Cache.freeInt32Slice(inEdges)
+
+ for _, b := range f.Blocks {
+ edges = edges[:0]
+ // Standard edges: from the argument of a value to that value.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // If a value is used by a phi, it does not induce
+ // a scheduling edge because that use is from the
+ // previous iteration.
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Block == b {
+ edges = append(edges, edge{a, v})
+ }
+ }
+ }
+
+ // Find store chain for block.
+ // Store chains for different blocks overwrite each other, so
+ // the calculated store chain is good only for this block.
+ for _, v := range b.Values {
+ if v.Op != OpPhi && v.Op != OpInitMem && v.Type.IsMemory() {
+ nextMem[v.MemoryArg().ID] = v
+ }
+ }
+
+ // Add edges to enforce that any load must come before the following store.
+ for _, v := range b.Values {
+ if v.Op == OpPhi || v.Type.IsMemory() {
+ continue
+ }
+ w := v.MemoryArg()
+ if w == nil {
+ continue
+ }
+ if s := nextMem[w.ID]; s != nil && s.Block == b {
+ edges = append(edges, edge{v, s})
+ }
+ }
+
+ // Sort all the edges by source Value ID.
+ sort.Slice(edges, func(i, j int) bool {
+ return edges[i].x.ID < edges[j].x.ID
+ })
+ // Compute inEdges for values in this block.
+ for _, e := range edges {
+ inEdges[e.y.ID]++
+ }
+
+ // Initialize priority queue with schedulable values.
+ priq.a = priq.a[:0]
+ for _, v := range b.Values {
+ if inEdges[v.ID] == 0 {
+ heap.Push(priq, v)
+ }
+ }
+
+ // Produce the schedule. Pick the highest priority scheduleable value,
+ // add it to the schedule, add any of its uses that are now scheduleable
+ // to the queue, and repeat.
+ nv := len(b.Values)
+ b.Values = b.Values[:0]
+ for priq.Len() > 0 {
+ // Schedule the next schedulable value in priority order.
+ v := heap.Pop(priq).(*Value)
+ b.Values = append(b.Values, v)
+
+ // Find all the scheduling edges out from this value.
+ i := sort.Search(len(edges), func(i int) bool {
+ return edges[i].x.ID >= v.ID
+ })
+ j := sort.Search(len(edges), func(i int) bool {
+ return edges[i].x.ID > v.ID
+ })
+ // Decrement inEdges for each target of edges from v.
+ for _, e := range edges[i:j] {
+ inEdges[e.y.ID]--
+ if inEdges[e.y.ID] == 0 {
+ heap.Push(priq, e.y)
+ }
+ }
+ }
+ if len(b.Values) != nv {
+ f.Fatalf("schedule does not include all values in block %s", b)
+ }
+ }
+
+ // Remove SPanchored now that we've scheduled.
+ // Also unlink nil checks now that ordering is assured
+ // between the nil check and the uses of the nil-checked pointer.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a.Op == OpSPanchored || opcodeTable[a.Op].nilCheck {
+ v.SetArg(i, a.Args[0])
+ }
+ }
+ }
+ for i, c := range b.ControlValues() {
+ if c.Op == OpSPanchored || opcodeTable[c.Op].nilCheck {
+ b.ReplaceControl(i, c.Args[0])
+ }
+ }
+ }
+ for _, b := range f.Blocks {
+ i := 0
+ for _, v := range b.Values {
+ if v.Op == OpSPanchored {
+ // Free this value
+ if v.Uses != 0 {
+ base.Fatalf("SPAnchored still has %d uses", v.Uses)
+ }
+ v.resetArgs()
+ f.freeValue(v)
+ } else {
+ if opcodeTable[v.Op].nilCheck {
+ if v.Uses != 0 {
+ base.Fatalf("nilcheck still has %d uses", v.Uses)
+ }
+ // We can't delete the nil check, but we mark
+ // it as having void type so regalloc won't
+ // try to allocate a register for it.
+ v.Type = types.TypeVoid
+ }
+ b.Values[i] = v
+ i++
+ }
+ }
+ b.truncateValues(i)
+ }
+
+ f.scheduled = true
+}
+
+// storeOrder orders values with respect to stores. That is,
+// if v transitively depends on store s, v is ordered after s,
+// otherwise v is ordered before s.
+// Specifically, values are ordered like
+//
+// store1
+// NilCheck that depends on store1
+// other values that depends on store1
+// store2
+// NilCheck that depends on store2
+// other values that depends on store2
+// ...
+//
+// The order of non-store and non-NilCheck values are undefined
+// (not necessarily dependency order). This should be cheaper
+// than a full scheduling as done above.
+// Note that simple dependency order won't work: there is no
+// dependency between NilChecks and values like IsNonNil.
+// Auxiliary data structures are passed in as arguments, so
+// that they can be allocated in the caller and be reused.
+// This function takes care of reset them.
+func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value {
+ if len(values) == 0 {
+ return values
+ }
+
+ f := values[0].Block.Func
+
+ // find all stores
+
+ // Members of values that are store values.
+ // A constant bound allows this to be stack-allocated. 64 is
+ // enough to cover almost every storeOrder call.
+ stores := make([]*Value, 0, 64)
+ hasNilCheck := false
+ sset.clear() // sset is the set of stores that are used in other values
+ for _, v := range values {
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ if v.Op == OpInitMem || v.Op == OpPhi {
+ continue
+ }
+ sset.add(v.MemoryArg().ID) // record that v's memory arg is used
+ }
+ if v.Op == OpNilCheck {
+ hasNilCheck = true
+ }
+ }
+ if len(stores) == 0 || !hasNilCheck && f.pass.name == "nilcheckelim" {
+ // there is no store, the order does not matter
+ return values
+ }
+
+ // find last store, which is the one that is not used by other stores
+ var last *Value
+ for _, v := range stores {
+ if !sset.contains(v.ID) {
+ if last != nil {
+ f.Fatalf("two stores live simultaneously: %v and %v", v, last)
+ }
+ last = v
+ }
+ }
+
+ // We assign a store number to each value. Store number is the
+ // index of the latest store that this value transitively depends.
+ // The i-th store in the current block gets store number 3*i. A nil
+ // check that depends on the i-th store gets store number 3*i+1.
+ // Other values that depends on the i-th store gets store number 3*i+2.
+ // Special case: 0 -- unassigned, 1 or 2 -- the latest store it depends
+ // is in the previous block (or no store at all, e.g. value is Const).
+ // First we assign the number to all stores by walking back the store chain,
+ // then assign the number to other values in DFS order.
+ count := make([]int32, 3*(len(stores)+1))
+ sset.clear() // reuse sparse set to ensure that a value is pushed to stack only once
+ for n, w := len(stores), last; n > 0; n-- {
+ storeNumber[w.ID] = int32(3 * n)
+ count[3*n]++
+ sset.add(w.ID)
+ if w.Op == OpInitMem || w.Op == OpPhi {
+ if n != 1 {
+ f.Fatalf("store order is wrong: there are stores before %v", w)
+ }
+ break
+ }
+ w = w.MemoryArg()
+ }
+ var stack []*Value
+ for _, v := range values {
+ if sset.contains(v.ID) {
+ // in sset means v is a store, or already pushed to stack, or already assigned a store number
+ continue
+ }
+ stack = append(stack, v)
+ sset.add(v.ID)
+
+ for len(stack) > 0 {
+ w := stack[len(stack)-1]
+ if storeNumber[w.ID] != 0 {
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ if w.Op == OpPhi {
+ // Phi value doesn't depend on store in the current block.
+ // Do this early to avoid dependency cycle.
+ storeNumber[w.ID] = 2
+ count[2]++
+ stack = stack[:len(stack)-1]
+ continue
+ }
+
+ max := int32(0) // latest store dependency
+ argsdone := true
+ for _, a := range w.Args {
+ if a.Block != w.Block {
+ continue
+ }
+ if !sset.contains(a.ID) {
+ stack = append(stack, a)
+ sset.add(a.ID)
+ argsdone = false
+ break
+ }
+ if storeNumber[a.ID]/3 > max {
+ max = storeNumber[a.ID] / 3
+ }
+ }
+ if !argsdone {
+ continue
+ }
+
+ n := 3*max + 2
+ if w.Op == OpNilCheck {
+ n = 3*max + 1
+ }
+ storeNumber[w.ID] = n
+ count[n]++
+ stack = stack[:len(stack)-1]
+ }
+ }
+
+ // convert count to prefix sum of counts: count'[i] = sum_{j<=i} count[i]
+ for i := range count {
+ if i == 0 {
+ continue
+ }
+ count[i] += count[i-1]
+ }
+ if count[len(count)-1] != int32(len(values)) {
+ f.Fatalf("storeOrder: value is missing, total count = %d, values = %v", count[len(count)-1], values)
+ }
+
+ // place values in count-indexed bins, which are in the desired store order
+ order := make([]*Value, len(values))
+ for _, v := range values {
+ s := storeNumber[v.ID]
+ order[count[s-1]] = v
+ count[s-1]++
+ }
+
+ // Order nil checks in source order. We want the first in source order to trigger.
+ // If two are on the same line, we don't really care which happens first.
+ // See issue 18169.
+ if hasNilCheck {
+ start := -1
+ for i, v := range order {
+ if v.Op == OpNilCheck {
+ if start == -1 {
+ start = i
+ }
+ } else {
+ if start != -1 {
+ sort.Sort(bySourcePos(order[start:i]))
+ start = -1
+ }
+ }
+ }
+ if start != -1 {
+ sort.Sort(bySourcePos(order[start:]))
+ }
+ }
+
+ return order
+}
+
+// isFlagOp reports if v is an OP with the flag type.
+func (v *Value) isFlagOp() bool {
+ if v.Type.IsFlags() || v.Type.IsTuple() && v.Type.FieldType(1).IsFlags() {
+ return true
+ }
+ // PPC64 carry generators put their carry in a non-flag-typed register
+ // in their output.
+ switch v.Op {
+ case OpPPC64SUBC, OpPPC64ADDC, OpPPC64SUBCconst, OpPPC64ADDCconst:
+ return true
+ }
+ return false
+}
+
+// hasFlagInput reports whether v has a flag value as any of its inputs.
+func (v *Value) hasFlagInput() bool {
+ for _, a := range v.Args {
+ if a.isFlagOp() {
+ return true
+ }
+ }
+ // PPC64 carry dependencies are conveyed through their final argument,
+ // so we treat those operations as taking flags as well.
+ switch v.Op {
+ case OpPPC64SUBE, OpPPC64ADDE, OpPPC64SUBZEzero, OpPPC64ADDZEzero:
+ return true
+ }
+ return false
+}
+
+type bySourcePos []*Value
+
+func (s bySourcePos) Len() int { return len(s) }
+func (s bySourcePos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s bySourcePos) Less(i, j int) bool { return s[i].Pos.Before(s[j].Pos) }
diff --git a/src/cmd/compile/internal/ssa/schedule_test.go b/src/cmd/compile/internal/ssa/schedule_test.go
new file mode 100644
index 0000000..6cf5105
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/schedule_test.go
@@ -0,0 +1,160 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestSchedule(t *testing.T) {
+ c := testConfig(t)
+ cases := []fun{
+ c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"),
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem1"),
+ Valu("mem3", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "sum", "mem2"),
+ Valu("l1", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"),
+ Valu("l2", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem2"),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "l1", "l2"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem3"))),
+ }
+ for _, c := range cases {
+ schedule(c.f)
+ if !isSingleLiveMem(c.f) {
+ t.Error("single-live-mem restriction not enforced by schedule for func:")
+ printFunc(c.f)
+ }
+ }
+}
+
+func isSingleLiveMem(f *Func) bool {
+ for _, b := range f.Blocks {
+ var liveMem *Value
+ for _, v := range b.Values {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ if liveMem == nil {
+ liveMem = w
+ continue
+ }
+ if w != liveMem {
+ return false
+ }
+ }
+ }
+ if v.Type.IsMemory() {
+ liveMem = v
+ }
+ }
+ }
+ return true
+}
+
+func TestStoreOrder(t *testing.T) {
+ // In the function below, v2 depends on v3 and v4, v4 depends on v3, and v3 depends on store v5.
+ // storeOrder did not handle this case correctly.
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpAdd64, c.config.Types.Int64, 0, nil, "b", "c"), // v2
+ Valu("b", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"), // v3
+ Valu("c", OpNeg64, c.config.Types.Int64, 0, nil, "b"), // v4
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"), // v5
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "a", "mem1"),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem2")))
+
+ CheckFunc(fun.f)
+ order := storeOrder(fun.f.Blocks[0].Values, fun.f.newSparseSet(fun.f.NumValues()), make([]int32, fun.f.NumValues()))
+
+ // check that v2, v3, v4 is sorted after v5
+ var ai, bi, ci, si int
+ for i, v := range order {
+ switch v.ID {
+ case 2:
+ ai = i
+ case 3:
+ bi = i
+ case 4:
+ ci = i
+ case 5:
+ si = i
+ }
+ }
+ if ai < si || bi < si || ci < si {
+ t.Logf("Func: %s", fun.f)
+ t.Errorf("store order is wrong: got %v, want v2 v3 v4 after v5", order)
+ }
+}
+
+func TestCarryChainOrder(t *testing.T) {
+ // In the function below, there are two carry chains that have no dependencies on each other,
+ // one is A1 -> A1carry -> A1Carryvalue, the other is A2 -> A2carry -> A2Carryvalue. If they
+ // are not scheduled properly, the carry will be clobbered, causing the carry to be regenerated.
+ c := testConfigARM64(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpARM64MOVDconst, c.config.Types.UInt64, 5, nil),
+ Valu("y", OpARM64MOVDconst, c.config.Types.UInt64, 6, nil),
+ Valu("z", OpARM64MOVDconst, c.config.Types.UInt64, 7, nil),
+ Valu("A1", OpARM64ADDSflags, types.NewTuple(c.config.Types.UInt64, types.TypeFlags), 0, nil, "x", "z"), // x+z, set flags
+ Valu("A1carry", OpSelect1, types.TypeFlags, 0, nil, "A1"),
+ Valu("A2", OpARM64ADDSflags, types.NewTuple(c.config.Types.UInt64, types.TypeFlags), 0, nil, "y", "z"), // y+z, set flags
+ Valu("A2carry", OpSelect1, types.TypeFlags, 0, nil, "A2"),
+ Valu("A1value", OpSelect0, c.config.Types.UInt64, 0, nil, "A1"),
+ Valu("A1Carryvalue", OpARM64ADCzerocarry, c.config.Types.UInt64, 0, nil, "A1carry"), // 0+0+A1carry
+ Valu("A2value", OpSelect0, c.config.Types.UInt64, 0, nil, "A2"),
+ Valu("A2Carryvalue", OpARM64ADCzerocarry, c.config.Types.UInt64, 0, nil, "A2carry"), // 0+0+A2carry
+ Valu("ValueSum", OpARM64ADD, c.config.Types.UInt64, 0, nil, "A1value", "A2value"),
+ Valu("CarrySum", OpARM64ADD, c.config.Types.UInt64, 0, nil, "A1Carryvalue", "A2Carryvalue"),
+ Valu("Sum", OpARM64AND, c.config.Types.UInt64, 0, nil, "ValueSum", "CarrySum"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem0")),
+ )
+
+ CheckFunc(fun.f)
+ schedule(fun.f)
+
+ // The expected order is A1 < A1carry < A1Carryvalue < A2 < A2carry < A2Carryvalue.
+ // There is no dependency between the two carry chains, so it doesn't matter which
+ // comes first and which comes after, but the unsorted position of A1 is before A2,
+ // so A1Carryvalue < A2.
+ var ai, bi, ci, di, ei, fi int
+ for i, v := range fun.f.Blocks[0].Values {
+ switch {
+ case fun.values["A1"] == v:
+ ai = i
+ case fun.values["A1carry"] == v:
+ bi = i
+ case fun.values["A1Carryvalue"] == v:
+ ci = i
+ case fun.values["A2"] == v:
+ di = i
+ case fun.values["A2carry"] == v:
+ ei = i
+ case fun.values["A2Carryvalue"] == v:
+ fi = i
+ }
+ }
+ if !(ai < bi && bi < ci && ci < di && di < ei && ei < fi) {
+ t.Logf("Func: %s", fun.f)
+ t.Errorf("carry chain order is wrong: got %v, want V%d after V%d after V%d after V%d after V%d after V%d,",
+ fun.f.Blocks[0], fun.values["A1"].ID, fun.values["A1carry"].ID, fun.values["A1Carryvalue"].ID,
+ fun.values["A2"].ID, fun.values["A2carry"].ID, fun.values["A2Carryvalue"].ID)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/shift_test.go b/src/cmd/compile/internal/ssa/shift_test.go
new file mode 100644
index 0000000..06c2f67
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shift_test.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestShiftConstAMD64(t *testing.T) {
+ c := testConfig(t)
+ fun := makeConstShiftFunc(c, 18, OpLsh64x64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpLsh64x64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 18, OpRsh64x64, c.config.Types.Int64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpRsh64x64, c.config.Types.Int64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+}
+
+func makeConstShiftFunc(c *Conf, amount int64, op Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
+ Valu("shift", op, typ, 0, nil, "load", "c"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "shift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
+
+func TestShiftToExtensionAMD64(t *testing.T) {
+ c := testConfig(t)
+ // Test that eligible pairs of constant shifts are converted to extensions.
+ // For example:
+ // (uint64(x) << 32) >> 32 -> uint64(uint32(x))
+ ops := map[Op]int{
+ OpAMD64SHLQconst: 0, OpAMD64SHLLconst: 0,
+ OpAMD64SHRQconst: 0, OpAMD64SHRLconst: 0,
+ OpAMD64SARQconst: 0, OpAMD64SARLconst: 0,
+ }
+ tests := [...]struct {
+ amount int64
+ left, right Op
+ typ *types.Type
+ }{
+ // unsigned
+ {56, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {48, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {32, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {24, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {16, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {8, OpLsh16x64, OpRsh16Ux64, c.config.Types.UInt16},
+ // signed
+ {56, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {48, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {32, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {24, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {16, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {8, OpLsh16x64, OpRsh16x64, c.config.Types.Int16},
+ }
+ for _, tc := range tests {
+ fun := makeShiftExtensionFunc(c, tc.amount, tc.left, tc.right, tc.typ)
+ checkOpcodeCounts(t, fun.f, ops)
+ }
+}
+
+// makeShiftExtensionFunc generates a function containing:
+//
+// (rshift (lshift (Const64 [amount])) (Const64 [amount]))
+//
+// This may be equivalent to a sign or zero extension.
+func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
+ Valu("lshift", lshift, typ, 0, nil, "load", "c"),
+ Valu("rshift", rshift, typ, 0, nil, "lshift", "c"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "rshift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go
new file mode 100644
index 0000000..d7d0b6f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shortcircuit.go
@@ -0,0 +1,513 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// shortcircuit finds situations where branch directions
+// are always correlated and rewrites the CFG to take
+// advantage of that fact.
+// This optimization is useful for compiling && and || expressions.
+func shortcircuit(f *Func) {
+ // Step 1: Replace a phi arg with a constant if that arg
+ // is the control value of a preceding If block.
+ // b1:
+ // If a goto b2 else b3
+ // b2: <- b1 ...
+ // x = phi(a, ...)
+ //
+ // We can replace the "a" in the phi with the constant true.
+ var ct, cf *Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ if !v.Type.IsBoolean() {
+ continue
+ }
+ for i, a := range v.Args {
+ e := b.Preds[i]
+ p := e.b
+ if p.Kind != BlockIf {
+ continue
+ }
+ if p.Controls[0] != a {
+ continue
+ }
+ if e.i == 0 {
+ if ct == nil {
+ ct = f.ConstBool(f.Config.Types.Bool, true)
+ }
+ v.SetArg(i, ct)
+ } else {
+ if cf == nil {
+ cf = f.ConstBool(f.Config.Types.Bool, false)
+ }
+ v.SetArg(i, cf)
+ }
+ }
+ }
+ }
+
+ // Step 2: Redirect control flow around known branches.
+ // p:
+ // ... goto b ...
+ // b: <- p ...
+ // v = phi(true, ...)
+ // if v goto t else u
+ // We can redirect p to go directly to t instead of b.
+ // (If v is not live after b).
+ fuse(f, fuseTypePlain|fuseTypeShortCircuit)
+}
+
+// shortcircuitBlock checks for a CFG in which an If block
+// has as its control value a Phi that has a ConstBool arg.
+// In some such cases, we can rewrite the CFG into a flatter form.
+//
+// (1) Look for a CFG of the form
+//
+// p other pred(s)
+// \ /
+// b
+// / \
+// t other succ
+//
+// in which b is an If block containing a single phi value with a single use (b's Control),
+// which has a ConstBool arg.
+// p is the predecessor corresponding to the argument slot in which the ConstBool is found.
+// t is the successor corresponding to the value of the ConstBool arg.
+//
+// Rewrite this into
+//
+// p other pred(s)
+// | /
+// | b
+// |/ \
+// t u
+//
+// and remove the appropriate phi arg(s).
+//
+// (2) Look for a CFG of the form
+//
+// p q
+// \ /
+// b
+// / \
+// t u
+//
+// in which b is as described in (1).
+// However, b may also contain other phi values.
+// The CFG will be modified as described in (1).
+// However, in order to handle those other phi values,
+// for each other phi value w, we must be able to eliminate w from b.
+// We can do that though a combination of moving w to a different block
+// and rewriting uses of w to use a different value instead.
+// See shortcircuitPhiPlan for details.
+func shortcircuitBlock(b *Block) bool {
+ if b.Kind != BlockIf {
+ return false
+ }
+ // Look for control values of the form Copy(Not(Copy(Phi(const, ...)))).
+ // Those must be the only values in the b, and they each must be used only by b.
+ // Track the negations so that we can swap successors as needed later.
+ ctl := b.Controls[0]
+ nval := 1 // the control value
+ var swap int64
+ for ctl.Uses == 1 && ctl.Block == b && (ctl.Op == OpCopy || ctl.Op == OpNot) {
+ if ctl.Op == OpNot {
+ swap = 1 ^ swap
+ }
+ ctl = ctl.Args[0]
+ nval++ // wrapper around control value
+ }
+ if ctl.Op != OpPhi || ctl.Block != b || ctl.Uses != 1 {
+ return false
+ }
+ nOtherPhi := 0
+ for _, w := range b.Values {
+ if w.Op == OpPhi && w != ctl {
+ nOtherPhi++
+ }
+ }
+ if nOtherPhi > 0 && len(b.Preds) != 2 {
+ // We rely on b having exactly two preds in shortcircuitPhiPlan
+ // to reason about the values of phis.
+ return false
+ }
+ if len(b.Values) != nval+nOtherPhi {
+ return false
+ }
+ if nOtherPhi > 0 {
+ // Check for any phi which is the argument of another phi.
+ // These cases are tricky, as substitutions done by replaceUses
+ // are no longer trivial to do in any ordering. See issue 45175.
+ m := make(map[*Value]bool, 1+nOtherPhi)
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ m[v] = true
+ }
+ }
+ for v := range m {
+ for _, a := range v.Args {
+ if a != v && m[a] {
+ return false
+ }
+ }
+ }
+ }
+
+ // Locate index of first const phi arg.
+ cidx := -1
+ for i, a := range ctl.Args {
+ if a.Op == OpConstBool {
+ cidx = i
+ break
+ }
+ }
+ if cidx == -1 {
+ return false
+ }
+
+ // p is the predecessor corresponding to cidx.
+ pe := b.Preds[cidx]
+ p := pe.b
+ pi := pe.i
+
+ // t is the "taken" branch: the successor we always go to when coming in from p.
+ ti := 1 ^ ctl.Args[cidx].AuxInt ^ swap
+ te := b.Succs[ti]
+ t := te.b
+ if p == b || t == b {
+ // This is an infinite loop; we can't remove it. See issue 33903.
+ return false
+ }
+
+ var fixPhi func(*Value, int)
+ if nOtherPhi > 0 {
+ fixPhi = shortcircuitPhiPlan(b, ctl, cidx, ti)
+ if fixPhi == nil {
+ return false
+ }
+ }
+
+ // We're committed. Update CFG and Phis.
+ // If you modify this section, update shortcircuitPhiPlan corresponding.
+
+ // Remove b's incoming edge from p.
+ b.removePred(cidx)
+ b.removePhiArg(ctl, cidx)
+
+ // Redirect p's outgoing edge to t.
+ p.Succs[pi] = Edge{t, len(t.Preds)}
+
+ // Fix up t to have one more predecessor.
+ t.Preds = append(t.Preds, Edge{p, pi})
+ for _, v := range t.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.AddArg(v.Args[te.i])
+ }
+
+ if nOtherPhi != 0 {
+ // Adjust all other phis as necessary.
+ // Use a plain for loop instead of range because fixPhi may move phis,
+ // thus modifying b.Values.
+ for i := 0; i < len(b.Values); i++ {
+ phi := b.Values[i]
+ if phi.Uses == 0 || phi == ctl || phi.Op != OpPhi {
+ continue
+ }
+ fixPhi(phi, i)
+ if phi.Block == b {
+ continue
+ }
+ // phi got moved to a different block with v.moveTo.
+ // Adjust phi values in this new block that refer
+ // to phi to refer to the corresponding phi arg instead.
+ // phi used to be evaluated prior to this block,
+ // and now it is evaluated in this block.
+ for _, v := range phi.Block.Values {
+ if v.Op != OpPhi || v == phi {
+ continue
+ }
+ for j, a := range v.Args {
+ if a == phi {
+ v.SetArg(j, phi.Args[j])
+ }
+ }
+ }
+ if phi.Uses != 0 {
+ phielimValue(phi)
+ } else {
+ phi.reset(OpInvalid)
+ }
+ i-- // v.moveTo put a new value at index i; reprocess
+ }
+
+ // We may have left behind some phi values with no uses
+ // but the wrong number of arguments. Eliminate those.
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ }
+ }
+ }
+
+ if len(b.Preds) == 0 {
+ // Block is now dead.
+ b.Kind = BlockInvalid
+ }
+
+ phielimValue(ctl)
+ return true
+}
+
+// shortcircuitPhiPlan returns a function to handle non-ctl phi values in b,
+// where b is as described in shortcircuitBlock.
+// The returned function accepts a value v
+// and the index i of v in v.Block: v.Block.Values[i] == v.
+// If the returned function moves v to a different block, it will use v.moveTo.
+// cidx is the index in ctl of the ConstBool arg.
+// ti is the index in b.Succs of the always taken branch when arriving from p.
+// If shortcircuitPhiPlan returns nil, there is no plan available,
+// and the CFG modifications must not proceed.
+// The returned function assumes that shortcircuitBlock has completed its CFG modifications.
+func shortcircuitPhiPlan(b *Block, ctl *Value, cidx int, ti int64) func(*Value, int) {
+ // t is the "taken" branch: the successor we always go to when coming in from p.
+ t := b.Succs[ti].b
+ // u is the "untaken" branch: the successor we never go to when coming in from p.
+ u := b.Succs[1^ti].b
+
+ // In the following CFG matching, ensure that b's preds are entirely distinct from b's succs.
+ // This is probably a stronger condition than required, but this happens extremely rarely,
+ // and it makes it easier to avoid getting deceived by pretty ASCII charts. See #44465.
+ if p0, p1 := b.Preds[0].b, b.Preds[1].b; p0 == t || p1 == t || p0 == u || p1 == u {
+ return nil
+ }
+
+ // Look for some common CFG structures
+ // in which the outbound paths from b merge,
+ // with no other preds joining them.
+ // In these cases, we can reconstruct what the value
+ // of any phi in b must be in the successor blocks.
+
+ if len(t.Preds) == 1 && len(t.Succs) == 1 &&
+ len(u.Preds) == 1 && len(u.Succs) == 1 &&
+ t.Succs[0].b == u.Succs[0].b && len(t.Succs[0].b.Preds) == 2 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ // \ /
+ // m
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ // \ /
+ // m
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ m := t.Succs[0].b
+ return func(v *Value, i int) {
+ // Replace any uses of v in t and u with the value v must have,
+ // given that we have arrived at that block.
+ // Then move v to m and adjust its value accordingly;
+ // this handles all other uses of v.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(m, i)
+ // The phi in m belongs to whichever pred idx corresponds to t.
+ if m.Preds[0].b == t {
+ v.SetArgs2(phi, argQ)
+ } else {
+ v.SetArgs2(argQ, phi)
+ }
+ }
+ }
+
+ if len(t.Preds) == 2 && len(u.Preds) == 1 && len(u.Succs) == 1 && u.Succs[0].b == t {
+ // p q
+ // \ /
+ // b
+ // |\
+ // | u
+ // |/
+ // t
+ //
+ // After the CFG modifications, this will look like
+ //
+ // q
+ // /
+ // b
+ // |\
+ // p | u
+ // \|/
+ // t
+ //
+ // NB: t.Preds is (b or u, b or u, p).
+ return func(v *Value, i int) {
+ // Replace any uses of v in u. Then move v to t.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ v.moveTo(t, i)
+ v.SetArgs3(argQ, argQ, argP)
+ }
+ }
+
+ if len(u.Preds) == 2 && len(t.Preds) == 1 && len(t.Succs) == 1 && t.Succs[0].b == u {
+ // p q
+ // \ /
+ // b
+ // /|
+ // t |
+ // \|
+ // u
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/|
+ // t |
+ // \|
+ // u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in t. Then move v to u.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(u, i)
+ v.SetArgs2(argQ, phi)
+ }
+ }
+
+ // Look for some common CFG structures
+ // in which one outbound path from b exits,
+ // with no other preds joining.
+ // In these cases, we can reconstruct what the value
+ // of any phi in b must be in the path leading to exit,
+ // and move the phi to the non-exit path.
+
+ if len(t.Preds) == 1 && len(u.Preds) == 1 && len(t.Succs) == 0 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ //
+ // where t is an Exit/Ret block.
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in t and x. Then move v to u.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ // If there are no uses of v in t or x, this phi will be unused.
+ // That's OK; it's not worth the cost to prevent that.
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(u, i)
+ v.SetArgs1(argQ)
+ }
+ }
+
+ if len(u.Preds) == 1 && len(t.Preds) == 1 && len(u.Succs) == 0 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ //
+ // where u is an Exit/Ret block.
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in u (and x). Then move v to t.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ v.moveTo(t, i)
+ v.SetArgs2(argQ, argP)
+ }
+ }
+
+ // TODO: handle more cases; shortcircuit optimizations turn out to be reasonably high impact
+ return nil
+}
+
+// replaceUses replaces all uses of old in b with new.
+func (b *Block) replaceUses(old, new *Value) {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a == old {
+ v.SetArg(i, new)
+ }
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if v == old {
+ b.ReplaceControl(i, new)
+ }
+ }
+}
+
+// moveTo moves v to dst, adjusting the appropriate Block.Values slices.
+// The caller is responsible for ensuring that this is safe.
+// i is the index of v in v.Block.Values.
+func (v *Value) moveTo(dst *Block, i int) {
+ if dst.Func.scheduled {
+ v.Fatalf("moveTo after scheduling")
+ }
+ src := v.Block
+ if src.Values[i] != v {
+ v.Fatalf("moveTo bad index %d", v, i)
+ }
+ if src == dst {
+ return
+ }
+ v.Block = dst
+ dst.Values = append(dst.Values, v)
+ last := len(src.Values) - 1
+ src.Values[i] = src.Values[last]
+ src.Values[last] = nil
+ src.Values = src.Values[:last]
+}
diff --git a/src/cmd/compile/internal/ssa/shortcircuit_test.go b/src/cmd/compile/internal/ssa/shortcircuit_test.go
new file mode 100644
index 0000000..b25eeb4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shortcircuit_test.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestShortCircuit(t *testing.T) {
+ c := testConfig(t)
+
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, nil),
+ Goto("b1")),
+ Bloc("b1",
+ Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "arg1", "arg2"),
+ If("cmp1", "b2", "b3")),
+ Bloc("b2",
+ Valu("cmp2", OpLess64, c.config.Types.Bool, 0, nil, "arg2", "arg3"),
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi2", OpPhi, c.config.Types.Bool, 0, nil, "cmp1", "cmp2"),
+ If("phi2", "b4", "b5")),
+ Bloc("b4",
+ Valu("cmp3", OpLess64, c.config.Types.Bool, 0, nil, "arg3", "arg1"),
+ Goto("b5")),
+ Bloc("b5",
+ Valu("phi3", OpPhi, c.config.Types.Bool, 0, nil, "phi2", "cmp3"),
+ If("phi3", "b6", "b7")),
+ Bloc("b6",
+ Exit("mem")),
+ Bloc("b7",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ shortcircuit(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ t.Errorf("phi %s remains", v)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
new file mode 100644
index 0000000..a27002e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -0,0 +1,39 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the size of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Value{}, 72, 112},
+ {Block{}, 164, 304},
+ {LocalSlot{}, 28, 40},
+ {valState{}, 28, 40},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go
new file mode 100644
index 0000000..351f824
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/softfloat.go
@@ -0,0 +1,80 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "math"
+)
+
+func softfloat(f *Func) {
+ if !f.Config.SoftFloat {
+ return
+ }
+ newInt64 := false
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsFloat() {
+ f.unCache(v)
+ switch v.Op {
+ case OpPhi, OpLoad, OpArg:
+ if v.Type.Size() == 4 {
+ v.Type = f.Config.Types.UInt32
+ } else {
+ v.Type = f.Config.Types.UInt64
+ }
+ case OpConst32F:
+ v.Op = OpConst32
+ v.Type = f.Config.Types.UInt32
+ v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt))))
+ case OpConst64F:
+ v.Op = OpConst64
+ v.Type = f.Config.Types.UInt64
+ case OpNeg32F:
+ arg0 := v.Args[0]
+ v.reset(OpXor32)
+ v.Type = f.Config.Types.UInt32
+ v.AddArg(arg0)
+ mask := v.Block.NewValue0(v.Pos, OpConst32, v.Type)
+ mask.AuxInt = -0x80000000
+ v.AddArg(mask)
+ case OpNeg64F:
+ arg0 := v.Args[0]
+ v.reset(OpXor64)
+ v.Type = f.Config.Types.UInt64
+ v.AddArg(arg0)
+ mask := v.Block.NewValue0(v.Pos, OpConst64, v.Type)
+ mask.AuxInt = -0x8000000000000000
+ v.AddArg(mask)
+ case OpRound32F:
+ v.Op = OpCopy
+ v.Type = f.Config.Types.UInt32
+ case OpRound64F:
+ v.Op = OpCopy
+ v.Type = f.Config.Types.UInt64
+ }
+ newInt64 = newInt64 || v.Type.Size() == 8
+ } else if (v.Op == OpStore || v.Op == OpZero || v.Op == OpMove) && v.Aux.(*types.Type).IsFloat() {
+ switch size := v.Aux.(*types.Type).Size(); size {
+ case 4:
+ v.Aux = f.Config.Types.UInt32
+ case 8:
+ v.Aux = f.Config.Types.UInt64
+ newInt64 = true
+ default:
+ v.Fatalf("bad float type with size %d", size)
+ }
+ }
+ }
+ }
+
+ if newInt64 && f.Config.RegSize == 4 {
+ // On 32bit arch, decompose Uint64 introduced in the switch above.
+ decomposeBuiltIn(f)
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues)
+ }
+
+}
diff --git a/src/cmd/compile/internal/ssa/sparsemap.go b/src/cmd/compile/internal/ssa/sparsemap.go
new file mode 100644
index 0000000..9443c8b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsemap.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseEntry struct {
+ key ID
+ val int32
+}
+
+type sparseMap struct {
+ dense []sparseEntry
+ sparse []int32
+}
+
+// newSparseMap returns a sparseMap that can map
+// integers between 0 and n-1 to int32s.
+func newSparseMap(n int) *sparseMap {
+ return &sparseMap{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseMap) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseMap) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseMap) contains(k ID) bool {
+ i := s.sparse[k]
+ return i < int32(len(s.dense)) && s.dense[i].key == k
+}
+
+// get returns the value for key k, or -1 if k does
+// not appear in the map.
+func (s *sparseMap) get(k ID) int32 {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ return s.dense[i].val
+ }
+ return -1
+}
+
+func (s *sparseMap) set(k ID, v int32) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val = v
+ return
+ }
+ s.dense = append(s.dense, sparseEntry{k, v})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+// setBit sets the v'th bit of k's value, where 0 <= v < 32
+func (s *sparseMap) setBit(k ID, v uint) {
+ if v >= 32 {
+ panic("bit index too large.")
+ }
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val |= 1 << v
+ return
+ }
+ s.dense = append(s.dense, sparseEntry{k, 1 << v})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseMap) remove(k ID) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y.key] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+func (s *sparseMap) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseMap) contents() []sparseEntry {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparsemappos.go b/src/cmd/compile/internal/ssa/sparsemappos.go
new file mode 100644
index 0000000..60bad82
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsemappos.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseEntryPos struct {
+ key ID
+ val int32
+ pos src.XPos
+}
+
+type sparseMapPos struct {
+ dense []sparseEntryPos
+ sparse []int32
+}
+
+// newSparseMapPos returns a sparseMapPos that can map
+// integers between 0 and n-1 to the pair <int32,src.XPos>.
+func newSparseMapPos(n int) *sparseMapPos {
+ return &sparseMapPos{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseMapPos) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseMapPos) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseMapPos) contains(k ID) bool {
+ i := s.sparse[k]
+ return i < int32(len(s.dense)) && s.dense[i].key == k
+}
+
+// get returns the value for key k, or -1 if k does
+// not appear in the map.
+func (s *sparseMapPos) get(k ID) int32 {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ return s.dense[i].val
+ }
+ return -1
+}
+
+func (s *sparseMapPos) set(k ID, v int32, a src.XPos) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val = v
+ s.dense[i].pos = a
+ return
+ }
+ s.dense = append(s.dense, sparseEntryPos{k, v, a})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseMapPos) remove(k ID) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y.key] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+func (s *sparseMapPos) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseMapPos) contents() []sparseEntryPos {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparseset.go b/src/cmd/compile/internal/ssa/sparseset.go
new file mode 100644
index 0000000..07d40dc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparseset.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseSet struct {
+ dense []ID
+ sparse []int32
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1.
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseSet) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseSet) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseSet) contains(x ID) bool {
+ i := s.sparse[x]
+ return i < int32(len(s.dense)) && s.dense[i] == x
+}
+
+func (s *sparseSet) add(x ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseSet) addAll(a []ID) {
+ for _, x := range a {
+ s.add(x)
+ }
+}
+
+func (s *sparseSet) addAllValues(a []*Value) {
+ for _, v := range a {
+ s.add(v.ID)
+ }
+}
+
+func (s *sparseSet) remove(x ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+// pop removes an arbitrary element from the set.
+// The set must be nonempty.
+func (s *sparseSet) pop() ID {
+ x := s.dense[len(s.dense)-1]
+ s.dense = s.dense[:len(s.dense)-1]
+ return x
+}
+
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseSet) contents() []ID {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
new file mode 100644
index 0000000..6f2bd04
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -0,0 +1,242 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "strings"
+)
+
+type SparseTreeNode struct {
+ child *Block
+ sibling *Block
+ parent *Block
+
+ // Every block has 6 numbers associated with it:
+ // entry-1, entry, entry+1, exit-1, and exit, exit+1.
+ // entry and exit are conceptually the top of the block (phi functions)
+ // entry+1 and exit-1 are conceptually the bottom of the block (ordinary defs)
+ // entry-1 and exit+1 are conceptually "just before" the block (conditions flowing in)
+ //
+ // This simplifies life if we wish to query information about x
+ // when x is both an input to and output of a block.
+ entry, exit int32
+}
+
+func (s *SparseTreeNode) String() string {
+ return fmt.Sprintf("[%d,%d]", s.entry, s.exit)
+}
+
+func (s *SparseTreeNode) Entry() int32 {
+ return s.entry
+}
+
+func (s *SparseTreeNode) Exit() int32 {
+ return s.exit
+}
+
+const (
+ // When used to lookup up definitions in a sparse tree,
+ // these adjustments to a block's entry (+adjust) and
+ // exit (-adjust) numbers allow a distinction to be made
+ // between assignments (typically branch-dependent
+ // conditionals) occurring "before" the block (e.g., as inputs
+ // to the block and its phi functions), "within" the block,
+ // and "after" the block.
+ AdjustBefore = -1 // defined before phi
+ AdjustWithin = 0 // defined by phi
+ AdjustAfter = 1 // defined within block
+)
+
+// A SparseTree is a tree of Blocks.
+// It allows rapid ancestor queries,
+// such as whether one block dominates another.
+type SparseTree []SparseTreeNode
+
+// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID).
+func newSparseTree(f *Func, parentOf []*Block) SparseTree {
+ t := make(SparseTree, f.NumBlocks())
+ for _, b := range f.Blocks {
+ n := &t[b.ID]
+ if p := parentOf[b.ID]; p != nil {
+ n.parent = p
+ n.sibling = t[p.ID].child
+ t[p.ID].child = b
+ }
+ }
+ t.numberBlock(f.Entry, 1)
+ return t
+}
+
+// newSparseOrderedTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
+// children will appear in the reverse of their order in reverseOrder
+// in particular, if reverseOrder is a dfs-reversePostOrder, then the root-to-children
+// walk of the tree will yield a pre-order.
+func newSparseOrderedTree(f *Func, parentOf, reverseOrder []*Block) SparseTree {
+ t := make(SparseTree, f.NumBlocks())
+ for _, b := range reverseOrder {
+ n := &t[b.ID]
+ if p := parentOf[b.ID]; p != nil {
+ n.parent = p
+ n.sibling = t[p.ID].child
+ t[p.ID].child = b
+ }
+ }
+ t.numberBlock(f.Entry, 1)
+ return t
+}
+
+// treestructure provides a string description of the dominator
+// tree and flow structure of block b and all blocks that it
+// dominates.
+func (t SparseTree) treestructure(b *Block) string {
+ return t.treestructure1(b, 0)
+}
+func (t SparseTree) treestructure1(b *Block, i int) string {
+ s := "\n" + strings.Repeat("\t", i) + b.String() + "->["
+ for i, e := range b.Succs {
+ if i > 0 {
+ s += ","
+ }
+ s += e.b.String()
+ }
+ s += "]"
+ if c0 := t[b.ID].child; c0 != nil {
+ s += "("
+ for c := c0; c != nil; c = t[c.ID].sibling {
+ if c != c0 {
+ s += " "
+ }
+ s += t.treestructure1(c, i+1)
+ }
+ s += ")"
+ }
+ return s
+}
+
+// numberBlock assigns entry and exit numbers for b and b's
+// children in an in-order walk from a gappy sequence, where n
+// is the first number not yet assigned or reserved. N should
+// be larger than zero. For each entry and exit number, the
+// values one larger and smaller are reserved to indicate
+// "strictly above" and "strictly below". numberBlock returns
+// the smallest number not yet assigned or reserved (i.e., the
+// exit number of the last block visited, plus two, because
+// last.exit+1 is a reserved value.)
+//
+// examples:
+//
+// single node tree Root, call with n=1
+// entry=2 Root exit=5; returns 7
+//
+// two node tree, Root->Child, call with n=1
+// entry=2 Root exit=11; returns 13
+// entry=5 Child exit=8
+//
+// three node tree, Root->(Left, Right), call with n=1
+// entry=2 Root exit=17; returns 19
+// entry=5 Left exit=8; entry=11 Right exit=14
+//
+// This is the in-order sequence of assigned and reserved numbers
+// for the last example:
+// root left left right right root
+// 1 2e 3 | 4 5e 6 | 7 8x 9 | 10 11e 12 | 13 14x 15 | 16 17x 18
+
+func (t SparseTree) numberBlock(b *Block, n int32) int32 {
+ // reserve n for entry-1, assign n+1 to entry
+ n++
+ t[b.ID].entry = n
+ // reserve n+1 for entry+1, n+2 is next free number
+ n += 2
+ for c := t[b.ID].child; c != nil; c = t[c.ID].sibling {
+ n = t.numberBlock(c, n) // preserves n = next free number
+ }
+ // reserve n for exit-1, assign n+1 to exit
+ n++
+ t[b.ID].exit = n
+ // reserve n+1 for exit+1, n+2 is next free number, returned.
+ return n + 2
+}
+
+// Sibling returns a sibling of x in the dominator tree (i.e.,
+// a node with the same immediate dominator) or nil if there
+// are no remaining siblings in the arbitrary but repeatable
+// order chosen. Because the Child-Sibling order is used
+// to assign entry and exit numbers in the treewalk, those
+// numbers are also consistent with this order (i.e.,
+// Sibling(x) has entry number larger than x's exit number).
+func (t SparseTree) Sibling(x *Block) *Block {
+ return t[x.ID].sibling
+}
+
+// Child returns a child of x in the dominator tree, or
+// nil if there are none. The choice of first child is
+// arbitrary but repeatable.
+func (t SparseTree) Child(x *Block) *Block {
+ return t[x.ID].child
+}
+
+// Parent returns the parent of x in the dominator tree, or
+// nil if x is the function's entry.
+func (t SparseTree) Parent(x *Block) *Block {
+ return t[x.ID].parent
+}
+
+// IsAncestorEq reports whether x is an ancestor of or equal to y.
+func (t SparseTree) IsAncestorEq(x, y *Block) bool {
+ if x == y {
+ return true
+ }
+ xx := &t[x.ID]
+ yy := &t[y.ID]
+ return xx.entry <= yy.entry && yy.exit <= xx.exit
+}
+
+// isAncestor reports whether x is a strict ancestor of y.
+func (t SparseTree) isAncestor(x, y *Block) bool {
+ if x == y {
+ return false
+ }
+ xx := &t[x.ID]
+ yy := &t[y.ID]
+ return xx.entry < yy.entry && yy.exit < xx.exit
+}
+
+// domorder returns a value for dominator-oriented sorting.
+// Block domination does not provide a total ordering,
+// but domorder two has useful properties.
+// 1. If domorder(x) > domorder(y) then x does not dominate y.
+// 2. If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
+// then x does not dominate z.
+//
+// Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
+// Property (2) allows searches for dominated blocks to exit early.
+func (t SparseTree) domorder(x *Block) int32 {
+ // Here is an argument that entry(x) provides the properties documented above.
+ //
+ // Entry and exit values are assigned in a depth-first dominator tree walk.
+ // For all blocks x and y, one of the following holds:
+ //
+ // (x-dom-y) x dominates y => entry(x) < entry(y) < exit(y) < exit(x)
+ // (y-dom-x) y dominates x => entry(y) < entry(x) < exit(x) < exit(y)
+ // (x-then-y) neither x nor y dominates the other and x walked before y => entry(x) < exit(x) < entry(y) < exit(y)
+ // (y-then-x) neither x nor y dominates the other and y walked before y => entry(y) < exit(y) < entry(x) < exit(x)
+ //
+ // entry(x) > entry(y) eliminates case x-dom-y. This provides property (1) above.
+ //
+ // For property (2), assume entry(x) < entry(y) and entry(y) < entry(z) and x does not dominate y.
+ // entry(x) < entry(y) allows cases x-dom-y and x-then-y.
+ // But by supposition, x does not dominate y. So we have x-then-y.
+ //
+ // For contradiction, assume x dominates z.
+ // Then entry(x) < entry(z) < exit(z) < exit(x).
+ // But we know x-then-y, so entry(x) < exit(x) < entry(y) < exit(y).
+ // Combining those, entry(x) < entry(z) < exit(z) < exit(x) < entry(y) < exit(y).
+ // By supposition, entry(y) < entry(z), which allows cases y-dom-z and y-then-z.
+ // y-dom-z requires entry(y) < entry(z), but we have entry(z) < entry(y).
+ // y-then-z requires exit(y) < entry(z), but we have entry(z) < exit(y).
+ // We have a contradiction, so x does not dominate z, as required.
+ return t[x.ID].entry
+}
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
new file mode 100644
index 0000000..c9ca778
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -0,0 +1,454 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: live at start of block instead?
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+type stackAllocState struct {
+ f *Func
+
+ // live is the output of stackalloc.
+ // live[b.id] = live values at the end of block b.
+ live [][]ID
+
+ // The following slices are reused across multiple users
+ // of stackAllocState.
+ values []stackValState
+ interfere [][]ID // interfere[v.id] = values that interfere with v.
+ names []LocalSlot
+
+ nArgSlot, // Number of Values sourced to arg slot
+ nNotNeed, // Number of Values not needing a stack slot
+ nNamedSlot, // Number of Values using a named stack slot
+ nReuse, // Number of values reusing a stack slot
+ nAuto, // Number of autos allocated for stack slots.
+ nSelfInterfere int32 // Number of self-interferences
+}
+
+func newStackAllocState(f *Func) *stackAllocState {
+ s := f.Cache.stackAllocState
+ if s == nil {
+ return new(stackAllocState)
+ }
+ if s.f != nil {
+ f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free")
+ }
+ return s
+}
+
+func putStackAllocState(s *stackAllocState) {
+ for i := range s.values {
+ s.values[i] = stackValState{}
+ }
+ for i := range s.interfere {
+ s.interfere[i] = nil
+ }
+ for i := range s.names {
+ s.names[i] = LocalSlot{}
+ }
+ s.f.Cache.stackAllocState = s
+ s.f = nil
+ s.live = nil
+ s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
+}
+
+type stackValState struct {
+ typ *types.Type
+ spill *Value
+ needSlot bool
+ isArg bool
+}
+
+// stackalloc allocates storage in the stack frame for
+// all Values that did not get a register.
+// Returns a map from block ID to the stack values live at the end of that block.
+func stackalloc(f *Func, spillLive [][]ID) [][]ID {
+ if f.pass.debug > stackDebug {
+ fmt.Println("before stackalloc")
+ fmt.Println(f.String())
+ }
+ s := newStackAllocState(f)
+ s.init(f, spillLive)
+ defer putStackAllocState(s)
+
+ s.stackalloc()
+ if f.pass.stats > 0 {
+ f.LogStat("stack_alloc_stats",
+ s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
+ s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
+ s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
+ }
+
+ return s.live
+}
+
+func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
+ s.f = f
+
+ // Initialize value information.
+ if n := f.NumValues(); cap(s.values) >= n {
+ s.values = s.values[:n]
+ } else {
+ s.values = make([]stackValState, n)
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ s.values[v.ID].typ = v.Type
+ s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack
+ s.values[v.ID].isArg = hasAnyArgOp(v)
+ if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
+ fmt.Printf("%s needs a stack slot\n", v)
+ }
+ if v.Op == OpStoreReg {
+ s.values[v.Args[0].ID].spill = v
+ }
+ }
+ }
+
+ // Compute liveness info for values needing a slot.
+ s.computeLive(spillLive)
+
+ // Build interference graph among values needing a slot.
+ s.buildInterferenceGraph()
+}
+
+func (s *stackAllocState) stackalloc() {
+ f := s.f
+
+ // Build map from values to their names, if any.
+ // A value may be associated with more than one name (e.g. after
+ // the assignment i=j). This step picks one name per value arbitrarily.
+ if n := f.NumValues(); cap(s.names) >= n {
+ s.names = s.names[:n]
+ } else {
+ s.names = make([]LocalSlot, n)
+ }
+ names := s.names
+ empty := LocalSlot{}
+ for _, name := range f.Names {
+ // Note: not "range f.NamedValues" above, because
+ // that would be nondeterministic.
+ for _, v := range f.NamedValues[*name] {
+ if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ aux := v.Aux.(*AuxNameOffset)
+ // Never let an arg be bound to a differently named thing.
+ if name.N != aux.Name || name.Off != aux.Offset {
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc register arg %s skipping name %s\n", v, name)
+ }
+ continue
+ }
+ } else if name.N.Class == ir.PPARAM && v.Op != OpArg {
+ // PPARAM's only bind to OpArg
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc PPARAM name %s skipping non-Arg %s\n", name, v)
+ }
+ continue
+ }
+
+ if names[v.ID] == empty {
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc value %s to name %s\n", v, *name)
+ }
+ names[v.ID] = *name
+ }
+ }
+ }
+
+ // Allocate args to their assigned locations.
+ for _, v := range f.Entry.Values {
+ if !hasAnyArgOp(v) {
+ continue
+ }
+ if v.Aux == nil {
+ f.Fatalf("%s has nil Aux\n", v.LongString())
+ }
+ if v.Op == OpArg {
+ loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt}
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc OpArg %s to %s\n", v, loc)
+ }
+ f.setHome(v, loc)
+ continue
+ }
+ // You might think this below would be the right idea, but you would be wrong.
+ // It almost works; as of 105a6e9518 - 2021-04-23,
+ // GOSSAHASH=11011011001011111 == cmd/compile/internal/noder.(*noder).embedded
+ // is compiled incorrectly. I believe the cause is one of those SSA-to-registers
+ // puzzles that the register allocator untangles; in the event that a register
+ // parameter does not end up bound to a name, "fixing" it is a bad idea.
+ //
+ //if f.DebugTest {
+ // if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ // aux := v.Aux.(*AuxNameOffset)
+ // loc := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset}
+ // if f.pass.debug > stackDebug {
+ // fmt.Printf("stackalloc Op%s %s to %s\n", v.Op, v, loc)
+ // }
+ // names[v.ID] = loc
+ // continue
+ // }
+ //}
+
+ }
+
+ // For each type, we keep track of all the stack slots we
+ // have allocated for that type.
+ // TODO: share slots among equivalent types. We would need to
+ // only share among types with the same GC signature. See the
+ // type.Equal calls below for where this matters.
+ locations := map[*types.Type][]LocalSlot{}
+
+ // Each time we assign a stack slot to a value v, we remember
+ // the slot we used via an index into locations[v.Type].
+ slots := f.Cache.allocIntSlice(f.NumValues())
+ defer f.Cache.freeIntSlice(slots)
+ for i := range slots {
+ slots[i] = -1
+ }
+
+ // Pick a stack slot for each value needing one.
+ used := f.Cache.allocBoolSlice(f.NumValues())
+ defer f.Cache.freeBoolSlice(used)
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !s.values[v.ID].needSlot {
+ s.nNotNeed++
+ continue
+ }
+ if hasAnyArgOp(v) {
+ s.nArgSlot++
+ continue // already picked
+ }
+
+ // If this is a named value, try to use the name as
+ // the spill location.
+ var name LocalSlot
+ if v.Op == OpStoreReg {
+ name = names[v.Args[0].ID]
+ } else {
+ name = names[v.ID]
+ }
+ if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
+ for _, id := range s.interfere[v.ID] {
+ h := f.getHome(id)
+ if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
+ // A variable can interfere with itself.
+ // It is rare, but it can happen.
+ s.nSelfInterfere++
+ goto noname
+ }
+ }
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, name)
+ }
+ s.nNamedSlot++
+ f.setHome(v, name)
+ continue
+ }
+
+ noname:
+ // Set of stack slots we could reuse.
+ locs := locations[v.Type]
+ // Mark all positions in locs used by interfering values.
+ for i := 0; i < len(locs); i++ {
+ used[i] = false
+ }
+ for _, xid := range s.interfere[v.ID] {
+ slot := slots[xid]
+ if slot >= 0 {
+ used[slot] = true
+ }
+ }
+ // Find an unused stack slot.
+ var i int
+ for i = 0; i < len(locs); i++ {
+ if !used[i] {
+ s.nReuse++
+ break
+ }
+ }
+ // If there is no unused stack slot, allocate a new one.
+ if i == len(locs) {
+ s.nAuto++
+ locs = append(locs, LocalSlot{N: f.NewLocal(v.Pos, v.Type), Type: v.Type, Off: 0})
+ locations[v.Type] = locs
+ }
+ // Use the stack variable at that index for v.
+ loc := locs[i]
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, loc)
+ }
+ f.setHome(v, loc)
+ slots[v.ID] = i
+ }
+ }
+}
+
+// computeLive computes a map from block ID to a list of
+// stack-slot-needing value IDs live at the end of that block.
+// TODO: this could be quadratic if lots of variables are live across lots of
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// of this function) require only linear size & time.
+func (s *stackAllocState) computeLive(spillLive [][]ID) {
+ s.live = make([][]ID, s.f.NumBlocks())
+ var phis []*Value
+ live := s.f.newSparseSet(s.f.NumValues())
+ defer s.f.retSparseSet(live)
+ t := s.f.newSparseSet(s.f.NumValues())
+ defer s.f.retSparseSet(t)
+
+ // Instead of iterating over f.Blocks, iterate over their postordering.
+ // Liveness information flows backward, so starting at the end
+ // increases the probability that we will stabilize quickly.
+ po := s.f.postorder()
+ for {
+ changed := false
+ for _, b := range po {
+ // Start with known live values at the end of the block
+ live.clear()
+ live.addAll(s.live[b.ID])
+
+ // Propagate backwards to the start of the block
+ phis = phis[:0]
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ live.remove(v.ID)
+ if v.Op == OpPhi {
+ // Save phi for later.
+ // Note: its args might need a stack slot even though
+ // the phi itself doesn't. So don't use needSlot.
+ if !v.Type.IsMemory() && !v.Type.IsVoid() {
+ phis = append(phis, v)
+ }
+ continue
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needSlot {
+ live.add(a.ID)
+ }
+ }
+ }
+
+ // for each predecessor of b, expand its list of live-at-end values
+ // invariant: s contains the values live at the start of b (excluding phi inputs)
+ for i, e := range b.Preds {
+ p := e.b
+ t.clear()
+ t.addAll(s.live[p.ID])
+ t.addAll(live.contents())
+ t.addAll(spillLive[p.ID])
+ for _, v := range phis {
+ a := v.Args[i]
+ if s.values[a.ID].needSlot {
+ t.add(a.ID)
+ }
+ if spill := s.values[a.ID].spill; spill != nil {
+ //TODO: remove? Subsumed by SpillUse?
+ t.add(spill.ID)
+ }
+ }
+ if t.size() == len(s.live[p.ID]) {
+ continue
+ }
+ // grow p's live set
+ s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
+ changed = true
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+ if s.f.pass.debug > stackDebug {
+ for _, b := range s.f.Blocks {
+ fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
+ }
+ }
+}
+
+func (f *Func) getHome(vid ID) Location {
+ if int(vid) >= len(f.RegAlloc) {
+ return nil
+ }
+ return f.RegAlloc[vid]
+}
+
+func (f *Func) setHome(v *Value, loc Location) {
+ for v.ID >= ID(len(f.RegAlloc)) {
+ f.RegAlloc = append(f.RegAlloc, nil)
+ }
+ f.RegAlloc[v.ID] = loc
+}
+
+func (s *stackAllocState) buildInterferenceGraph() {
+ f := s.f
+ if n := f.NumValues(); cap(s.interfere) >= n {
+ s.interfere = s.interfere[:n]
+ } else {
+ s.interfere = make([][]ID, n)
+ }
+ live := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(live)
+ for _, b := range f.Blocks {
+ // Propagate liveness backwards to the start of the block.
+ // Two values interfere if one is defined while the other is live.
+ live.clear()
+ live.addAll(s.live[b.ID])
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if s.values[v.ID].needSlot {
+ live.remove(v.ID)
+ for _, id := range live.contents() {
+ // Note: args can have different types and still interfere
+ // (with each other or with other values). See issue 23522.
+ if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || hasAnyArgOp(v) || s.values[id].isArg {
+ s.interfere[v.ID] = append(s.interfere[v.ID], id)
+ s.interfere[id] = append(s.interfere[id], v.ID)
+ }
+ }
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needSlot {
+ live.add(a.ID)
+ }
+ }
+ if hasAnyArgOp(v) && s.values[v.ID].needSlot {
+ // OpArg is an input argument which is pre-spilled.
+ // We add back v.ID here because we want this value
+ // to appear live even before this point. Being live
+ // all the way to the start of the entry block prevents other
+ // values from being allocated to the same slot and clobbering
+ // the input value before we have a chance to load it.
+
+ // TODO(register args) this is apparently not wrong for register args -- is it necessary?
+ live.add(v.ID)
+ }
+ }
+ }
+ if f.pass.debug > stackDebug {
+ for vid, i := range s.interfere {
+ if len(i) > 0 {
+ fmt.Printf("v%d interferes with", vid)
+ for _, x := range i {
+ fmt.Printf(" v%d", x)
+ }
+ fmt.Println()
+ }
+ }
+ }
+}
+
+func hasAnyArgOp(v *Value) bool {
+ return v.Op == OpArg || v.Op == OpArgIntReg || v.Op == OpArgFloatReg
+}
diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go
new file mode 100644
index 0000000..79bcab0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stmtlines_test.go
@@ -0,0 +1,158 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ cmddwarf "cmd/internal/dwarf"
+ "cmd/internal/quoted"
+ "debug/dwarf"
+ "debug/elf"
+ "debug/macho"
+ "debug/pe"
+ "fmt"
+ "internal/platform"
+ "internal/testenv"
+ "internal/xcoff"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "testing"
+)
+
+func open(path string) (*dwarf.Data, error) {
+ if fh, err := elf.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := pe.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := macho.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := xcoff.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ return nil, fmt.Errorf("unrecognized executable format")
+}
+
+func must(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+type Line struct {
+ File string
+ Line int
+}
+
+func TestStmtLines(t *testing.T) {
+ if !platform.ExecutableHasDWARF(runtime.GOOS, runtime.GOARCH) {
+ t.Skipf("skipping on %s/%s: no DWARF symbol table in executables", runtime.GOOS, runtime.GOARCH)
+ }
+
+ if runtime.GOOS == "aix" {
+ extld := os.Getenv("CC")
+ if extld == "" {
+ extld = "gcc"
+ }
+ extldArgs, err := quoted.Split(extld)
+ if err != nil {
+ t.Fatal(err)
+ }
+ enabled, err := cmddwarf.IsDWARFEnabledOnAIXLd(extldArgs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !enabled {
+ t.Skip("skipping on aix: no DWARF with ld version < 7.2.2 ")
+ }
+ }
+
+ // Build cmd/go forcing DWARF enabled, as a large test case.
+ dir := t.TempDir()
+ out, err := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-w=0", "-o", dir+"/test.exe", "cmd/go").CombinedOutput()
+ if err != nil {
+ t.Fatalf("go build: %v\n%s", err, out)
+ }
+
+ lines := map[Line]bool{}
+ dw, err := open(dir + "/test.exe")
+ must(err)
+ rdr := dw.Reader()
+ rdr.Seek(0)
+ for {
+ e, err := rdr.Next()
+ must(err)
+ if e == nil {
+ break
+ }
+ if e.Tag != dwarf.TagCompileUnit {
+ continue
+ }
+ pkgname, _ := e.Val(dwarf.AttrName).(string)
+ if pkgname == "runtime" {
+ continue
+ }
+ if pkgname == "crypto/internal/nistec/fiat" {
+ continue // golang.org/issue/49372
+ }
+ if e.Val(dwarf.AttrStmtList) == nil {
+ continue
+ }
+ lrdr, err := dw.LineReader(e)
+ must(err)
+
+ var le dwarf.LineEntry
+
+ for {
+ err := lrdr.Next(&le)
+ if err == io.EOF {
+ break
+ }
+ must(err)
+ fl := Line{le.File.Name, le.Line}
+ lines[fl] = lines[fl] || le.IsStmt
+ }
+ }
+
+ nonStmtLines := []Line{}
+ for line, isstmt := range lines {
+ if !isstmt {
+ nonStmtLines = append(nonStmtLines, line)
+ }
+ }
+
+ var m int
+ if runtime.GOARCH == "amd64" {
+ m = 1 // > 99% obtained on amd64, no backsliding
+ } else if runtime.GOARCH == "riscv64" {
+ m = 3 // XXX temporary update threshold to 97% for regabi
+ } else {
+ m = 2 // expect 98% elsewhere.
+ }
+
+ if len(nonStmtLines)*100 > m*len(lines) {
+ t.Errorf("Saw too many (%s, > %d%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", runtime.GOARCH, m, len(lines), len(nonStmtLines))
+ }
+ t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines))
+ if testing.Verbose() {
+ sort.Slice(nonStmtLines, func(i, j int) bool {
+ if nonStmtLines[i].File != nonStmtLines[j].File {
+ return nonStmtLines[i].File < nonStmtLines[j].File
+ }
+ return nonStmtLines[i].Line < nonStmtLines[j].Line
+ })
+ for _, l := range nonStmtLines {
+ t.Logf("%s:%d has no DWARF is_stmt mark\n", l.File, l.Line)
+ }
+ }
+ t.Logf("total=%d, nostmt=%d\n", len(lines), len(nonStmtLines))
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/b53456.go b/src/cmd/compile/internal/ssa/testdata/b53456.go
new file mode 100644
index 0000000..8104d3e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/b53456.go
@@ -0,0 +1,19 @@
+package main
+
+type T struct {
+ m map[int]int
+}
+
+func main() {
+ t := T{
+ m: make(map[int]int),
+ }
+ t.Inc(5)
+ t.Inc(7)
+}
+
+func (s *T) Inc(key int) {
+ v := s.m[key] // break, line 16
+ v++
+ s.m[key] = v // also here
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/convertline.go b/src/cmd/compile/internal/ssa/testdata/convertline.go
new file mode 100644
index 0000000..08f3ae8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/convertline.go
@@ -0,0 +1,16 @@
+package main
+
+import "fmt"
+
+func F[T any](n T) {
+ fmt.Printf("called\n")
+}
+
+func G[T any](n T) {
+ F(n)
+ fmt.Printf("after\n")
+}
+
+func main() {
+ G(3)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/fma.go b/src/cmd/compile/internal/ssa/testdata/fma.go
new file mode 100644
index 0000000..13a7ff1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/fma.go
@@ -0,0 +1,37 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+//go:noinline
+func f(x float64) float64 {
+ return x
+}
+
+func inlineFma(x, y, z float64) float64 {
+ return x + y*z
+}
+
+func main() {
+ w, x, y := 1.0, 1.0, 1.0
+ x = f(x + x/(1<<52))
+ w = f(w / (1 << 27))
+ y = f(y + y/(1<<52))
+ w0 := f(2 * w * (1 - w))
+ w1 := f(w * (1 + w))
+ x = x + w0*w1
+ x = inlineFma(x, w0, w1)
+ y = y + f(w0*w1)
+ y = y + f(w0*w1)
+ fmt.Println(x, y, x-y)
+
+ if x != y {
+ os.Exit(1)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts
new file mode 100644
index 0000000..a0404e4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts
@@ -0,0 +1,99 @@
+ ./testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+84: t := 0
+85: n := 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts
new file mode 100644
index 0000000..2be83ce
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts
@@ -0,0 +1,94 @@
+ ./testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts
new file mode 100644
index 0000000..72df60c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts
@@ -0,0 +1,123 @@
+ src/cmd/compile/internal/ssa/testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+l.begin.x = 1
+l.end.y = 4
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+hist = {array = <A>, len = 7, cap = 7}
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 5
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+84: t := 0
+85: n := 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 3
+i = 1
+t = 3
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 6
+i = 2
+t = 9
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 8
+i = 4
+t = 17
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 9
+i = 5
+t = 22
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
new file mode 100644
index 0000000..d3a34ac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
@@ -0,0 +1,143 @@
+ src/cmd/compile/internal/ssa/testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+l = {begin = {x = 1, y = 2}, end = {x = 3, y = 4}}
+dx = <Optimized out, as expected>
+dy = <Optimized out, as expected>
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+dx = 2
+dy = <Optimized out, as expected>
+61: sink = dx + dy //gdb-opt=(dx,dy)
+dx = 2
+dy = 2
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+dx = 2
+dy = <Optimized out, as expected>
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+scanner = (bufio.Scanner *) <A>
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 5
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 0
+t = 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 3
+n = 0
+t = 0
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 3
+n = 3
+t = 3
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 6
+t = 9
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 2
+n = 6
+t = 9
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 1
+n = 8
+t = 17
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 9
+t = 22
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.go b/src/cmd/compile/internal/ssa/testdata/hist.go
new file mode 100644
index 0000000..f8fa6e6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.go
@@ -0,0 +1,106 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is the input program for an end-to-end test of the DWARF produced
+// by the compiler. It is compiled with various flags, then the resulting
+// binary is "debugged" under the control of a harness. Because the compile+debug
+// step is time-consuming, the tests for different bugs are all accumulated here
+// so that their cost is only the time to "n" through the additional code.
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type point struct {
+ x, y int
+}
+
+type line struct {
+ begin, end point
+}
+
+var zero int
+var sink int
+
+//go:noinline
+func tinycall() {
+}
+
+func ensure(n int, sl []int) []int {
+ for len(sl) <= n {
+ sl = append(sl, 0)
+ }
+ return sl
+}
+
+var cannedInput string = `1
+1
+1
+2
+2
+2
+4
+4
+5
+`
+
+func test() {
+ // For #19868
+ l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+ tinycall() // this forces l etc to stack
+ dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+ dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+ sink = dx + dy //gdb-opt=(dx,dy)
+ // For #21098
+ hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+ var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+ if len(os.Args) > 1 {
+ var err error
+ reader, err = os.Open(os.Args[1])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "There was an error opening %s: %v\n", os.Args[1], err)
+ return
+ }
+ }
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() { //gdb-opt=(scanner/A)
+ s := scanner.Text()
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+ fmt.Fprintf(os.Stderr, "There was an error: %v\n", err)
+ return
+ }
+ hist = ensure(int(i), hist)
+ hist[int(i)]++
+ }
+ t := 0
+ n := 0
+ for i, a := range hist {
+ if a == 0 { //gdb-opt=(a,n,t)
+ continue
+ }
+ t += i * a
+ n += a
+ fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+ }
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts
new file mode 100644
index 0000000..a00934b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts
@@ -0,0 +1,11 @@
+ ./testdata/i22558.go
+19: func test(t *thing, u *thing) {
+20: if t.next != nil {
+23: fmt.Fprintf(os.Stderr, "%s\n", t.name)
+24: u.self = u
+25: t.self = t
+26: t.next = u
+27: for _, p := range t.stuff {
+28: if isFoo(t, p) {
+29: return
+44: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts
new file mode 100644
index 0000000..70dfa07
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts
@@ -0,0 +1,11 @@
+ src/cmd/compile/internal/ssa/testdata/i22558.go
+19: func test(t *thing, u *thing) {
+20: if t.next != nil {
+23: fmt.Fprintf(os.Stderr, "%s\n", t.name)
+24: u.self = u
+25: t.self = t
+26: t.next = u
+27: for _, p := range t.stuff {
+28: if isFoo(t, p) {
+29: return
+44: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.go b/src/cmd/compile/internal/ssa/testdata/i22558.go
new file mode 100644
index 0000000..8aea76c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type big struct {
+ pile [768]int8
+}
+
+type thing struct {
+ name string
+ next *thing
+ self *thing
+ stuff []big
+}
+
+func test(t *thing, u *thing) {
+ if t.next != nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, "%s\n", t.name)
+ u.self = u
+ t.self = t
+ t.next = u
+ for _, p := range t.stuff {
+ if isFoo(t, p) {
+ return
+ }
+ }
+}
+
+//go:noinline
+func isFoo(t *thing, b big) bool {
+ return true
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ t := &thing{name: "t", self: nil, next: nil, stuff: make([]big, 1)}
+ u := thing{name: "u", self: t, next: t, stuff: make([]big, 1)}
+ test(t, &u)
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts
new file mode 100644
index 0000000..18a5ff9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts
@@ -0,0 +1,7 @@
+ ./testdata/i22600.go
+8: func test() {
+9: pwd, err := os.Getwd()
+10: if err != nil {
+14: fmt.Println(pwd)
+15: }
+20: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts
new file mode 100644
index 0000000..46285e2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts
@@ -0,0 +1,7 @@
+ src/cmd/compile/internal/ssa/testdata/i22600.go
+8: func test() {
+9: pwd, err := os.Getwd()
+10: if err != nil {
+14: fmt.Println(pwd)
+15: }
+20: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.go b/src/cmd/compile/internal/ssa/testdata/i22600.go
new file mode 100644
index 0000000..27f0d3d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.go
@@ -0,0 +1,27 @@
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func test() {
+ pwd, err := os.Getwd()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ fmt.Println(pwd)
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
new file mode 100644
index 0000000..0b9f06f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
@@ -0,0 +1,12 @@
+ ./testdata/infloop.go
+6: func test() {
+8: go func() {}()
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts
new file mode 100644
index 0000000..d465ad1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts
@@ -0,0 +1,4 @@
+ src/cmd/compile/internal/ssa/testdata/infloop.go
+6: func test() {
+8: go func() {}()
+10: for {
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.go b/src/cmd/compile/internal/ssa/testdata/infloop.go
new file mode 100644
index 0000000..cdb374f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.go
@@ -0,0 +1,16 @@
+package main
+
+var sink int
+
+//go:noinline
+func test() {
+ // This is for #30167, incorrect line numbers in an infinite loop
+ go func() {}()
+
+ for {
+ }
+}
+
+func main() {
+ test()
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/inline-dump.go b/src/cmd/compile/internal/ssa/testdata/inline-dump.go
new file mode 100644
index 0000000..97893b6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/inline-dump.go
@@ -0,0 +1,17 @@
+package foo
+
+func f(m, n int) int {
+ a := g(n)
+ b := g(m)
+ return a + b
+}
+
+func g(x int) int {
+ y := h(x + 1)
+ z := h(x - 1)
+ return y + z
+}
+
+func h(x int) int {
+ return x * x
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/pushback.go b/src/cmd/compile/internal/ssa/testdata/pushback.go
new file mode 100644
index 0000000..754e6cb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/pushback.go
@@ -0,0 +1,30 @@
+package main
+
+type Node struct {
+ Circular bool
+}
+
+type ExtNode[V any] struct {
+ v V
+ Node
+}
+
+type List[V any] struct {
+ root *ExtNode[V]
+ len int
+}
+
+func (list *List[V]) PushBack(arg V) {
+ if list.len == 0 {
+ list.root = &ExtNode[V]{v: arg}
+ list.root.Circular = true
+ list.len++
+ return
+ }
+ list.len++
+}
+
+func main() {
+ var v List[int]
+ v.PushBack(1)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/sayhi.go b/src/cmd/compile/internal/ssa/testdata/sayhi.go
new file mode 100644
index 0000000..680e1eb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/sayhi.go
@@ -0,0 +1,12 @@
+package foo
+
+import (
+ "fmt"
+ "sync"
+)
+
+func sayhi(n int, wg *sync.WaitGroup) {
+ fmt.Println("hi", n)
+ fmt.Println("hi", n)
+ wg.Done()
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts
new file mode 100644
index 0000000..f182ff4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts
@@ -0,0 +1,56 @@
+ ./testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+31: fmt.Println(x, y)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+40: c := 0
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+43: c := 1.1
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+59: fmt.Println("foo")
+60: break
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts
new file mode 100644
index 0000000..b5e41aa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts
@@ -0,0 +1,46 @@
+ ./testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+31: fmt.Println(x, y)
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+31: fmt.Println(x, y)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+59: fmt.Println("foo")
+64: helloworld()
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts
new file mode 100644
index 0000000..6eb4903
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts
@@ -0,0 +1,64 @@
+ src/cmd/compile/internal/ssa/testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+0:
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 1
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 4
+y = 1
+26: for i := x; i < 3; i++ {
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 5
+31: fmt.Println(x, y)
+0: 5
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+40: c := 0
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+43: c := 1.1
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+59: fmt.Println("foo")
+60: break
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts
new file mode 100644
index 0000000..5a186b5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts
@@ -0,0 +1,55 @@
+ src/cmd/compile/internal/ssa/testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+0:
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 1
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 4
+y = 1
+26: for i := x; i < 3; i++ {
+31: fmt.Println(x, y)
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 5
+31: fmt.Println(x, y)
+0: 5
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+59: fmt.Println("foo")
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.go b/src/cmd/compile/internal/ssa/testdata/scopes.go
new file mode 100644
index 0000000..e93d699
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.go
@@ -0,0 +1,107 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "time"
+)
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+//go:noinline
+func id(x int) int {
+ return x
+}
+
+func test() {
+ x := id(0)
+ y := id(0)
+ fmt.Println(x)
+ for i := x; i < 3; i++ {
+ x := i * i
+ y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+ }
+ y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+ fmt.Println(x, y)
+
+ for x := 0; x <= 1; x++ { // From delve scopetest.go
+ a := y
+ f1(a)
+ {
+ b := 0
+ f2(b)
+ if gretbool() {
+ c := 0
+ f3(c)
+ } else {
+ c := 1.1
+ f4(int(c))
+ }
+ f5(b)
+ }
+ f6(a)
+ }
+
+ { // From delve testnextprog.go
+ var (
+ j = id(1)
+ f = id(2)
+ )
+ for i := 0; i <= 5; i++ {
+ j += j * (j ^ 3) / 100
+ if i == f {
+ fmt.Println("foo")
+ break
+ }
+ sleepytime()
+ }
+ helloworld()
+ }
+}
+
+func sleepytime() {
+ time.Sleep(5 * time.Millisecond)
+}
+
+func helloworld() {
+ fmt.Println("Hello, World!")
+}
+
+//go:noinline
+func f1(x int) {}
+
+//go:noinline
+func f2(x int) {}
+
+//go:noinline
+func f3(x int) {}
+
+//go:noinline
+func f4(x int) {}
+
+//go:noinline
+func f5(x int) {}
+
+//go:noinline
+func f6(x int) {}
+
+var boolvar = true
+
+func gretbool() bool {
+ x := boolvar
+ boolvar = !boolvar
+ return x
+}
+
+var sink string
+
+//go:noinline
+func growstack() {
+ sink = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go
new file mode 100644
index 0000000..85b6a84
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/tighten.go
@@ -0,0 +1,269 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/base"
+
+// tighten moves Values closer to the Blocks in which they are used.
+// This can reduce the amount of register spilling required,
+// if it doesn't also create more live values.
+// A Value can be moved to any block that
+// dominates all blocks in which it is used.
+func tighten(f *Func) {
+ if base.Flag.N != 0 && len(f.Blocks) < 10000 {
+ // Skip the optimization in -N mode, except for huge functions.
+ // Too many values live across blocks can cause pathological
+ // behavior in the register allocator (see issue 52180).
+ return
+ }
+
+ canMove := f.Cache.allocBoolSlice(f.NumValues())
+ defer f.Cache.freeBoolSlice(canMove)
+
+ // Compute the memory states of each block.
+ startMem := f.Cache.allocValueSlice(f.NumBlocks())
+ defer f.Cache.freeValueSlice(startMem)
+ endMem := f.Cache.allocValueSlice(f.NumBlocks())
+ defer f.Cache.freeValueSlice(endMem)
+ memState(f, startMem, endMem)
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.isLoweredGetClosurePtr() {
+ // Must stay in the entry block.
+ continue
+ }
+ switch v.Op {
+ case OpPhi, OpArg, OpArgIntReg, OpArgFloatReg, OpSelect0, OpSelect1, OpSelectN:
+ // Phis need to stay in their block.
+ // Arg must stay in the entry block.
+ // Tuple selectors must stay with the tuple generator.
+ // SelectN is typically, ultimately, a register.
+ continue
+ }
+ // Count arguments which will need a register.
+ narg := 0
+ for _, a := range v.Args {
+ // SP and SB are special registers and have no effect on
+ // the allocation of general-purpose registers.
+ if a.needRegister() && a.Op != OpSB && a.Op != OpSP {
+ narg++
+ }
+ }
+ if narg >= 2 && !v.Type.IsFlags() {
+ // Don't move values with more than one input, as that may
+ // increase register pressure.
+ // We make an exception for flags, as we want flag generators
+ // moved next to uses (because we only have 1 flag register).
+ continue
+ }
+ canMove[v.ID] = true
+ }
+ }
+
+ // Build data structure for fast least-common-ancestor queries.
+ lca := makeLCArange(f)
+
+ // For each moveable value, record the block that dominates all uses found so far.
+ target := f.Cache.allocBlockSlice(f.NumValues())
+ defer f.Cache.freeBlockSlice(target)
+
+ // Grab loop information.
+ // We use this to make sure we don't tighten a value into a (deeper) loop.
+ idom := f.Idom()
+ loops := f.loopnest()
+ loops.calculateDepths()
+
+ changed := true
+ for changed {
+ changed = false
+
+ // Reset target
+ for i := range target {
+ target[i] = nil
+ }
+
+ // Compute target locations (for moveable values only).
+ // target location = the least common ancestor of all uses in the dominator tree.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if !canMove[a.ID] {
+ continue
+ }
+ use := b
+ if v.Op == OpPhi {
+ use = b.Preds[i].b
+ }
+ if target[a.ID] == nil {
+ target[a.ID] = use
+ } else {
+ target[a.ID] = lca.find(target[a.ID], use)
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !canMove[c.ID] {
+ continue
+ }
+ if target[c.ID] == nil {
+ target[c.ID] = b
+ } else {
+ target[c.ID] = lca.find(target[c.ID], b)
+ }
+ }
+ }
+
+ // If the target location is inside a loop,
+ // move the target location up to just before the loop head.
+ for _, b := range f.Blocks {
+ origloop := loops.b2l[b.ID]
+ for _, v := range b.Values {
+ t := target[v.ID]
+ if t == nil {
+ continue
+ }
+ targetloop := loops.b2l[t.ID]
+ for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
+ t = idom[targetloop.header.ID]
+ target[v.ID] = t
+ targetloop = loops.b2l[t.ID]
+ }
+ }
+ }
+
+ // Move values to target locations.
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ t := target[v.ID]
+ if t == nil || t == b {
+ // v is not moveable, or is already in correct place.
+ continue
+ }
+ if mem := v.MemoryArg(); mem != nil {
+ if startMem[t.ID] != mem {
+ // We can't move a value with a memory arg unless the target block
+ // has that memory arg as its starting memory.
+ continue
+ }
+ }
+ if f.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "%v is moved", v.Op)
+ }
+ // Move v to the block which dominates its uses.
+ t.Values = append(t.Values, v)
+ v.Block = t
+ last := len(b.Values) - 1
+ b.Values[i] = b.Values[last]
+ b.Values[last] = nil
+ b.Values = b.Values[:last]
+ changed = true
+ i--
+ }
+ }
+ }
+}
+
+// phiTighten moves constants closer to phi users.
+// This pass avoids having lots of constants live for lots of the program.
+// See issue 16407.
+func phiTighten(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ for i, a := range v.Args {
+ if !a.rematerializeable() {
+ continue // not a constant we can move around
+ }
+ if a.Block == b.Preds[i].b {
+ continue // already in the right place
+ }
+ // Make a copy of a, put in predecessor block.
+ v.SetArg(i, a.copyInto(b.Preds[i].b))
+ }
+ }
+ }
+}
+
+// memState computes the memory state at the beginning and end of each block of
+// the function. The memory state is represented by a value of mem type.
+// The returned result is stored in startMem and endMem, and endMem is nil for
+// blocks with no successors (Exit,Ret,RetJmp blocks). This algorithm is not
+// suitable for infinite loop blocks that do not contain any mem operations.
+// For example:
+// b1:
+//
+// (some values)
+//
+// plain -> b2
+// b2: <- b1 b2
+// Plain -> b2
+//
+// Algorithm introduction:
+// 1. The start memory state of a block is InitMem, a Phi node of type mem or
+// an incoming memory value.
+// 2. The start memory state of a block is consistent with the end memory state
+// of its parent nodes. If the start memory state of a block is a Phi value,
+// then the end memory state of its parent nodes is consistent with the
+// corresponding argument value of the Phi node.
+// 3. The algorithm first obtains the memory state of some blocks in the tree
+// in the first step. Then floods the known memory state to other nodes in
+// the second step.
+func memState(f *Func, startMem, endMem []*Value) {
+ // This slice contains the set of blocks that have had their startMem set but this
+ // startMem value has not yet been propagated to the endMem of its predecessors
+ changed := make([]*Block, 0)
+ // First step, init the memory state of some blocks.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ var mem *Value
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ } else if v.Op == OpInitMem {
+ mem = v // This is actually not needed.
+ } else if a := v.MemoryArg(); a != nil && a.Block != b {
+ // The only incoming memory value doesn't belong to this block.
+ mem = a
+ }
+ if mem != nil {
+ if old := startMem[b.ID]; old != nil {
+ if old == mem {
+ continue
+ }
+ f.Fatalf("func %s, startMem[%v] has different values, old %v, new %v", f.Name, b, old, mem)
+ }
+ startMem[b.ID] = mem
+ changed = append(changed, b)
+ }
+ }
+ }
+
+ // Second step, floods the known memory state of some blocks to others.
+ for len(changed) != 0 {
+ top := changed[0]
+ changed = changed[1:]
+ mem := startMem[top.ID]
+ for i, p := range top.Preds {
+ pb := p.b
+ if endMem[pb.ID] != nil {
+ continue
+ }
+ if mem.Op == OpPhi && mem.Block == top {
+ endMem[pb.ID] = mem.Args[i]
+ } else {
+ endMem[pb.ID] = mem
+ }
+ if startMem[pb.ID] == nil {
+ startMem[pb.ID] = endMem[pb.ID]
+ changed = append(changed, pb)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go
new file mode 100644
index 0000000..13798c6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/trim.go
@@ -0,0 +1,172 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// trim removes blocks with no code in them.
+// These blocks were inserted to remove critical edges.
+func trim(f *Func) {
+ n := 0
+ for _, b := range f.Blocks {
+ if !trimmableBlock(b) {
+ f.Blocks[n] = b
+ n++
+ continue
+ }
+
+ bPos := b.Pos
+ bIsStmt := bPos.IsStmt() == src.PosIsStmt
+
+ // Splice b out of the graph. NOTE: `mergePhi` depends on the
+ // order, in which the predecessors edges are merged here.
+ p, i := b.Preds[0].b, b.Preds[0].i
+ s, j := b.Succs[0].b, b.Succs[0].i
+ ns := len(s.Preds)
+ p.Succs[i] = Edge{s, j}
+ s.Preds[j] = Edge{p, i}
+
+ for _, e := range b.Preds[1:] {
+ p, i := e.b, e.i
+ p.Succs[i] = Edge{s, len(s.Preds)}
+ s.Preds = append(s.Preds, Edge{p, i})
+ }
+
+ // Attempt to preserve a statement boundary
+ if bIsStmt {
+ sawStmt := false
+ for _, v := range s.Values {
+ if isPoorStatementOp(v.Op) {
+ continue
+ }
+ if v.Pos.SameFileAndLine(bPos) {
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ sawStmt = true
+ break
+ }
+ if !sawStmt && s.Pos.SameFileAndLine(bPos) {
+ s.Pos = s.Pos.WithIsStmt()
+ }
+ }
+ // If `s` had more than one predecessor, update its phi-ops to
+ // account for the merge.
+ if ns > 1 {
+ for _, v := range s.Values {
+ if v.Op == OpPhi {
+ mergePhi(v, j, b)
+ }
+
+ }
+ // Remove the phi-ops from `b` if they were merged into the
+ // phi-ops of `s`.
+ k := 0
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Uses == 0 {
+ v.resetArgs()
+ continue
+ }
+ // Pad the arguments of the remaining phi-ops so
+ // they match the new predecessor count of `s`.
+ // Since s did not have a Phi op corresponding to
+ // the phi op in b, the other edges coming into s
+ // must be loopback edges from s, so v is the right
+ // argument to v!
+ args := make([]*Value, len(v.Args))
+ copy(args, v.Args)
+ v.resetArgs()
+ for x := 0; x < j; x++ {
+ v.AddArg(v)
+ }
+ v.AddArg(args[0])
+ for x := j + 1; x < ns; x++ {
+ v.AddArg(v)
+ }
+ for _, a := range args[1:] {
+ v.AddArg(a)
+ }
+ }
+ b.Values[k] = v
+ k++
+ }
+ b.Values = b.Values[:k]
+ }
+
+ // Merge the blocks' values.
+ for _, v := range b.Values {
+ v.Block = s
+ }
+ k := len(b.Values)
+ m := len(s.Values)
+ for i := 0; i < k; i++ {
+ s.Values = append(s.Values, nil)
+ }
+ copy(s.Values[k:], s.Values[:m])
+ copy(s.Values, b.Values)
+ }
+ if n < len(f.Blocks) {
+ f.invalidateCFG()
+ tail := f.Blocks[n:]
+ for i := range tail {
+ tail[i] = nil
+ }
+ f.Blocks = f.Blocks[:n]
+ }
+}
+
+// emptyBlock reports whether the block does not contain actual
+// instructions.
+func emptyBlock(b *Block) bool {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ return false
+ }
+ }
+ return true
+}
+
+// trimmableBlock reports whether the block can be trimmed from the CFG,
+// subject to the following criteria:
+// - it should not be the first block.
+// - it should be BlockPlain.
+// - it should not loop back to itself.
+// - it either is the single predecessor of the successor block or
+// contains no actual instructions.
+func trimmableBlock(b *Block) bool {
+ if b.Kind != BlockPlain || b == b.Func.Entry {
+ return false
+ }
+ s := b.Succs[0].b
+ return s != b && (len(s.Preds) == 1 || emptyBlock(b))
+}
+
+// mergePhi adjusts the number of `v`s arguments to account for merge
+// of `b`, which was `i`th predecessor of the `v`s block.
+func mergePhi(v *Value, i int, b *Block) {
+ u := v.Args[i]
+ if u.Block == b {
+ if u.Op != OpPhi {
+ b.Func.Fatalf("value %s is not a phi operation", u.LongString())
+ }
+ // If the original block contained u = φ(u0, u1, ..., un) and
+ // the current phi is
+ // v = φ(v0, v1, ..., u, ..., vk)
+ // then the merged phi is
+ // v = φ(v0, v1, ..., u0, ..., vk, u1, ..., un)
+ v.SetArg(i, u.Args[0])
+ v.AddArgs(u.Args[1:]...)
+ } else {
+ // If the original block contained u = φ(u0, u1, ..., un) and
+ // the current phi is
+ // v = φ(v0, v1, ..., vi, ..., vk)
+ // i.e. it does not use a value from the predecessor block,
+ // then the merged phi is
+ // v = φ(v0, v1, ..., vk, vi, vi, ...)
+ for j := 1; j < len(b.Preds); j++ {
+ v.AddArg(v.Args[i])
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/tuple.go b/src/cmd/compile/internal/ssa/tuple.go
new file mode 100644
index 0000000..289df40
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/tuple.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// tightenTupleSelectors ensures that tuple selectors (Select0, Select1,
+// and SelectN ops) are in the same block as their tuple generator. The
+// function also ensures that there are no duplicate tuple selectors.
+// These properties are expected by the scheduler but may not have
+// been maintained by the optimization pipeline up to this point.
+//
+// See issues 16741 and 39472.
+func tightenTupleSelectors(f *Func) {
+ selectors := make(map[struct {
+ id ID
+ which int
+ }]*Value)
+ for _, b := range f.Blocks {
+ for _, selector := range b.Values {
+ // Key fields for de-duplication
+ var tuple *Value
+ idx := 0
+ switch selector.Op {
+ default:
+ continue
+ case OpSelect1:
+ idx = 1
+ fallthrough
+ case OpSelect0:
+ tuple = selector.Args[0]
+ if !tuple.Type.IsTuple() {
+ f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString())
+ }
+ case OpSelectN:
+ tuple = selector.Args[0]
+ idx = int(selector.AuxInt)
+ if !tuple.Type.IsResults() {
+ f.Fatalf("arg of result selector %s is not a results: %s", selector.String(), tuple.LongString())
+ }
+ }
+
+ // If there is a pre-existing selector in the target block then
+ // use that. Do this even if the selector is already in the
+ // target block to avoid duplicate tuple selectors.
+ key := struct {
+ id ID
+ which int
+ }{tuple.ID, idx}
+ if t := selectors[key]; t != nil {
+ if selector != t {
+ selector.copyOf(t)
+ }
+ continue
+ }
+
+ // If the selector is in the wrong block copy it into the target
+ // block.
+ if selector.Block != tuple.Block {
+ t := selector.copyInto(tuple.Block)
+ selector.copyOf(t)
+ selectors[key] = t
+ continue
+ }
+
+ // The selector is in the target block. Add it to the map so it
+ // cannot be duplicated.
+ selectors[key] = selector
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
new file mode 100644
index 0000000..4eaab40
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -0,0 +1,620 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+)
+
+// A Value represents a value in the SSA representation of the program.
+// The ID and Type fields must not be modified. The remainder may be modified
+// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
+type Value struct {
+ // A unique identifier for the value. For performance we allocate these IDs
+ // densely starting at 1. There is no guarantee that there won't be occasional holes, though.
+ ID ID
+
+ // The operation that computes this value. See op.go.
+ Op Op
+
+ // The type of this value. Normally this will be a Go type, but there
+ // are a few other pseudo-types, see ../types/type.go.
+ Type *types.Type
+
+ // Auxiliary info for this value. The type of this information depends on the opcode and type.
+ // AuxInt is used for integer values, Aux is used for other values.
+ // Floats are stored in AuxInt using math.Float64bits(f).
+ // Unused portions of AuxInt are filled by sign-extending the used portion,
+ // even if the represented value is unsigned.
+ // Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
+ // Use Value.AuxUnsigned to get the zero-extended value of AuxInt.
+ AuxInt int64
+ Aux Aux
+
+ // Arguments of this value
+ Args []*Value
+
+ // Containing basic block
+ Block *Block
+
+ // Source position
+ Pos src.XPos
+
+ // Use count. Each appearance in Value.Args and Block.Controls counts once.
+ Uses int32
+
+ // wasm: Value stays on the WebAssembly stack. This value will not get a "register" (WebAssembly variable)
+ // nor a slot on Go stack, and the generation of this value is delayed to its use time.
+ OnWasmStack bool
+
+ // Is this value in the per-function constant cache? If so, remove from cache before changing it or recycling it.
+ InCache bool
+
+ // Storage for the first three args
+ argstorage [3]*Value
+}
+
+// Examples:
+// Opcode aux args
+// OpAdd nil 2
+// OpConst string 0 string constant
+// OpConst int64 0 int64 constant
+// OpAddcq int64 1 amd64 op: v = arg[0] + constant
+
+// short form print. Just v#.
+func (v *Value) String() string {
+ if v == nil {
+ return "nil" // should never happen, but not panicking helps with debugging
+ }
+ return fmt.Sprintf("v%d", v.ID)
+}
+
+func (v *Value) AuxInt8() int8 {
+ if opcodeTable[v.Op].auxType != auxInt8 && opcodeTable[v.Op].auxType != auxNameOffsetInt8 {
+ v.Fatalf("op %s doesn't have an int8 aux field", v.Op)
+ }
+ return int8(v.AuxInt)
+}
+
+func (v *Value) AuxUInt8() uint8 {
+ if opcodeTable[v.Op].auxType != auxUInt8 {
+ v.Fatalf("op %s doesn't have a uint8 aux field", v.Op)
+ }
+ return uint8(v.AuxInt)
+}
+
+func (v *Value) AuxInt16() int16 {
+ if opcodeTable[v.Op].auxType != auxInt16 {
+ v.Fatalf("op %s doesn't have an int16 aux field", v.Op)
+ }
+ return int16(v.AuxInt)
+}
+
+func (v *Value) AuxInt32() int32 {
+ if opcodeTable[v.Op].auxType != auxInt32 {
+ v.Fatalf("op %s doesn't have an int32 aux field", v.Op)
+ }
+ return int32(v.AuxInt)
+}
+
+// AuxUnsigned returns v.AuxInt as an unsigned value for OpConst*.
+// v.AuxInt is always sign-extended to 64 bits, even if the
+// represented value is unsigned. This undoes that sign extension.
+func (v *Value) AuxUnsigned() uint64 {
+ c := v.AuxInt
+ switch v.Op {
+ case OpConst64:
+ return uint64(c)
+ case OpConst32:
+ return uint64(uint32(c))
+ case OpConst16:
+ return uint64(uint16(c))
+ case OpConst8:
+ return uint64(uint8(c))
+ }
+ v.Fatalf("op %s isn't OpConst*", v.Op)
+ return 0
+}
+
+func (v *Value) AuxFloat() float64 {
+ if opcodeTable[v.Op].auxType != auxFloat32 && opcodeTable[v.Op].auxType != auxFloat64 {
+ v.Fatalf("op %s doesn't have a float aux field", v.Op)
+ }
+ return math.Float64frombits(uint64(v.AuxInt))
+}
+func (v *Value) AuxValAndOff() ValAndOff {
+ if opcodeTable[v.Op].auxType != auxSymValAndOff {
+ v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op)
+ }
+ return ValAndOff(v.AuxInt)
+}
+
+func (v *Value) AuxArm64BitField() arm64BitField {
+ if opcodeTable[v.Op].auxType != auxARM64BitField {
+ v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op)
+ }
+ return arm64BitField(v.AuxInt)
+}
+
+// long form print. v# = opcode <type> [aux] args [: reg] (names)
+func (v *Value) LongString() string {
+ if v == nil {
+ return "<NIL VALUE>"
+ }
+ s := fmt.Sprintf("v%d = %s", v.ID, v.Op)
+ s += " <" + v.Type.String() + ">"
+ s += v.auxString()
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %v", a)
+ }
+ if v.Block == nil {
+ return s
+ }
+ r := v.Block.Func.RegAlloc
+ if int(v.ID) < len(r) && r[v.ID] != nil {
+ s += " : " + r[v.ID].String()
+ }
+ if reg := v.Block.Func.tempRegs[v.ID]; reg != nil {
+ s += " tmp=" + reg.String()
+ }
+ var names []string
+ for name, values := range v.Block.Func.NamedValues {
+ for _, value := range values {
+ if value == v {
+ names = append(names, name.String())
+ break // drop duplicates.
+ }
+ }
+ }
+ if len(names) != 0 {
+ sort.Strings(names) // Otherwise a source of variation in debugging output.
+ s += " (" + strings.Join(names, ", ") + ")"
+ }
+ return s
+}
+
+func (v *Value) auxString() string {
+ switch opcodeTable[v.Op].auxType {
+ case auxBool:
+ if v.AuxInt == 0 {
+ return " [false]"
+ } else {
+ return " [true]"
+ }
+ case auxInt8:
+ return fmt.Sprintf(" [%d]", v.AuxInt8())
+ case auxInt16:
+ return fmt.Sprintf(" [%d]", v.AuxInt16())
+ case auxInt32:
+ return fmt.Sprintf(" [%d]", v.AuxInt32())
+ case auxInt64, auxInt128:
+ return fmt.Sprintf(" [%d]", v.AuxInt)
+ case auxUInt8:
+ return fmt.Sprintf(" [%d]", v.AuxUInt8())
+ case auxARM64BitField:
+ lsb := v.AuxArm64BitField().getARM64BFlsb()
+ width := v.AuxArm64BitField().getARM64BFwidth()
+ return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
+ case auxFloat32, auxFloat64:
+ return fmt.Sprintf(" [%g]", v.AuxFloat())
+ case auxString:
+ return fmt.Sprintf(" {%q}", v.Aux)
+ case auxSym, auxCall, auxTyp:
+ if v.Aux != nil {
+ return fmt.Sprintf(" {%v}", v.Aux)
+ }
+ return ""
+ case auxSymOff, auxCallOff, auxTypSize, auxNameOffsetInt8:
+ s := ""
+ if v.Aux != nil {
+ s = fmt.Sprintf(" {%v}", v.Aux)
+ }
+ if v.AuxInt != 0 || opcodeTable[v.Op].auxType == auxNameOffsetInt8 {
+ s += fmt.Sprintf(" [%v]", v.AuxInt)
+ }
+ return s
+ case auxSymValAndOff:
+ s := ""
+ if v.Aux != nil {
+ s = fmt.Sprintf(" {%v}", v.Aux)
+ }
+ return s + fmt.Sprintf(" [%s]", v.AuxValAndOff())
+ case auxCCop:
+ return fmt.Sprintf(" {%s}", Op(v.AuxInt))
+ case auxS390XCCMask, auxS390XRotateParams:
+ return fmt.Sprintf(" {%v}", v.Aux)
+ case auxFlagConstant:
+ return fmt.Sprintf("[%s]", flagConstant(v.AuxInt))
+ case auxNone:
+ return ""
+ default:
+ // If you see this, add a case above instead.
+ return fmt.Sprintf("[auxtype=%d AuxInt=%d Aux=%v]", opcodeTable[v.Op].auxType, v.AuxInt, v.Aux)
+ }
+}
+
+// If/when midstack inlining is enabled (-l=4), the compiler gets both larger and slower.
+// Not-inlining this method is a help (*Value.reset and *Block.NewValue0 are similar).
+//
+//go:noinline
+func (v *Value) AddArg(w *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w)
+ w.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg2(w1, w2 *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w1, w2)
+ w1.Uses++
+ w2.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg3(w1, w2, w3 *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w1, w2, w3)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg4(w1, w2, w3, w4 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4, w5)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+ w5.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4, w5, w6)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+ w5.Uses++
+ w6.Uses++
+}
+
+func (v *Value) AddArgs(a ...*Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, a...)
+ for _, x := range a {
+ x.Uses++
+ }
+}
+func (v *Value) SetArg(i int, w *Value) {
+ v.Args[i].Uses--
+ v.Args[i] = w
+ w.Uses++
+}
+func (v *Value) SetArgs1(a *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+}
+func (v *Value) SetArgs2(a, b *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+}
+func (v *Value) SetArgs3(a, b, c *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+ v.AddArg(c)
+}
+
+func (v *Value) resetArgs() {
+ for _, a := range v.Args {
+ a.Uses--
+ }
+ v.argstorage[0] = nil
+ v.argstorage[1] = nil
+ v.argstorage[2] = nil
+ v.Args = v.argstorage[:0]
+}
+
+// reset is called from most rewrite rules.
+// Allowing it to be inlined increases the size
+// of cmd/compile by almost 10%, and slows it down.
+//
+//go:noinline
+func (v *Value) reset(op Op) {
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = op
+ v.resetArgs()
+ v.AuxInt = 0
+ v.Aux = nil
+}
+
+// invalidateRecursively marks a value as invalid (unused)
+// and after decrementing reference counts on its Args,
+// also recursively invalidates any of those whose use
+// count goes to zero. It returns whether any of the
+// invalidated values was marked with IsStmt.
+//
+// BEWARE of doing this *before* you've applied intended
+// updates to SSA.
+func (v *Value) invalidateRecursively() bool {
+ lostStmt := v.Pos.IsStmt() == src.PosIsStmt
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = OpInvalid
+
+ for _, a := range v.Args {
+ a.Uses--
+ if a.Uses == 0 {
+ lost := a.invalidateRecursively()
+ lostStmt = lost || lostStmt
+ }
+ }
+
+ v.argstorage[0] = nil
+ v.argstorage[1] = nil
+ v.argstorage[2] = nil
+ v.Args = v.argstorage[:0]
+
+ v.AuxInt = 0
+ v.Aux = nil
+ return lostStmt
+}
+
+// copyOf is called from rewrite rules.
+// It modifies v to be (Copy a).
+//
+//go:noinline
+func (v *Value) copyOf(a *Value) {
+ if v == a {
+ return
+ }
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = OpCopy
+ v.resetArgs()
+ v.AddArg(a)
+ v.AuxInt = 0
+ v.Aux = nil
+ v.Type = a.Type
+}
+
+// copyInto makes a new value identical to v and adds it to the end of b.
+// unlike copyIntoWithXPos this does not check for v.Pos being a statement.
+func (v *Value) copyInto(b *Block) *Value {
+ c := b.NewValue0(v.Pos.WithNotStmt(), v.Op, v.Type) // Lose the position, this causes line number churn otherwise.
+ c.Aux = v.Aux
+ c.AuxInt = v.AuxInt
+ c.AddArgs(v.Args...)
+ for _, a := range v.Args {
+ if a.Type.IsMemory() {
+ v.Fatalf("can't move a value with a memory arg %s", v.LongString())
+ }
+ }
+ return c
+}
+
+// copyIntoWithXPos makes a new value identical to v and adds it to the end of b.
+// The supplied position is used as the position of the new value.
+// Because this is used for rematerialization, check for case that (rematerialized)
+// input to value with position 'pos' carried a statement mark, and that the supplied
+// position (of the instruction using the rematerialized value) is not marked, and
+// preserve that mark if its line matches the supplied position.
+func (v *Value) copyIntoWithXPos(b *Block, pos src.XPos) *Value {
+ if v.Pos.IsStmt() == src.PosIsStmt && pos.IsStmt() != src.PosIsStmt && v.Pos.SameFileAndLine(pos) {
+ pos = pos.WithIsStmt()
+ }
+ c := b.NewValue0(pos, v.Op, v.Type)
+ c.Aux = v.Aux
+ c.AuxInt = v.AuxInt
+ c.AddArgs(v.Args...)
+ for _, a := range v.Args {
+ if a.Type.IsMemory() {
+ v.Fatalf("can't move a value with a memory arg %s", v.LongString())
+ }
+ }
+ return c
+}
+
+func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) }
+func (v *Value) Log() bool { return v.Block.Log() }
+func (v *Value) Fatalf(msg string, args ...interface{}) {
+ v.Block.Func.fe.Fatalf(v.Pos, msg, args...)
+}
+
+// isGenericIntConst reports whether v is a generic integer constant.
+func (v *Value) isGenericIntConst() bool {
+ return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8)
+}
+
+// ResultReg returns the result register assigned to v, in cmd/internal/obj/$ARCH numbering.
+// It is similar to Reg and Reg0, except that it is usable interchangeably for all Value Ops.
+// If you know v.Op, using Reg or Reg0 (as appropriate) will be more efficient.
+func (v *Value) ResultReg() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil reg for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ if pair, ok := reg.(LocPair); ok {
+ reg = pair[0]
+ }
+ if reg == nil {
+ v.Fatalf("nil reg0 for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg0 returns the register assigned to the first output of v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg0() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[0]
+ if reg == nil {
+ v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg1 returns the register assigned to the second output of v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg1() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[1]
+ if reg == nil {
+ v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// RegTmp returns the temporary register assigned to v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) RegTmp() int16 {
+ reg := v.Block.Func.tempRegs[v.ID]
+ if reg == nil {
+ v.Fatalf("nil tmp register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.objNum
+}
+
+func (v *Value) RegName() string {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).name
+}
+
+// MemoryArg returns the memory argument for the Value.
+// The returned value, if non-nil, will be memory-typed (or a tuple with a memory-typed second part).
+// Otherwise, nil is returned.
+func (v *Value) MemoryArg() *Value {
+ if v.Op == OpPhi {
+ v.Fatalf("MemoryArg on Phi")
+ }
+ na := len(v.Args)
+ if na == 0 {
+ return nil
+ }
+ if m := v.Args[na-1]; m.Type.IsMemory() {
+ return m
+ }
+ return nil
+}
+
+// LackingPos indicates whether v is a value that is unlikely to have a correct
+// position assigned to it. Ignoring such values leads to more user-friendly positions
+// assigned to nearby values and the blocks containing them.
+func (v *Value) LackingPos() bool {
+ // The exact definition of LackingPos is somewhat heuristically defined and may change
+ // in the future, for example if some of these operations are generated more carefully
+ // with respect to their source position.
+ return v.Op == OpVarDef || v.Op == OpVarLive || v.Op == OpPhi ||
+ (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem
+}
+
+// removeable reports whether the value v can be removed from the SSA graph entirely
+// if its use count drops to 0.
+func (v *Value) removeable() bool {
+ if v.Type.IsVoid() {
+ // Void ops (inline marks), must stay.
+ return false
+ }
+ if opcodeTable[v.Op].nilCheck {
+ // Nil pointer checks must stay.
+ return false
+ }
+ if v.Type.IsMemory() {
+ // We don't need to preserve all memory ops, but we do need
+ // to keep calls at least (because they might have
+ // synchronization operations we can't see).
+ return false
+ }
+ if v.Op.HasSideEffects() {
+ // These are mostly synchronization operations.
+ return false
+ }
+ return true
+}
+
+// AutoVar returns a *Name and int64 representing the auto variable and offset within it
+// where v should be spilled.
+func AutoVar(v *Value) (*ir.Name, int64) {
+ if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok {
+ if v.Type.Size() > loc.Type.Size() {
+ v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
+ }
+ return loc.N, loc.Off
+ }
+ // Assume it is a register, return its spill slot, which needs to be live
+ nameOff := v.Aux.(*AuxNameOffset)
+ return nameOff.Name, nameOff.Offset
+}
+
+// CanSSA reports whether values of type t can be represented as a Value.
+func CanSSA(t *types.Type) bool {
+ types.CalcSize(t)
+ if t.Size() > int64(4*types.PtrSize) {
+ // 4*Widthptr is an arbitrary constant. We want it
+ // to be at least 3*Widthptr so slices can be registerized.
+ // Too big and we'll introduce too much register pressure.
+ return false
+ }
+ switch t.Kind() {
+ case types.TARRAY:
+ // We can't do larger arrays because dynamic indexing is
+ // not supported on SSA variables.
+ // TODO: allow if all indexes are constant.
+ if t.NumElem() <= 1 {
+ return CanSSA(t.Elem())
+ }
+ return false
+ case types.TSTRUCT:
+ if t.NumFields() > MaxStruct {
+ return false
+ }
+ for _, t1 := range t.Fields() {
+ if !CanSSA(t1.Type) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
new file mode 100644
index 0000000..1caccb7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -0,0 +1,804 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+ "internal/buildcfg"
+)
+
+// A ZeroRegion records parts of an object which are known to be zero.
+// A ZeroRegion only applies to a single memory state.
+// Each bit in mask is set if the corresponding pointer-sized word of
+// the base object is known to be zero.
+// In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
+// is known to be zero.
+type ZeroRegion struct {
+ base *Value
+ mask uint64
+}
+
+// mightBeHeapPointer reports whether v might point to the heap.
+// v must have pointer type.
+func mightBeHeapPointer(v *Value) bool {
+ if IsGlobalAddr(v) {
+ return false
+ }
+ return true
+}
+
+// mightContainHeapPointer reports whether the data currently at addresses
+// [ptr,ptr+size) might contain heap pointers. "currently" means at memory state mem.
+// zeroes contains ZeroRegion data to help make that decision (see computeZeroMap).
+func mightContainHeapPointer(ptr *Value, size int64, mem *Value, zeroes map[ID]ZeroRegion) bool {
+ if IsReadOnlyGlobalAddr(ptr) {
+ // The read-only globals section cannot contain any heap pointers.
+ return false
+ }
+
+ // See if we can prove that the queried memory is all zero.
+
+ // Find base pointer and offset. Hopefully, the base is the result of a new(T).
+ var off int64
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+
+ ptrSize := ptr.Block.Func.Config.PtrSize
+ if off%ptrSize != 0 {
+ return true // see issue 61187
+ }
+ if size%ptrSize != 0 {
+ ptr.Fatalf("unaligned pointer write")
+ }
+ if off < 0 || off+size > 64*ptrSize {
+ // memory range goes off end of tracked offsets
+ return true
+ }
+ z := zeroes[mem.ID]
+ if ptr != z.base {
+ // This isn't the object we know about at this memory state.
+ return true
+ }
+ // Mask of bits we're asking about
+ m := (uint64(1)<<(size/ptrSize) - 1) << (off / ptrSize)
+
+ if z.mask&m == m {
+ // All locations are known to be zero, so no heap pointers.
+ return false
+ }
+ return true
+}
+
+// needwb reports whether we need write barrier for store op v.
+// v must be Store/Move/Zero.
+// zeroes provides known zero information (keyed by ID of memory-type values).
+func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
+ t, ok := v.Aux.(*types.Type)
+ if !ok {
+ v.Fatalf("store aux is not a type: %s", v.LongString())
+ }
+ if !t.HasPointers() {
+ return false
+ }
+ dst := v.Args[0]
+ if IsStackAddr(dst) {
+ return false // writes into the stack don't need write barrier
+ }
+ // If we're writing to a place that might have heap pointers, we need
+ // the write barrier.
+ if mightContainHeapPointer(dst, t.Size(), v.MemoryArg(), zeroes) {
+ return true
+ }
+ // Lastly, check if the values we're writing might be heap pointers.
+ // If they aren't, we don't need a write barrier.
+ switch v.Op {
+ case OpStore:
+ if !mightBeHeapPointer(v.Args[1]) {
+ return false
+ }
+ case OpZero:
+ return false // nil is not a heap pointer
+ case OpMove:
+ if !mightContainHeapPointer(v.Args[1], t.Size(), v.Args[2], zeroes) {
+ return false
+ }
+ default:
+ v.Fatalf("store op unknown: %s", v.LongString())
+ }
+ return true
+}
+
+// needWBsrc reports whether GC needs to see v when it is the source of a store.
+func needWBsrc(v *Value) bool {
+ return !IsGlobalAddr(v)
+}
+
+// needWBdst reports whether GC needs to see what used to be in *ptr when ptr is
+// the target of a pointer store.
+func needWBdst(ptr, mem *Value, zeroes map[ID]ZeroRegion) bool {
+ // Detect storing to zeroed memory.
+ var off int64
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ ptrSize := ptr.Block.Func.Config.PtrSize
+ if off%ptrSize != 0 {
+ return true // see issue 61187
+ }
+ if off < 0 || off >= 64*ptrSize {
+ // write goes off end of tracked offsets
+ return true
+ }
+ z := zeroes[mem.ID]
+ if ptr != z.base {
+ return true
+ }
+ // If destination is known to be zeroed, we don't need the write barrier
+ // to record the old value in *ptr.
+ return z.mask>>uint(off/ptrSize)&1 == 0
+}
+
+// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
+// when necessary (the condition above). It rewrites store ops to branches
+// and runtime calls, like
+//
+// if writeBarrier.enabled {
+// buf := gcWriteBarrier2() // Not a regular Go call
+// buf[0] = val
+// buf[1] = *ptr
+// }
+// *ptr = val
+//
+// A sequence of WB stores for many pointer fields of a single type will
+// be emitted together, with a single branch.
+func writebarrier(f *Func) {
+ if !f.fe.UseWriteBarrier() {
+ return
+ }
+
+ // Number of write buffer entries we can request at once.
+ // Must match runtime/mwbbuf.go:wbMaxEntriesPerCall.
+ // It must also match the number of instances of runtime.gcWriteBarrier{X}.
+ const maxEntries = 8
+
+ var sb, sp, wbaddr, const0 *Value
+ var cgoCheckPtrWrite, cgoCheckMemmove *obj.LSym
+ var wbZero, wbMove *obj.LSym
+ var stores, after []*Value
+ var sset, sset2 *sparseSet
+ var storeNumber []int32
+
+ // Compute map from a value to the SelectN [1] value that uses it.
+ select1 := f.Cache.allocValueSlice(f.NumValues())
+ defer func() { f.Cache.freeValueSlice(select1) }()
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpSelectN {
+ continue
+ }
+ if v.AuxInt != 1 {
+ continue
+ }
+ select1[v.Args[0].ID] = v
+ }
+ }
+
+ zeroes := f.computeZeroMap(select1)
+ for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
+ // first, identify all the stores that need to insert a write barrier.
+ // mark them with WB ops temporarily. record presence of WB ops.
+ nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStore, OpMove, OpZero:
+ if needwb(v, zeroes) {
+ switch v.Op {
+ case OpStore:
+ v.Op = OpStoreWB
+ case OpMove:
+ v.Op = OpMoveWB
+ case OpZero:
+ v.Op = OpZeroWB
+ }
+ nWBops++
+ }
+ }
+ }
+ if nWBops == 0 {
+ continue
+ }
+
+ if wbaddr == nil {
+ // lazily initialize global values for write barrier test and calls
+ // find SB and SP values in entry block
+ initpos := f.Entry.Pos
+ sp, sb = f.spSb()
+ wbsym := f.fe.Syslook("writeBarrier")
+ wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
+ wbZero = f.fe.Syslook("wbZero")
+ wbMove = f.fe.Syslook("wbMove")
+ if buildcfg.Experiment.CgoCheck2 {
+ cgoCheckPtrWrite = f.fe.Syslook("cgoCheckPtrWrite")
+ cgoCheckMemmove = f.fe.Syslook("cgoCheckMemmove")
+ }
+ const0 = f.ConstInt32(f.Config.Types.UInt32, 0)
+
+ // allocate auxiliary data structures for computing store order
+ sset = f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset)
+ sset2 = f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset2)
+ storeNumber = f.Cache.allocInt32Slice(f.NumValues())
+ defer f.Cache.freeInt32Slice(storeNumber)
+ }
+
+ // order values in store order
+ b.Values = storeOrder(b.Values, sset, storeNumber)
+ again:
+ // find the start and end of the last contiguous WB store sequence.
+ // a branch will be inserted there. values after it will be moved
+ // to a new block.
+ var last *Value
+ var start, end int
+ var nonPtrStores int
+ values := b.Values
+ FindSeq:
+ for i := len(values) - 1; i >= 0; i-- {
+ w := values[i]
+ switch w.Op {
+ case OpStoreWB, OpMoveWB, OpZeroWB:
+ start = i
+ if last == nil {
+ last = w
+ end = i + 1
+ }
+ nonPtrStores = 0
+ case OpVarDef, OpVarLive:
+ continue
+ case OpStore:
+ if last == nil {
+ continue
+ }
+ nonPtrStores++
+ if nonPtrStores > 2 {
+ break FindSeq
+ }
+ default:
+ if last == nil {
+ continue
+ }
+ break FindSeq
+ }
+ }
+ stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
+ after = append(after[:0], b.Values[end:]...)
+ b.Values = b.Values[:start]
+
+ // find the memory before the WB stores
+ mem := stores[0].MemoryArg()
+ pos := stores[0].Pos
+
+ // If the source of a MoveWB is volatile (will be clobbered by a
+ // function call), we need to copy it to a temporary location, as
+ // marshaling the args of wbMove might clobber the value we're
+ // trying to move.
+ // Look for volatile source, copy it to temporary before we check
+ // the write barrier flag.
+ // It is unlikely to have more than one of them. Just do a linear
+ // search instead of using a map.
+ // See issue 15854.
+ type volatileCopy struct {
+ src *Value // address of original volatile value
+ tmp *Value // address of temporary we've copied the volatile value into
+ }
+ var volatiles []volatileCopy
+
+ if !(f.ABIDefault == f.ABI1 && len(f.Config.intParamRegs) >= 3) {
+ // We don't need to do this if the calls we're going to do take
+ // all their arguments in registers.
+ // 3 is the magic number because it covers wbZero, wbMove, cgoCheckMemmove.
+ copyLoop:
+ for _, w := range stores {
+ if w.Op == OpMoveWB {
+ val := w.Args[1]
+ if isVolatile(val) {
+ for _, c := range volatiles {
+ if val == c.src {
+ continue copyLoop // already copied
+ }
+ }
+
+ t := val.Type.Elem()
+ tmp := f.NewLocal(w.Pos, t)
+ mem = b.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, mem)
+ tmpaddr := b.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, mem)
+ siz := t.Size()
+ mem = b.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem)
+ mem.Aux = t
+ volatiles = append(volatiles, volatileCopy{val, tmpaddr})
+ }
+ }
+ }
+ }
+
+ // Build branch point.
+ bThen := f.NewBlock(BlockPlain)
+ bEnd := f.NewBlock(b.Kind)
+ bThen.Pos = pos
+ bEnd.Pos = b.Pos
+ b.Pos = pos
+
+ // Set up control flow for end block.
+ bEnd.CopyControls(b)
+ bEnd.Likely = b.Likely
+ for _, e := range b.Succs {
+ bEnd.Succs = append(bEnd.Succs, e)
+ e.b.Preds[e.i].b = bEnd
+ }
+
+ // set up control flow for write barrier test
+ // load word, test word, avoiding partial register write from load byte.
+ cfgtypes := &f.Config.Types
+ flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
+ flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
+ b.Kind = BlockIf
+ b.SetControl(flag)
+ b.Likely = BranchUnlikely
+ b.Succs = b.Succs[:0]
+ b.AddEdgeTo(bThen)
+ b.AddEdgeTo(bEnd)
+ bThen.AddEdgeTo(bEnd)
+
+ // For each write barrier store, append write barrier code to bThen.
+ memThen := mem
+ var curCall *Value
+ var curPtr *Value
+ addEntry := func(pos src.XPos, v *Value) {
+ if curCall == nil || curCall.AuxInt == maxEntries {
+ t := types.NewTuple(types.Types[types.TUINTPTR].PtrTo(), types.TypeMem)
+ curCall = bThen.NewValue1(pos, OpWB, t, memThen)
+ curPtr = bThen.NewValue1(pos, OpSelect0, types.Types[types.TUINTPTR].PtrTo(), curCall)
+ memThen = bThen.NewValue1(pos, OpSelect1, types.TypeMem, curCall)
+ }
+ // Store value in write buffer
+ num := curCall.AuxInt
+ curCall.AuxInt = num + 1
+ wbuf := bThen.NewValue1I(pos, OpOffPtr, types.Types[types.TUINTPTR].PtrTo(), num*f.Config.PtrSize, curPtr)
+ memThen = bThen.NewValue3A(pos, OpStore, types.TypeMem, types.Types[types.TUINTPTR], wbuf, v, memThen)
+ }
+
+ // Note: we can issue the write barrier code in any order. In particular,
+ // it doesn't matter if they are in a different order *even if* they end
+ // up referring to overlapping memory regions. For instance if an OpStore
+ // stores to a location that is later read by an OpMove. In all cases
+ // any pointers we must get into the write barrier buffer still make it,
+ // possibly in a different order and possibly a different (but definitely
+ // more than 0) number of times.
+ // In light of that, we process all the OpStoreWBs first. This minimizes
+ // the amount of spill/restore code we need around the Zero/Move calls.
+
+ // srcs contains the value IDs of pointer values we've put in the write barrier buffer.
+ srcs := sset
+ srcs.clear()
+ // dsts contains the value IDs of locations which we've read a pointer out of
+ // and put the result in the write barrier buffer.
+ dsts := sset2
+ dsts.clear()
+
+ for _, w := range stores {
+ if w.Op != OpStoreWB {
+ continue
+ }
+ pos := w.Pos
+ ptr := w.Args[0]
+ val := w.Args[1]
+ if !srcs.contains(val.ID) && needWBsrc(val) {
+ srcs.add(val.ID)
+ addEntry(pos, val)
+ }
+ if !dsts.contains(ptr.ID) && needWBdst(ptr, w.Args[2], zeroes) {
+ dsts.add(ptr.ID)
+ // Load old value from store target.
+ // Note: This turns bad pointer writes into bad
+ // pointer reads, which could be confusing. We could avoid
+ // reading from obviously bad pointers, which would
+ // take care of the vast majority of these. We could
+ // patch this up in the signal handler, or use XCHG to
+ // combine the read and the write.
+ oldVal := bThen.NewValue2(pos, OpLoad, types.Types[types.TUINTPTR], ptr, memThen)
+ // Save old value to write buffer.
+ addEntry(pos, oldVal)
+ }
+ f.fe.Func().SetWBPos(pos)
+ nWBops--
+ }
+
+ for _, w := range stores {
+ pos := w.Pos
+ switch w.Op {
+ case OpZeroWB:
+ dst := w.Args[0]
+ typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
+ // zeroWB(&typ, dst)
+ taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
+ memThen = wbcall(pos, bThen, wbZero, sp, memThen, taddr, dst)
+ f.fe.Func().SetWBPos(pos)
+ nWBops--
+ case OpMoveWB:
+ dst := w.Args[0]
+ src := w.Args[1]
+ if isVolatile(src) {
+ for _, c := range volatiles {
+ if src == c.src {
+ src = c.tmp
+ break
+ }
+ }
+ }
+ typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
+ // moveWB(&typ, dst, src)
+ taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
+ memThen = wbcall(pos, bThen, wbMove, sp, memThen, taddr, dst, src)
+ f.fe.Func().SetWBPos(pos)
+ nWBops--
+ }
+ }
+
+ // merge memory
+ mem = bEnd.NewValue2(pos, OpPhi, types.TypeMem, mem, memThen)
+
+ // Do raw stores after merge point.
+ for _, w := range stores {
+ pos := w.Pos
+ switch w.Op {
+ case OpStoreWB:
+ ptr := w.Args[0]
+ val := w.Args[1]
+ if buildcfg.Experiment.CgoCheck2 {
+ // Issue cgo checking code.
+ mem = wbcall(pos, bEnd, cgoCheckPtrWrite, sp, mem, ptr, val)
+ }
+ mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
+ case OpZeroWB:
+ dst := w.Args[0]
+ mem = bEnd.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, dst, mem)
+ mem.Aux = w.Aux
+ case OpMoveWB:
+ dst := w.Args[0]
+ src := w.Args[1]
+ if isVolatile(src) {
+ for _, c := range volatiles {
+ if src == c.src {
+ src = c.tmp
+ break
+ }
+ }
+ }
+ if buildcfg.Experiment.CgoCheck2 {
+ // Issue cgo checking code.
+ typ := reflectdata.TypeLinksym(w.Aux.(*types.Type))
+ taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
+ mem = wbcall(pos, bEnd, cgoCheckMemmove, sp, mem, taddr, dst, src)
+ }
+ mem = bEnd.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, dst, src, mem)
+ mem.Aux = w.Aux
+ case OpVarDef, OpVarLive:
+ mem = bEnd.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, mem)
+ case OpStore:
+ ptr := w.Args[0]
+ val := w.Args[1]
+ mem = bEnd.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, mem)
+ }
+ }
+
+ // The last store becomes the WBend marker. This marker is used by the liveness
+ // pass to determine what parts of the code are preemption-unsafe.
+ // All subsequent memory operations use this memory, so we have to sacrifice the
+ // previous last memory op to become this new value.
+ bEnd.Values = append(bEnd.Values, last)
+ last.Block = bEnd
+ last.reset(OpWBend)
+ last.Pos = last.Pos.WithNotStmt()
+ last.Type = types.TypeMem
+ last.AddArg(mem)
+
+ // Free all the old stores, except last which became the WBend marker.
+ for _, w := range stores {
+ if w != last {
+ w.resetArgs()
+ }
+ }
+ for _, w := range stores {
+ if w != last {
+ f.freeValue(w)
+ }
+ }
+
+ // put values after the store sequence into the end block
+ bEnd.Values = append(bEnd.Values, after...)
+ for _, w := range after {
+ w.Block = bEnd
+ }
+
+ // if we have more stores in this block, do this block again
+ if nWBops > 0 {
+ goto again
+ }
+ }
+}
+
+// computeZeroMap returns a map from an ID of a memory value to
+// a set of locations that are known to be zeroed at that memory value.
+func (f *Func) computeZeroMap(select1 []*Value) map[ID]ZeroRegion {
+
+ ptrSize := f.Config.PtrSize
+ // Keep track of which parts of memory are known to be zero.
+ // This helps with removing write barriers for various initialization patterns.
+ // This analysis is conservative. We only keep track, for each memory state, of
+ // which of the first 64 words of a single object are known to be zero.
+ zeroes := map[ID]ZeroRegion{}
+ // Find new objects.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if mem, ok := IsNewObject(v, select1); ok {
+ // While compiling package runtime itself, we might see user
+ // calls to newobject, which will have result type
+ // unsafe.Pointer instead. We can't easily infer how large the
+ // allocated memory is, so just skip it.
+ if types.LocalPkg.Path == "runtime" && v.Type.IsUnsafePtr() {
+ continue
+ }
+
+ nptr := v.Type.Elem().Size() / ptrSize
+ if nptr > 64 {
+ nptr = 64
+ }
+ zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
+ }
+ }
+ }
+ // Find stores to those new objects.
+ for {
+ changed := false
+ for _, b := range f.Blocks {
+ // Note: iterating forwards helps convergence, as values are
+ // typically (but not always!) in store order.
+ for _, v := range b.Values {
+ if v.Op != OpStore {
+ continue
+ }
+ z, ok := zeroes[v.MemoryArg().ID]
+ if !ok {
+ continue
+ }
+ ptr := v.Args[0]
+ var off int64
+ size := v.Aux.(*types.Type).Size()
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ if ptr != z.base {
+ // Different base object - we don't know anything.
+ // We could even be writing to the base object we know
+ // about, but through an aliased but offset pointer.
+ // So we have to throw all the zero information we have away.
+ continue
+ }
+ // Round to cover any partially written pointer slots.
+ // Pointer writes should never be unaligned like this, but non-pointer
+ // writes to pointer-containing types will do this.
+ if d := off % ptrSize; d != 0 {
+ off -= d
+ size += d
+ }
+ if d := size % ptrSize; d != 0 {
+ size += ptrSize - d
+ }
+ // Clip to the 64 words that we track.
+ min := off
+ max := off + size
+ if min < 0 {
+ min = 0
+ }
+ if max > 64*ptrSize {
+ max = 64 * ptrSize
+ }
+ // Clear bits for parts that we are writing (and hence
+ // will no longer necessarily be zero).
+ for i := min; i < max; i += ptrSize {
+ bit := i / ptrSize
+ z.mask &^= 1 << uint(bit)
+ }
+ if z.mask == 0 {
+ // No more known zeros - don't bother keeping.
+ continue
+ }
+ // Save updated known zero contents for new store.
+ if zeroes[v.ID] != z {
+ zeroes[v.ID] = z
+ changed = true
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ if f.pass.debug > 0 {
+ fmt.Printf("func %s\n", f.Name)
+ for mem, z := range zeroes {
+ fmt.Printf(" memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
+ }
+ }
+ return zeroes
+}
+
+// wbcall emits write barrier runtime call in b, returns memory.
+func wbcall(pos src.XPos, b *Block, fn *obj.LSym, sp, mem *Value, args ...*Value) *Value {
+ config := b.Func.Config
+ typ := config.Types.Uintptr // type of all argument values
+ nargs := len(args)
+
+ // TODO (register args) this is a bit of a hack.
+ inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3
+
+ if !inRegs {
+ // Store arguments to the appropriate stack slot.
+ off := config.ctxt.Arch.FixedFrameSize
+ for _, arg := range args {
+ stkaddr := b.NewValue1I(pos, OpOffPtr, typ.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, typ, stkaddr, arg, mem)
+ off += typ.Size()
+ }
+ args = args[:0]
+ }
+
+ args = append(args, mem)
+
+ // issue call
+ argTypes := make([]*types.Type, nargs, 3) // at most 3 args; allows stack allocation
+ for i := 0; i < nargs; i++ {
+ argTypes[i] = typ
+ }
+ call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(argTypes, nil)))
+ call.AddArgs(args...)
+ call.AuxInt = int64(nargs) * typ.Size()
+ return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
+}
+
+// round to a multiple of r, r is a power of 2.
+func round(o int64, r int64) int64 {
+ return (o + r - 1) &^ (r - 1)
+}
+
+// IsStackAddr reports whether v is known to be an address of a stack slot.
+func IsStackAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ switch v.Op {
+ case OpSP, OpLocalAddr, OpSelectNAddr, OpGetCallerSP:
+ return true
+ }
+ return false
+}
+
+// IsGlobalAddr reports whether v is known to be an address of a global (or nil).
+func IsGlobalAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ if v.Op == OpAddr && v.Args[0].Op == OpSB {
+ return true // address of a global
+ }
+ if v.Op == OpConstNil {
+ return true
+ }
+ if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
+ return true // loading from a read-only global - the resulting address can't be a heap address.
+ }
+ return false
+}
+
+// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
+func IsReadOnlyGlobalAddr(v *Value) bool {
+ if v.Op == OpConstNil {
+ // Nil pointers are read only. See issue 33438.
+ return true
+ }
+ if v.Op == OpAddr && v.Aux != nil && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
+ return true
+ }
+ return false
+}
+
+// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object,
+// if so, also returns the memory state mem at which v is zero.
+func IsNewObject(v *Value, select1 []*Value) (mem *Value, ok bool) {
+ f := v.Block.Func
+ c := f.Config
+ if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
+ if v.Op != OpSelectN || v.AuxInt != 0 {
+ return nil, false
+ }
+ mem = select1[v.Args[0].ID]
+ if mem == nil {
+ return nil, false
+ }
+ } else {
+ if v.Op != OpLoad {
+ return nil, false
+ }
+ mem = v.MemoryArg()
+ if mem.Op != OpSelectN {
+ return nil, false
+ }
+ if mem.Type != types.TypeMem {
+ return nil, false
+ } // assume it is the right selection if true
+ }
+ call := mem.Args[0]
+ if call.Op != OpStaticCall {
+ return nil, false
+ }
+ if !isSameCall(call.Aux, "runtime.newobject") {
+ return nil, false
+ }
+ if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
+ if v.Args[0] == call {
+ return mem, true
+ }
+ return nil, false
+ }
+ if v.Args[0].Op != OpOffPtr {
+ return nil, false
+ }
+ if v.Args[0].Args[0].Op != OpSP {
+ return nil, false
+ }
+ if v.Args[0].AuxInt != c.ctxt.Arch.FixedFrameSize+c.RegSize { // offset of return value
+ return nil, false
+ }
+ return mem, true
+}
+
+// IsSanitizerSafeAddr reports whether v is known to be an address
+// that doesn't need instrumentation.
+func IsSanitizerSafeAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ switch v.Op {
+ case OpSP, OpLocalAddr, OpSelectNAddr:
+ // Stack addresses are always safe.
+ return true
+ case OpITab, OpStringPtr, OpGetClosurePtr:
+ // Itabs, string data, and closure fields are
+ // read-only once initialized.
+ return true
+ case OpAddr:
+ vt := v.Aux.(*obj.LSym).Type
+ return vt == objabi.SRODATA || vt == objabi.SLIBFUZZER_8BIT_COUNTER || vt == objabi.SCOVERAGE_COUNTER || vt == objabi.SCOVERAGE_AUXVAR
+ }
+ return false
+}
+
+// isVolatile reports whether v is a pointer to argument region on stack which
+// will be clobbered by a function call.
+func isVolatile(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
+ v = v.Args[0]
+ }
+ return v.Op == OpSP
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier_test.go b/src/cmd/compile/internal/ssa/writebarrier_test.go
new file mode 100644
index 0000000..0b11afc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/writebarrier_test.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestWriteBarrierStoreOrder(t *testing.T) {
+ // Make sure writebarrier phase works even StoreWB ops are not in dependency order
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstNil, ptrType, 0, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("wb2", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "wb1"),
+ Valu("wb1", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "start"), // wb1 and wb2 are out of order
+ Goto("exit")),
+ Bloc("exit",
+ Exit("wb2")))
+
+ CheckFunc(fun.f)
+ writebarrier(fun.f)
+ CheckFunc(fun.f)
+}
+
+func TestWriteBarrierPhi(t *testing.T) {
+ // Make sure writebarrier phase works for single-block loop, where
+ // a Phi op takes the store in the same block as argument.
+ // See issue #19067.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "wb"),
+ Valu("v", OpConstNil, ptrType, 0, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("wb", OpStore, types.TypeMem, 0, ptrType, "addr", "v", "phi"), // has write barrier
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ writebarrier(fun.f)
+ CheckFunc(fun.f)
+}
diff --git a/src/cmd/compile/internal/ssa/xposmap.go b/src/cmd/compile/internal/ssa/xposmap.go
new file mode 100644
index 0000000..93582e1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/xposmap.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+type lineRange struct {
+ first, last uint32
+}
+
+// An xposmap is a map from fileindex and line of src.XPos to int32,
+// implemented sparsely to save space (column and statement status are ignored).
+// The sparse skeleton is constructed once, and then reused by ssa phases
+// that (re)move values with statements attached.
+type xposmap struct {
+ // A map from file index to maps from line range to integers (block numbers)
+ maps map[int32]*biasedSparseMap
+ // The next two fields provide a single-item cache for common case of repeated lines from same file.
+ lastIndex int32 // -1 means no entry in cache
+ lastMap *biasedSparseMap // map found at maps[lastIndex]
+}
+
+// newXposmap constructs an xposmap valid for inputs which have a file index in the keys of x,
+// and line numbers in the range x[file index].
+// The resulting xposmap will panic if a caller attempts to set or add an XPos not in that range.
+func newXposmap(x map[int]lineRange) *xposmap {
+ maps := make(map[int32]*biasedSparseMap)
+ for i, p := range x {
+ maps[int32(i)] = newBiasedSparseMap(int(p.first), int(p.last))
+ }
+ return &xposmap{maps: maps, lastIndex: -1} // zero for the rest is okay
+}
+
+// clear removes data from the map but leaves the sparse skeleton.
+func (m *xposmap) clear() {
+ for _, l := range m.maps {
+ if l != nil {
+ l.clear()
+ }
+ }
+ m.lastIndex = -1
+ m.lastMap = nil
+}
+
+// mapFor returns the line range map for a given file index.
+func (m *xposmap) mapFor(index int32) *biasedSparseMap {
+ if index == m.lastIndex {
+ return m.lastMap
+ }
+ mf := m.maps[index]
+ m.lastIndex = index
+ m.lastMap = mf
+ return mf
+}
+
+// set inserts p->v into the map.
+// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic.
+func (m *xposmap) set(p src.XPos, v int32) {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ panic(fmt.Sprintf("xposmap.set(%d), file index not found in map\n", p.FileIndex()))
+ }
+ s.set(p.Line(), v)
+}
+
+// get returns the int32 associated with the file index and line of p.
+func (m *xposmap) get(p src.XPos) int32 {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return -1
+ }
+ return s.get(p.Line())
+}
+
+// add adds p to m, treating m as a set instead of as a map.
+// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic.
+// Use clear() in between set/map interpretations of m.
+func (m *xposmap) add(p src.XPos) {
+ m.set(p, 0)
+}
+
+// contains returns whether the file index and line of p are in m,
+// treating m as a set instead of as a map.
+func (m *xposmap) contains(p src.XPos) bool {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return false
+ }
+ return s.contains(p.Line())
+}
+
+// remove removes the file index and line for p from m,
+// whether m is currently treated as a map or set.
+func (m *xposmap) remove(p src.XPos) {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return
+ }
+ s.remove(p.Line())
+}
+
+// foreachEntry applies f to each (fileindex, line, value) triple in m.
+func (m *xposmap) foreachEntry(f func(j int32, l uint, v int32)) {
+ for j, mm := range m.maps {
+ s := mm.size()
+ for i := 0; i < s; i++ {
+ l, v := mm.getEntry(i)
+ f(j, l, v)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/zcse.go b/src/cmd/compile/internal/ssa/zcse.go
new file mode 100644
index 0000000..e08272c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/zcse.go
@@ -0,0 +1,79 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+// zcse does an initial pass of common-subexpression elimination on the
+// function for values with zero arguments to allow the more expensive cse
+// to begin with a reduced number of values. Values are just relinked,
+// nothing is deleted. A subsequent deadcode pass is required to actually
+// remove duplicate expressions.
+func zcse(f *Func) {
+ vals := make(map[vkey]*Value)
+
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if opcodeTable[v.Op].argLen == 0 {
+ key := vkey{v.Op, keyFor(v), v.Aux, v.Type}
+ if vals[key] == nil {
+ vals[key] = v
+ if b != f.Entry {
+ // Move v to the entry block so it will dominate every block
+ // where we might use it. This prevents the need for any dominator
+ // calculations in this pass.
+ v.Block = f.Entry
+ f.Entry.Values = append(f.Entry.Values, v)
+ last := len(b.Values) - 1
+ b.Values[i] = b.Values[last]
+ b.Values[last] = nil
+ b.Values = b.Values[:last]
+
+ i-- // process b.Values[i] again
+ }
+ }
+ }
+ }
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if opcodeTable[a.Op].argLen == 0 {
+ key := vkey{a.Op, keyFor(a), a.Aux, a.Type}
+ if rv, ok := vals[key]; ok {
+ v.SetArg(i, rv)
+ }
+ }
+ }
+ }
+ }
+}
+
+// vkey is a type used to uniquely identify a zero arg value.
+type vkey struct {
+ op Op
+ ai int64 // aux int
+ ax Aux // aux
+ t *types.Type // type
+}
+
+// keyFor returns the AuxInt portion of a key structure uniquely identifying a
+// zero arg value for the supported ops.
+func keyFor(v *Value) int64 {
+ switch v.Op {
+ case OpConst64, OpConst64F, OpConst32F:
+ return v.AuxInt
+ case OpConst32:
+ return int64(int32(v.AuxInt))
+ case OpConst16:
+ return int64(int16(v.AuxInt))
+ case OpConst8, OpConstBool:
+ return int64(int8(v.AuxInt))
+ default:
+ return v.AuxInt
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/zeroextension_test.go b/src/cmd/compile/internal/ssa/zeroextension_test.go
new file mode 100644
index 0000000..2e31621
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/zeroextension_test.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+type extTest struct {
+ f func(uint64, uint64) uint64
+ arg1 uint64
+ arg2 uint64
+ res uint64
+ name string
+}
+
+var extTests = [...]extTest{
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 / op2)) }, arg1: 0x1, arg2: 0xfffffffeffffffff, res: 0xffffffff, name: "div"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 * op2)) }, arg1: 0x1, arg2: 0x100000001, res: 0x1, name: "mul"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 + op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x0, name: "add"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 - op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x2, name: "sub"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 | op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xffffffff, name: "or"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 ^ op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xfffffffe, name: "xor"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 & op2)) }, arg1: 0x100000000000001, arg2: 0x100000000000001, res: 0x1, name: "and"},
+}
+
+func TestZeroExtension(t *testing.T) {
+ for _, x := range extTests {
+ r := x.f(x.arg1, x.arg2)
+ if x.res != r {
+ t.Errorf("%s: got %d want %d", x.name, r, x.res)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
new file mode 100644
index 0000000..56af9ce
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -0,0 +1,440 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "log"
+ "os"
+ "strings"
+
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/wasm"
+)
+
+// SymABIs records information provided by the assembler about symbol
+// definition ABIs and reference ABIs.
+type SymABIs struct {
+ defs map[string]obj.ABI
+ refs map[string]obj.ABISet
+}
+
+func NewSymABIs() *SymABIs {
+ return &SymABIs{
+ defs: make(map[string]obj.ABI),
+ refs: make(map[string]obj.ABISet),
+ }
+}
+
+// canonicalize returns the canonical name used for a linker symbol in
+// s's maps. Symbols in this package may be written either as "".X or
+// with the package's import path already in the symbol. This rewrites
+// both to use the full path, which matches compiler-generated linker
+// symbol names.
+func (s *SymABIs) canonicalize(linksym string) string {
+ if strings.HasPrefix(linksym, `"".`) {
+ panic("non-canonical symbol name: " + linksym)
+ }
+ return linksym
+}
+
+// ReadSymABIs reads a symabis file that specifies definitions and
+// references of text symbols by ABI.
+//
+// The symabis format is a set of lines, where each line is a sequence
+// of whitespace-separated fields. The first field is a verb and is
+// either "def" for defining a symbol ABI or "ref" for referencing a
+// symbol using an ABI. For both "def" and "ref", the second field is
+// the symbol name and the third field is the ABI name, as one of the
+// named cmd/internal/obj.ABI constants.
+func (s *SymABIs) ReadSymABIs(file string) {
+ data, err := os.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-symabis: %v", err)
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ parts := strings.Fields(line)
+ switch parts[0] {
+ case "def", "ref":
+ // Parse line.
+ if len(parts) != 3 {
+ log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
+ }
+ sym, abistr := parts[1], parts[2]
+ abi, valid := obj.ParseABI(abistr)
+ if !valid {
+ log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
+ }
+
+ sym = s.canonicalize(sym)
+
+ // Record for later.
+ if parts[0] == "def" {
+ s.defs[sym] = abi
+ } else {
+ s.refs[sym] |= obj.ABISetOf(abi)
+ }
+ default:
+ log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
+ }
+ }
+}
+
+// GenABIWrappers applies ABI information to Funcs and generates ABI
+// wrapper functions where necessary.
+func (s *SymABIs) GenABIWrappers() {
+ // For cgo exported symbols, we tell the linker to export the
+ // definition ABI to C. That also means that we don't want to
+ // create ABI wrappers even if there's a linkname.
+ //
+ // TODO(austin): Maybe we want to create the ABI wrappers, but
+ // ensure the linker exports the right ABI definition under
+ // the unmangled name?
+ cgoExports := make(map[string][]*[]string)
+ for i, prag := range typecheck.Target.CgoPragmas {
+ switch prag[0] {
+ case "cgo_export_static", "cgo_export_dynamic":
+ symName := s.canonicalize(prag[1])
+ pprag := &typecheck.Target.CgoPragmas[i]
+ cgoExports[symName] = append(cgoExports[symName], pprag)
+ }
+ }
+
+ // Apply ABI defs and refs to Funcs and generate wrappers.
+ //
+ // This may generate new decls for the wrappers, but we
+ // specifically *don't* want to visit those, lest we create
+ // wrappers for wrappers.
+ for _, fn := range typecheck.Target.Funcs {
+ nam := fn.Nname
+ if ir.IsBlank(nam) {
+ continue
+ }
+ sym := nam.Sym()
+
+ symName := sym.Linkname
+ if symName == "" {
+ symName = sym.Pkg.Prefix + "." + sym.Name
+ }
+ symName = s.canonicalize(symName)
+
+ // Apply definitions.
+ defABI, hasDefABI := s.defs[symName]
+ if hasDefABI {
+ if len(fn.Body) != 0 {
+ base.ErrorfAt(fn.Pos(), 0, "%v defined in both Go and assembly", fn)
+ }
+ fn.ABI = defABI
+ }
+
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ // CgoUnsafeArgs indicates the function (or its callee) uses
+ // offsets to dispatch arguments, which currently using ABI0
+ // frame layout. Pin it to ABI0.
+ fn.ABI = obj.ABI0
+ }
+
+ // If cgo-exported, add the definition ABI to the cgo
+ // pragmas.
+ cgoExport := cgoExports[symName]
+ for _, pprag := range cgoExport {
+ // The export pragmas have the form:
+ //
+ // cgo_export_* <local> [<remote>]
+ //
+ // If <remote> is omitted, it's the same as
+ // <local>.
+ //
+ // Expand to
+ //
+ // cgo_export_* <local> <remote> <ABI>
+ if len(*pprag) == 2 {
+ *pprag = append(*pprag, (*pprag)[1])
+ }
+ // Add the ABI argument.
+ *pprag = append(*pprag, fn.ABI.String())
+ }
+
+ // Apply references.
+ if abis, ok := s.refs[symName]; ok {
+ fn.ABIRefs |= abis
+ }
+ // Assume all functions are referenced at least as
+ // ABIInternal, since they may be referenced from
+ // other packages.
+ fn.ABIRefs.Set(obj.ABIInternal, true)
+
+ // If a symbol is defined in this package (either in
+ // Go or assembly) and given a linkname, it may be
+ // referenced from another package, so make it
+ // callable via any ABI. It's important that we know
+ // it's defined in this package since other packages
+ // may "pull" symbols using linkname and we don't want
+ // to create duplicate ABI wrappers.
+ //
+ // However, if it's given a linkname for exporting to
+ // C, then we don't make ABI wrappers because the cgo
+ // tool wants the original definition.
+ hasBody := len(fn.Body) != 0
+ if sym.Linkname != "" && (hasBody || hasDefABI) && len(cgoExport) == 0 {
+ fn.ABIRefs |= obj.ABISetCallable
+ }
+
+ // Double check that cgo-exported symbols don't get
+ // any wrappers.
+ if len(cgoExport) > 0 && fn.ABIRefs&^obj.ABISetOf(fn.ABI) != 0 {
+ base.Fatalf("cgo exported function %v cannot have ABI wrappers", fn)
+ }
+
+ if !buildcfg.Experiment.RegabiWrappers {
+ continue
+ }
+
+ forEachWrapperABI(fn, makeABIWrapper)
+ }
+}
+
+func forEachWrapperABI(fn *ir.Func, cb func(fn *ir.Func, wrapperABI obj.ABI)) {
+ need := fn.ABIRefs &^ obj.ABISetOf(fn.ABI)
+ if need == 0 {
+ return
+ }
+
+ for wrapperABI := obj.ABI(0); wrapperABI < obj.ABICount; wrapperABI++ {
+ if !need.Get(wrapperABI) {
+ continue
+ }
+ cb(fn, wrapperABI)
+ }
+}
+
+// makeABIWrapper creates a new function that will be called with
+// wrapperABI and calls "f" using f.ABI.
+func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
+ if base.Debug.ABIWrap != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %v\n", wrapperABI, f.ABI, f)
+ }
+
+ // Q: is this needed?
+ savepos := base.Pos
+ savedcurfn := ir.CurFunc
+
+ pos := base.AutogeneratedPos
+ base.Pos = pos
+
+ // At the moment we don't support wrapping a method, we'd need machinery
+ // below to handle the receiver. Panic if we see this scenario.
+ ft := f.Nname.Type()
+ if ft.NumRecvs() != 0 {
+ base.ErrorfAt(f.Pos(), 0, "makeABIWrapper support for wrapping methods not implemented")
+ return
+ }
+
+ // Reuse f's types.Sym to create a new ODCLFUNC/function.
+ // TODO(mdempsky): Means we can't set sym.Def in Declfunc, ugh.
+ fn := ir.NewFunc(pos, pos, f.Sym(), types.NewSignature(nil,
+ typecheck.NewFuncParams(ft.Params()),
+ typecheck.NewFuncParams(ft.Results())))
+ fn.ABI = wrapperABI
+ typecheck.DeclFunc(fn)
+
+ fn.SetABIWrapper(true)
+ fn.SetDupok(true)
+
+ // ABI0-to-ABIInternal wrappers will be mainly loading params from
+ // stack into registers (and/or storing stack locations back to
+ // registers after the wrapped call); in most cases they won't
+ // need to allocate stack space, so it should be OK to mark them
+ // as NOSPLIT in these cases. In addition, my assumption is that
+ // functions written in assembly are NOSPLIT in most (but not all)
+ // cases. In the case of an ABIInternal target that has too many
+ // parameters to fit into registers, the wrapper would need to
+ // allocate stack space, but this seems like an unlikely scenario.
+ // Hence: mark these wrappers NOSPLIT.
+ //
+ // ABIInternal-to-ABI0 wrappers on the other hand will be taking
+ // things in registers and pushing them onto the stack prior to
+ // the ABI0 call, meaning that they will always need to allocate
+ // stack space. If the compiler marks them as NOSPLIT this seems
+ // as though it could lead to situations where the linker's
+ // nosplit-overflow analysis would trigger a link failure. On the
+ // other hand if they not tagged NOSPLIT then this could cause
+ // problems when building the runtime (since there may be calls to
+ // asm routine in cases where it's not safe to grow the stack). In
+ // most cases the wrapper would be (in effect) inlined, but are
+ // there (perhaps) indirect calls from the runtime that could run
+ // into trouble here.
+ // FIXME: at the moment all.bash does not pass when I leave out
+ // NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
+ fn.Pragma |= ir.Nosplit
+
+ // Generate call. Use tail call if no params and no returns,
+ // but a regular call otherwise.
+ //
+ // Note: ideally we would be using a tail call in cases where
+ // there are params but no returns for ABI0->ABIInternal wrappers,
+ // provided that all params fit into registers (e.g. we don't have
+ // to allocate any stack space). Doing this will require some
+ // extra work in typecheck/walk/ssa, might want to add a new node
+ // OTAILCALL or something to this effect.
+ tailcall := fn.Type().NumResults() == 0 && fn.Type().NumParams() == 0 && fn.Type().NumRecvs() == 0
+ if base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink {
+ // cannot tailcall on PPC64 with dynamic linking, as we need
+ // to restore R2 after call.
+ tailcall = false
+ }
+ if base.Ctxt.Arch.Name == "amd64" && wrapperABI == obj.ABIInternal {
+ // cannot tailcall from ABIInternal to ABI0 on AMD64, as we need
+ // to special registers (X15) when returning to ABIInternal.
+ tailcall = false
+ }
+
+ var tail ir.Node
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
+ call.Args = ir.ParamNames(fn.Type())
+ call.IsDDD = fn.Type().IsVariadic()
+ tail = call
+ if tailcall {
+ tail = ir.NewTailCallStmt(base.Pos, call)
+ } else if fn.Type().NumResults() > 0 {
+ n := ir.NewReturnStmt(base.Pos, nil)
+ n.Results = []ir.Node{call}
+ tail = n
+ }
+ fn.Body.Append(tail)
+
+ typecheck.FinishFuncBody()
+
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+
+ // Restore previous context.
+ base.Pos = savepos
+ ir.CurFunc = savedcurfn
+}
+
+// CreateWasmImportWrapper creates a wrapper for imported WASM functions to
+// adapt them to the Go calling convention. The body for this function is
+// generated in cmd/internal/obj/wasm/wasmobj.go
+func CreateWasmImportWrapper(fn *ir.Func) bool {
+ if fn.WasmImport == nil {
+ return false
+ }
+ if buildcfg.GOARCH != "wasm" {
+ base.FatalfAt(fn.Pos(), "CreateWasmImportWrapper call not supported on %s: func was %v", buildcfg.GOARCH, fn)
+ }
+
+ ir.InitLSym(fn, true)
+
+ setupWasmABI(fn)
+
+ pp := objw.NewProgs(fn, 0)
+ defer pp.Free()
+ pp.Text.To.Type = obj.TYPE_TEXTSIZE
+ pp.Text.To.Val = int32(types.RoundUp(fn.Type().ArgWidth(), int64(types.RegSize)))
+ // Wrapper functions never need their own stack frame
+ pp.Text.To.Offset = 0
+ pp.Flush()
+
+ return true
+}
+
+func paramsToWasmFields(f *ir.Func, result *abi.ABIParamResultInfo, abiParams []abi.ABIParamAssignment) []obj.WasmField {
+ wfs := make([]obj.WasmField, len(abiParams))
+ for i, p := range abiParams {
+ t := p.Type
+ switch t.Kind() {
+ case types.TINT32, types.TUINT32:
+ wfs[i].Type = obj.WasmI32
+ case types.TINT64, types.TUINT64:
+ wfs[i].Type = obj.WasmI64
+ case types.TFLOAT32:
+ wfs[i].Type = obj.WasmF32
+ case types.TFLOAT64:
+ wfs[i].Type = obj.WasmF64
+ case types.TUNSAFEPTR:
+ wfs[i].Type = obj.WasmPtr
+ default:
+ base.ErrorfAt(f.Pos(), 0, "go:wasmimport %s %s: unsupported parameter type %s", f.WasmImport.Module, f.WasmImport.Name, t.String())
+ }
+ wfs[i].Offset = p.FrameOffset(result)
+ }
+ return wfs
+}
+
+func resultsToWasmFields(f *ir.Func, result *abi.ABIParamResultInfo, abiParams []abi.ABIParamAssignment) []obj.WasmField {
+ if len(abiParams) > 1 {
+ base.ErrorfAt(f.Pos(), 0, "go:wasmimport %s %s: too many return values", f.WasmImport.Module, f.WasmImport.Name)
+ return nil
+ }
+ wfs := make([]obj.WasmField, len(abiParams))
+ for i, p := range abiParams {
+ t := p.Type
+ switch t.Kind() {
+ case types.TINT32, types.TUINT32:
+ wfs[i].Type = obj.WasmI32
+ case types.TINT64, types.TUINT64:
+ wfs[i].Type = obj.WasmI64
+ case types.TFLOAT32:
+ wfs[i].Type = obj.WasmF32
+ case types.TFLOAT64:
+ wfs[i].Type = obj.WasmF64
+ default:
+ base.ErrorfAt(f.Pos(), 0, "go:wasmimport %s %s: unsupported result type %s", f.WasmImport.Module, f.WasmImport.Name, t.String())
+ }
+ wfs[i].Offset = p.FrameOffset(result)
+ }
+ return wfs
+}
+
+// setupTextLSym initializes the LSym for a with-body text symbol.
+func setupWasmABI(f *ir.Func) {
+ wi := obj.WasmImport{
+ Module: f.WasmImport.Module,
+ Name: f.WasmImport.Name,
+ }
+ if wi.Module == wasm.GojsModule {
+ // Functions that are imported from the "gojs" module use a special
+ // ABI that just accepts the stack pointer.
+ // Example:
+ //
+ // //go:wasmimport gojs add
+ // func importedAdd(a, b uint) uint
+ //
+ // will roughly become
+ //
+ // (import "gojs" "add" (func (param i32)))
+ wi.Params = []obj.WasmField{{Type: obj.WasmI32}}
+ } else {
+ // All other imported functions use the normal WASM ABI.
+ // Example:
+ //
+ // //go:wasmimport a_module add
+ // func importedAdd(a, b uint) uint
+ //
+ // will roughly become
+ //
+ // (import "a_module" "add" (func (param i32 i32) (result i32)))
+ abiConfig := AbiForBodylessFuncStackMap(f)
+ abiInfo := abiConfig.ABIAnalyzeFuncType(f.Type())
+ wi.Params = paramsToWasmFields(f, abiInfo, abiInfo.InParams())
+ wi.Results = resultsToWasmFields(f, abiInfo, abiInfo.OutParams())
+ }
+ f.LSym.Func().WasmImport = &wi
+}
diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go
new file mode 100644
index 0000000..483e45c
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/arch.go
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+var Arch ArchInfo
+
+// interface to back end
+
+type ArchInfo struct {
+ LinkArch *obj.LinkArch
+
+ REGSP int
+ MAXWIDTH int64
+ SoftFloat bool
+
+ PadFrame func(int64) int64
+
+ // ZeroRange zeroes a range of memory on stack. It is only inserted
+ // at function entry, and it is ok to clobber registers.
+ ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+
+ Ginsnop func(*objw.Progs) *obj.Prog
+
+ // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+ SSAMarkMoves func(*State, *ssa.Block)
+
+ // SSAGenValue emits Prog(s) for the Value.
+ SSAGenValue func(*State, *ssa.Value)
+
+ // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
+ // for all values in the block before SSAGenBlock.
+ SSAGenBlock func(s *State, b, next *ssa.Block)
+
+ // LoadRegResult emits instructions that loads register-assigned result
+ // at n+off (n is PPARAMOUT) to register reg. The result is already in
+ // memory. Used in open-coded defer return path.
+ LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
+
+ // SpillArgReg emits instructions that spill reg to n+off.
+ SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
+}
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
new file mode 100644
index 0000000..b8756ee
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -0,0 +1,195 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "fmt"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+func EnableNoWriteBarrierRecCheck() {
+ nowritebarrierrecCheck = newNowritebarrierrecChecker()
+}
+
+func NoWriteBarrierRecCheck() {
+ // Write barriers are now known. Check the
+ // call graph.
+ nowritebarrierrecCheck.check()
+ nowritebarrierrecCheck = nil
+}
+
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+
+type nowritebarrierrecChecker struct {
+ // extraCalls contains extra function calls that may not be
+ // visible during later analysis. It maps from the ODCLFUNC of
+ // the caller to a list of callees.
+ extraCalls map[*ir.Func][]nowritebarrierrecCall
+
+ // curfn is the current function during AST walks.
+ curfn *ir.Func
+}
+
+type nowritebarrierrecCall struct {
+ target *ir.Func // caller or callee
+ lineno src.XPos // line of call
+}
+
+// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
+// must be called before walk.
+func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
+ c := &nowritebarrierrecChecker{
+ extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
+ }
+
+ // Find all systemstack calls and record their targets. In
+ // general, flow analysis can't see into systemstack, but it's
+ // important to handle it for this check, so we model it
+ // directly. This has to happen before transforming closures in walk since
+ // it's a lot harder to work out the argument after.
+ for _, n := range typecheck.Target.Funcs {
+ c.curfn = n
+ if c.curfn.ABIWrapper() {
+ // We only want "real" calls to these
+ // functions, not the generated ones within
+ // their own ABI wrappers.
+ continue
+ }
+ ir.Visit(n, c.findExtraCalls)
+ }
+ c.curfn = nil
+ return c
+}
+
+func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
+ if nn.Op() != ir.OCALLFUNC {
+ return
+ }
+ n := nn.(*ir.CallExpr)
+ if n.Fun == nil || n.Fun.Op() != ir.ONAME {
+ return
+ }
+ fn := n.Fun.(*ir.Name)
+ if fn.Class != ir.PFUNC || fn.Defn == nil {
+ return
+ }
+ if types.RuntimeSymName(fn.Sym()) != "systemstack" {
+ return
+ }
+
+ var callee *ir.Func
+ arg := n.Args[0]
+ switch arg.Op() {
+ case ir.ONAME:
+ arg := arg.(*ir.Name)
+ callee = arg.Defn.(*ir.Func)
+ case ir.OCLOSURE:
+ arg := arg.(*ir.ClosureExpr)
+ callee = arg.Func
+ default:
+ base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+ }
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
+}
+
+// recordCall records a call from ODCLFUNC node "from", to function
+// symbol "to" at position pos.
+//
+// This should be done as late as possible during compilation to
+// capture precise call graphs. The target of the call is an LSym
+// because that's all we know after we start SSA.
+//
+// This can be called concurrently for different from Nodes.
+func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
+ // We record this information on the *Func so this is concurrent-safe.
+ if fn.NWBRCalls == nil {
+ fn.NWBRCalls = new([]ir.SymAndPos)
+ }
+ *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
+}
+
+func (c *nowritebarrierrecChecker) check() {
+ // We walk the call graph as late as possible so we can
+ // capture all calls created by lowering, but this means we
+ // only get to see the obj.LSyms of calls. symToFunc lets us
+ // get back to the ODCLFUNCs.
+ symToFunc := make(map[*obj.LSym]*ir.Func)
+ // funcs records the back-edges of the BFS call graph walk. It
+ // maps from the ODCLFUNC of each function that must not have
+ // write barriers to the call that inhibits them. Functions
+ // that are directly marked go:nowritebarrierrec are in this
+ // map with a zero-valued nowritebarrierrecCall. This also
+ // acts as the set of marks for the BFS of the call graph.
+ funcs := make(map[*ir.Func]nowritebarrierrecCall)
+ // q is the queue of ODCLFUNC Nodes to visit in BFS order.
+ var q ir.NameQueue
+
+ for _, fn := range typecheck.Target.Funcs {
+ symToFunc[fn.LSym] = fn
+
+ // Make nowritebarrierrec functions BFS roots.
+ if fn.Pragma&ir.Nowritebarrierrec != 0 {
+ funcs[fn] = nowritebarrierrecCall{}
+ q.PushRight(fn.Nname)
+ }
+ // Check go:nowritebarrier functions.
+ if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
+ base.ErrorfAt(fn.WBPos, 0, "write barrier prohibited")
+ }
+ }
+
+ // Perform a BFS of the call graph from all
+ // go:nowritebarrierrec functions.
+ enqueue := func(src, target *ir.Func, pos src.XPos) {
+ if target.Pragma&ir.Yeswritebarrierrec != 0 {
+ // Don't flow into this function.
+ return
+ }
+ if _, ok := funcs[target]; ok {
+ // Already found a path to target.
+ return
+ }
+
+ // Record the path.
+ funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
+ q.PushRight(target.Nname)
+ }
+ for !q.Empty() {
+ fn := q.PopLeft().Func
+
+ // Check fn.
+ if fn.WBPos.IsKnown() {
+ var err strings.Builder
+ call := funcs[fn]
+ for call.target != nil {
+ fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
+ call = funcs[call.target]
+ }
+ base.ErrorfAt(fn.WBPos, 0, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
+ continue
+ }
+
+ // Enqueue fn's calls.
+ for _, callee := range c.extraCalls[fn] {
+ enqueue(fn, callee.target, callee.lineno)
+ }
+ if fn.NWBRCalls == nil {
+ continue
+ }
+ for _, callee := range *fn.NWBRCalls {
+ target := symToFunc[callee.Sym]
+ if target != nil {
+ enqueue(fn, target, callee.Pos)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
new file mode 100644
index 0000000..e7a0699
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -0,0 +1,364 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "os"
+ "sort"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// cmpstackvarlt reports whether the stack variable a sorts before b.
+func cmpstackvarlt(a, b *ir.Name) bool {
+ // Sort non-autos before autos.
+ if needAlloc(a) != needAlloc(b) {
+ return needAlloc(b)
+ }
+
+ // If both are non-auto (e.g., parameters, results), then sort by
+ // frame offset (defined by ABI).
+ if !needAlloc(a) {
+ return a.FrameOffset() < b.FrameOffset()
+ }
+
+ // From here on, a and b are both autos (i.e., local variables).
+
+ // Sort used before unused (so AllocFrame can truncate unused
+ // variables).
+ if a.Used() != b.Used() {
+ return a.Used()
+ }
+
+ // Sort pointer-typed before non-pointer types.
+ // Keeps the stack's GC bitmap compact.
+ ap := a.Type().HasPointers()
+ bp := b.Type().HasPointers()
+ if ap != bp {
+ return ap
+ }
+
+ // Group variables that need zeroing, so we can efficiently zero
+ // them altogether.
+ ap = a.Needzero()
+ bp = b.Needzero()
+ if ap != bp {
+ return ap
+ }
+
+ // Sort variables in descending alignment order, so we can optimally
+ // pack variables into the frame.
+ if a.Type().Alignment() != b.Type().Alignment() {
+ return a.Type().Alignment() > b.Type().Alignment()
+ }
+
+ // Sort normal variables before open-coded-defer slots, so that the
+ // latter are grouped together and near the top of the frame (to
+ // minimize varint encoding of their varp offset).
+ if a.OpenDeferSlot() != b.OpenDeferSlot() {
+ return a.OpenDeferSlot()
+ }
+
+ // If a and b are both open-coded defer slots, then order them by
+ // index in descending order, so they'll be laid out in the frame in
+ // ascending order.
+ //
+ // Their index was saved in FrameOffset in state.openDeferSave.
+ if a.OpenDeferSlot() {
+ return a.FrameOffset() > b.FrameOffset()
+ }
+
+ // Tie breaker for stable results.
+ return a.Sym().Name < b.Sym().Name
+}
+
+// byStackVar implements sort.Interface for []*Node using cmpstackvarlt.
+type byStackVar []*ir.Name
+
+func (s byStackVar) Len() int { return len(s) }
+func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
+func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// needAlloc reports whether n is within the current frame, for which we need to
+// allocate space. In particular, it excludes arguments and results, which are in
+// the callers frame.
+func needAlloc(n *ir.Name) bool {
+ if n.Op() != ir.ONAME {
+ base.FatalfAt(n.Pos(), "%v has unexpected Op %v", n, n.Op())
+ }
+
+ switch n.Class {
+ case ir.PAUTO:
+ return true
+ case ir.PPARAM:
+ return false
+ case ir.PPARAMOUT:
+ return n.IsOutputParamInRegisters()
+
+ default:
+ base.FatalfAt(n.Pos(), "%v has unexpected Class %v", n, n.Class)
+ return false
+ }
+}
+
+func (s *ssafn) AllocFrame(f *ssa.Func) {
+ s.stksize = 0
+ s.stkptrsize = 0
+ s.stkalign = int64(types.RegSize)
+ fn := s.curfn
+
+ // Mark the PAUTO's unused.
+ for _, ln := range fn.Dcl {
+ if ln.OpenDeferSlot() {
+ // Open-coded defer slots have indices that were assigned
+ // upfront during SSA construction, but the defer statement can
+ // later get removed during deadcode elimination (#61895). To
+ // keep their relative offsets correct, treat them all as used.
+ continue
+ }
+
+ if needAlloc(ln) {
+ ln.SetUsed(false)
+ }
+ }
+
+ for _, l := range f.RegAlloc {
+ if ls, ok := l.(ssa.LocalSlot); ok {
+ ls.N.SetUsed(true)
+ }
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if n, ok := v.Aux.(*ir.Name); ok {
+ switch n.Class {
+ case ir.PPARAMOUT:
+ if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef {
+ // ignore VarDef, look for "real" uses.
+ // TODO: maybe do this for PAUTO as well?
+ continue
+ }
+ fallthrough
+ case ir.PPARAM, ir.PAUTO:
+ n.SetUsed(true)
+ }
+ }
+ }
+ }
+
+ // Use sort.Stable instead of sort.Sort so stack layout (and thus
+ // compiler output) is less sensitive to frontend changes that
+ // introduce or remove unused variables.
+ sort.Stable(byStackVar(fn.Dcl))
+
+ // Reassign stack offsets of the locals that are used.
+ lastHasPtr := false
+ for i, n := range fn.Dcl {
+ if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) {
+ // i.e., stack assign if AUTO, or if PARAMOUT in registers (which has no predefined spill locations)
+ continue
+ }
+ if !n.Used() {
+ fn.DebugInfo.(*ssa.FuncDebug).OptDcl = fn.Dcl[i:]
+ fn.Dcl = fn.Dcl[:i]
+ break
+ }
+
+ types.CalcSize(n.Type())
+ w := n.Type().Size()
+ if w >= types.MaxWidth || w < 0 {
+ base.Fatalf("bad width")
+ }
+ if w == 0 && lastHasPtr {
+ // Pad between a pointer-containing object and a zero-sized object.
+ // This prevents a pointer to the zero-sized object from being interpreted
+ // as a pointer to the pointer-containing object (and causing it
+ // to be scanned when it shouldn't be). See issue 24993.
+ w = 1
+ }
+ s.stksize += w
+ s.stksize = types.RoundUp(s.stksize, n.Type().Alignment())
+ if n.Type().Alignment() > int64(types.RegSize) {
+ s.stkalign = n.Type().Alignment()
+ }
+ if n.Type().HasPointers() {
+ s.stkptrsize = s.stksize
+ lastHasPtr = true
+ } else {
+ lastHasPtr = false
+ }
+ n.SetFrameOffset(-s.stksize)
+ }
+
+ s.stksize = types.RoundUp(s.stksize, s.stkalign)
+ s.stkptrsize = types.RoundUp(s.stkptrsize, s.stkalign)
+}
+
+const maxStackSize = 1 << 30
+
+// Compile builds an SSA backend function,
+// uses it to generate a plist,
+// and flushes that plist to machine code.
+// worker indicates which of the backend workers is doing the processing.
+func Compile(fn *ir.Func, worker int) {
+ f := buildssa(fn, worker)
+ // Note: check arg size to fix issue 25507.
+ if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize {
+ largeStackFramesMu.Lock()
+ largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()})
+ largeStackFramesMu.Unlock()
+ return
+ }
+ pp := objw.NewProgs(fn, worker)
+ defer pp.Free()
+ genssa(f, pp)
+ // Check frame size again.
+ // The check above included only the space needed for local variables.
+ // After genssa, the space needed includes local variables and the callee arg region.
+ // We must do this check prior to calling pp.Flush.
+ // If there are any oversized stack frames,
+ // the assembler may emit inscrutable complaints about invalid instructions.
+ if pp.Text.To.Offset >= maxStackSize {
+ largeStackFramesMu.Lock()
+ locals := f.Frontend().(*ssafn).stksize
+ largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
+ largeStackFramesMu.Unlock()
+ return
+ }
+
+ pp.Flush() // assemble, fill in boilerplate, etc.
+
+ // If we're compiling the package init function, search for any
+ // relocations that target global map init outline functions and
+ // turn them into weak relocs.
+ if fn.IsPackageInit() && base.Debug.WrapGlobalMapCtl != 1 {
+ weakenGlobalMapInitRelocs(fn)
+ }
+
+ // fieldtrack must be called after pp.Flush. See issue 20014.
+ fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
+}
+
+// globalMapInitLsyms records the LSym of each map.init.NNN outlined
+// map initializer function created by the compiler.
+var globalMapInitLsyms map[*obj.LSym]struct{}
+
+// RegisterMapInitLsym records "s" in the set of outlined map initializer
+// functions.
+func RegisterMapInitLsym(s *obj.LSym) {
+ if globalMapInitLsyms == nil {
+ globalMapInitLsyms = make(map[*obj.LSym]struct{})
+ }
+ globalMapInitLsyms[s] = struct{}{}
+}
+
+// weakenGlobalMapInitRelocs walks through all of the relocations on a
+// given a package init function "fn" and looks for relocs that target
+// outlined global map initializer functions; if it finds any such
+// relocs, it flags them as R_WEAK.
+func weakenGlobalMapInitRelocs(fn *ir.Func) {
+ if globalMapInitLsyms == nil {
+ return
+ }
+ for i := range fn.LSym.R {
+ tgt := fn.LSym.R[i].Sym
+ if tgt == nil {
+ continue
+ }
+ if _, ok := globalMapInitLsyms[tgt]; !ok {
+ continue
+ }
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= weakify fn %v reloc %d %+v\n", fn, i,
+ fn.LSym.R[i])
+ }
+ // set the R_WEAK bit, leave rest of reloc type intact
+ fn.LSym.R[i].Type |= objabi.R_WEAK
+ }
+}
+
+// StackOffset returns the stack location of a LocalSlot relative to the
+// stack pointer, suitable for use in a DWARF location entry. This has nothing
+// to do with its offset in the user variable.
+func StackOffset(slot ssa.LocalSlot) int32 {
+ n := slot.N
+ var off int64
+ switch n.Class {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if !n.IsOutputParamInRegisters() {
+ off = n.FrameOffset() + base.Ctxt.Arch.FixedFrameSize
+ break
+ }
+ fallthrough // PPARAMOUT in registers allocates like an AUTO
+ case ir.PAUTO:
+ off = n.FrameOffset()
+ if base.Ctxt.Arch.FixedFrameSize == 0 {
+ off -= int64(types.PtrSize)
+ }
+ if buildcfg.FramePointerEnabled {
+ off -= int64(types.PtrSize)
+ }
+ }
+ return int32(off + slot.Off)
+}
+
+// fieldtrack adds R_USEFIELD relocations to fnsym to record any
+// struct fields that it used.
+func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) {
+ if fnsym == nil {
+ return
+ }
+ if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 {
+ return
+ }
+
+ trackSyms := make([]*obj.LSym, 0, len(tracked))
+ for sym := range tracked {
+ trackSyms = append(trackSyms, sym)
+ }
+ sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name })
+ for _, sym := range trackSyms {
+ r := obj.Addrel(fnsym)
+ r.Sym = sym
+ r.Type = objabi.R_USEFIELD
+ }
+}
+
+// largeStack is info about a function whose stack frame is too large (rare).
+type largeStack struct {
+ locals int64
+ args int64
+ callee int64
+ pos src.XPos
+}
+
+var (
+ largeStackFramesMu sync.Mutex // protects largeStackFrames
+ largeStackFrames []largeStack
+)
+
+func CheckLargeStacks() {
+ // Check whether any of the functions we have compiled have gigantic stack frames.
+ sort.Slice(largeStackFrames, func(i, j int) bool {
+ return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
+ })
+ for _, large := range largeStackFrames {
+ if large.callee != 0 {
+ base.ErrorfAt(large.pos, 0, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+ } else {
+ base.ErrorfAt(large.pos, 0, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssagen/phi.go b/src/cmd/compile/internal/ssagen/phi.go
new file mode 100644
index 0000000..19b6920
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/phi.go
@@ -0,0 +1,557 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "container/heap"
+ "fmt"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// This file contains the algorithm to place phi nodes in a function.
+// For small functions, we use Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau.
+// https://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
+// For large functions, we use Sreedhar & Gao: A Linear Time Algorithm for Placing Φ-Nodes.
+// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.1979&rep=rep1&type=pdf
+
+const smallBlocks = 500
+
+const debugPhi = false
+
+// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
+type fwdRefAux struct {
+ _ [0]func() // ensure ir.Node isn't compared for equality
+ N ir.Node
+}
+
+func (fwdRefAux) CanBeAnSSAAux() {}
+
+// insertPhis finds all the places in the function where a phi is
+// necessary and inserts them.
+// Uses FwdRef ops to find all uses of variables, and s.defvars to find
+// all definitions.
+// Phi values are inserted, and all FwdRefs are changed to a Copy
+// of the appropriate phi or definition.
+// TODO: make this part of cmd/compile/internal/ssa somehow?
+func (s *state) insertPhis() {
+ if len(s.f.Blocks) <= smallBlocks {
+ sps := simplePhiState{s: s, f: s.f, defvars: s.defvars}
+ sps.insertPhis()
+ return
+ }
+ ps := phiState{s: s, f: s.f, defvars: s.defvars}
+ ps.insertPhis()
+}
+
+type phiState struct {
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+
+ varnum map[ir.Node]int32 // variable numbering
+
+ // properties of the dominator tree
+ idom []*ssa.Block // dominator parents
+ tree []domBlock // dominator child+sibling
+ level []int32 // level in dominator tree (0 = root or unreachable, 1 = children of root, ...)
+
+ // scratch locations
+ priq blockHeap // priority queue of blocks, higher level (toward leaves) = higher priority
+ q []*ssa.Block // inner loop queue
+ queued *sparseSet // has been put in q
+ hasPhi *sparseSet // has a phi
+ hasDef *sparseSet // has a write of the variable we're processing
+
+ // miscellaneous
+ placeholder *ssa.Value // value to use as a "not set yet" placeholder.
+}
+
+func (s *phiState) insertPhis() {
+ if debugPhi {
+ fmt.Println(s.f.String())
+ }
+
+ // Find all the variables for which we need to match up reads & writes.
+ // This step prunes any basic-block-only variables from consideration.
+ // Generate a numbering for these variables.
+ s.varnum = map[ir.Node]int32{}
+ var vars []ir.Node
+ var vartypes []*types.Type
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ var_ := v.Aux.(fwdRefAux).N
+
+ // Optimization: look back 1 block for the definition.
+ if len(b.Preds) == 1 {
+ c := b.Preds[0].Block()
+ if w := s.defvars[c.ID][var_]; w != nil {
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(w)
+ continue
+ }
+ }
+
+ if _, ok := s.varnum[var_]; ok {
+ continue
+ }
+ s.varnum[var_] = int32(len(vartypes))
+ if debugPhi {
+ fmt.Printf("var%d = %v\n", len(vartypes), var_)
+ }
+ vars = append(vars, var_)
+ vartypes = append(vartypes, v.Type)
+ }
+ }
+
+ if len(vartypes) == 0 {
+ return
+ }
+
+ // Find all definitions of the variables we need to process.
+ // defs[n] contains all the blocks in which variable number n is assigned.
+ defs := make([][]*ssa.Block, len(vartypes))
+ for _, b := range s.f.Blocks {
+ for var_ := range s.defvars[b.ID] { // TODO: encode defvars some other way (explicit ops)? make defvars[n] a slice instead of a map.
+ if n, ok := s.varnum[var_]; ok {
+ defs[n] = append(defs[n], b)
+ }
+ }
+ }
+
+ // Make dominator tree.
+ s.idom = s.f.Idom()
+ s.tree = make([]domBlock, s.f.NumBlocks())
+ for _, b := range s.f.Blocks {
+ p := s.idom[b.ID]
+ if p != nil {
+ s.tree[b.ID].sibling = s.tree[p.ID].firstChild
+ s.tree[p.ID].firstChild = b
+ }
+ }
+ // Compute levels in dominator tree.
+ // With parent pointers we can do a depth-first walk without
+ // any auxiliary storage.
+ s.level = make([]int32, s.f.NumBlocks())
+ b := s.f.Entry
+levels:
+ for {
+ if p := s.idom[b.ID]; p != nil {
+ s.level[b.ID] = s.level[p.ID] + 1
+ if debugPhi {
+ fmt.Printf("level %s = %d\n", b, s.level[b.ID])
+ }
+ }
+ if c := s.tree[b.ID].firstChild; c != nil {
+ b = c
+ continue
+ }
+ for {
+ if c := s.tree[b.ID].sibling; c != nil {
+ b = c
+ continue levels
+ }
+ b = s.idom[b.ID]
+ if b == nil {
+ break levels
+ }
+ }
+ }
+
+ // Allocate scratch locations.
+ s.priq.level = s.level
+ s.q = make([]*ssa.Block, 0, s.f.NumBlocks())
+ s.queued = newSparseSet(s.f.NumBlocks())
+ s.hasPhi = newSparseSet(s.f.NumBlocks())
+ s.hasDef = newSparseSet(s.f.NumBlocks())
+ s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, types.TypeInvalid)
+
+ // Generate phi ops for each variable.
+ for n := range vartypes {
+ s.insertVarPhis(n, vars[n], defs[n], vartypes[n])
+ }
+
+ // Resolve FwdRefs to the correct write or phi.
+ s.resolveFwdRefs()
+
+ // Erase variable numbers stored in AuxInt fields of phi ops. They are no longer needed.
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == ssa.OpPhi {
+ v.AuxInt = 0
+ }
+ // Any remaining FwdRefs are dead code.
+ if v.Op == ssa.OpFwdRef {
+ v.Op = ssa.OpUnknown
+ v.Aux = nil
+ }
+ }
+ }
+}
+
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
+ priq := &s.priq
+ q := s.q
+ queued := s.queued
+ queued.clear()
+ hasPhi := s.hasPhi
+ hasPhi.clear()
+ hasDef := s.hasDef
+ hasDef.clear()
+
+ // Add defining blocks to priority queue.
+ for _, b := range defs {
+ priq.a = append(priq.a, b)
+ hasDef.add(b.ID)
+ if debugPhi {
+ fmt.Printf("def of var%d in %s\n", n, b)
+ }
+ }
+ heap.Init(priq)
+
+ // Visit blocks defining variable n, from deepest to shallowest.
+ for len(priq.a) > 0 {
+ currentRoot := heap.Pop(priq).(*ssa.Block)
+ if debugPhi {
+ fmt.Printf("currentRoot %s\n", currentRoot)
+ }
+ // Walk subtree below definition.
+ // Skip subtrees we've done in previous iterations.
+ // Find edges exiting tree dominated by definition (the dominance frontier).
+ // Insert phis at target blocks.
+ if queued.contains(currentRoot.ID) {
+ s.s.Fatalf("root already in queue")
+ }
+ q = append(q, currentRoot)
+ queued.add(currentRoot.ID)
+ for len(q) > 0 {
+ b := q[len(q)-1]
+ q = q[:len(q)-1]
+ if debugPhi {
+ fmt.Printf(" processing %s\n", b)
+ }
+
+ currentRootLevel := s.level[currentRoot.ID]
+ for _, e := range b.Succs {
+ c := e.Block()
+ // TODO: if the variable is dead at c, skip it.
+ if s.level[c.ID] > currentRootLevel {
+ // a D-edge, or an edge whose target is in currentRoot's subtree.
+ continue
+ }
+ if hasPhi.contains(c.ID) {
+ continue
+ }
+ // Add a phi to block c for variable n.
+ hasPhi.add(c.ID)
+ v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
+ // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
+ if var_.Op() == ir.ONAME {
+ s.s.addNamedValue(var_.(*ir.Name), v)
+ }
+ for range c.Preds {
+ v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
+ }
+ if debugPhi {
+ fmt.Printf("new phi for var%d in %s: %s\n", n, c, v)
+ }
+ if !hasDef.contains(c.ID) {
+ // There's now a new definition of this variable in block c.
+ // Add it to the priority queue to explore.
+ heap.Push(priq, c)
+ hasDef.add(c.ID)
+ }
+ }
+
+ // Visit children if they have not been visited yet.
+ for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
+ if !queued.contains(c.ID) {
+ q = append(q, c)
+ queued.add(c.ID)
+ }
+ }
+ }
+ }
+}
+
+// resolveFwdRefs links all FwdRef uses up to their nearest dominating definition.
+func (s *phiState) resolveFwdRefs() {
+ // Do a depth-first walk of the dominator tree, keeping track
+ // of the most-recently-seen value for each variable.
+
+ // Map from variable ID to SSA value at the current point of the walk.
+ values := make([]*ssa.Value, len(s.varnum))
+ for i := range values {
+ values[i] = s.placeholder
+ }
+
+ // Stack of work to do.
+ type stackEntry struct {
+ b *ssa.Block // block to explore
+
+ // variable/value pair to reinstate on exit
+ n int32 // variable ID
+ v *ssa.Value
+
+ // Note: only one of b or n,v will be set.
+ }
+ var stk []stackEntry
+
+ stk = append(stk, stackEntry{b: s.f.Entry})
+ for len(stk) > 0 {
+ work := stk[len(stk)-1]
+ stk = stk[:len(stk)-1]
+
+ b := work.b
+ if b == nil {
+ // On exit from a block, this case will undo any assignments done below.
+ values[work.n] = work.v
+ continue
+ }
+
+ // Process phis as new defs. They come before FwdRefs in this block.
+ for _, v := range b.Values {
+ if v.Op != ssa.OpPhi {
+ continue
+ }
+ n := int32(v.AuxInt)
+ // Remember the old assignment so we can undo it when we exit b.
+ stk = append(stk, stackEntry{n: n, v: values[n]})
+ // Record the new assignment.
+ values[n] = v
+ }
+
+ // Replace a FwdRef op with the current incoming value for its variable.
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ n := s.varnum[v.Aux.(fwdRefAux).N]
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(values[n])
+ }
+
+ // Establish values for variables defined in b.
+ for var_, v := range s.defvars[b.ID] {
+ n, ok := s.varnum[var_]
+ if !ok {
+ // some variable not live across a basic block boundary.
+ continue
+ }
+ // Remember the old assignment so we can undo it when we exit b.
+ stk = append(stk, stackEntry{n: n, v: values[n]})
+ // Record the new assignment.
+ values[n] = v
+ }
+
+ // Replace phi args in successors with the current incoming value.
+ for _, e := range b.Succs {
+ c, i := e.Block(), e.Index()
+ for j := len(c.Values) - 1; j >= 0; j-- {
+ v := c.Values[j]
+ if v.Op != ssa.OpPhi {
+ break // All phis will be at the end of the block during phi building.
+ }
+ // Only set arguments that have been resolved.
+ // For very wide CFGs, this significantly speeds up phi resolution.
+ // See golang.org/issue/8225.
+ if w := values[v.AuxInt]; w.Op != ssa.OpUnknown {
+ v.SetArg(i, w)
+ }
+ }
+ }
+
+ // Walk children in dominator tree.
+ for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
+ stk = append(stk, stackEntry{b: c})
+ }
+ }
+}
+
+// domBlock contains extra per-block information to record the dominator tree.
+type domBlock struct {
+ firstChild *ssa.Block // first child of block in dominator tree
+ sibling *ssa.Block // next child of parent in dominator tree
+}
+
+// A block heap is used as a priority queue to implement the PiggyBank
+// from Sreedhar and Gao. That paper uses an array which is better
+// asymptotically but worse in the common case when the PiggyBank
+// holds a sparse set of blocks.
+type blockHeap struct {
+ a []*ssa.Block // block IDs in heap
+ level []int32 // depth in dominator tree (static, used for determining priority)
+}
+
+func (h *blockHeap) Len() int { return len(h.a) }
+func (h *blockHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *blockHeap) Push(x interface{}) {
+ v := x.(*ssa.Block)
+ h.a = append(h.a, v)
+}
+func (h *blockHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[:n-1]
+ return x
+}
+func (h *blockHeap) Less(i, j int) bool {
+ return h.level[h.a[i].ID] > h.level[h.a[j].ID]
+}
+
+// TODO: stop walking the iterated domininance frontier when
+// the variable is dead. Maybe detect that by checking if the
+// node we're on is reverse dominated by all the reads?
+// Reverse dominated by the highest common successor of all the reads?
+
+// copy of ../ssa/sparseset.go
+// TODO: move this file to ../ssa, then use sparseSet there.
+type sparseSet struct {
+ dense []ssa.ID
+ sparse []int32
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1.
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseSet) contains(x ssa.ID) bool {
+ i := s.sparse[x]
+ return i < int32(len(s.dense)) && s.dense[i] == x
+}
+
+func (s *sparseSet) add(x ssa.ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+// Variant to use for small functions.
+type simplePhiState struct {
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
+}
+
+func (s *simplePhiState) insertPhis() {
+ s.reachable = ssa.ReachableBlocks(s.f)
+
+ // Find FwdRef ops.
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ s.fwdrefs = append(s.fwdrefs, v)
+ var_ := v.Aux.(fwdRefAux).N
+ if _, ok := s.defvars[b.ID][var_]; !ok {
+ s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
+ }
+ }
+ }
+
+ var args []*ssa.Value
+
+loop:
+ for len(s.fwdrefs) > 0 {
+ v := s.fwdrefs[len(s.fwdrefs)-1]
+ s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
+ b := v.Block
+ var_ := v.Aux.(fwdRefAux).N
+ if b == s.f.Entry {
+ // No variable should be live at entry.
+ s.s.Fatalf("value %v (%v) incorrectly live at entry", var_, v)
+ }
+ if !s.reachable[b.ID] {
+ // This block is dead.
+ // It doesn't matter what we use here as long as it is well-formed.
+ v.Op = ssa.OpUnknown
+ v.Aux = nil
+ continue
+ }
+ // Find variable value on each predecessor.
+ args = args[:0]
+ for _, e := range b.Preds {
+ args = append(args, s.lookupVarOutgoing(e.Block(), v.Type, var_, v.Pos))
+ }
+
+ // Decide if we need a phi or not. We need a phi if there
+ // are two different args (which are both not v).
+ var w *ssa.Value
+ for _, a := range args {
+ if a == v {
+ continue // self-reference
+ }
+ if a == w {
+ continue // already have this witness
+ }
+ if w != nil {
+ // two witnesses, need a phi value
+ v.Op = ssa.OpPhi
+ v.AddArgs(args...)
+ v.Aux = nil
+ continue loop
+ }
+ w = a // save witness
+ }
+ if w == nil {
+ s.s.Fatalf("no witness for reachable phi %s", v)
+ }
+ // One witness. Make v a copy of w.
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(w)
+ }
+}
+
+// lookupVarOutgoing finds the variable's value at the end of block b.
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
+ for {
+ if v := s.defvars[b.ID][var_]; v != nil {
+ return v
+ }
+ // The variable is not defined by b and we haven't looked it up yet.
+ // If b has exactly one predecessor, loop to look it up there.
+ // Otherwise, give up and insert a new FwdRef and resolve it later.
+ if len(b.Preds) != 1 {
+ break
+ }
+ b = b.Preds[0].Block()
+ if !s.reachable[b.ID] {
+ // This is rare; it happens with oddly interleaved infinite loops in dead code.
+ // See issue 19783.
+ break
+ }
+ }
+ // Generate a FwdRef for the variable and return that.
+ v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
+ s.defvars[b.ID][var_] = v
+ if var_.Op() == ir.ONAME {
+ s.s.addNamedValue(var_.(*ir.Name), v)
+ }
+ s.fwdrefs = append(s.fwdrefs, v)
+ return v
+}
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
new file mode 100644
index 0000000..c794d6f
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -0,0 +1,8369 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/constant"
+ "html"
+ "internal/buildcfg"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+
+ rtabi "internal/abi"
+)
+
+var ssaConfig *ssa.Config
+var ssaCaches []ssa.Cache
+
+var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
+var ssaDir string // optional destination for ssa dump file
+var ssaDumpStdout bool // whether to dump to stdout
+var ssaDumpCFG string // generate CFGs for these phases
+const ssaDumpFile = "ssa.html"
+
+// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
+var ssaDumpInlined []*ir.Func
+
+func DumpInline(fn *ir.Func) {
+ if ssaDump != "" && ssaDump == ir.FuncName(fn) {
+ ssaDumpInlined = append(ssaDumpInlined, fn)
+ }
+}
+
+func InitEnv() {
+ ssaDump = os.Getenv("GOSSAFUNC")
+ ssaDir = os.Getenv("GOSSADIR")
+ if ssaDump != "" {
+ if strings.HasSuffix(ssaDump, "+") {
+ ssaDump = ssaDump[:len(ssaDump)-1]
+ ssaDumpStdout = true
+ }
+ spl := strings.Split(ssaDump, ":")
+ if len(spl) > 1 {
+ ssaDump = spl[0]
+ ssaDumpCFG = spl[1]
+ }
+ }
+}
+
+func InitConfig() {
+ types_ := ssa.NewTypes()
+
+ if Arch.SoftFloat {
+ softfloatInit()
+ }
+
+ // Generate a few pointer types that are uncommon in the frontend but common in the backend.
+ // Caching is disabled in the backend, so generating these here avoids allocations.
+ _ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[types.TINT16]) // *int16
+ _ = types.NewPtr(types.Types[types.TINT64]) // *int64
+ _ = types.NewPtr(types.ErrorType) // *error
+ _ = types.NewPtr(reflectdata.MapType()) // *runtime.hmap
+ _ = types.NewPtr(deferstruct()) // *runtime._defer
+ types.NewPtrCacheEnabled = false
+ ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
+ ssaConfig.Race = base.Flag.Race
+ ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
+
+ // Set up some runtime functions we'll need to call.
+ ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
+ ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
+ ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
+ ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
+ ir.Syms.CgoCheckMemmove = typecheck.LookupRuntimeFunc("cgoCheckMemmove")
+ ir.Syms.CgoCheckPtrWrite = typecheck.LookupRuntimeFunc("cgoCheckPtrWrite")
+ ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
+ ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
+ ir.Syms.Deferprocat = typecheck.LookupRuntimeFunc("deferprocat")
+ ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
+ ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
+ ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
+ ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
+ ir.Syms.GCWriteBarrier[0] = typecheck.LookupRuntimeFunc("gcWriteBarrier1")
+ ir.Syms.GCWriteBarrier[1] = typecheck.LookupRuntimeFunc("gcWriteBarrier2")
+ ir.Syms.GCWriteBarrier[2] = typecheck.LookupRuntimeFunc("gcWriteBarrier3")
+ ir.Syms.GCWriteBarrier[3] = typecheck.LookupRuntimeFunc("gcWriteBarrier4")
+ ir.Syms.GCWriteBarrier[4] = typecheck.LookupRuntimeFunc("gcWriteBarrier5")
+ ir.Syms.GCWriteBarrier[5] = typecheck.LookupRuntimeFunc("gcWriteBarrier6")
+ ir.Syms.GCWriteBarrier[6] = typecheck.LookupRuntimeFunc("gcWriteBarrier7")
+ ir.Syms.GCWriteBarrier[7] = typecheck.LookupRuntimeFunc("gcWriteBarrier8")
+ ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
+ ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+ ir.Syms.InterfaceSwitch = typecheck.LookupRuntimeFunc("interfaceSwitch")
+ ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
+ ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
+ ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
+ ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
+ ir.Syms.Asanread = typecheck.LookupRuntimeFunc("asanread")
+ ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
+ ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
+ ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
+ ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
+ ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
+ ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
+ ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
+ ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
+ ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
+ ir.Syms.Racefuncenter = typecheck.LookupRuntimeFunc("racefuncenter")
+ ir.Syms.Racefuncexit = typecheck.LookupRuntimeFunc("racefuncexit")
+ ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
+ ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
+ ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
+ ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
+ ir.Syms.TypeAssert = typecheck.LookupRuntimeFunc("typeAssert")
+ ir.Syms.WBZero = typecheck.LookupRuntimeFunc("wbZero")
+ ir.Syms.WBMove = typecheck.LookupRuntimeFunc("wbMove")
+ ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
+ ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
+ ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
+ ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
+ ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
+ ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
+ ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
+ ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
+ ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
+ ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
+
+ if Arch.LinkArch.Family == sys.Wasm {
+ BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
+ BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
+ } else {
+ BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
+ BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
+ }
+ if Arch.LinkArch.PtrSize == 4 {
+ ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
+ ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
+ ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
+ ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
+ ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
+ ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
+ ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
+ ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
+ ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
+ ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
+ ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
+ ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
+ ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
+ ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
+ ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
+ ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
+ }
+
+ // Wasm (all asm funcs with special ABIs)
+ ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
+ ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
+ ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
+ ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
+}
+
+// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
+// This is not necessarily the ABI used to call it.
+// Currently (1.17 dev) such a stack map is always ABI0;
+// any ABI wrapper that is present is nosplit, hence a precise
+// stack map is not needed there (the parameters survive only long
+// enough to call the wrapped assembly function).
+// This always returns a freshly copied ABI.
+func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
+ return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
+}
+
+// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
+// Passing a nil function returns the default ABI based on experiment configuration.
+func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
+ if buildcfg.Experiment.RegabiArgs {
+ // Select the ABI based on the function's defining ABI.
+ if fn == nil {
+ return abi1
+ }
+ switch fn.ABI {
+ case obj.ABI0:
+ return abi0
+ case obj.ABIInternal:
+ // TODO(austin): Clean up the nomenclature here.
+ // It's not clear that "abi1" is ABIInternal.
+ return abi1
+ }
+ base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
+ panic("not reachable")
+ }
+
+ a := abi0
+ if fn != nil {
+ if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
+ a = abi1
+ }
+ }
+ return a
+}
+
+// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
+// that is using open-coded defers. This funcdata is used to determine the active
+// defers in a function and execute those defers during panic processing.
+//
+// The funcdata is all encoded in varints (since values will almost always be less than
+// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
+// for stack variables are specified as the number of bytes below varp (pointer to the
+// top of the local variables) for their starting address. The format is:
+//
+// - Offset of the deferBits variable
+// - Offset of the first closure slot (the rest are laid out consecutively).
+func (s *state) emitOpenDeferInfo() {
+ firstOffset := s.openDefers[0].closureNode.FrameOffset()
+
+ // Verify that cmpstackvarlt laid out the slots in order.
+ for i, r := range s.openDefers {
+ have := r.closureNode.FrameOffset()
+ want := firstOffset + int64(i)*int64(types.PtrSize)
+ if have != want {
+ base.FatalfAt(s.curfn.Pos(), "unexpected frame offset for open-coded defer slot #%v: have %v, want %v", i, have, want)
+ }
+ }
+
+ x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
+ x.Set(obj.AttrContentAddressable, true)
+ s.curfn.LSym.Func().OpenCodedDeferInfo = x
+
+ off := 0
+ off = objw.Uvarint(x, off, uint64(-s.deferBitsTemp.FrameOffset()))
+ off = objw.Uvarint(x, off, uint64(-firstOffset))
+}
+
+// buildssa builds an SSA function for fn.
+// worker indicates which of the backend workers is doing the processing.
+func buildssa(fn *ir.Func, worker int) *ssa.Func {
+ name := ir.FuncName(fn)
+
+ abiSelf := abiForFunc(fn, ssaConfig.ABI0, ssaConfig.ABI1)
+
+ printssa := false
+ // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
+ // optionally allows an ABI suffix specification in the GOSSAHASH, e.g. "(*Reader).Reset<0>" etc
+ if strings.Contains(ssaDump, name) { // in all the cases the function name is entirely contained within the GOSSAFUNC string.
+ nameOptABI := name
+ if strings.Contains(ssaDump, ",") { // ABI specification
+ nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
+ } else if strings.HasSuffix(ssaDump, ">") { // if they use the linker syntax instead....
+ l := len(ssaDump)
+ if l >= 3 && ssaDump[l-3] == '<' {
+ nameOptABI = ssa.FuncNameABI(name, abiSelf.Which())
+ ssaDump = ssaDump[:l-3] + "," + ssaDump[l-2:l-1]
+ }
+ }
+ pkgDotName := base.Ctxt.Pkgpath + "." + nameOptABI
+ printssa = nameOptABI == ssaDump || // "(*Reader).Reset"
+ pkgDotName == ssaDump || // "compress/gzip.(*Reader).Reset"
+ strings.HasSuffix(pkgDotName, ssaDump) && strings.HasSuffix(pkgDotName, "/"+ssaDump) // "gzip.(*Reader).Reset"
+ }
+
+ var astBuf *bytes.Buffer
+ if printssa {
+ astBuf = &bytes.Buffer{}
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body)
+ if ssaDumpStdout {
+ fmt.Println("generating SSA for", name)
+ fmt.Print(astBuf.String())
+ }
+ }
+
+ var s state
+ s.pushLine(fn.Pos())
+ defer s.popLine()
+
+ s.hasdefer = fn.HasDefer()
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ s.cgoUnsafeArgs = true
+ }
+ s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)
+
+ if base.Flag.Cfg.Instrumenting && fn.Pragma&ir.Norace == 0 && !fn.Linksym().ABIWrapper() {
+ if !base.Flag.Race || !objabi.LookupPkgSpecial(fn.Sym().Pkg.Path).NoRaceFunc {
+ s.instrumentMemory = true
+ }
+ if base.Flag.Race {
+ s.instrumentEnterExit = true
+ }
+ }
+
+ fe := ssafn{
+ curfn: fn,
+ log: printssa && ssaDumpStdout,
+ }
+ s.curfn = fn
+
+ cache := &ssaCaches[worker]
+ cache.Reset()
+
+ s.f = ssaConfig.NewFunc(&fe, cache)
+ s.config = ssaConfig
+ s.f.Type = fn.Type()
+ s.f.Name = name
+ s.f.PrintOrHtmlSSA = printssa
+ if fn.Pragma&ir.Nosplit != 0 {
+ s.f.NoSplit = true
+ }
+ s.f.ABI0 = ssaConfig.ABI0
+ s.f.ABI1 = ssaConfig.ABI1
+ s.f.ABIDefault = abiForFunc(nil, ssaConfig.ABI0, ssaConfig.ABI1)
+ s.f.ABISelf = abiSelf
+
+ s.panics = map[funcLine]*ssa.Block{}
+ s.softFloat = s.config.SoftFloat
+
+ // Allocate starting block
+ s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
+ s.f.Entry.Pos = fn.Pos()
+
+ if printssa {
+ ssaDF := ssaDumpFile
+ if ssaDir != "" {
+ ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+s.f.NameABI()+".html")
+ ssaD := filepath.Dir(ssaDF)
+ os.MkdirAll(ssaD, 0755)
+ }
+ s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
+ // TODO: generate and print a mapping from nodes to values and blocks
+ dumpSourcesColumn(s.f.HTMLWriter, fn)
+ s.f.HTMLWriter.WriteAST("AST", astBuf)
+ }
+
+ // Allocate starting values
+ s.labels = map[string]*ssaLabel{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
+ s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
+
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
+ switch {
+ case base.Debug.NoOpenDefer != 0:
+ s.hasOpenDefers = false
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
+ // Don't support open-coded defers for 386 ONLY when using shared
+ // libraries, because there is extra code (added by rewriteToUseGot())
+ // preceding the deferreturn/ret code that we don't track correctly.
+ s.hasOpenDefers = false
+ }
+ if s.hasOpenDefers && s.instrumentEnterExit {
+ // Skip doing open defers if we need to instrument function
+ // returns for the race detector, since we will not generate that
+ // code in the case of the extra deferreturn/ret segment.
+ s.hasOpenDefers = false
+ }
+ if s.hasOpenDefers {
+ // Similarly, skip if there are any heap-allocated result
+ // parameters that need to be copied back to their stack slots.
+ for _, f := range s.curfn.Type().Results() {
+ if !f.Nname.(*ir.Name).OnStack() {
+ s.hasOpenDefers = false
+ break
+ }
+ }
+ }
+ if s.hasOpenDefers &&
+ s.curfn.NumReturns*s.curfn.NumDefers > 15 {
+ // Since we are generating defer calls at every exit for
+ // open-coded defers, skip doing open-coded defers if there are
+ // too many returns (especially if there are multiple defers).
+ // Open-coded defers are most important for improving performance
+ // for smaller functions (which don't have many returns).
+ s.hasOpenDefers = false
+ }
+
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
+
+ s.startBlock(s.f.Entry)
+ s.vars[memVar] = s.startmem
+ if s.hasOpenDefers {
+ // Create the deferBits variable and stack slot. deferBits is a
+ // bitmask showing which of the open-coded defers in this function
+ // have been activated.
+ deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
+ deferBitsTemp.SetAddrtaken(true)
+ s.deferBitsTemp = deferBitsTemp
+ // For this value, AuxInt is initialized to zero by default
+ startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
+ s.vars[deferBitsVar] = startDeferBits
+ s.deferBitsAddr = s.addr(deferBitsTemp)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
+ // Make sure that the deferBits stack slot is kept alive (for use
+ // by panics) and stores to deferBits are not eliminated, even if
+ // all checking code on deferBits in the function exit can be
+ // eliminated, because the defer statements were all
+ // unconditional.
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+ }
+
+ var params *abi.ABIParamResultInfo
+ params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
+
+ // The backend's stackframe pass prunes away entries from the fn's
+ // Dcl list, including PARAMOUT nodes that correspond to output
+ // params passed in registers. Walk the Dcl list and capture these
+ // nodes to a side list, so that we'll have them available during
+ // DWARF-gen later on. See issue 48573 for more details.
+ var debugInfo ssa.FuncDebug
+ for _, n := range fn.Dcl {
+ if n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() {
+ debugInfo.RegOutputParams = append(debugInfo.RegOutputParams, n)
+ }
+ }
+ fn.DebugInfo = &debugInfo
+
+ // Generate addresses of local declarations
+ s.decladdrs = map[*ir.Name]*ssa.Value{}
+ for _, n := range fn.Dcl {
+ switch n.Class {
+ case ir.PPARAM:
+ // Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ case ir.PPARAMOUT:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ case ir.PAUTO:
+ // processed at each use, to prevent Addr coming
+ // before the decl.
+ default:
+ s.Fatalf("local variable with class %v unimplemented", n.Class)
+ }
+ }
+
+ s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
+
+ // Populate SSAable arguments.
+ for _, n := range fn.Dcl {
+ if n.Class == ir.PPARAM {
+ if s.canSSA(n) {
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
+ s.vars[n] = v
+ s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
+ } else { // address was taken AND/OR too large for SSA
+ paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
+ if len(paramAssignment.Registers) > 0 {
+ if ssa.CanSSA(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
+ s.store(n.Type(), s.decladdrs[n], v)
+ } else { // Too big for SSA.
+ // Brute force, and early, do a bunch of stores from registers
+ // Note that expand calls knows about this and doesn't trouble itself with larger-than-SSA-able Args in registers.
+ s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
+ }
+ }
+ }
+ }
+ }
+
+ // Populate closure variables.
+ if fn.Needctxt() {
+ clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
+ offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
+ for _, n := range fn.ClosureVars {
+ typ := n.Type()
+ if !n.Byval() {
+ typ = types.NewPtr(typ)
+ }
+
+ offset = types.RoundUp(offset, typ.Alignment())
+ ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
+ offset += typ.Size()
+
+ // If n is a small variable captured by value, promote
+ // it to PAUTO so it can be converted to SSA.
+ //
+ // Note: While we never capture a variable by value if
+ // the user took its address, we may have generated
+ // runtime calls that did (#43701). Since we don't
+ // convert Addrtaken variables to SSA anyway, no point
+ // in promoting them either.
+ if n.Byval() && !n.Addrtaken() && ssa.CanSSA(n.Type()) {
+ n.Class = ir.PAUTO
+ fn.Dcl = append(fn.Dcl, n)
+ s.assign(n, s.load(n.Type(), ptr), false, 0)
+ continue
+ }
+
+ if !n.Byval() {
+ ptr = s.load(typ, ptr)
+ }
+ s.setHeapaddr(fn.Pos(), n, ptr)
+ }
+ }
+
+ // Convert the AST-based IR to the SSA-based IR
+ if s.instrumentEnterExit {
+ s.rtcall(ir.Syms.Racefuncenter, true, nil, s.newValue0(ssa.OpGetCallerPC, types.Types[types.TUINTPTR]))
+ }
+ s.zeroResults()
+ s.paramsToHeap()
+ s.stmtList(fn.Body)
+
+ // fallthrough to exit
+ if s.curBlock != nil {
+ s.pushLine(fn.Endlineno)
+ s.exit()
+ s.popLine()
+ }
+
+ for _, b := range s.f.Blocks {
+ if b.Pos != src.NoXPos {
+ s.updateUnsetPredPos(b)
+ }
+ }
+
+ s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
+
+ s.insertPhis()
+
+ // Main call to ssa package to compile function
+ ssa.Compile(s.f)
+
+ fe.AllocFrame(s.f)
+
+ if len(s.openDefers) != 0 {
+ s.emitOpenDeferInfo()
+ }
+
+ // Record incoming parameter spill information for morestack calls emitted in the assembler.
+ // This is done here, using all the parameters (used, partially used, and unused) because
+ // it mimics the behavior of the former ABI (everything stored) and because it's not 100%
+ // clear if naming conventions are respected in autogenerated code.
+ // TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
+ for _, p := range params.InParams() {
+ typs, offs := p.RegisterTypesAndOffsets()
+ for i, t := range typs {
+ o := offs[i] // offset within parameter
+ fo := p.FrameOffset(params) // offset of parameter in frame
+ reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
+ s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
+ }
+ }
+
+ return s.f
+}
+
+func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
+ typs, offs := paramAssignment.RegisterTypesAndOffsets()
+ for i, t := range typs {
+ if pointersOnly && !t.IsPtrShaped() {
+ continue
+ }
+ r := paramAssignment.Registers[i]
+ o := offs[i]
+ op, reg := ssa.ArgOpAndRegisterFor(r, abi)
+ aux := &ssa.AuxNameOffset{Name: n, Offset: o}
+ v := s.newValue0I(op, t, reg)
+ v.Aux = aux
+ p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
+ s.store(t, p, v)
+ }
+}
+
+// zeroResults zeros the return values at the start of the function.
+// We need to do this very early in the function. Defer might stop a
+// panic and show the return values as they exist at the time of
+// panic. For precise stacks, the garbage collector assumes results
+// are always live, so we need to zero them before any allocations,
+// even allocations to move params/results to the heap.
+func (s *state) zeroResults() {
+ for _, f := range s.curfn.Type().Results() {
+ n := f.Nname.(*ir.Name)
+ if !n.OnStack() {
+ // The local which points to the return value is the
+ // thing that needs zeroing. This is already handled
+ // by a Needzero annotation in plive.go:(*liveness).epilogue.
+ continue
+ }
+ // Zero the stack location containing f.
+ if typ := n.Type(); ssa.CanSSA(typ) {
+ s.assign(n, s.zeroVal(typ), false, 0)
+ } else {
+ if typ.HasPointers() {
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ }
+ s.zero(n.Type(), s.decladdrs[n])
+ }
+ }
+}
+
+// paramsToHeap produces code to allocate memory for heap-escaped parameters
+// and to copy non-result parameters' values from the stack.
+func (s *state) paramsToHeap() {
+ do := func(params []*types.Field) {
+ for _, f := range params {
+ if f.Nname == nil {
+ continue // anonymous or blank parameter
+ }
+ n := f.Nname.(*ir.Name)
+ if ir.IsBlank(n) || n.OnStack() {
+ continue
+ }
+ s.newHeapaddr(n)
+ if n.Class == ir.PPARAM {
+ s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
+ }
+ }
+ }
+
+ typ := s.curfn.Type()
+ do(typ.Recvs())
+ do(typ.Params())
+ do(typ.Results())
+}
+
+// newHeapaddr allocates heap memory for n and sets its heap address.
+func (s *state) newHeapaddr(n *ir.Name) {
+ s.setHeapaddr(n.Pos(), n, s.newObject(n.Type(), nil))
+}
+
+// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
+// and then sets it as n's heap address.
+func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
+ if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
+ base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
+ }
+
+ // Declare variable to hold address.
+ sym := &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg}
+ addr := s.curfn.NewLocal(pos, sym, types.NewPtr(n.Type()))
+ addr.SetUsed(true)
+ types.CalcSize(addr.Type())
+
+ if n.Class == ir.PPARAMOUT {
+ addr.SetIsOutputParamHeapAddr(true)
+ }
+
+ n.Heapaddr = addr
+ s.assign(addr, ptr, false, 0)
+}
+
+// newObject returns an SSA value denoting new(typ).
+func (s *state) newObject(typ *types.Type, rtype *ssa.Value) *ssa.Value {
+ if typ.Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
+ }
+ if rtype == nil {
+ rtype = s.reflectType(typ)
+ }
+ return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, rtype)[0]
+}
+
+func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
+ if !n.Type().IsPtr() {
+ s.Fatalf("expected pointer type: %v", n.Type())
+ }
+ elem, rtypeExpr := n.Type().Elem(), n.ElemRType
+ if count != nil {
+ if !elem.IsArray() {
+ s.Fatalf("expected array type: %v", elem)
+ }
+ elem, rtypeExpr = elem.Elem(), n.ElemElemRType
+ }
+ size := elem.Size()
+ // Casting from larger type to smaller one is ok, so for smallest type, do nothing.
+ if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
+ return
+ }
+ if count == nil {
+ count = s.constInt(types.Types[types.TUINTPTR], 1)
+ }
+ if count.Type.Size() != s.config.PtrSize {
+ s.Fatalf("expected count fit to a uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
+ }
+ var rtype *ssa.Value
+ if rtypeExpr != nil {
+ rtype = s.expr(rtypeExpr)
+ } else {
+ rtype = s.reflectType(elem)
+ }
+ s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, rtype, count)
+}
+
+// reflectType returns an SSA value representing a pointer to typ's
+// reflection type descriptor.
+func (s *state) reflectType(typ *types.Type) *ssa.Value {
+ // TODO(mdempsky): Make this Fatalf under Unified IR; frontend needs
+ // to supply RType expressions.
+ lsym := reflectdata.TypeLinksym(typ)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
+}
+
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
+ // Read sources of target function fn.
+ fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
+ if err != nil {
+ writer.Logf("cannot read sources for function %v: %v", fn, err)
+ }
+
+ // Read sources of inlined functions.
+ var inlFns []*ssa.FuncLines
+ for _, fi := range ssaDumpInlined {
+ elno := fi.Endlineno
+ fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
+ if err != nil {
+ writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
+ continue
+ }
+ inlFns = append(inlFns, fnLines)
+ }
+
+ sort.Sort(ssa.ByTopo(inlFns))
+ if targetFn != nil {
+ inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
+ }
+
+ writer.WriteSources("sources", inlFns)
+}
+
+func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
+ f, err := os.Open(os.ExpandEnv(file))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ var lines []string
+ ln := uint(1)
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() && ln <= end {
+ if ln >= start {
+ lines = append(lines, scanner.Text())
+ }
+ ln++
+ }
+ return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
+}
+
+// updateUnsetPredPos propagates the earliest-value position information for b
+// towards all of b's predecessors that need a position, and recurs on that
+// predecessor if its position is updated. B should have a non-empty position.
+func (s *state) updateUnsetPredPos(b *ssa.Block) {
+ if b.Pos == src.NoXPos {
+ s.Fatalf("Block %s should have a position", b)
+ }
+ bestPos := src.NoXPos
+ for _, e := range b.Preds {
+ p := e.Block()
+ if !p.LackingPos() {
+ continue
+ }
+ if bestPos == src.NoXPos {
+ bestPos = b.Pos
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ if v.Pos != src.NoXPos {
+ // Assume values are still in roughly textual order;
+ // TODO: could also seek minimum position?
+ bestPos = v.Pos
+ break
+ }
+ }
+ }
+ p.Pos = bestPos
+ s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
+ }
+}
+
+// Information about each open-coded defer.
+type openDeferInfo struct {
+ // The node representing the call of the defer
+ n *ir.CallExpr
+ // If defer call is closure call, the address of the argtmp where the
+ // closure is stored.
+ closure *ssa.Value
+ // The node representing the argtmp where the closure is stored - used for
+ // function, method, or interface call, to store a closure that panic
+ // processing can use for this defer.
+ closureNode *ir.Name
+}
+
+type state struct {
+ // configuration (arch) information
+ config *ssa.Config
+
+ // function we're building
+ f *ssa.Func
+
+ // Node for function
+ curfn *ir.Func
+
+ // labels in f
+ labels map[string]*ssaLabel
+
+ // unlabeled break and continue statement tracking
+ breakTo *ssa.Block // current target for plain break statement
+ continueTo *ssa.Block // current target for plain continue statement
+
+ // current location where we're interpreting the AST
+ curBlock *ssa.Block
+
+ // variable assignments in the current block (map from variable symbol to ssa value)
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ // TODO: keep a single varnum map, then make all of these maps slices instead?
+ vars map[ir.Node]*ssa.Value
+
+ // fwdVars are variables that are used before they are defined in the current block.
+ // This map exists just to coalesce multiple references into a single FwdRef op.
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ fwdVars map[ir.Node]*ssa.Value
+
+ // all defined variables at the end of each block. Indexed by block ID.
+ defvars []map[ir.Node]*ssa.Value
+
+ // addresses of PPARAM and PPARAMOUT variables on the stack.
+ decladdrs map[*ir.Name]*ssa.Value
+
+ // starting values. Memory, stack pointer, and globals pointer
+ startmem *ssa.Value
+ sp *ssa.Value
+ sb *ssa.Value
+ // value representing address of where deferBits autotmp is stored
+ deferBitsAddr *ssa.Value
+ deferBitsTemp *ir.Name
+
+ // line number stack. The current line number is top of stack
+ line []src.XPos
+ // the last line number processed; it may have been popped
+ lastPos src.XPos
+
+ // list of panic calls by function name and line number.
+ // Used to deduplicate panic calls.
+ panics map[funcLine]*ssa.Block
+
+ cgoUnsafeArgs bool
+ hasdefer bool // whether the function contains a defer statement
+ softFloat bool
+ hasOpenDefers bool // whether we are doing open-coded defers
+ checkPtrEnabled bool // whether to insert checkptr instrumentation
+ instrumentEnterExit bool // whether to instrument function enter/exit
+ instrumentMemory bool // whether to instrument memory operations
+
+ // If doing open-coded defers, list of info about the defer calls in
+ // scanning order. Hence, at exit we should run these defers in reverse
+ // order of this list
+ openDefers []*openDeferInfo
+ // For open-coded defers, this is the beginning and end blocks of the last
+ // defer exit code that we have generated so far. We use these to share
+ // code between exits if the shareDeferExits option (disabled by default)
+ // is on.
+ lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
+ lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
+ lastDeferCount int // Number of defers encountered at that point
+
+ prevCall *ssa.Value // the previous call; use this to tie results to the call op.
+}
+
+type funcLine struct {
+ f *obj.LSym
+ base *src.PosBase
+ line uint
+}
+
+type ssaLabel struct {
+ target *ssa.Block // block identified by this label
+ breakTarget *ssa.Block // block to break to in control flow node identified by this label
+ continueTarget *ssa.Block // block to continue to in control flow node identified by this label
+}
+
+// label returns the label associated with sym, creating it if necessary.
+func (s *state) label(sym *types.Sym) *ssaLabel {
+ lab := s.labels[sym.Name]
+ if lab == nil {
+ lab = new(ssaLabel)
+ s.labels[sym.Name] = lab
+ }
+ return lab
+}
+
+func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
+func (s *state) Log() bool { return s.f.Log() }
+func (s *state) Fatalf(msg string, args ...interface{}) {
+ s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
+}
+func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
+func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
+
+func ssaMarker(name string) *ir.Name {
+ return ir.NewNameAt(base.Pos, &types.Sym{Name: name}, nil)
+}
+
+var (
+ // marker node for the memory variable
+ memVar = ssaMarker("mem")
+
+ // marker nodes for temporary variables
+ ptrVar = ssaMarker("ptr")
+ lenVar = ssaMarker("len")
+ capVar = ssaMarker("cap")
+ typVar = ssaMarker("typ")
+ okVar = ssaMarker("ok")
+ deferBitsVar = ssaMarker("deferBits")
+ hashVar = ssaMarker("hash")
+)
+
+// startBlock sets the current block we're generating code in to b.
+func (s *state) startBlock(b *ssa.Block) {
+ if s.curBlock != nil {
+ s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
+ }
+ s.curBlock = b
+ s.vars = map[ir.Node]*ssa.Value{}
+ for n := range s.fwdVars {
+ delete(s.fwdVars, n)
+ }
+}
+
+// endBlock marks the end of generating code for the current block.
+// Returns the (former) current block. Returns nil if there is no current
+// block, i.e. if no code flows to the current execution point.
+func (s *state) endBlock() *ssa.Block {
+ b := s.curBlock
+ if b == nil {
+ return nil
+ }
+ for len(s.defvars) <= int(b.ID) {
+ s.defvars = append(s.defvars, nil)
+ }
+ s.defvars[b.ID] = s.vars
+ s.curBlock = nil
+ s.vars = nil
+ if b.LackingPos() {
+ // Empty plain blocks get the line of their successor (handled after all blocks created),
+ // except for increment blocks in For statements (handled in ssa conversion of OFOR),
+ // and for blocks ending in GOTO/BREAK/CONTINUE.
+ b.Pos = src.NoXPos
+ } else {
+ b.Pos = s.lastPos
+ }
+ return b
+}
+
+// pushLine pushes a line number on the line number stack.
+func (s *state) pushLine(line src.XPos) {
+ if !line.IsKnown() {
+ // the frontend may emit node with line number missing,
+ // use the parent line number in this case.
+ line = s.peekPos()
+ if base.Flag.K != 0 {
+ base.Warn("buildssa: unknown position (line 0)")
+ }
+ } else {
+ s.lastPos = line
+ }
+
+ s.line = append(s.line, line)
+}
+
+// popLine pops the top of the line number stack.
+func (s *state) popLine() {
+ s.line = s.line[:len(s.line)-1]
+}
+
+// peekPos peeks the top of the line number stack.
+func (s *state) peekPos() src.XPos {
+ return s.line[len(s.line)-1]
+}
+
+// newValue0 adds a new value with no arguments to the current block.
+func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
+ return s.curBlock.NewValue0(s.peekPos(), op, t)
+}
+
+// newValue0A adds a new value with no arguments and an aux value to the current block.
+func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
+ return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
+}
+
+// newValue0I adds a new value with no arguments and an auxint value to the current block.
+func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
+ return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
+}
+
+// newValue1 adds a new value with one argument to the current block.
+func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
+}
+
+// newValue1A adds a new value with one argument and an aux value to the current block.
+func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue1Apos adds a new value with one argument and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, yes means maybe).
+func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+ }
+ return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
+}
+
+// newValue1I adds a new value with one argument and an auxint value to the current block.
+func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue2 adds a new value with two arguments to the current block.
+func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
+}
+
+// newValue2A adds a new value with two arguments and an aux value to the current block.
+func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue2Apos adds a new value with two arguments and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, yes means maybe).
+func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+ }
+ return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
+}
+
+// newValue2I adds a new value with two arguments and an auxint value to the current block.
+func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue3 adds a new value with three arguments to the current block.
+func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
+}
+
+// newValue3I adds a new value with three arguments and an auxint value to the current block.
+func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3A adds a new value with three arguments and an aux value to the current block.
+func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3Apos adds a new value with three arguments and an aux value to the current block.
+// isStmt determines whether the created values may be a statement or not
+// (i.e., false means never, yes means maybe).
+func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+ }
+ return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue4 adds a new value with four arguments to the current block.
+func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
+}
+
+// newValue4I adds a new value with four arguments and an auxint value to the current block.
+func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
+}
+
+func (s *state) entryBlock() *ssa.Block {
+ b := s.f.Entry
+ if base.Flag.N > 0 && s.curBlock != nil {
+ // If optimizations are off, allocate in current block instead. Since with -N
+ // we're not doing the CSE or tighten passes, putting lots of stuff in the
+ // entry block leads to O(n^2) entries in the live value map during regalloc.
+ // See issue 45897.
+ b = s.curBlock
+ }
+ return b
+}
+
+// entryNewValue0 adds a new value with no arguments to the entry block.
+func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
+ return s.entryBlock().NewValue0(src.NoXPos, op, t)
+}
+
+// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
+func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
+ return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
+}
+
+// entryNewValue1 adds a new value with one argument to the entry block.
+func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
+}
+
+// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
+func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
+}
+
+// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
+}
+
+// entryNewValue2 adds a new value with two arguments to the entry block.
+func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
+}
+
+// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
+func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
+}
+
+// const* routines add a new const value to the entry block.
+func (s *state) constSlice(t *types.Type) *ssa.Value {
+ return s.f.ConstSlice(t)
+}
+func (s *state) constInterface(t *types.Type) *ssa.Value {
+ return s.f.ConstInterface(t)
+}
+func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
+func (s *state) constEmptyString(t *types.Type) *ssa.Value {
+ return s.f.ConstEmptyString(t)
+}
+func (s *state) constBool(c bool) *ssa.Value {
+ return s.f.ConstBool(types.Types[types.TBOOL], c)
+}
+func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
+ return s.f.ConstInt8(t, c)
+}
+func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
+ return s.f.ConstInt16(t, c)
+}
+func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
+ return s.f.ConstInt32(t, c)
+}
+func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
+ return s.f.ConstInt64(t, c)
+}
+func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
+ return s.f.ConstFloat32(t, c)
+}
+func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
+ return s.f.ConstFloat64(t, c)
+}
+func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
+ if s.config.PtrSize == 8 {
+ return s.constInt64(t, c)
+ }
+ if int64(int32(c)) != c {
+ s.Fatalf("integer constant too big %d", c)
+ }
+ return s.constInt32(t, int32(c))
+}
+func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
+ return s.f.ConstOffPtrSP(t, c, s.sp)
+}
+
+// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
+// soft-float runtime function instead (when emitting soft-float code).
+func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg); ok {
+ return c
+ }
+ }
+ return s.newValue1(op, t, arg)
+}
+func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg0, arg1); ok {
+ return c
+ }
+ }
+ return s.newValue2(op, t, arg0, arg1)
+}
+
+type instrumentKind uint8
+
+const (
+ instrumentRead = iota
+ instrumentWrite
+ instrumentMove
+)
+
+func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+ s.instrument2(t, addr, nil, kind)
+}
+
+// instrumentFields instruments a read/write operation on addr.
+// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments
+// operation for each field, instead of for the whole struct.
+func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+ if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() {
+ s.instrument(t, addr, kind)
+ return
+ }
+ for _, f := range t.Fields() {
+ if f.Sym.IsBlank() {
+ continue
+ }
+ offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
+ s.instrumentFields(f.Type, offptr, kind)
+ }
+}
+
+func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
+ if base.Flag.MSan {
+ s.instrument2(t, dst, src, instrumentMove)
+ } else {
+ s.instrument(t, src, instrumentRead)
+ s.instrument(t, dst, instrumentWrite)
+ }
+}
+
+func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
+ if !s.instrumentMemory {
+ return
+ }
+
+ w := t.Size()
+ if w == 0 {
+ return // can't race on zero-sized things
+ }
+
+ if ssa.IsSanitizerSafeAddr(addr) {
+ return
+ }
+
+ var fn *obj.LSym
+ needWidth := false
+
+ if addr2 != nil && kind != instrumentMove {
+ panic("instrument2: non-nil addr2 for non-move instrumentation")
+ }
+
+ if base.Flag.MSan {
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Msanread
+ case instrumentWrite:
+ fn = ir.Syms.Msanwrite
+ case instrumentMove:
+ fn = ir.Syms.Msanmove
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
+ // for composite objects we have to write every address
+ // because a write might happen to any subobject.
+ // composites with only one element don't have subobjects, though.
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Racereadrange
+ case instrumentWrite:
+ fn = ir.Syms.Racewriterange
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else if base.Flag.Race {
+ // for non-composite objects we can write just the start
+ // address, as any write must write the first byte.
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Raceread
+ case instrumentWrite:
+ fn = ir.Syms.Racewrite
+ default:
+ panic("unreachable")
+ }
+ } else if base.Flag.ASan {
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Asanread
+ case instrumentWrite:
+ fn = ir.Syms.Asanwrite
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else {
+ panic("unreachable")
+ }
+
+ args := []*ssa.Value{addr}
+ if addr2 != nil {
+ args = append(args, addr2)
+ }
+ if needWidth {
+ args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
+ }
+ s.rtcall(fn, true, nil, args...)
+}
+
+func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
+ s.instrumentFields(t, src, instrumentRead)
+ return s.rawLoad(t, src)
+}
+
+func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpLoad, t, src, s.mem())
+}
+
+func (s *state) store(t *types.Type, dst, val *ssa.Value) {
+ s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+}
+
+func (s *state) zero(t *types.Type, dst *ssa.Value) {
+ s.instrument(t, dst, instrumentWrite)
+ store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
+ store.Aux = t
+ s.vars[memVar] = store
+}
+
+func (s *state) move(t *types.Type, dst, src *ssa.Value) {
+ s.moveWhichMayOverlap(t, dst, src, false)
+}
+func (s *state) moveWhichMayOverlap(t *types.Type, dst, src *ssa.Value, mayOverlap bool) {
+ s.instrumentMove(t, dst, src)
+ if mayOverlap && t.IsArray() && t.NumElem() > 1 && !ssa.IsInlinableMemmove(dst, src, t.Size(), s.f.Config) {
+ // Normally, when moving Go values of type T from one location to another,
+ // we don't need to worry about partial overlaps. The two Ts must either be
+ // in disjoint (nonoverlapping) memory or in exactly the same location.
+ // There are 2 cases where this isn't true:
+ // 1) Using unsafe you can arrange partial overlaps.
+ // 2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array.
+ // https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer
+ // This feature can be used to construct partial overlaps of array types.
+ // var a [3]int
+ // p := (*[2]int)(a[:])
+ // q := (*[2]int)(a[1:])
+ // *p = *q
+ // We don't care about solving 1. Or at least, we haven't historically
+ // and no one has complained.
+ // For 2, we need to ensure that if there might be partial overlap,
+ // then we can't use OpMove; we must use memmove instead.
+ // (memmove handles partial overlap by copying in the correct
+ // direction. OpMove does not.)
+ //
+ // Note that we have to be careful here not to introduce a call when
+ // we're marshaling arguments to a call or unmarshaling results from a call.
+ // Cases where this is happening must pass mayOverlap to false.
+ // (Currently this only happens when unmarshaling results of a call.)
+ if t.HasPointers() {
+ s.rtcall(ir.Syms.Typedmemmove, true, nil, s.reflectType(t), dst, src)
+ // We would have otherwise implemented this move with straightline code,
+ // including a write barrier. Pretend we issue a write barrier here,
+ // so that the write barrier tests work. (Otherwise they'd need to know
+ // the details of IsInlineableMemmove.)
+ s.curfn.SetWBPos(s.peekPos())
+ } else {
+ s.rtcall(ir.Syms.Memmove, true, nil, dst, src, s.constInt(types.Types[types.TUINTPTR], t.Size()))
+ }
+ ssa.LogLargeCopy(s.f.Name, s.peekPos(), t.Size())
+ return
+ }
+ store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
+ store.Aux = t
+ s.vars[memVar] = store
+}
+
+// stmtList converts the statement list n to SSA and adds it to s.
+func (s *state) stmtList(l ir.Nodes) {
+ for _, n := range l {
+ s.stmt(n)
+ }
+}
+
+// stmt converts the statement n to SSA and adds it to s.
+func (s *state) stmt(n ir.Node) {
+ s.pushLine(n.Pos())
+ defer s.popLine()
+
+ // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
+ // then this code is dead. Stop here.
+ if s.curBlock == nil && n.Op() != ir.OLABEL {
+ return
+ }
+
+ s.stmtList(n.Init())
+ switch n.Op() {
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ s.stmtList(n.List)
+
+ case ir.OFALL: // no-op
+
+ // Expression statements
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if ir.IsIntrinsicCall(n) {
+ s.intrinsicCall(n)
+ return
+ }
+ fallthrough
+
+ case ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ s.callResult(n, callNormal)
+ if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME && n.Fun.(*ir.Name).Class == ir.PFUNC {
+ if fn := n.Fun.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.Fun.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap" || fn == "panicunsafeslicelen" || fn == "panicunsafeslicenilptr" || fn == "panicunsafestringlen" || fn == "panicunsafestringnilptr") {
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(m)
+ // TODO: never rewrite OPANIC to OCALLFUNC in the
+ // first place. Need to wait until all backends
+ // go through SSA.
+ }
+ }
+ case ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ if base.Debug.Defer > 0 {
+ var defertype string
+ if s.hasOpenDefers {
+ defertype = "open-coded"
+ } else if n.Esc() == ir.EscNever {
+ defertype = "stack-allocated"
+ } else {
+ defertype = "heap-allocated"
+ }
+ base.WarnfAt(n.Pos(), "%s defer", defertype)
+ }
+ if s.hasOpenDefers {
+ s.openDeferRecord(n.Call.(*ir.CallExpr))
+ } else {
+ d := callDefer
+ if n.Esc() == ir.EscNever && n.DeferAt == nil {
+ d = callDeferStack
+ }
+ s.call(n.Call.(*ir.CallExpr), d, false, n.DeferAt)
+ }
+ case ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ s.callResult(n.Call.(*ir.CallExpr), callGo)
+
+ case ir.OAS2DOTTYPE:
+ n := n.(*ir.AssignListStmt)
+ var res, resok *ssa.Value
+ if n.Rhs[0].Op() == ir.ODOTTYPE2 {
+ res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
+ } else {
+ res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
+ }
+ deref := false
+ if !ssa.CanSSA(n.Rhs[0].Type()) {
+ if res.Op != ssa.OpLoad {
+ s.Fatalf("dottype of non-load")
+ }
+ mem := s.mem()
+ if res.Args[1] != mem {
+ s.Fatalf("memory no longer live from 2-result dottype load")
+ }
+ deref = true
+ res = res.Args[0]
+ }
+ s.assign(n.Lhs[0], res, deref, 0)
+ s.assign(n.Lhs[1], resok, false, 0)
+ return
+
+ case ir.OAS2FUNC:
+ // We come here only when it is an intrinsic call returning two values.
+ n := n.(*ir.AssignListStmt)
+ call := n.Rhs[0].(*ir.CallExpr)
+ if !ir.IsIntrinsicCall(call) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
+ }
+ v := s.intrinsicCall(call)
+ v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
+ s.assign(n.Lhs[0], v1, false, 0)
+ s.assign(n.Lhs[1], v2, false, 0)
+ return
+
+ case ir.ODCL:
+ n := n.(*ir.Decl)
+ if v := n.X; v.Esc() == ir.EscHeap {
+ s.newHeapaddr(v)
+ }
+
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ sym := n.Label
+ if sym.IsBlank() {
+ // Nothing to do because the label isn't targetable. See issue 52278.
+ break
+ }
+ lab := s.label(sym)
+
+ // The label might already have a target block via a goto.
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+
+ // Go to that label.
+ // (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
+ if s.curBlock != nil {
+ b := s.endBlock()
+ b.AddEdgeTo(lab.target)
+ }
+ s.startBlock(lab.target)
+
+ case ir.OGOTO:
+ n := n.(*ir.BranchStmt)
+ sym := n.Label
+
+ lab := s.label(sym)
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+
+ b := s.endBlock()
+ b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+ b.AddEdgeTo(lab.target)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if n.X == n.Y && n.X.Op() == ir.ONAME {
+ // An x=x assignment. No point in doing anything
+ // here. In addition, skipping this assignment
+ // prevents generating:
+ // VARDEF x
+ // COPY x -> x
+ // which is bad because x is incorrectly considered
+ // dead before the vardef. See issue #14904.
+ return
+ }
+
+ // mayOverlap keeps track of whether the LHS and RHS might
+ // refer to partially overlapping memory. Partial overlapping can
+ // only happen for arrays, see the comment in moveWhichMayOverlap.
+ //
+ // If both sides of the assignment are not dereferences, then partial
+ // overlap can't happen. Partial overlap can only occur only when the
+ // arrays referenced are strictly smaller parts of the same base array.
+ // If one side of the assignment is a full array, then partial overlap
+ // can't happen. (The arrays are either disjoint or identical.)
+ mayOverlap := n.X.Op() == ir.ODEREF && (n.Y != nil && n.Y.Op() == ir.ODEREF)
+ if n.Y != nil && n.Y.Op() == ir.ODEREF {
+ p := n.Y.(*ir.StarExpr).X
+ for p.Op() == ir.OCONVNOP {
+ p = p.(*ir.ConvExpr).X
+ }
+ if p.Op() == ir.OSPTR && p.(*ir.UnaryExpr).X.Type().IsString() {
+ // Pointer fields of strings point to unmodifiable memory.
+ // That memory can't overlap with the memory being written.
+ mayOverlap = false
+ }
+ }
+
+ // Evaluate RHS.
+ rhs := n.Y
+ if rhs != nil {
+ switch rhs.Op() {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !ir.IsZero(rhs) {
+ s.Fatalf("literal with nonzero value in SSA: %v", rhs)
+ }
+ rhs = nil
+ case ir.OAPPEND:
+ rhs := rhs.(*ir.CallExpr)
+ // Check whether we're writing the result of an append back to the same slice.
+ // If so, we handle it specially to avoid write barriers on the fast
+ // (non-growth) path.
+ if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
+ break
+ }
+ // If the slice can be SSA'd, it'll be on the stack,
+ // so there will be no write barriers,
+ // so there's no need to attempt to prevent them.
+ if s.canSSA(n.X) {
+ if base.Debug.Append > 0 { // replicating old diagnostic message
+ base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
+ }
+ break
+ }
+ if base.Debug.Append > 0 {
+ base.WarnfAt(n.Pos(), "append: len-only update")
+ }
+ s.append(rhs, true)
+ return
+ }
+ }
+
+ if ir.IsBlank(n.X) {
+ // _ = rhs
+ // Just evaluate rhs for side-effects.
+ if rhs != nil {
+ s.expr(rhs)
+ }
+ return
+ }
+
+ var t *types.Type
+ if n.Y != nil {
+ t = n.Y.Type()
+ } else {
+ t = n.X.Type()
+ }
+
+ var r *ssa.Value
+ deref := !ssa.CanSSA(t)
+ if deref {
+ if rhs == nil {
+ r = nil // Signal assign to use OpZero.
+ } else {
+ r = s.addr(rhs)
+ }
+ } else {
+ if rhs == nil {
+ r = s.zeroVal(t)
+ } else {
+ r = s.expr(rhs)
+ }
+ }
+
+ var skip skipMask
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
+ // We're assigning a slicing operation back to its source.
+ // Don't write back fields we aren't changing. See issue #14855.
+ rhs := rhs.(*ir.SliceExpr)
+ i, j, k := rhs.Low, rhs.High, rhs.Max
+ if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
+ // [0:...] is the same as [:...]
+ i = nil
+ }
+ // TODO: detect defaults for len/cap also.
+ // Currently doesn't really work because (*p)[:len(*p)] appears here as:
+ // tmp = len(*p)
+ // (*p)[:tmp]
+ // if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
+ // j = nil
+ // }
+ // if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
+ // k = nil
+ // }
+ if i == nil {
+ skip |= skipPtr
+ if j == nil {
+ skip |= skipLen
+ }
+ if k == nil {
+ skip |= skipCap
+ }
+ }
+ }
+
+ s.assignWhichMayOverlap(n.X, r, deref, skip, mayOverlap)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ if ir.IsConst(n.Cond, constant.Bool) {
+ s.stmtList(n.Cond.Init())
+ if ir.BoolVal(n.Cond) {
+ s.stmtList(n.Body)
+ } else {
+ s.stmtList(n.Else)
+ }
+ break
+ }
+
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ var likely int8
+ if n.Likely {
+ likely = 1
+ }
+ var bThen *ssa.Block
+ if len(n.Body) != 0 {
+ bThen = s.f.NewBlock(ssa.BlockPlain)
+ } else {
+ bThen = bEnd
+ }
+ var bElse *ssa.Block
+ if len(n.Else) != 0 {
+ bElse = s.f.NewBlock(ssa.BlockPlain)
+ } else {
+ bElse = bEnd
+ }
+ s.condBranch(n.Cond, bThen, bElse, likely)
+
+ if len(n.Body) != 0 {
+ s.startBlock(bThen)
+ s.stmtList(n.Body)
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bEnd)
+ }
+ }
+ if len(n.Else) != 0 {
+ s.startBlock(bElse)
+ s.stmtList(n.Else)
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bEnd)
+ }
+ }
+ s.startBlock(bEnd)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ s.stmtList(n.Results)
+ b := s.exit()
+ b.Pos = s.lastPos.WithIsStmt()
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ s.callResult(n.Call, callTail)
+ call := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity.
+ b.SetControl(call)
+
+ case ir.OCONTINUE, ir.OBREAK:
+ n := n.(*ir.BranchStmt)
+ var to *ssa.Block
+ if n.Label == nil {
+ // plain break/continue
+ switch n.Op() {
+ case ir.OCONTINUE:
+ to = s.continueTo
+ case ir.OBREAK:
+ to = s.breakTo
+ }
+ } else {
+ // labeled break/continue; look up the target
+ sym := n.Label
+ lab := s.label(sym)
+ switch n.Op() {
+ case ir.OCONTINUE:
+ to = lab.continueTarget
+ case ir.OBREAK:
+ to = lab.breakTarget
+ }
+ }
+
+ b := s.endBlock()
+ b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+ b.AddEdgeTo(to)
+
+ case ir.OFOR:
+ // OFOR: for Ninit; Left; Right { Nbody }
+ // cond (Left); body (Nbody); incr (Right)
+ n := n.(*ir.ForStmt)
+ base.Assert(!n.DistinctVars) // Should all be rewritten before escape analysis
+ bCond := s.f.NewBlock(ssa.BlockPlain)
+ bBody := s.f.NewBlock(ssa.BlockPlain)
+ bIncr := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ // ensure empty for loops have correct position; issue #30167
+ bBody.Pos = n.Pos()
+
+ // first, jump to condition test
+ b := s.endBlock()
+ b.AddEdgeTo(bCond)
+
+ // generate code to test condition
+ s.startBlock(bCond)
+ if n.Cond != nil {
+ s.condBranch(n.Cond, bBody, bEnd, 1)
+ } else {
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ b.AddEdgeTo(bBody)
+ }
+
+ // set up for continue/break in body
+ prevContinue := s.continueTo
+ prevBreak := s.breakTo
+ s.continueTo = bIncr
+ s.breakTo = bEnd
+ var lab *ssaLabel
+ if sym := n.Label; sym != nil {
+ // labeled for loop
+ lab = s.label(sym)
+ lab.continueTarget = bIncr
+ lab.breakTarget = bEnd
+ }
+
+ // generate body
+ s.startBlock(bBody)
+ s.stmtList(n.Body)
+
+ // tear down continue/break
+ s.continueTo = prevContinue
+ s.breakTo = prevBreak
+ if lab != nil {
+ lab.continueTarget = nil
+ lab.breakTarget = nil
+ }
+
+ // done with body, goto incr
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bIncr)
+ }
+
+ // generate incr
+ s.startBlock(bIncr)
+ if n.Post != nil {
+ s.stmt(n.Post)
+ }
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bCond)
+ // It can happen that bIncr ends in a block containing only VARKILL,
+ // and that muddles the debugging experience.
+ if b.Pos == src.NoXPos {
+ b.Pos = bCond.Pos
+ }
+ }
+
+ s.startBlock(bEnd)
+
+ case ir.OSWITCH, ir.OSELECT:
+ // These have been mostly rewritten by the front end into their Nbody fields.
+ // Our main task is to correctly hook up any break statements.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ prevBreak := s.breakTo
+ s.breakTo = bEnd
+ var sym *types.Sym
+ var body ir.Nodes
+ if n.Op() == ir.OSWITCH {
+ n := n.(*ir.SwitchStmt)
+ sym = n.Label
+ body = n.Compiled
+ } else {
+ n := n.(*ir.SelectStmt)
+ sym = n.Label
+ body = n.Compiled
+ }
+
+ var lab *ssaLabel
+ if sym != nil {
+ // labeled
+ lab = s.label(sym)
+ lab.breakTarget = bEnd
+ }
+
+ // generate body code
+ s.stmtList(body)
+
+ s.breakTo = prevBreak
+ if lab != nil {
+ lab.breakTarget = nil
+ }
+
+ // walk adds explicit OBREAK nodes to the end of all reachable code paths.
+ // If we still have a current block here, then mark it unreachable.
+ if s.curBlock != nil {
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(m)
+ }
+ s.startBlock(bEnd)
+
+ case ir.OJUMPTABLE:
+ n := n.(*ir.JumpTableStmt)
+
+ // Make blocks we'll need.
+ jt := s.f.NewBlock(ssa.BlockJumpTable)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ // The only thing that needs evaluating is the index we're looking up.
+ idx := s.expr(n.Idx)
+ unsigned := idx.Type.IsUnsigned()
+
+ // Extend so we can do everything in uintptr arithmetic.
+ t := types.Types[types.TUINTPTR]
+ idx = s.conv(nil, idx, idx.Type, t)
+
+ // The ending condition for the current block decides whether we'll use
+ // the jump table at all.
+ // We check that min <= idx <= max and jump around the jump table
+ // if that test fails.
+ // We implement min <= idx <= max with 0 <= idx-min <= max-min, because
+ // we'll need idx-min anyway as the control value for the jump table.
+ var min, max uint64
+ if unsigned {
+ min, _ = constant.Uint64Val(n.Cases[0])
+ max, _ = constant.Uint64Val(n.Cases[len(n.Cases)-1])
+ } else {
+ mn, _ := constant.Int64Val(n.Cases[0])
+ mx, _ := constant.Int64Val(n.Cases[len(n.Cases)-1])
+ min = uint64(mn)
+ max = uint64(mx)
+ }
+ // Compare idx-min with max-min, to see if we can use the jump table.
+ idx = s.newValue2(s.ssaOp(ir.OSUB, t), t, idx, s.uintptrConstant(min))
+ width := s.uintptrConstant(max - min)
+ cmp := s.newValue2(s.ssaOp(ir.OLE, t), types.Types[types.TBOOL], idx, width)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.AddEdgeTo(jt) // in range - use jump table
+ b.AddEdgeTo(bEnd) // out of range - no case in the jump table will trigger
+ b.Likely = ssa.BranchLikely // TODO: assumes missing the table entirely is unlikely. True?
+
+ // Build jump table block.
+ s.startBlock(jt)
+ jt.Pos = n.Pos()
+ if base.Flag.Cfg.SpectreIndex {
+ idx = s.newValue2(ssa.OpSpectreSliceIndex, t, idx, width)
+ }
+ jt.SetControl(idx)
+
+ // Figure out where we should go for each index in the table.
+ table := make([]*ssa.Block, max-min+1)
+ for i := range table {
+ table[i] = bEnd // default target
+ }
+ for i := range n.Targets {
+ c := n.Cases[i]
+ lab := s.label(n.Targets[i])
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+ var val uint64
+ if unsigned {
+ val, _ = constant.Uint64Val(c)
+ } else {
+ vl, _ := constant.Int64Val(c)
+ val = uint64(vl)
+ }
+ // Overwrite the default target.
+ table[val-min] = lab.target
+ }
+ for _, t := range table {
+ jt.AddEdgeTo(t)
+ }
+ s.endBlock()
+
+ s.startBlock(bEnd)
+
+ case ir.OINTERFACESWITCH:
+ n := n.(*ir.InterfaceSwitchStmt)
+ typs := s.f.Config.Types
+
+ t := s.expr(n.RuntimeType)
+ h := s.expr(n.Hash)
+ d := s.newValue1A(ssa.OpAddr, typs.BytePtr, n.Descriptor, s.sb)
+
+ // Check the cache first.
+ var merge *ssa.Block
+ if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
+ // Note: we can only use the cache if we have the right atomic load instruction.
+ // Double-check that here.
+ if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
+ s.Fatalf("atomic load not available")
+ }
+ merge = s.f.NewBlock(ssa.BlockPlain)
+ cacheHit := s.f.NewBlock(ssa.BlockPlain)
+ cacheMiss := s.f.NewBlock(ssa.BlockPlain)
+ loopHead := s.f.NewBlock(ssa.BlockPlain)
+ loopBody := s.f.NewBlock(ssa.BlockPlain)
+
+ // Pick right size ops.
+ var mul, and, add, zext ssa.Op
+ if s.config.PtrSize == 4 {
+ mul = ssa.OpMul32
+ and = ssa.OpAnd32
+ add = ssa.OpAdd32
+ zext = ssa.OpCopy
+ } else {
+ mul = ssa.OpMul64
+ and = ssa.OpAnd64
+ add = ssa.OpAdd64
+ zext = ssa.OpZeroExt32to64
+ }
+
+ // Load cache pointer out of descriptor, with an atomic load so
+ // we ensure that we see a fully written cache.
+ atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
+ cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
+
+ // Initialize hash variable.
+ s.vars[hashVar] = s.newValue1(zext, typs.Uintptr, h)
+
+ // Load mask from cache.
+ mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
+ // Jump to loop head.
+ b := s.endBlock()
+ b.AddEdgeTo(loopHead)
+
+ // At loop head, get pointer to the cache entry.
+ // e := &cache.Entries[hash&mask]
+ s.startBlock(loopHead)
+ entries := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, s.uintptrConstant(uint64(s.config.PtrSize)))
+ idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
+ idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(3*s.config.PtrSize)))
+ e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, entries, idx)
+ // hash++
+ s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
+
+ // Look for a cache hit.
+ // if e.Typ == t { goto hit }
+ eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
+ cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, t, eTyp)
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp1)
+ b.AddEdgeTo(cacheHit)
+ b.AddEdgeTo(loopBody)
+
+ // Look for an empty entry, the tombstone for this hash table.
+ // if e.Typ == nil { goto miss }
+ s.startBlock(loopBody)
+ cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp2)
+ b.AddEdgeTo(cacheMiss)
+ b.AddEdgeTo(loopHead)
+
+ // On a hit, load the data fields of the cache entry.
+ // Case = e.Case
+ // Itab = e.Itab
+ s.startBlock(cacheHit)
+ eCase := s.newValue2(ssa.OpLoad, typs.Int, s.newValue1I(ssa.OpOffPtr, typs.IntPtr, s.config.PtrSize, e), s.mem())
+ eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, 2*s.config.PtrSize, e), s.mem())
+ s.assign(n.Case, eCase, false, 0)
+ s.assign(n.Itab, eItab, false, 0)
+ b = s.endBlock()
+ b.AddEdgeTo(merge)
+
+ // On a miss, call into the runtime to get the answer.
+ s.startBlock(cacheMiss)
+ }
+
+ r := s.rtcall(ir.Syms.InterfaceSwitch, true, []*types.Type{typs.Int, typs.BytePtr}, d, t)
+ s.assign(n.Case, r[0], false, 0)
+ s.assign(n.Itab, r[1], false, 0)
+
+ if merge != nil {
+ // Cache hits merge in here.
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ b.AddEdgeTo(merge)
+ s.startBlock(merge)
+ }
+
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ p := s.expr(n.X)
+ _ = s.nilCheck(p)
+ // TODO: check that throwing away the nilcheck result is ok.
+
+ case ir.OINLMARK:
+ n := n.(*ir.InlineMarkStmt)
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
+
+ default:
+ s.Fatalf("unhandled stmt %v", n.Op())
+ }
+}
+
+// If true, share as many open-coded defer exits as possible (with the downside of
+// worse line-number information)
+const shareDeferExits = false
+
+// exit processes any code that needs to be generated just before returning.
+// It returns a BlockRet block that ends the control flow. Its control value
+// will be set to the final memory state.
+func (s *state) exit() *ssa.Block {
+ if s.hasdefer {
+ if s.hasOpenDefers {
+ if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
+ if s.curBlock.Kind != ssa.BlockPlain {
+ panic("Block for an exit should be BlockPlain")
+ }
+ s.curBlock.AddEdgeTo(s.lastDeferExit)
+ s.endBlock()
+ return s.lastDeferFinalBlock
+ }
+ s.openDeferExit()
+ } else {
+ s.rtcall(ir.Syms.Deferreturn, true, nil)
+ }
+ }
+
+ // Do actual return.
+ // These currently turn into self-copies (in many cases).
+ resultFields := s.curfn.Type().Results()
+ results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
+ // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
+ for i, f := range resultFields {
+ n := f.Nname.(*ir.Name)
+ if s.canSSA(n) { // result is in some SSA variable
+ if !n.IsOutputParamInRegisters() && n.Type().HasPointers() {
+ // We are about to store to the result slot.
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ }
+ results[i] = s.variable(n, n.Type())
+ } else if !n.OnStack() { // result is actually heap allocated
+ // We are about to copy the in-heap result to the result slot.
+ if n.Type().HasPointers() {
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ }
+ ha := s.expr(n.Heapaddr)
+ s.instrumentFields(n.Type(), ha, instrumentRead)
+ results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
+ } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
+ // Before register ABI this ought to be a self-move, home=dest,
+ // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed)
+ // No VarDef, as the result slot is already holding live value.
+ results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
+ }
+ }
+
+ // In -race mode, we need to call racefuncexit.
+ // Note: This has to happen after we load any heap-allocated results,
+ // otherwise races will be attributed to the caller instead.
+ if s.instrumentEnterExit {
+ s.rtcall(ir.Syms.Racefuncexit, true, nil)
+ }
+
+ results[len(results)-1] = s.mem()
+ m := s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
+ m.AddArgs(results...)
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockRet
+ b.SetControl(m)
+ if s.hasdefer && s.hasOpenDefers {
+ s.lastDeferFinalBlock = b
+ }
+ return b
+}
+
+type opAndType struct {
+ op ir.Op
+ etype types.Kind
+}
+
+var opToSSA = map[opAndType]ssa.Op{
+ {ir.OADD, types.TINT8}: ssa.OpAdd8,
+ {ir.OADD, types.TUINT8}: ssa.OpAdd8,
+ {ir.OADD, types.TINT16}: ssa.OpAdd16,
+ {ir.OADD, types.TUINT16}: ssa.OpAdd16,
+ {ir.OADD, types.TINT32}: ssa.OpAdd32,
+ {ir.OADD, types.TUINT32}: ssa.OpAdd32,
+ {ir.OADD, types.TINT64}: ssa.OpAdd64,
+ {ir.OADD, types.TUINT64}: ssa.OpAdd64,
+ {ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
+ {ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
+
+ {ir.OSUB, types.TINT8}: ssa.OpSub8,
+ {ir.OSUB, types.TUINT8}: ssa.OpSub8,
+ {ir.OSUB, types.TINT16}: ssa.OpSub16,
+ {ir.OSUB, types.TUINT16}: ssa.OpSub16,
+ {ir.OSUB, types.TINT32}: ssa.OpSub32,
+ {ir.OSUB, types.TUINT32}: ssa.OpSub32,
+ {ir.OSUB, types.TINT64}: ssa.OpSub64,
+ {ir.OSUB, types.TUINT64}: ssa.OpSub64,
+ {ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
+ {ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
+
+ {ir.ONOT, types.TBOOL}: ssa.OpNot,
+
+ {ir.ONEG, types.TINT8}: ssa.OpNeg8,
+ {ir.ONEG, types.TUINT8}: ssa.OpNeg8,
+ {ir.ONEG, types.TINT16}: ssa.OpNeg16,
+ {ir.ONEG, types.TUINT16}: ssa.OpNeg16,
+ {ir.ONEG, types.TINT32}: ssa.OpNeg32,
+ {ir.ONEG, types.TUINT32}: ssa.OpNeg32,
+ {ir.ONEG, types.TINT64}: ssa.OpNeg64,
+ {ir.ONEG, types.TUINT64}: ssa.OpNeg64,
+ {ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
+ {ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
+
+ {ir.OBITNOT, types.TINT8}: ssa.OpCom8,
+ {ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
+ {ir.OBITNOT, types.TINT16}: ssa.OpCom16,
+ {ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
+ {ir.OBITNOT, types.TINT32}: ssa.OpCom32,
+ {ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
+ {ir.OBITNOT, types.TINT64}: ssa.OpCom64,
+ {ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
+
+ {ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
+ {ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
+ {ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
+ {ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
+
+ {ir.OMUL, types.TINT8}: ssa.OpMul8,
+ {ir.OMUL, types.TUINT8}: ssa.OpMul8,
+ {ir.OMUL, types.TINT16}: ssa.OpMul16,
+ {ir.OMUL, types.TUINT16}: ssa.OpMul16,
+ {ir.OMUL, types.TINT32}: ssa.OpMul32,
+ {ir.OMUL, types.TUINT32}: ssa.OpMul32,
+ {ir.OMUL, types.TINT64}: ssa.OpMul64,
+ {ir.OMUL, types.TUINT64}: ssa.OpMul64,
+ {ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
+ {ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
+
+ {ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
+ {ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
+
+ {ir.ODIV, types.TINT8}: ssa.OpDiv8,
+ {ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
+ {ir.ODIV, types.TINT16}: ssa.OpDiv16,
+ {ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
+ {ir.ODIV, types.TINT32}: ssa.OpDiv32,
+ {ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
+ {ir.ODIV, types.TINT64}: ssa.OpDiv64,
+ {ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
+
+ {ir.OMOD, types.TINT8}: ssa.OpMod8,
+ {ir.OMOD, types.TUINT8}: ssa.OpMod8u,
+ {ir.OMOD, types.TINT16}: ssa.OpMod16,
+ {ir.OMOD, types.TUINT16}: ssa.OpMod16u,
+ {ir.OMOD, types.TINT32}: ssa.OpMod32,
+ {ir.OMOD, types.TUINT32}: ssa.OpMod32u,
+ {ir.OMOD, types.TINT64}: ssa.OpMod64,
+ {ir.OMOD, types.TUINT64}: ssa.OpMod64u,
+
+ {ir.OAND, types.TINT8}: ssa.OpAnd8,
+ {ir.OAND, types.TUINT8}: ssa.OpAnd8,
+ {ir.OAND, types.TINT16}: ssa.OpAnd16,
+ {ir.OAND, types.TUINT16}: ssa.OpAnd16,
+ {ir.OAND, types.TINT32}: ssa.OpAnd32,
+ {ir.OAND, types.TUINT32}: ssa.OpAnd32,
+ {ir.OAND, types.TINT64}: ssa.OpAnd64,
+ {ir.OAND, types.TUINT64}: ssa.OpAnd64,
+
+ {ir.OOR, types.TINT8}: ssa.OpOr8,
+ {ir.OOR, types.TUINT8}: ssa.OpOr8,
+ {ir.OOR, types.TINT16}: ssa.OpOr16,
+ {ir.OOR, types.TUINT16}: ssa.OpOr16,
+ {ir.OOR, types.TINT32}: ssa.OpOr32,
+ {ir.OOR, types.TUINT32}: ssa.OpOr32,
+ {ir.OOR, types.TINT64}: ssa.OpOr64,
+ {ir.OOR, types.TUINT64}: ssa.OpOr64,
+
+ {ir.OXOR, types.TINT8}: ssa.OpXor8,
+ {ir.OXOR, types.TUINT8}: ssa.OpXor8,
+ {ir.OXOR, types.TINT16}: ssa.OpXor16,
+ {ir.OXOR, types.TUINT16}: ssa.OpXor16,
+ {ir.OXOR, types.TINT32}: ssa.OpXor32,
+ {ir.OXOR, types.TUINT32}: ssa.OpXor32,
+ {ir.OXOR, types.TINT64}: ssa.OpXor64,
+ {ir.OXOR, types.TUINT64}: ssa.OpXor64,
+
+ {ir.OEQ, types.TBOOL}: ssa.OpEqB,
+ {ir.OEQ, types.TINT8}: ssa.OpEq8,
+ {ir.OEQ, types.TUINT8}: ssa.OpEq8,
+ {ir.OEQ, types.TINT16}: ssa.OpEq16,
+ {ir.OEQ, types.TUINT16}: ssa.OpEq16,
+ {ir.OEQ, types.TINT32}: ssa.OpEq32,
+ {ir.OEQ, types.TUINT32}: ssa.OpEq32,
+ {ir.OEQ, types.TINT64}: ssa.OpEq64,
+ {ir.OEQ, types.TUINT64}: ssa.OpEq64,
+ {ir.OEQ, types.TINTER}: ssa.OpEqInter,
+ {ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
+ {ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
+ {ir.OEQ, types.TMAP}: ssa.OpEqPtr,
+ {ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
+ {ir.OEQ, types.TPTR}: ssa.OpEqPtr,
+ {ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
+ {ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
+ {ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
+ {ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
+
+ {ir.ONE, types.TBOOL}: ssa.OpNeqB,
+ {ir.ONE, types.TINT8}: ssa.OpNeq8,
+ {ir.ONE, types.TUINT8}: ssa.OpNeq8,
+ {ir.ONE, types.TINT16}: ssa.OpNeq16,
+ {ir.ONE, types.TUINT16}: ssa.OpNeq16,
+ {ir.ONE, types.TINT32}: ssa.OpNeq32,
+ {ir.ONE, types.TUINT32}: ssa.OpNeq32,
+ {ir.ONE, types.TINT64}: ssa.OpNeq64,
+ {ir.ONE, types.TUINT64}: ssa.OpNeq64,
+ {ir.ONE, types.TINTER}: ssa.OpNeqInter,
+ {ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
+ {ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
+ {ir.ONE, types.TMAP}: ssa.OpNeqPtr,
+ {ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
+ {ir.ONE, types.TPTR}: ssa.OpNeqPtr,
+ {ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
+ {ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
+ {ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
+ {ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
+
+ {ir.OLT, types.TINT8}: ssa.OpLess8,
+ {ir.OLT, types.TUINT8}: ssa.OpLess8U,
+ {ir.OLT, types.TINT16}: ssa.OpLess16,
+ {ir.OLT, types.TUINT16}: ssa.OpLess16U,
+ {ir.OLT, types.TINT32}: ssa.OpLess32,
+ {ir.OLT, types.TUINT32}: ssa.OpLess32U,
+ {ir.OLT, types.TINT64}: ssa.OpLess64,
+ {ir.OLT, types.TUINT64}: ssa.OpLess64U,
+ {ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
+ {ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
+
+ {ir.OLE, types.TINT8}: ssa.OpLeq8,
+ {ir.OLE, types.TUINT8}: ssa.OpLeq8U,
+ {ir.OLE, types.TINT16}: ssa.OpLeq16,
+ {ir.OLE, types.TUINT16}: ssa.OpLeq16U,
+ {ir.OLE, types.TINT32}: ssa.OpLeq32,
+ {ir.OLE, types.TUINT32}: ssa.OpLeq32U,
+ {ir.OLE, types.TINT64}: ssa.OpLeq64,
+ {ir.OLE, types.TUINT64}: ssa.OpLeq64U,
+ {ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
+ {ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
+}
+
+func (s *state) concreteEtype(t *types.Type) types.Kind {
+ e := t.Kind()
+ switch e {
+ default:
+ return e
+ case types.TINT:
+ if s.config.PtrSize == 8 {
+ return types.TINT64
+ }
+ return types.TINT32
+ case types.TUINT:
+ if s.config.PtrSize == 8 {
+ return types.TUINT64
+ }
+ return types.TUINT32
+ case types.TUINTPTR:
+ if s.config.PtrSize == 8 {
+ return types.TUINT64
+ }
+ return types.TUINT32
+ }
+}
+
+func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
+ etype := s.concreteEtype(t)
+ x, ok := opToSSA[opAndType{op, etype}]
+ if !ok {
+ s.Fatalf("unhandled binary op %v %s", op, etype)
+ }
+ return x
+}
+
+type opAndTwoTypes struct {
+ op ir.Op
+ etype1 types.Kind
+ etype2 types.Kind
+}
+
+type twoTypes struct {
+ etype1 types.Kind
+ etype2 types.Kind
+}
+
+type twoOpsAndType struct {
+ op1 ssa.Op
+ op2 ssa.Op
+ intermediateType types.Kind
+}
+
+var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+
+ {types.TINT8, types.TFLOAT32}: {ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ {types.TINT16, types.TFLOAT32}: {ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ {types.TINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
+ {types.TINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
+
+ {types.TINT8, types.TFLOAT64}: {ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ {types.TINT16, types.TFLOAT64}: {ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ {types.TINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
+ {types.TINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
+
+ {types.TFLOAT32, types.TINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ {types.TFLOAT32, types.TINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ {types.TFLOAT32, types.TINT32}: {ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
+ {types.TFLOAT32, types.TINT64}: {ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
+
+ {types.TFLOAT64, types.TINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ {types.TFLOAT64, types.TINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ {types.TFLOAT64, types.TINT32}: {ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
+ {types.TFLOAT64, types.TINT64}: {ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
+ // unsigned
+ {types.TUINT8, types.TFLOAT32}: {ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ {types.TUINT16, types.TFLOAT32}: {ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ {types.TUINT32, types.TFLOAT32}: {ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
+ {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
+
+ {types.TUINT8, types.TFLOAT64}: {ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ {types.TUINT16, types.TFLOAT64}: {ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ {types.TUINT32, types.TFLOAT64}: {ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
+ {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
+
+ {types.TFLOAT32, types.TUINT8}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ {types.TFLOAT32, types.TUINT16}: {ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ {types.TFLOAT32, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
+
+ {types.TFLOAT64, types.TUINT8}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ {types.TFLOAT64, types.TUINT16}: {ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ {types.TFLOAT64, types.TUINT64}: {ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
+
+ // float
+ {types.TFLOAT64, types.TFLOAT32}: {ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
+ {types.TFLOAT64, types.TFLOAT64}: {ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
+ {types.TFLOAT32, types.TFLOAT32}: {ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
+ {types.TFLOAT32, types.TFLOAT64}: {ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
+}
+
+// this map is used only for 32-bit arch, and only includes the difference
+// on 32-bit arch, don't use int64<->float conversion for uint32
+var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
+ {types.TUINT32, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
+ {types.TUINT32, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
+ {types.TFLOAT32, types.TUINT32}: {ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
+ {types.TFLOAT64, types.TUINT32}: {ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
+}
+
+// uint64<->float conversions, only on machines that have instructions for that
+var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+ {types.TUINT64, types.TFLOAT32}: {ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
+ {types.TUINT64, types.TFLOAT64}: {ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
+ {types.TFLOAT32, types.TUINT64}: {ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
+ {types.TFLOAT64, types.TUINT64}: {ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
+}
+
+var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
+ {ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
+ {ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
+ {ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
+ {ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
+ {ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
+ {ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
+ {ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
+ {ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
+
+ {ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
+ {ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
+ {ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
+ {ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
+ {ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
+ {ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
+ {ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
+ {ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
+
+ {ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
+ {ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
+ {ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
+ {ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
+ {ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
+ {ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
+ {ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
+ {ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
+
+ {ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
+ {ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
+ {ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
+ {ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
+ {ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
+ {ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
+ {ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
+ {ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
+
+ {ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
+ {ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
+ {ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
+ {ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
+ {ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
+ {ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
+ {ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
+ {ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
+
+ {ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
+ {ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
+ {ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
+ {ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
+ {ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
+ {ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
+ {ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
+ {ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
+
+ {ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
+ {ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
+ {ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
+ {ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
+ {ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
+ {ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
+ {ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
+ {ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
+
+ {ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
+ {ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
+ {ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
+ {ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
+ {ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
+ {ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
+ {ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
+ {ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
+}
+
+func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
+ etype1 := s.concreteEtype(t)
+ etype2 := s.concreteEtype(u)
+ x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
+ if !ok {
+ s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
+ }
+ return x
+}
+
+func (s *state) uintptrConstant(v uint64) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue0I(ssa.OpConst32, types.Types[types.TUINTPTR], int64(v))
+ }
+ return s.newValue0I(ssa.OpConst64, types.Types[types.TUINTPTR], int64(v))
+}
+
+func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
+ // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
+ return s.newValue1(ssa.OpCvtBoolToUint8, tt, v)
+ }
+ if ft.IsInteger() && tt.IsInteger() {
+ var op ssa.Op
+ if tt.Size() == ft.Size() {
+ op = ssa.OpCopy
+ } else if tt.Size() < ft.Size() {
+ // truncation
+ switch 10*ft.Size() + tt.Size() {
+ case 21:
+ op = ssa.OpTrunc16to8
+ case 41:
+ op = ssa.OpTrunc32to8
+ case 42:
+ op = ssa.OpTrunc32to16
+ case 81:
+ op = ssa.OpTrunc64to8
+ case 82:
+ op = ssa.OpTrunc64to16
+ case 84:
+ op = ssa.OpTrunc64to32
+ default:
+ s.Fatalf("weird integer truncation %v -> %v", ft, tt)
+ }
+ } else if ft.IsSigned() {
+ // sign extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpSignExt8to16
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
+ }
+ } else {
+ // zero extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpZeroExt8to16
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
+ }
+ }
+ return s.newValue1(op, tt, v)
+ }
+
+ if ft.IsComplex() && tt.IsComplex() {
+ var op ssa.Op
+ if ft.Size() == tt.Size() {
+ switch ft.Size() {
+ case 8:
+ op = ssa.OpRound32F
+ case 16:
+ op = ssa.OpRound64F
+ default:
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ } else if ft.Size() == 8 && tt.Size() == 16 {
+ op = ssa.OpCvt32Fto64F
+ } else if ft.Size() == 16 && tt.Size() == 8 {
+ op = ssa.OpCvt64Fto32F
+ } else {
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ ftp := types.FloatForComplex(ft)
+ ttp := types.FloatForComplex(tt)
+ return s.newValue2(ssa.OpComplexMake, tt,
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
+ }
+
+ if tt.IsComplex() { // and ft is not complex
+ // Needed for generics support - can't happen in normal Go code.
+ et := types.FloatForComplex(tt)
+ v = s.conv(n, v, ft, et)
+ return s.newValue2(ssa.OpComplexMake, tt, v, s.zeroVal(et))
+ }
+
+ if ft.IsFloat() || tt.IsFloat() {
+ conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+ if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
+ if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+ if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
+ if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+
+ if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
+ if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint32Tofloat32(n, v, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint32Tofloat64(n, v, ft, tt)
+ }
+ } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint32(n, v, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint32(n, v, ft, tt)
+ }
+ }
+ }
+
+ if !ok {
+ s.Fatalf("weird float conversion %v -> %v", ft, tt)
+ }
+ op1, op2, it := conv.op1, conv.op2, conv.intermediateType
+
+ if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
+ // normal case, not tripping over unsigned 64
+ if op1 == ssa.OpCopy {
+ if op2 == ssa.OpCopy {
+ return v
+ }
+ return s.newValueOrSfCall1(op2, tt, v)
+ }
+ if op2 == ssa.OpCopy {
+ return s.newValueOrSfCall1(op1, tt, v)
+ }
+ return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v))
+ }
+ // Tricky 64-bit unsigned cases.
+ if ft.IsInteger() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint64Tofloat32(n, v, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint64Tofloat64(n, v, ft, tt)
+ }
+ s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
+ }
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint64(n, v, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint64(n, v, ft, tt)
+ }
+ s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
+ return nil
+ }
+
+ s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
+ return nil
+}
+
+// expr converts the expression n to ssa, adds it to s and returns the ssa result.
+func (s *state) expr(n ir.Node) *ssa.Value {
+ return s.exprCheckPtr(n, true)
+}
+
+func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
+ if ir.HasUniquePos(n) {
+ // ONAMEs and named OLITERALs have the line number
+ // of the decl, not the use. See issue 14742.
+ s.pushLine(n.Pos())
+ defer s.popLine()
+ }
+
+ s.stmtList(n.Init())
+ switch n.Op() {
+ case ir.OBYTES2STRTMP:
+ n := n.(*ir.ConvExpr)
+ slice := s.expr(n.X)
+ ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
+ case ir.OSTR2BYTESTMP:
+ n := n.(*ir.ConvExpr)
+ str := s.expr(n.X)
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
+ if !n.NonNil() {
+ // We need to ensure []byte("") evaluates to []byte{}, and not []byte(nil).
+ //
+ // TODO(mdempsky): Investigate using "len != 0" instead of "ptr != nil".
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], ptr, s.constNil(ptr.Type))
+ zerobase := s.newValue1A(ssa.OpAddr, ptr.Type, ir.Syms.Zerobase, s.sb)
+ ptr = s.ternary(cond, ptr, zerobase)
+ }
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
+ case ir.OCFUNC:
+ n := n.(*ir.UnaryExpr)
+ aux := n.X.(*ir.Name).Linksym()
+ // OCFUNC is used to build function values, which must
+ // always reference ABIInternal entry points.
+ if aux.ABI() != obj.ABIInternal {
+ s.Fatalf("expected ABIInternal: %v", aux.ABI())
+ }
+ return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC {
+ // "value" of a function is the address of the function's closure
+ sym := staticdata.FuncLinksym(n)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
+ }
+ if s.canSSA(n) {
+ return s.variable(n, n.Type())
+ }
+ return s.load(n.Type(), s.addr(n))
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ return s.load(n.Type(), s.addr(n))
+ case ir.ONIL:
+ n := n.(*ir.NilExpr)
+ t := n.Type()
+ switch {
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsInterface():
+ return s.constInterface(t)
+ default:
+ return s.constNil(t)
+ }
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.Int:
+ i := ir.IntVal(n.Type(), u)
+ switch n.Type().Size() {
+ case 1:
+ return s.constInt8(n.Type(), int8(i))
+ case 2:
+ return s.constInt16(n.Type(), int16(i))
+ case 4:
+ return s.constInt32(n.Type(), int32(i))
+ case 8:
+ return s.constInt64(n.Type(), i)
+ default:
+ s.Fatalf("bad integer size %d", n.Type().Size())
+ return nil
+ }
+ case constant.String:
+ i := constant.StringVal(u)
+ if i == "" {
+ return s.constEmptyString(n.Type())
+ }
+ return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
+ case constant.Bool:
+ return s.constBool(constant.BoolVal(u))
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch n.Type().Size() {
+ case 4:
+ return s.constFloat32(n.Type(), f)
+ case 8:
+ return s.constFloat64(n.Type(), f)
+ default:
+ s.Fatalf("bad float size %d", n.Type().Size())
+ return nil
+ }
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch n.Type().Size() {
+ case 8:
+ pt := types.Types[types.TFLOAT32]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat32(pt, re),
+ s.constFloat32(pt, im))
+ case 16:
+ pt := types.Types[types.TFLOAT64]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat64(pt, re),
+ s.constFloat64(pt, im))
+ default:
+ s.Fatalf("bad complex size %d", n.Type().Size())
+ return nil
+ }
+ default:
+ s.Fatalf("unhandled OLITERAL %v", u.Kind())
+ return nil
+ }
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ to := n.Type()
+ from := n.X.Type()
+
+ // Assume everything will work out, so set up our return value.
+ // Anything interesting that happens from here is a fatal.
+ x := s.expr(n.X)
+ if to == from {
+ return x
+ }
+
+ // Special case for not confusing GC and liveness.
+ // We don't want pointers accidentally classified
+ // as not-pointers or vice-versa because of copy
+ // elision.
+ if to.IsPtrShaped() != from.IsPtrShaped() {
+ return s.newValue2(ssa.OpConvert, to, x, s.mem())
+ }
+
+ v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
+
+ // CONVNOP closure
+ if to.Kind() == types.TFUNC && from.IsPtrShaped() {
+ return v
+ }
+
+ // named <--> unnamed type or typed <--> untyped const
+ if from.Kind() == to.Kind() {
+ return v
+ }
+
+ // unsafe.Pointer <--> *T
+ if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
+ if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() {
+ s.checkPtrAlignment(n, v, nil)
+ }
+ return v
+ }
+
+ // map <--> *hmap
+ if to.Kind() == types.TMAP && from == types.NewPtr(reflectdata.MapType()) {
+ return v
+ }
+
+ types.CalcSize(from)
+ types.CalcSize(to)
+ if from.Size() != to.Size() {
+ s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size())
+ return nil
+ }
+ if etypesign(from.Kind()) != etypesign(to.Kind()) {
+ s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
+ return nil
+ }
+
+ if base.Flag.Cfg.Instrumenting {
+ // These appear to be fine, but they fail the
+ // integer constraint below, so okay them here.
+ // Sample non-integer conversion: map[string]string -> *uint8
+ return v
+ }
+
+ if etypesign(from.Kind()) == 0 {
+ s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
+ return nil
+ }
+
+ // integer, same width, same sign
+ return v
+
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ x := s.expr(n.X)
+ return s.conv(n, x, n.X.Type(), n.Type())
+
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
+ res, _ := s.dottype(n, false)
+ return res
+
+ case ir.ODYNAMICDOTTYPE:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ res, _ := s.dynamicDottype(n, false)
+ return res
+
+ // binary ops
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.X.Type().IsComplex() {
+ pt := types.FloatForComplex(n.X.Type())
+ op := s.ssaOp(ir.OEQ, pt)
+ r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
+ switch n.Op() {
+ case ir.OEQ:
+ return c
+ case ir.ONE:
+ return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
+ default:
+ s.Fatalf("ordered complex compare %v", n.Op())
+ }
+ }
+
+ // Convert OGE and OGT into OLE and OLT.
+ op := n.Op()
+ switch op {
+ case ir.OGE:
+ op, a, b = ir.OLE, b, a
+ case ir.OGT:
+ op, a, b = ir.OLT, b, a
+ }
+ if n.X.Type().IsFloat() {
+ // float comparison
+ return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
+ }
+ // integer comparison
+ return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
+ case ir.OMUL:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.Type().IsComplex() {
+ mulop := ssa.OpMul64F
+ addop := ssa.OpAdd64F
+ subop := ssa.OpSub64F
+ pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
+
+ areal := s.newValue1(ssa.OpComplexReal, pt, a)
+ breal := s.newValue1(ssa.OpComplexReal, pt, b)
+ aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+ bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+ if pt != wt { // Widen for calculation
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+ }
+
+ xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
+
+ if pt != wt { // Narrow to store back
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+ }
+
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
+ }
+
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+
+ case ir.ODIV:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.Type().IsComplex() {
+ // TODO this is not executed because the front-end substitutes a runtime call.
+ // That probably ought to change; with modest optimization the widen/narrow
+ // conversions could all be elided in larger expression trees.
+ mulop := ssa.OpMul64F
+ addop := ssa.OpAdd64F
+ subop := ssa.OpSub64F
+ divop := ssa.OpDiv64F
+ pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
+
+ areal := s.newValue1(ssa.OpComplexReal, pt, a)
+ breal := s.newValue1(ssa.OpComplexReal, pt, b)
+ aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+ bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+ if pt != wt { // Widen for calculation
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+ }
+
+ denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
+ xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
+
+ // TODO not sure if this is best done in wide precision or narrow
+ // Double-rounding might be an issue.
+ // Note that the pre-SSA implementation does the entire calculation
+ // in wide format, so wide is compatible.
+ xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
+ ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
+
+ if pt != wt { // Narrow to store back
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+ }
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
+ }
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.intDivide(n, a, b)
+ case ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ return s.intDivide(n, a, b)
+ case ir.OADD, ir.OSUB:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.Type().IsComplex() {
+ pt := types.FloatForComplex(n.Type())
+ op := s.ssaOp(n.Op(), pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
+ }
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OAND, ir.OOR, ir.OXOR:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ bt := b.Type
+ if bt.IsSigned() {
+ cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
+ s.check(cmp, ir.Syms.Panicshift)
+ bt = bt.ToUnsigned()
+ }
+ return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
+ case ir.OANDAND, ir.OOROR:
+ // To implement OANDAND (and OOROR), we introduce a
+ // new temporary variable to hold the result. The
+ // variable is associated with the OANDAND node in the
+ // s.vars table (normally variables are only
+ // associated with ONAME nodes). We convert
+ // A && B
+ // to
+ // var = A
+ // if var {
+ // var = B
+ // }
+ // Using var in the subsequent block introduces the
+ // necessary phi variable.
+ n := n.(*ir.LogicalExpr)
+ el := s.expr(n.X)
+ s.vars[n] = el
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(el)
+ // In theory, we should set b.Likely here based on context.
+ // However, gc only gives us likeliness hints
+ // in a single place, for plain OIF statements,
+ // and passing around context is finnicky, so don't bother for now.
+
+ bRight := s.f.NewBlock(ssa.BlockPlain)
+ bResult := s.f.NewBlock(ssa.BlockPlain)
+ if n.Op() == ir.OANDAND {
+ b.AddEdgeTo(bRight)
+ b.AddEdgeTo(bResult)
+ } else if n.Op() == ir.OOROR {
+ b.AddEdgeTo(bResult)
+ b.AddEdgeTo(bRight)
+ }
+
+ s.startBlock(bRight)
+ er := s.expr(n.Y)
+ s.vars[n] = er
+
+ b = s.endBlock()
+ b.AddEdgeTo(bResult)
+
+ s.startBlock(bResult)
+ return s.variable(n, types.Types[types.TBOOL])
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ r := s.expr(n.X)
+ i := s.expr(n.Y)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
+
+ // unary ops
+ case ir.ONEG:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ if n.Type().IsComplex() {
+ tp := types.FloatForComplex(n.Type())
+ negop := s.ssaOp(n.Op(), tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
+ s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
+ }
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.ONOT, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.OIMAG, ir.OREAL:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
+ case ir.OPLUS:
+ n := n.(*ir.UnaryExpr)
+ return s.expr(n.X)
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ return s.addr(n.X)
+
+ case ir.ORESULT:
+ n := n.(*ir.ResultExpr)
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
+ panic("Expected to see a previous call")
+ }
+ which := n.Index
+ if which == -1 {
+ panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
+ }
+ return s.resultOfCall(s.prevCall, which, n.Type())
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ return s.load(n.Type(), p)
+
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ if n.X.Op() == ir.OSTRUCTLIT {
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !ir.IsZero(n.X) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.X)
+ }
+ return s.zeroVal(n.Type())
+ }
+ // If n is addressable and can't be represented in
+ // SSA, then load just the selected field. This
+ // prevents false memory dependencies in race/msan/asan
+ // instrumentation.
+ if ir.IsAddressable(n) && !s.canSSA(n) {
+ p := s.addr(n)
+ return s.load(n.Type(), p)
+ }
+ v := s.expr(n.X)
+ return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
+
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ return s.load(n.Type(), p)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ switch {
+ case n.X.Type().IsString():
+ if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
+ // Replace "abc"[1] with 'b'.
+ // Delayed until now because "abc"[1] is not an ideal constant.
+ // See test/fixedbugs/issue11370.go.
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
+ }
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ ptrtyp := s.f.Config.Types.BytePtr
+ ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
+ if ir.IsConst(n.Index, constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
+ } else {
+ ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
+ }
+ return s.load(types.Types[types.TUINT8], ptr)
+ case n.X.Type().IsSlice():
+ p := s.addr(n)
+ return s.load(n.X.Type().Elem(), p)
+ case n.X.Type().IsArray():
+ if ssa.CanSSA(n.X.Type()) {
+ // SSA can handle arrays of length at most 1.
+ bound := n.X.Type().NumElem()
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
+ if bound == 0 {
+ // Bounds check will never succeed. Might as well
+ // use constants for the bounds check.
+ z := s.constInt(types.Types[types.TINT], 0)
+ s.boundsCheck(z, z, ssa.BoundsIndex, false)
+ // The return value won't be live, return junk.
+ // But not quite junk, in case bounds checks are turned off. See issue 48092.
+ return s.zeroVal(n.Type())
+ }
+ len := s.constInt(types.Types[types.TINT], bound)
+ s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
+ return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
+ }
+ p := s.addr(n)
+ return s.load(n.X.Type().Elem(), p)
+ default:
+ s.Fatalf("bad type for index %v", n.X.Type())
+ return nil
+ }
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ switch {
+ case n.X.Type().IsSlice():
+ op := ssa.OpSliceLen
+ if n.Op() == ir.OCAP {
+ op = ssa.OpSliceCap
+ }
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
+ case n.X.Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
+ case n.X.Type().IsMap(), n.X.Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.X))
+ default: // array
+ return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
+ }
+
+ case ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ if n.X.Type().IsSlice() {
+ if n.Bounded() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
+ }
+ return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), a)
+ } else {
+ return s.newValue1(ssa.OpStringPtr, n.Type(), a)
+ }
+
+ case ir.OITAB:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(ssa.OpITab, n.Type(), a)
+
+ case ir.OIDATA:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(ssa.OpIData, n.Type(), a)
+
+ case ir.OMAKEFACE:
+ n := n.(*ir.BinaryExpr)
+ tab := s.expr(n.X)
+ data := s.expr(n.Y)
+ return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ p := s.expr(n.Ptr)
+ l := s.expr(n.Len)
+ c := s.expr(n.Cap)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+
+ case ir.OSTRINGHEADER:
+ n := n.(*ir.StringHeaderExpr)
+ p := s.expr(n.Ptr)
+ l := s.expr(n.Len)
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
+
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ check := s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
+ v := s.exprCheckPtr(n.X, !check)
+ var i, j, k *ssa.Value
+ if n.Low != nil {
+ i = s.expr(n.Low)
+ }
+ if n.High != nil {
+ j = s.expr(n.High)
+ }
+ if n.Max != nil {
+ k = s.expr(n.Max)
+ }
+ p, l, c := s.slice(v, i, j, k, n.Bounded())
+ if check {
+ // Emit checkptr instrumentation after bound check to prevent false positive, see #46938.
+ s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR]))
+ }
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+
+ case ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ v := s.expr(n.X)
+ var i, j *ssa.Value
+ if n.Low != nil {
+ i = s.expr(n.Low)
+ }
+ if n.High != nil {
+ j = s.expr(n.High)
+ }
+ p, l, _ := s.slice(v, i, j, nil, n.Bounded())
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
+
+ case ir.OSLICE2ARRPTR:
+ // if arrlen > slice.len {
+ // panic(...)
+ // }
+ // slice.ptr
+ n := n.(*ir.ConvExpr)
+ v := s.expr(n.X)
+ nelem := n.Type().Elem().NumElem()
+ arrlen := s.constInt(types.Types[types.TINT], nelem)
+ cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
+ op := ssa.OpSlicePtr
+ if nelem == 0 {
+ op = ssa.OpSlicePtrUnchecked
+ }
+ return s.newValue1(op, n.Type(), v)
+
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if ir.IsIntrinsicCall(n) {
+ return s.intrinsicCall(n)
+ }
+ fallthrough
+
+ case ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ return s.callResult(n, callNormal)
+
+ case ir.OGETG:
+ n := n.(*ir.CallExpr)
+ return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
+
+ case ir.OGETCALLERPC:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerPC, n.Type())
+
+ case ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ return s.newValue1(ssa.OpGetCallerSP, n.Type(), s.mem())
+
+ case ir.OAPPEND:
+ return s.append(n.(*ir.CallExpr), false)
+
+ case ir.OMIN, ir.OMAX:
+ return s.minMax(n.(*ir.CallExpr))
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ n := n.(*ir.CompLitExpr)
+ if !ir.IsZero(n) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n)
+ }
+ return s.zeroVal(n.Type())
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ var rtype *ssa.Value
+ if x, ok := n.X.(*ir.DynamicType); ok && x.Op() == ir.ODYNAMICTYPE {
+ rtype = s.expr(x.RType)
+ }
+ return s.newObject(n.Type().Elem(), rtype)
+
+ case ir.OUNSAFEADD:
+ n := n.(*ir.BinaryExpr)
+ ptr := s.expr(n.X)
+ len := s.expr(n.Y)
+
+ // Force len to uintptr to prevent misuse of garbage bits in the
+ // upper part of the register (#48536).
+ len = s.conv(n, len, len.Type, types.Types[types.TUINTPTR])
+
+ return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
+
+ default:
+ s.Fatalf("unhandled expr %v", n.Op())
+ return nil
+ }
+}
+
+func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
+ aux := c.Aux.(*ssa.AuxCall)
+ pa := aux.ParamAssignmentForResult(which)
+ // TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
+ // SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
+ if len(pa.Registers) == 0 && !ssa.CanSSA(t) {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
+ return s.rawLoad(t, addr)
+ }
+ return s.newValue1I(ssa.OpSelectN, t, which, c)
+}
+
+func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
+ aux := c.Aux.(*ssa.AuxCall)
+ pa := aux.ParamAssignmentForResult(which)
+ if len(pa.Registers) == 0 {
+ return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
+ }
+ _, addr := s.temp(c.Pos, t)
+ rval := s.newValue1I(ssa.OpSelectN, t, which, c)
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
+ return addr
+}
+
+// append converts an OAPPEND node to SSA.
+// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
+// adds it to s, and returns the Value.
+// If inplace is true, it writes the result of the OAPPEND expression n
+// back to the slice being appended to, and returns nil.
+// inplace MUST be set to false if the slice can be SSA'd.
+// Note: this code only handles fixed-count appends. Dotdotdot appends
+// have already been rewritten at this point (by walk).
+func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
+ // If inplace is false, process as expression "append(s, e1, e2, e3)":
+ //
+ // ptr, len, cap := s
+ // len += 3
+ // if uint(len) > uint(cap) {
+ // ptr, len, cap = growslice(ptr, len, cap, 3, typ)
+ // Note that len is unmodified by growslice.
+ // }
+ // // with write barriers, if needed:
+ // *(ptr+(len-3)) = e1
+ // *(ptr+(len-2)) = e2
+ // *(ptr+(len-1)) = e3
+ // return makeslice(ptr, len, cap)
+ //
+ //
+ // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
+ //
+ // a := &s
+ // ptr, len, cap := s
+ // len += 3
+ // if uint(len) > uint(cap) {
+ // ptr, len, cap = growslice(ptr, len, cap, 3, typ)
+ // vardef(a) // if necessary, advise liveness we are writing a new a
+ // *a.cap = cap // write before ptr to avoid a spill
+ // *a.ptr = ptr // with write barrier
+ // }
+ // *a.len = len
+ // // with write barriers, if needed:
+ // *(ptr+(len-3)) = e1
+ // *(ptr+(len-2)) = e2
+ // *(ptr+(len-1)) = e3
+
+ et := n.Type().Elem()
+ pt := types.NewPtr(et)
+
+ // Evaluate slice
+ sn := n.Args[0] // the slice node is the first in the list
+ var slice, addr *ssa.Value
+ if inplace {
+ addr = s.addr(sn)
+ slice = s.load(n.Type(), addr)
+ } else {
+ slice = s.expr(sn)
+ }
+
+ // Allocate new blocks
+ grow := s.f.NewBlock(ssa.BlockPlain)
+ assign := s.f.NewBlock(ssa.BlockPlain)
+
+ // Decomposse input slice.
+ p := s.newValue1(ssa.OpSlicePtr, pt, slice)
+ l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
+
+ // Add number of new elements to length.
+ nargs := s.constInt(types.Types[types.TINT], int64(len(n.Args)-1))
+ l = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)
+
+ // Decide if we need to grow
+ cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, l)
+
+ // Record values of ptr/len/cap before branch.
+ s.vars[ptrVar] = p
+ s.vars[lenVar] = l
+ if !inplace {
+ s.vars[capVar] = c
+ }
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.Likely = ssa.BranchUnlikely
+ b.SetControl(cmp)
+ b.AddEdgeTo(grow)
+ b.AddEdgeTo(assign)
+
+ // Call growslice
+ s.startBlock(grow)
+ taddr := s.expr(n.Fun)
+ r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{n.Type()}, p, l, c, nargs, taddr)
+
+ // Decompose output slice
+ p = s.newValue1(ssa.OpSlicePtr, pt, r[0])
+ l = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], r[0])
+ c = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], r[0])
+
+ s.vars[ptrVar] = p
+ s.vars[lenVar] = l
+ s.vars[capVar] = c
+ if inplace {
+ if sn.Op() == ir.ONAME {
+ sn := sn.(*ir.Name)
+ if sn.Class != ir.PEXTERN {
+ // Tell liveness we're about to build a new slice
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ }
+ }
+ capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
+ s.store(types.Types[types.TINT], capaddr, c)
+ s.store(pt, addr, p)
+ }
+
+ b = s.endBlock()
+ b.AddEdgeTo(assign)
+
+ // assign new elements to slots
+ s.startBlock(assign)
+ p = s.variable(ptrVar, pt) // generates phi for ptr
+ l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
+ if !inplace {
+ c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
+ }
+
+ if inplace {
+ // Update length in place.
+ // We have to wait until here to make sure growslice succeeded.
+ lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
+ s.store(types.Types[types.TINT], lenaddr, l)
+ }
+
+ // Evaluate args
+ type argRec struct {
+ // if store is true, we're appending the value v. If false, we're appending the
+ // value at *v.
+ v *ssa.Value
+ store bool
+ }
+ args := make([]argRec, 0, len(n.Args[1:]))
+ for _, n := range n.Args[1:] {
+ if ssa.CanSSA(n.Type()) {
+ args = append(args, argRec{v: s.expr(n), store: true})
+ } else {
+ v := s.addr(n)
+ args = append(args, argRec{v: v})
+ }
+ }
+
+ // Write args into slice.
+ oldLen := s.newValue2(s.ssaOp(ir.OSUB, types.Types[types.TINT]), types.Types[types.TINT], l, nargs)
+ p2 := s.newValue2(ssa.OpPtrIndex, pt, p, oldLen)
+ for i, arg := range args {
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
+ if arg.store {
+ s.storeType(et, addr, arg.v, 0, true)
+ } else {
+ s.move(et, addr, arg.v)
+ }
+ }
+
+ // The following deletions have no practical effect at this time
+ // because state.vars has been reset by the preceding state.startBlock.
+ // They only enforce the fact that these variables are no longer need in
+ // the current scope.
+ delete(s.vars, ptrVar)
+ delete(s.vars, lenVar)
+ if !inplace {
+ delete(s.vars, capVar)
+ }
+
+ // make result
+ if inplace {
+ return nil
+ }
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+}
+
+// minMax converts an OMIN/OMAX builtin call into SSA.
+func (s *state) minMax(n *ir.CallExpr) *ssa.Value {
+ // The OMIN/OMAX builtin is variadic, but its semantics are
+ // equivalent to left-folding a binary min/max operation across the
+ // arguments list.
+ fold := func(op func(x, a *ssa.Value) *ssa.Value) *ssa.Value {
+ x := s.expr(n.Args[0])
+ for _, arg := range n.Args[1:] {
+ x = op(x, s.expr(arg))
+ }
+ return x
+ }
+
+ typ := n.Type()
+
+ if typ.IsFloat() || typ.IsString() {
+ // min/max semantics for floats are tricky because of NaNs and
+ // negative zero. Some architectures have instructions which
+ // we can use to generate the right result. For others we must
+ // call into the runtime instead.
+ //
+ // Strings are conceptually simpler, but we currently desugar
+ // string comparisons during walk, not ssagen.
+
+ if typ.IsFloat() {
+ switch Arch.LinkArch.Family {
+ case sys.AMD64, sys.ARM64:
+ var op ssa.Op
+ switch {
+ case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMIN:
+ op = ssa.OpMin64F
+ case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMAX:
+ op = ssa.OpMax64F
+ case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMIN:
+ op = ssa.OpMin32F
+ case typ.Kind() == types.TFLOAT32 && n.Op() == ir.OMAX:
+ op = ssa.OpMax32F
+ }
+ return fold(func(x, a *ssa.Value) *ssa.Value {
+ return s.newValue2(op, typ, x, a)
+ })
+ }
+ }
+ var name string
+ switch typ.Kind() {
+ case types.TFLOAT32:
+ switch n.Op() {
+ case ir.OMIN:
+ name = "fmin32"
+ case ir.OMAX:
+ name = "fmax32"
+ }
+ case types.TFLOAT64:
+ switch n.Op() {
+ case ir.OMIN:
+ name = "fmin64"
+ case ir.OMAX:
+ name = "fmax64"
+ }
+ case types.TSTRING:
+ switch n.Op() {
+ case ir.OMIN:
+ name = "strmin"
+ case ir.OMAX:
+ name = "strmax"
+ }
+ }
+ fn := typecheck.LookupRuntimeFunc(name)
+
+ return fold(func(x, a *ssa.Value) *ssa.Value {
+ return s.rtcall(fn, true, []*types.Type{typ}, x, a)[0]
+ })
+ }
+
+ lt := s.ssaOp(ir.OLT, typ)
+
+ return fold(func(x, a *ssa.Value) *ssa.Value {
+ switch n.Op() {
+ case ir.OMIN:
+ // a < x ? a : x
+ return s.ternary(s.newValue2(lt, types.Types[types.TBOOL], a, x), a, x)
+ case ir.OMAX:
+ // x < a ? a : x
+ return s.ternary(s.newValue2(lt, types.Types[types.TBOOL], x, a), a, x)
+ }
+ panic("unreachable")
+ })
+}
+
+// ternary emits code to evaluate cond ? x : y.
+func (s *state) ternary(cond, x, y *ssa.Value) *ssa.Value {
+ // Note that we need a new ternaryVar each time (unlike okVar where we can
+ // reuse the variable) because it might have a different type every time.
+ ternaryVar := ssaMarker("ternary")
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.AddEdgeTo(bThen)
+ b.AddEdgeTo(bElse)
+
+ s.startBlock(bThen)
+ s.vars[ternaryVar] = x
+ s.endBlock().AddEdgeTo(bEnd)
+
+ s.startBlock(bElse)
+ s.vars[ternaryVar] = y
+ s.endBlock().AddEdgeTo(bEnd)
+
+ s.startBlock(bEnd)
+ r := s.variable(ternaryVar, x.Type)
+ delete(s.vars, ternaryVar)
+ return r
+}
+
+// condBranch evaluates the boolean expression cond and branches to yes
+// if cond is true and no if cond is false.
+// This function is intended to handle && and || better than just calling
+// s.expr(cond) and branching on the result.
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
+ switch cond.Op() {
+ case ir.OANDAND:
+ cond := cond.(*ir.LogicalExpr)
+ mid := s.f.NewBlock(ssa.BlockPlain)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, mid, no, max8(likely, 0))
+ s.startBlock(mid)
+ s.condBranch(cond.Y, yes, no, likely)
+ return
+ // Note: if likely==1, then both recursive calls pass 1.
+ // If likely==-1, then we don't have enough information to decide
+ // whether the first branch is likely or not. So we pass 0 for
+ // the likeliness of the first branch.
+ // TODO: have the frontend give us branch prediction hints for
+ // OANDAND and OOROR nodes (if it ever has such info).
+ case ir.OOROR:
+ cond := cond.(*ir.LogicalExpr)
+ mid := s.f.NewBlock(ssa.BlockPlain)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, yes, mid, min8(likely, 0))
+ s.startBlock(mid)
+ s.condBranch(cond.Y, yes, no, likely)
+ return
+ // Note: if likely==-1, then both recursive calls pass -1.
+ // If likely==1, then we don't have enough info to decide
+ // the likelihood of the first branch.
+ case ir.ONOT:
+ cond := cond.(*ir.UnaryExpr)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, no, yes, -likely)
+ return
+ case ir.OCONVNOP:
+ cond := cond.(*ir.ConvExpr)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, yes, no, likely)
+ return
+ }
+ c := s.expr(cond)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(c)
+ b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
+ b.AddEdgeTo(yes)
+ b.AddEdgeTo(no)
+}
+
+type skipMask uint8
+
+const (
+ skipPtr skipMask = 1 << iota
+ skipLen
+ skipCap
+)
+
+// assign does left = right.
+// Right has already been evaluated to ssa, left has not.
+// If deref is true, then we do left = *right instead (and right has already been nil-checked).
+// If deref is true and right == nil, just do left = 0.
+// skip indicates assignments (at the top level) that can be avoided.
+// mayOverlap indicates whether left&right might partially overlap in memory. Default is false.
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+ s.assignWhichMayOverlap(left, right, deref, skip, false)
+}
+func (s *state) assignWhichMayOverlap(left ir.Node, right *ssa.Value, deref bool, skip skipMask, mayOverlap bool) {
+ if left.Op() == ir.ONAME && ir.IsBlank(left) {
+ return
+ }
+ t := left.Type()
+ types.CalcSize(t)
+ if s.canSSA(left) {
+ if deref {
+ s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
+ }
+ if left.Op() == ir.ODOT {
+ // We're assigning to a field of an ssa-able value.
+ // We need to build a new structure with the new value for the
+ // field we're assigning and the old values for the other fields.
+ // For instance:
+ // type T struct {a, b, c int}
+ // var T x
+ // x.b = 5
+ // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
+
+ // Grab information about the structure type.
+ left := left.(*ir.SelectorExpr)
+ t := left.X.Type()
+ nf := t.NumFields()
+ idx := fieldIdx(left)
+
+ // Grab old value of structure.
+ old := s.expr(left.X)
+
+ // Make new structure.
+ new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
+
+ // Add fields as args.
+ for i := 0; i < nf; i++ {
+ if i == idx {
+ new.AddArg(right)
+ } else {
+ new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
+ }
+ }
+
+ // Recursively assign the new value we've made to the base of the dot op.
+ s.assign(left.X, new, false, 0)
+ // TODO: do we need to update named values here?
+ return
+ }
+ if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
+ left := left.(*ir.IndexExpr)
+ s.pushLine(left.Pos())
+ defer s.popLine()
+ // We're assigning to an element of an ssa-able array.
+ // a[i] = v
+ t := left.X.Type()
+ n := t.NumElem()
+
+ i := s.expr(left.Index) // index
+ if n == 0 {
+ // The bounds check must fail. Might as well
+ // ignore the actual index and just use zeros.
+ z := s.constInt(types.Types[types.TINT], 0)
+ s.boundsCheck(z, z, ssa.BoundsIndex, false)
+ return
+ }
+ if n != 1 {
+ s.Fatalf("assigning to non-1-length array")
+ }
+ // Rewrite to a = [1]{v}
+ len := s.constInt(types.Types[types.TINT], 1)
+ s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
+ v := s.newValue1(ssa.OpArrayMake1, t, right)
+ s.assign(left.X, v, false, 0)
+ return
+ }
+ left := left.(*ir.Name)
+ // Update variable assignment.
+ s.vars[left] = right
+ s.addNamedValue(left, right)
+ return
+ }
+
+ // If this assignment clobbers an entire local variable, then emit
+ // OpVarDef so liveness analysis knows the variable is redefined.
+ if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 && t.HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
+ }
+
+ // Left is not ssa-able. Compute its address.
+ addr := s.addr(left)
+ if ir.IsReflectHeaderDataField(left) {
+ // Package unsafe's documentation says storing pointers into
+ // reflect.SliceHeader and reflect.StringHeader's Data fields
+ // is valid, even though they have type uintptr (#19168).
+ // Mark it pointer type to signal the writebarrier pass to
+ // insert a write barrier.
+ t = types.Types[types.TUNSAFEPTR]
+ }
+ if deref {
+ // Treat as a mem->mem move.
+ if right == nil {
+ s.zero(t, addr)
+ } else {
+ s.moveWhichMayOverlap(t, addr, right, mayOverlap)
+ }
+ return
+ }
+ // Treat as a store.
+ s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
+}
+
+// zeroVal returns the zero value for type t.
+func (s *state) zeroVal(t *types.Type) *ssa.Value {
+ switch {
+ case t.IsInteger():
+ switch t.Size() {
+ case 1:
+ return s.constInt8(t, 0)
+ case 2:
+ return s.constInt16(t, 0)
+ case 4:
+ return s.constInt32(t, 0)
+ case 8:
+ return s.constInt64(t, 0)
+ default:
+ s.Fatalf("bad sized integer type %v", t)
+ }
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ return s.constFloat32(t, 0)
+ case 8:
+ return s.constFloat64(t, 0)
+ default:
+ s.Fatalf("bad sized float type %v", t)
+ }
+ case t.IsComplex():
+ switch t.Size() {
+ case 8:
+ z := s.constFloat32(types.Types[types.TFLOAT32], 0)
+ return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+ case 16:
+ z := s.constFloat64(types.Types[types.TFLOAT64], 0)
+ return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+ default:
+ s.Fatalf("bad sized complex type %v", t)
+ }
+
+ case t.IsString():
+ return s.constEmptyString(t)
+ case t.IsPtrShaped():
+ return s.constNil(t)
+ case t.IsBoolean():
+ return s.constBool(false)
+ case t.IsInterface():
+ return s.constInterface(t)
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsStruct():
+ n := t.NumFields()
+ v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
+ for i := 0; i < n; i++ {
+ v.AddArg(s.zeroVal(t.FieldType(i)))
+ }
+ return v
+ case t.IsArray():
+ switch t.NumElem() {
+ case 0:
+ return s.entryNewValue0(ssa.OpArrayMake0, t)
+ case 1:
+ return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
+ }
+ }
+ s.Fatalf("zero for type %v not implemented", t)
+ return nil
+}
+
+type callKind int8
+
+const (
+ callNormal callKind = iota
+ callDefer
+ callDeferStack
+ callGo
+ callTail
+)
+
+type sfRtCallDef struct {
+ rtfn *obj.LSym
+ rtype types.Kind
+}
+
+var softFloatOps map[ssa.Op]sfRtCallDef
+
+func softfloatInit() {
+ // Some of these operations get transformed by sfcall.
+ softFloatOps = map[ssa.Op]sfRtCallDef{
+ ssa.OpAdd32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+ ssa.OpAdd64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+ ssa.OpSub32F: {typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+ ssa.OpSub64F: {typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+ ssa.OpMul32F: {typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
+ ssa.OpMul64F: {typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
+ ssa.OpDiv32F: {typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
+ ssa.OpDiv64F: {typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
+
+ ssa.OpEq64F: {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+ ssa.OpEq32F: {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+ ssa.OpNeq64F: {typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+ ssa.OpNeq32F: {typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+ ssa.OpLess64F: {typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
+ ssa.OpLess32F: {typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
+ ssa.OpLeq64F: {typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
+ ssa.OpLeq32F: {typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
+
+ ssa.OpCvt32to32F: {typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto32: {typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
+ ssa.OpCvt64to32F: {typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64: {typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: {typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64U: {typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
+ ssa.OpCvt32to64F: {typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32: {typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
+ ssa.OpCvt64to64F: {typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64: {typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
+ ssa.OpCvt64Uto64F: {typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64U: {typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
+ ssa.OpCvt32Fto64F: {typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32F: {typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
+ }
+}
+
+// TODO: do not emit sfcall if operation can be optimized to constant in later
+// opt phase
+func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
+ f2i := func(t *types.Type) *types.Type {
+ switch t.Kind() {
+ case types.TFLOAT32:
+ return types.Types[types.TUINT32]
+ case types.TFLOAT64:
+ return types.Types[types.TUINT64]
+ }
+ return t
+ }
+
+ if callDef, ok := softFloatOps[op]; ok {
+ switch op {
+ case ssa.OpLess32F,
+ ssa.OpLess64F,
+ ssa.OpLeq32F,
+ ssa.OpLeq64F:
+ args[0], args[1] = args[1], args[0]
+ case ssa.OpSub32F,
+ ssa.OpSub64F:
+ args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+ }
+
+ // runtime functions take uints for floats and returns uints.
+ // Convert to uints so we use the right calling convention.
+ for i, a := range args {
+ if a.Type.IsFloat() {
+ args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
+ }
+ }
+
+ rt := types.Types[callDef.rtype]
+ result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
+ if rt.IsFloat() {
+ result = s.newValue1(ssa.OpCopy, rt, result)
+ }
+ if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
+ result = s.newValue1(ssa.OpNot, result.Type, result)
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+var intrinsics map[intrinsicKey]intrinsicBuilder
+
+// An intrinsicBuilder converts a call node n into an ssa value that
+// implements that call as an intrinsic. args is a list of arguments to the func.
+type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
+
+type intrinsicKey struct {
+ arch *sys.Arch
+ pkg string
+ fn string
+}
+
+func InitTables() {
+ intrinsics = map[intrinsicKey]intrinsicBuilder{}
+
+ var all []*sys.Arch
+ var p4 []*sys.Arch
+ var p8 []*sys.Arch
+ var lwatomics []*sys.Arch
+ for _, a := range &sys.Archs {
+ all = append(all, a)
+ if a.PtrSize == 4 {
+ p4 = append(p4, a)
+ } else {
+ p8 = append(p8, a)
+ }
+ if a.Family != sys.PPC64 {
+ lwatomics = append(lwatomics, a)
+ }
+ }
+
+ // add adds the intrinsic b for pkg.fn for the given list of architectures.
+ add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
+ for _, a := range archs {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ }
+ }
+ // addF does the same as add but operates on architecture families.
+ addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
+ m := 0
+ for _, f := range archFamilies {
+ if f >= 32 {
+ panic("too many architecture families")
+ }
+ m |= 1 << uint(f)
+ }
+ for _, a := range all {
+ if m>>uint(a.Family)&1 != 0 {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ }
+ }
+ }
+ // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
+ alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
+ aliased := false
+ for _, a := range archs {
+ if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ aliased = true
+ }
+ }
+ if !aliased {
+ panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
+ }
+ }
+
+ /******** runtime ********/
+ if !base.Flag.Cfg.Instrumenting {
+ add("runtime", "slicebytetostringtmp",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ // Compiler frontend optimizations emit OBYTES2STRTMP nodes
+ // for the backend instead of slicebytetostringtmp calls
+ // when not instrumenting.
+ return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
+ },
+ all...)
+ }
+ addF("runtime/internal/math", "MulUintptr",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
+ }
+ return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
+ },
+ sys.AMD64, sys.I386, sys.Loong64, sys.MIPS64, sys.RISCV64, sys.ARM64)
+ add("runtime", "KeepAlive",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
+ s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+ return nil
+ },
+ all...)
+ add("runtime", "getclosureptr",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ add("runtime", "getcallerpc",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ add("runtime", "getcallersp",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr, s.mem())
+ },
+ all...)
+
+ addF("runtime", "publicationBarrier",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
+ return nil
+ },
+ sys.ARM64, sys.PPC64, sys.RISCV64)
+
+ brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X}
+ if buildcfg.GOPPC64 >= 10 {
+ // Use only on Power10 as the new byte reverse instructions that Power10 provide
+ // make it worthwhile as an intrinsic
+ brev_arch = append(brev_arch, sys.PPC64)
+ }
+ /******** runtime/internal/sys ********/
+ addF("runtime/internal/sys", "Bswap32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
+ },
+ brev_arch...)
+ addF("runtime/internal/sys", "Bswap64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
+ },
+ brev_arch...)
+
+ /****** Prefetch ******/
+ makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem())
+ return nil
+ }
+ }
+
+ // Make Prefetch intrinsics for supported platforms
+ // On the unsupported platforms stub function will be eliminated
+ addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache),
+ sys.AMD64, sys.ARM64, sys.PPC64)
+ addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed),
+ sys.AMD64, sys.ARM64, sys.PPC64)
+
+ /******** runtime/internal/atomic ********/
+ addF("runtime/internal/atomic", "Load",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Load8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Load64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.PPC64)
+ addF("runtime/internal/atomic", "Loadp",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ addF("runtime/internal/atomic", "Store",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Store8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Store64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "StorepNoWB",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64)
+
+ addF("runtime/internal/atomic", "Xchg",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Xchg64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
+
+ makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
+
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ // Target Atomic feature is identified by dynamic detection
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely
+
+ // We have atomic instructions - use it directly.
+ s.startBlock(bTrue)
+ emit(s, n, args, op1, typ)
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Use original instruction sequence.
+ s.startBlock(bFalse)
+ emit(s, n, args, op0, typ)
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ if rtyp == types.TNIL {
+ return nil
+ } else {
+ return s.variable(n, types.Types[rtyp])
+ }
+ }
+ }
+
+ atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+ }
+ addF("runtime/internal/atomic", "Xchg",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Xchg64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "Xadd",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Xadd64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ addF("runtime/internal/atomic", "Xadd",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Xadd64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "Cas",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+ },
+ sys.AMD64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Cas64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+ },
+ sys.AMD64, sys.Loong64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "CasRel",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+ },
+ sys.PPC64)
+
+ atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+ }
+
+ addF("runtime/internal/atomic", "Cas",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Cas64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "And8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "And",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Or8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Or",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+ }
+
+ addF("runtime/internal/atomic", "And8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "And",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Or8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Or",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+
+ // Aliases for atomic load operations
+ alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
+ alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
+ alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
+ alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
+ alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
+ alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
+ alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
+
+ // Aliases for atomic store operations
+ alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
+ alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
+ alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
+ alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
+ alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+ alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
+
+ // Aliases for atomic swap operations
+ alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
+ alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
+
+ // Aliases for atomic add operations
+ alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
+ alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
+ alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
+
+ // Aliases for atomic CAS operations
+ alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
+ alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
+ alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
+ alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
+ alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
+ alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
+ alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
+
+ /******** math ********/
+ addF("math", "sqrt",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
+ addF("math", "Trunc",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Ceil",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Floor",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Round",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X)
+ addF("math", "RoundToEven",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math", "Abs",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm, sys.MIPS, sys.MIPS64)
+ addF("math", "Copysign",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
+ },
+ sys.PPC64, sys.RISCV64, sys.Wasm)
+ addF("math", "FMA",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ },
+ sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("math", "FMA",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if !s.config.UseFMA {
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ return s.variable(n, types.Types[types.TFLOAT64])
+ }
+
+ if buildcfg.GOAMD64 >= 3 {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ }
+
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // >= haswell cpus are common
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TFLOAT64])
+ },
+ sys.AMD64)
+ addF("math", "FMA",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if !s.config.UseFMA {
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ return s.variable(n, types.Types[types.TFLOAT64])
+ }
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TFLOAT64])
+ },
+ sys.ARM)
+
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if buildcfg.GOAMD64 >= 2 {
+ return s.newValue1(op, types.Types[types.TFLOAT64], args[0])
+ }
+
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TFLOAT64])
+ }
+ }
+ addF("math", "RoundToEven",
+ makeRoundAMD64(ssa.OpRoundToEven),
+ sys.AMD64)
+ addF("math", "Floor",
+ makeRoundAMD64(ssa.OpFloor),
+ sys.AMD64)
+ addF("math", "Ceil",
+ makeRoundAMD64(ssa.OpCeil),
+ sys.AMD64)
+ addF("math", "Trunc",
+ makeRoundAMD64(ssa.OpTrunc),
+ sys.AMD64)
+
+ /******** math/bits ********/
+ addF("math/bits", "TrailingZeros64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "TrailingZeros32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
+ },
+ sys.MIPS)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
+ },
+ sys.S390X, sys.PPC64)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
+ },
+ sys.MIPS)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
+ },
+ sys.S390X)
+ alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
+ alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
+ // ReverseBytes inlines correctly, no need to intrinsify it.
+ // Nothing special is needed for targets where ReverseBytes16 lowers to a rotate
+ // On Power10, 16-bit rotate is not available so use BRH instruction
+ if buildcfg.GOPPC64 >= 10 {
+ addF("math/bits", "ReverseBytes16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap16, types.Types[types.TUINT], args[0])
+ },
+ sys.PPC64)
+ }
+
+ addF("math/bits", "Len64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64)
+ addF("math/bits", "Len32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+ }
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+ },
+ sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
+ addF("math/bits", "Len16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
+ }
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+ },
+ sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64)
+ addF("math/bits", "Len8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
+ }
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+ },
+ sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64)
+ addF("math/bits", "Len",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+ }
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ // LeadingZeros is handled because it trivially calls Len.
+ addF("math/bits", "Reverse64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "RotateLeft8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
+ },
+ sys.AMD64)
+ addF("math/bits", "RotateLeft16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
+ },
+ sys.AMD64)
+ addF("math/bits", "RotateLeft32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
+ },
+ sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64)
+ addF("math/bits", "RotateLeft64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64)
+ alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
+
+ makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if buildcfg.GOAMD64 >= 2 {
+ return s.newValue1(op, types.Types[types.TINT], args[0])
+ }
+
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TINT])
+ }
+ }
+ addF("math/bits", "OnesCount64",
+ makeOnesCountAMD64(ssa.OpPopCount64),
+ sys.AMD64)
+ addF("math/bits", "OnesCount64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
+ },
+ sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math/bits", "OnesCount32",
+ makeOnesCountAMD64(ssa.OpPopCount32),
+ sys.AMD64)
+ addF("math/bits", "OnesCount32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
+ },
+ sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math/bits", "OnesCount16",
+ makeOnesCountAMD64(ssa.OpPopCount16),
+ sys.AMD64)
+ addF("math/bits", "OnesCount16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "OnesCount8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
+ },
+ sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "OnesCount",
+ makeOnesCountAMD64(ssa.OpPopCount64),
+ sys.AMD64)
+ addF("math/bits", "Mul64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64, sys.Loong64)
+ alias("math/bits", "Mul", "math/bits", "Mul64", p8...)
+ alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", p8...)
+ addF("math/bits", "Add64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
+ alias("math/bits", "Add", "math/bits", "Add64", p8...)
+ alias("runtime/internal/math", "Add64", "math/bits", "Add64", all...)
+ addF("math/bits", "Sub64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.RISCV64, sys.Loong64, sys.MIPS64)
+ alias("math/bits", "Sub", "math/bits", "Sub64", p8...)
+ addF("math/bits", "Div64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ // check for divide-by-zero/overflow and panic with appropriate message
+ cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
+ s.check(cmpZero, ir.Syms.Panicdivide)
+ cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
+ s.check(cmpOverflow, ir.Syms.Panicoverflow)
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64)
+ alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
+
+ alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
+ alias("runtime/internal/sys", "TrailingZeros32", "math/bits", "TrailingZeros32", all...)
+ alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
+ alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
+ alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
+ alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
+
+ /******** sync/atomic ********/
+
+ // Note: these are disabled by flag_race in findIntrinsic below.
+ alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
+ alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
+ alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
+ alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
+ alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
+ alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
+ alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
+
+ alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
+ alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
+ // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
+ alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
+ alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
+ alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
+ alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
+
+ alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
+ alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
+ alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
+ alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
+
+ alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
+ alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
+ alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
+ alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
+ alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
+ alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
+
+ alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
+ alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
+ alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
+ alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
+
+ /******** math/big ********/
+ alias("math/big", "mulWW", "math/bits", "Mul64", p8...)
+}
+
+// findIntrinsic returns a function which builds the SSA equivalent of the
+// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
+func findIntrinsic(sym *types.Sym) intrinsicBuilder {
+ if sym == nil || sym.Pkg == nil {
+ return nil
+ }
+ pkg := sym.Pkg.Path
+ if sym.Pkg == ir.Pkgs.Runtime {
+ pkg = "runtime"
+ }
+ if base.Flag.Race && pkg == "sync/atomic" {
+ // The race detector needs to be able to intercept these calls.
+ // We can't intrinsify them.
+ return nil
+ }
+ // Skip intrinsifying math functions (which may contain hard-float
+ // instructions) when soft-float
+ if Arch.SoftFloat && pkg == "math" {
+ return nil
+ }
+
+ fn := sym.Name
+ if ssa.IntrinsicsDisable {
+ if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
+ // These runtime functions don't have definitions, must be intrinsics.
+ } else {
+ return nil
+ }
+ }
+ return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
+}
+
+func IsIntrinsicCall(n *ir.CallExpr) bool {
+ if n == nil {
+ return false
+ }
+ name, ok := n.Fun.(*ir.Name)
+ if !ok {
+ return false
+ }
+ return findIntrinsic(name.Sym()) != nil
+}
+
+// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
+func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
+ v := findIntrinsic(n.Fun.Sym())(s, n, s.intrinsicArgs(n))
+ if ssa.IntrinsicsDebug > 0 {
+ x := v
+ if x == nil {
+ x = s.mem()
+ }
+ if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
+ x = x.Args[0]
+ }
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.Fun.Sym().Name, x.LongString())
+ }
+ return v
+}
+
+// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
+func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
+ args := make([]*ssa.Value, len(n.Args))
+ for i, n := range n.Args {
+ args[i] = s.expr(n)
+ }
+ return args
+}
+
+// openDeferRecord adds code to evaluate and store the function for an open-code defer
+// call, and records info about the defer, so we can generate proper code on the
+// exit paths. n is the sub-node of the defer node that is the actual function
+// call. We will also record funcdata information on where the function is stored
+// (as well as the deferBits variable), and this will enable us to run the proper
+// defer calls during panics.
+func (s *state) openDeferRecord(n *ir.CallExpr) {
+ if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.Fun.Type().NumResults() != 0 {
+ s.Fatalf("defer call with arguments or results: %v", n)
+ }
+
+ opendefer := &openDeferInfo{
+ n: n,
+ }
+ fn := n.Fun
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the function directly if it is a static function.
+ closureVal := s.expr(fn)
+ closure := s.openDeferSave(fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
+ opendefer.closure = closure
+ }
+ index := len(s.openDefers)
+ s.openDefers = append(s.openDefers, opendefer)
+
+ // Update deferBits only after evaluation and storage to stack of
+ // the function is successful.
+ bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
+ newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
+ s.vars[deferBitsVar] = newDeferBits
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
+}
+
+// openDeferSave generates SSA nodes to store a value (with type t) for an
+// open-coded defer at an explicit autotmp location on the stack, so it can be
+// reloaded and used for the appropriate call on exit. Type t must be a function type
+// (therefore SSAable). val is the value to be stored. The function returns an SSA
+// value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
+ if !ssa.CanSSA(t) {
+ s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
+ }
+ if !t.HasPointers() {
+ s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
+ }
+ pos := val.Pos
+ temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
+ temp.SetOpenDeferSlot(true)
+ temp.SetFrameOffset(int64(len(s.openDefers))) // so cmpstackvarlt can order them
+ var addrTemp *ssa.Value
+ // Use OpVarLive to make sure stack slot for the closure is not removed by
+ // dead-store elimination
+ if s.curBlock.ID != s.f.Entry.ID {
+ // Force the tmp storing this defer function to be declared in the entry
+ // block, so that it will be live for the defer exit code (which will
+ // actually access it only if the associated defer call has been activated).
+ if t.HasPointers() {
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ }
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+ } else {
+ // Special case if we're still in the entry block. We can't use
+ // the above code, since s.defvars[s.f.Entry.ID] isn't defined
+ // until we end the entry block with s.endBlock().
+ if t.HasPointers() {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
+ }
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
+ addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
+ }
+ // Since we may use this temp during exit depending on the
+ // deferBits, we must define it unconditionally on entry.
+ // Therefore, we must make sure it is zeroed out in the entry
+ // block if it contains pointers, else GC may wrongly follow an
+ // uninitialized pointer value.
+ temp.SetNeedzero(true)
+ // We are storing to the stack, hence we can avoid the full checks in
+ // storeType() (no write barrier) and do a simple store().
+ s.store(t, addrTemp, val)
+ return addrTemp
+}
+
+// openDeferExit generates SSA for processing all the open coded defers at exit.
+// The code involves loading deferBits, and checking each of the bits to see if
+// the corresponding defer statement was executed. For each bit that is turned
+// on, the associated defer call is made.
+func (s *state) openDeferExit() {
+ deferExit := s.f.NewBlock(ssa.BlockPlain)
+ s.endBlock().AddEdgeTo(deferExit)
+ s.startBlock(deferExit)
+ s.lastDeferExit = deferExit
+ s.lastDeferCount = len(s.openDefers)
+ zeroval := s.constInt8(types.Types[types.TUINT8], 0)
+ // Test for and run defers in reverse order
+ for i := len(s.openDefers) - 1; i >= 0; i-- {
+ r := s.openDefers[i]
+ bCond := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
+ // Generate code to check if the bit associated with the current
+ // defer is set.
+ bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
+ andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
+ eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(eqVal)
+ b.AddEdgeTo(bEnd)
+ b.AddEdgeTo(bCond)
+ bCond.AddEdgeTo(bEnd)
+ s.startBlock(bCond)
+
+ // Clear this bit in deferBits and force store back to stack, so
+ // we will not try to re-run this defer call if this defer call panics.
+ nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
+ maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
+ // Use this value for following tests, so we keep previous
+ // bits cleared.
+ s.vars[deferBitsVar] = maskedval
+
+ // Generate code to call the function call of the defer, using the
+ // closure that were stored in argtmps at the point of the defer
+ // statement.
+ fn := r.n.Fun
+ stksize := fn.Type().ArgWidth()
+ var callArgs []*ssa.Value
+ var call *ssa.Value
+ if r.closure != nil {
+ v := s.load(r.closure.Type.Elem(), r.closure)
+ s.maybeNilCheckClosure(v, callDefer)
+ codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
+ aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
+ } else {
+ aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil))
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ }
+ callArgs = append(callArgs, s.mem())
+ call.AddArgs(callArgs...)
+ call.AuxInt = stksize
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
+ // Make sure that the stack slots with pointers are kept live
+ // through the call (which is a pre-emption point). Also, we will
+ // use the first call of the last defer exit to compute liveness
+ // for the deferreturn, so we want all stack slots to be live.
+ if r.closureNode != nil {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+ }
+
+ s.endBlock()
+ s.startBlock(bEnd)
+ }
+}
+
+func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
+ return s.call(n, k, false, nil)
+}
+
+func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
+ return s.call(n, k, true, nil)
+}
+
+// Calls the function n using the specified call type.
+// Returns the address of the return value (or nil if none).
+func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool, deferExtra ir.Expr) *ssa.Value {
+ s.prevCall = nil
+ var calleeLSym *obj.LSym // target function (if static)
+ var closure *ssa.Value // ptr to closure to run (if dynamic)
+ var codeptr *ssa.Value // ptr to target code (if dynamic)
+ var dextra *ssa.Value // defer extra arg
+ var rcvr *ssa.Value // receiver to set
+ fn := n.Fun
+ var ACArgs []*types.Type // AuxCall args
+ var ACResults []*types.Type // AuxCall results
+ var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
+
+ callABI := s.f.ABIDefault
+
+ if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.Fun.Type().NumResults() != 0) {
+ s.Fatalf("go/defer call with arguments: %v", n)
+ }
+
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
+ fn := fn.(*ir.Name)
+ calleeLSym = callTargetLSym(fn)
+ if buildcfg.Experiment.RegabiArgs {
+ // This is a static call, so it may be
+ // a direct call to a non-ABIInternal
+ // function. fn.Func may be nil for
+ // some compiler-generated functions,
+ // but those are all ABIInternal.
+ if fn.Func != nil {
+ callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
+ }
+ } else {
+ // TODO(register args) remove after register abi is working
+ inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
+ inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
+ if inRegistersImported || inRegistersSamePackage {
+ callABI = s.f.ABI1
+ }
+ }
+ break
+ }
+ closure = s.expr(fn)
+ if k != callDefer && k != callDeferStack {
+ // Deferred nil function needs to panic when the function is invoked,
+ // not the point of defer statement.
+ s.maybeNilCheckClosure(closure, k)
+ }
+ case ir.OCALLINTER:
+ if fn.Op() != ir.ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
+ }
+ fn := fn.(*ir.SelectorExpr)
+ var iclosure *ssa.Value
+ iclosure, rcvr = s.getClosureAndRcvr(fn)
+ if k == callNormal {
+ codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
+ } else {
+ closure = iclosure
+ }
+ }
+ if deferExtra != nil {
+ dextra = s.expr(deferExtra)
+ }
+
+ params := callABI.ABIAnalyze(n.Fun.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
+ types.CalcSize(fn.Type())
+ stksize := params.ArgWidth() // includes receiver, args, and results
+
+ res := n.Fun.Type().Results()
+ if k == callNormal || k == callTail {
+ for _, p := range params.OutParams() {
+ ACResults = append(ACResults, p.Type)
+ }
+ }
+
+ var call *ssa.Value
+ if k == callDeferStack {
+ if stksize != 0 {
+ s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
+ }
+ // Make a defer struct on the stack.
+ t := deferstruct()
+ _, addr := s.temp(n.Pos(), t)
+ s.store(closure.Type,
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(deferStructFnField), addr),
+ closure)
+
+ // Call runtime.deferprocStack with pointer to _defer record.
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
+ aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
+ callArgs = append(callArgs, addr, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
+ } else {
+ // Store arguments to stack, including defer/go arguments and receiver for method calls.
+ // These are written in SP-offset order.
+ argStart := base.Ctxt.Arch.FixedFrameSize
+ // Defer/go args.
+ if k != callNormal && k != callTail {
+ // Write closure (arg to newproc/deferproc).
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
+ callArgs = append(callArgs, closure)
+ stksize += int64(types.PtrSize)
+ argStart += int64(types.PtrSize)
+ if dextra != nil {
+ // Extra token of type any for deferproc
+ ACArgs = append(ACArgs, types.Types[types.TINTER])
+ callArgs = append(callArgs, dextra)
+ stksize += 2 * int64(types.PtrSize)
+ argStart += 2 * int64(types.PtrSize)
+ }
+ }
+
+ // Set receiver (for interface calls).
+ if rcvr != nil {
+ callArgs = append(callArgs, rcvr)
+ }
+
+ // Write args.
+ t := n.Fun.Type()
+ args := n.Args
+
+ for _, p := range params.InParams() { // includes receiver for interface calls
+ ACArgs = append(ACArgs, p.Type)
+ }
+
+ // Split the entry block if there are open defers, because later calls to
+ // openDeferSave may cause a mismatch between the mem for an OpDereference
+ // and the call site which uses it. See #49282.
+ if s.curBlock.ID == s.f.Entry.ID && s.hasOpenDefers {
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ curb := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(curb)
+ s.startBlock(curb)
+ }
+
+ for i, n := range args {
+ callArgs = append(callArgs, s.putArg(n, t.Param(i).Type))
+ }
+
+ callArgs = append(callArgs, s.mem())
+
+ // call target
+ switch {
+ case k == callDefer:
+ sym := ir.Syms.Deferproc
+ if dextra != nil {
+ sym = ir.Syms.Deferprocat
+ }
+ aux := ssa.StaticAuxCall(sym, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults)) // TODO paramResultInfo for Deferproc(at)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ case k == callGo:
+ aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(ACArgs, ACResults))
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for Newproc
+ case closure != nil:
+ // rawLoad because loading the code pointer from a
+ // closure is always safe, but IsSanitizerSafeAddr
+ // can't always figure that out currently, and it's
+ // critical that we not clobber any arguments already
+ // stored onto the stack.
+ codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
+ aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(ACArgs, ACResults))
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
+ case codeptr != nil:
+ // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
+ aux := ssa.InterfaceAuxCall(params)
+ call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
+ case calleeLSym != nil:
+ aux := ssa.StaticAuxCall(calleeLSym, params)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ if k == callTail {
+ call.Op = ssa.OpTailLECall
+ stksize = 0 // Tail call does not use stack. We reuse caller's frame.
+ }
+ default:
+ s.Fatalf("bad call type %v %v", n.Op(), n)
+ }
+ call.AddArgs(callArgs...)
+ call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
+ }
+ s.prevCall = call
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ // Insert VarLive opcodes.
+ for _, v := range n.KeepAlive {
+ if !v.Addrtaken() {
+ s.Fatalf("KeepAlive variable %v must have Addrtaken set", v)
+ }
+ switch v.Class {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ default:
+ s.Fatalf("KeepAlive variable %v must be Auto or Arg", v)
+ }
+ s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
+ }
+
+ // Finish block for defers
+ if k == callDefer || k == callDeferStack {
+ b := s.endBlock()
+ b.Kind = ssa.BlockDefer
+ b.SetControl(call)
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bNext)
+ // Add recover edge to exit code.
+ r := s.f.NewBlock(ssa.BlockPlain)
+ s.startBlock(r)
+ s.exit()
+ b.AddEdgeTo(r)
+ b.Likely = ssa.BranchLikely
+ s.startBlock(bNext)
+ }
+
+ if len(res) == 0 || k != callNormal {
+ // call has no return value. Continue with the next statement.
+ return nil
+ }
+ fp := res[0]
+ if returnResultAddr {
+ return s.resultAddrOfCall(call, 0, fp.Type)
+ }
+ return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
+}
+
+// maybeNilCheckClosure checks if a nil check of a closure is needed in some
+// architecture-dependent situations and, if so, emits the nil check.
+func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
+ if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
+ // On AIX, the closure needs to be verified as fn can be nil, except if it's a call go. This needs to be handled by the runtime to have the "go of nil func value" error.
+ // TODO(neelance): On other architectures this should be eliminated by the optimization steps
+ s.nilCheck(closure)
+ }
+}
+
+// getClosureAndRcvr returns values for the appropriate closure and receiver of an
+// interface call
+func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
+ i := s.expr(fn.X)
+ itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
+ s.nilCheck(itab)
+ itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
+ closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
+ rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
+ return closure, rcvr
+}
+
+// etypesign returns the signed-ness of e, for integer/pointer etypes.
+// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
+func etypesign(e types.Kind) int8 {
+ switch e {
+ case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
+ return -1
+ case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
+ return +1
+ }
+ return 0
+}
+
+// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
+// The value that the returned Value represents is guaranteed to be non-nil.
+func (s *state) addr(n ir.Node) *ssa.Value {
+ if n.Op() != ir.ONAME {
+ s.pushLine(n.Pos())
+ defer s.popLine()
+ }
+
+ if s.canSSA(n) {
+ s.Fatalf("addr of canSSA expression: %+v", n)
+ }
+
+ t := types.NewPtr(n.Type())
+ linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
+ v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
+ // TODO: Make OpAddr use AuxInt as well as Aux.
+ if offset != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
+ }
+ return v
+ }
+ switch n.Op() {
+ case ir.OLINKSYMOFFSET:
+ no := n.(*ir.LinksymOffsetExpr)
+ return linksymOffset(no.Linksym, no.Offset_)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Heapaddr != nil {
+ return s.expr(n.Heapaddr)
+ }
+ switch n.Class {
+ case ir.PEXTERN:
+ // global variable
+ return linksymOffset(n.Linksym(), 0)
+ case ir.PPARAM:
+ // parameter slot
+ v := s.decladdrs[n]
+ if v != nil {
+ return v
+ }
+ s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+ return nil
+ case ir.PAUTO:
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
+
+ case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+ // ensure that we reuse symbols for out parameters so
+ // that cse works on their addresses
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
+ default:
+ s.Fatalf("variable address class %v not implemented", n.Class)
+ return nil
+ }
+ case ir.ORESULT:
+ // load return from callee
+ n := n.(*ir.ResultExpr)
+ return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsSlice() {
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ p := s.newValue1(ssa.OpSlicePtr, t, a)
+ return s.newValue2(ssa.OpPtrIndex, t, p, i)
+ } else { // array
+ a := s.addr(n.X)
+ i := s.expr(n.Index)
+ len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
+ }
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return s.exprPtr(n.X, n.Bounded(), n.Pos())
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ p := s.addr(n.X)
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.Type() == n.X.Type() {
+ return s.addr(n.X)
+ }
+ addr := s.addr(n.X)
+ return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ return s.callAddr(n, callNormal)
+ case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
+ var v *ssa.Value
+ if n.Op() == ir.ODOTTYPE {
+ v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
+ } else {
+ v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
+ }
+ if v.Op != ssa.OpLoad {
+ s.Fatalf("dottype of non-load")
+ }
+ if v.Args[1] != s.mem() {
+ s.Fatalf("memory no longer live from dottype load")
+ }
+ return v.Args[0]
+ default:
+ s.Fatalf("unhandled addr %v", n.Op())
+ return nil
+ }
+}
+
+// canSSA reports whether n is SSA-able.
+// n must be an ONAME (or an ODOT sequence with an ONAME base).
+func (s *state) canSSA(n ir.Node) bool {
+ if base.Flag.N != 0 {
+ return false
+ }
+ for {
+ nn := n
+ if nn.Op() == ir.ODOT {
+ nn := nn.(*ir.SelectorExpr)
+ n = nn.X
+ continue
+ }
+ if nn.Op() == ir.OINDEX {
+ nn := nn.(*ir.IndexExpr)
+ if nn.X.Type().IsArray() {
+ n = nn.X
+ continue
+ }
+ }
+ break
+ }
+ if n.Op() != ir.ONAME {
+ return false
+ }
+ return s.canSSAName(n.(*ir.Name)) && ssa.CanSSA(n.Type())
+}
+
+func (s *state) canSSAName(name *ir.Name) bool {
+ if name.Addrtaken() || !name.OnStack() {
+ return false
+ }
+ switch name.Class {
+ case ir.PPARAMOUT:
+ if s.hasdefer {
+ // TODO: handle this case? Named return values must be
+ // in memory so that the deferred function can see them.
+ // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
+ // Or maybe not, see issue 18860. Even unnamed return values
+ // must be written back so if a defer recovers, the caller can see them.
+ return false
+ }
+ if s.cgoUnsafeArgs {
+ // Cgo effectively takes the address of all result args,
+ // but the compiler can't see that.
+ return false
+ }
+ }
+ return true
+ // TODO: try to make more variables SSAable?
+}
+
+// exprPtr evaluates n to a pointer and nil-checks it.
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
+ p := s.expr(n)
+ if bounded || n.NonNil() {
+ if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
+ s.f.Warnl(lineno, "removed nil check")
+ }
+ return p
+ }
+ p = s.nilCheck(p)
+ return p
+}
+
+// nilCheck generates nil pointer checking code.
+// Used only for automatically inserted nil checks,
+// not for user code like 'x != nil'.
+// Returns a "definitely not nil" copy of x to ensure proper ordering
+// of the uses of the post-nilcheck pointer.
+func (s *state) nilCheck(ptr *ssa.Value) *ssa.Value {
+ if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
+ return ptr
+ }
+ return s.newValue2(ssa.OpNilCheck, ptr.Type, ptr, s.mem())
+}
+
+// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
+// Starts a new block on return.
+// On input, len must be converted to full int width and be nonnegative.
+// Returns idx converted to full int width.
+// If bounded is true then caller guarantees the index is not out of bounds
+// (but boundsCheck will still extend the index to full int width).
+func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+ idx = s.extendIndex(idx, len, kind, bounded)
+
+ if bounded || base.Flag.B != 0 {
+ // If bounded or bounds checking is flag-disabled, then no check necessary,
+ // just return the extended index.
+ //
+ // Here, bounded == true if the compiler generated the index itself,
+ // such as in the expansion of a slice initializer. These indexes are
+ // compiler-generated, not Go program variables, so they cannot be
+ // attacker-controlled, so we can omit Spectre masking as well.
+ //
+ // Note that we do not want to omit Spectre masking in code like:
+ //
+ // if 0 <= i && i < len(x) {
+ // use(x[i])
+ // }
+ //
+ // Lucky for us, bounded==false for that code.
+ // In that case (handled below), we emit a bound check (and Spectre mask)
+ // and then the prove pass will remove the bounds check.
+ // In theory the prove pass could potentially remove certain
+ // Spectre masks, but it's very delicate and probably better
+ // to be conservative and leave them all in.
+ return idx
+ }
+
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ bPanic := s.f.NewBlock(ssa.BlockExit)
+
+ if !idx.Type.IsSigned() {
+ switch kind {
+ case ssa.BoundsIndex:
+ kind = ssa.BoundsIndexU
+ case ssa.BoundsSliceAlen:
+ kind = ssa.BoundsSliceAlenU
+ case ssa.BoundsSliceAcap:
+ kind = ssa.BoundsSliceAcapU
+ case ssa.BoundsSliceB:
+ kind = ssa.BoundsSliceBU
+ case ssa.BoundsSlice3Alen:
+ kind = ssa.BoundsSlice3AlenU
+ case ssa.BoundsSlice3Acap:
+ kind = ssa.BoundsSlice3AcapU
+ case ssa.BoundsSlice3B:
+ kind = ssa.BoundsSlice3BU
+ case ssa.BoundsSlice3C:
+ kind = ssa.BoundsSlice3CU
+ }
+ }
+
+ var cmp *ssa.Value
+ if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
+ cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
+ } else {
+ cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
+ }
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+
+ s.startBlock(bPanic)
+ if Arch.LinkArch.Family == sys.Wasm {
+ // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
+ // Should be similar to gcWriteBarrier, but I can't make it work.
+ s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
+ } else {
+ mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
+ s.endBlock().SetControl(mem)
+ }
+ s.startBlock(bNext)
+
+ // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
+ if base.Flag.Cfg.SpectreIndex {
+ op := ssa.OpSpectreIndex
+ if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
+ op = ssa.OpSpectreSliceIndex
+ }
+ idx = s.newValue2(op, types.Types[types.TINT], idx, len)
+ }
+
+ return idx
+}
+
+// If cmp (a bool) is false, panic using the given function.
+func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ line := s.peekPos()
+ pos := base.Ctxt.PosTable.Pos(line)
+ fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
+ bPanic := s.panics[fl]
+ if bPanic == nil {
+ bPanic = s.f.NewBlock(ssa.BlockPlain)
+ s.panics[fl] = bPanic
+ s.startBlock(bPanic)
+ // The panic call takes/returns memory to ensure that the right
+ // memory state is observed if the panic happens.
+ s.rtcall(fn, false, nil)
+ }
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+ s.startBlock(bNext)
+}
+
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
+ needcheck := true
+ switch b.Op {
+ case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
+ if b.AuxInt != 0 {
+ needcheck = false
+ }
+ }
+ if needcheck {
+ // do a size-appropriate check for zero
+ cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
+ s.check(cmp, ir.Syms.Panicdivide)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+}
+
+// rtcall issues a call to the given runtime function fn with the listed args.
+// Returns a slice of results of the given result types.
+// The call is added to the end of the current block.
+// If returns is false, the block is marked as an exit block.
+func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
+ s.prevCall = nil
+ // Write args to the stack
+ off := base.Ctxt.Arch.FixedFrameSize
+ var callArgs []*ssa.Value
+ var callArgTypes []*types.Type
+
+ for _, arg := range args {
+ t := arg.Type
+ off = types.RoundUp(off, t.Alignment())
+ size := t.Size()
+ callArgs = append(callArgs, arg)
+ callArgTypes = append(callArgTypes, t)
+ off += size
+ }
+ off = types.RoundUp(off, int64(types.RegSize))
+
+ // Issue call
+ var call *ssa.Value
+ aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(callArgTypes, results))
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
+
+ if !returns {
+ // Finish block
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(call)
+ call.AuxInt = off - base.Ctxt.Arch.FixedFrameSize
+ if len(results) > 0 {
+ s.Fatalf("panic call can't have results")
+ }
+ return nil
+ }
+
+ // Load results
+ res := make([]*ssa.Value, len(results))
+ for i, t := range results {
+ off = types.RoundUp(off, t.Alignment())
+ res[i] = s.resultOfCall(call, int64(i), t)
+ off += t.Size()
+ }
+ off = types.RoundUp(off, int64(types.PtrSize))
+
+ // Remember how much callee stack space we needed.
+ call.AuxInt = off
+
+ return res
+}
+
+// do *left = right for type t.
+func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
+ s.instrument(t, left, instrumentWrite)
+
+ if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
+ // Known to not have write barrier. Store the whole type.
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+ return
+ }
+
+ // store scalar fields first, so write barrier stores for
+ // pointer fields can be grouped together, and scalar values
+ // don't need to be live across the write barrier call.
+ // TODO: if the writebarrier pass knows how to reorder stores,
+ // we can do a single store here as long as skip==0.
+ s.storeTypeScalars(t, left, right, skip)
+ if skip&skipPtr == 0 && t.HasPointers() {
+ s.storeTypePtrs(t, left, right)
+ }
+}
+
+// do *left = right for all scalar (non-pointer) parts of t.
+func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
+ switch {
+ case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
+ s.store(t, left, right)
+ case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ s.store(t, left, right) // see issue 42032
+ }
+ // otherwise, no scalar fields.
+ case t.IsString():
+ if skip&skipLen != 0 {
+ return
+ }
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+ s.store(types.Types[types.TINT], lenAddr, len)
+ case t.IsSlice():
+ if skip&skipLen == 0 {
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+ s.store(types.Types[types.TINT], lenAddr, len)
+ }
+ if skip&skipCap == 0 {
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
+ capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
+ s.store(types.Types[types.TINT], capAddr, cap)
+ }
+ case t.IsInterface():
+ // itab field doesn't need a write barrier (even though it is a pointer).
+ itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
+ s.store(types.Types[types.TUINTPTR], left, itab)
+ case t.IsStruct():
+ n := t.NumFields()
+ for i := 0; i < n; i++ {
+ ft := t.FieldType(i)
+ addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+ val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+ s.storeTypeScalars(ft, addr, val, 0)
+ }
+ case t.IsArray() && t.NumElem() == 0:
+ // nothing
+ case t.IsArray() && t.NumElem() == 1:
+ s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
+ default:
+ s.Fatalf("bad write barrier type %v", t)
+ }
+}
+
+// do *left = right for all pointer parts of t.
+func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
+ switch {
+ case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ break // see issue 42032
+ }
+ s.store(t, left, right)
+ case t.IsString():
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
+ s.store(s.f.Config.Types.BytePtr, left, ptr)
+ case t.IsSlice():
+ elType := types.NewPtr(t.Elem())
+ ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
+ s.store(elType, left, ptr)
+ case t.IsInterface():
+ // itab field is treated as a scalar.
+ idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
+ idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
+ s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
+ case t.IsStruct():
+ n := t.NumFields()
+ for i := 0; i < n; i++ {
+ ft := t.FieldType(i)
+ if !ft.HasPointers() {
+ continue
+ }
+ addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+ val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+ s.storeTypePtrs(ft, addr, val)
+ }
+ case t.IsArray() && t.NumElem() == 0:
+ // nothing
+ case t.IsArray() && t.NumElem() == 1:
+ s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
+ default:
+ s.Fatalf("bad write barrier type %v", t)
+ }
+}
+
+// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
+func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
+ var a *ssa.Value
+ if !ssa.CanSSA(t) {
+ a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
+ } else {
+ a = s.expr(n)
+ }
+ return a
+}
+
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
+ pt := types.NewPtr(t)
+ var addr *ssa.Value
+ if base == s.sp {
+ // Use special routine that avoids allocation on duplicate offsets.
+ addr = s.constOffPtrSP(pt, off)
+ } else {
+ addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
+ }
+
+ if !ssa.CanSSA(t) {
+ a := s.addr(n)
+ s.move(t, addr, a)
+ return
+ }
+
+ a := s.expr(n)
+ s.storeType(t, addr, a, 0, false)
+}
+
+// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
+// i,j,k may be nil, in which case they are set to their default value.
+// v may be a slice, string or pointer to an array.
+func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
+ t := v.Type
+ var ptr, len, cap *ssa.Value
+ switch {
+ case t.IsSlice():
+ ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
+ case t.IsString():
+ ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
+ cap = len
+ case t.IsPtr():
+ if !t.Elem().IsArray() {
+ s.Fatalf("bad ptr to array in slice %v\n", t)
+ }
+ nv := s.nilCheck(v)
+ ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), nv)
+ len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
+ cap = len
+ default:
+ s.Fatalf("bad type in slice %v\n", t)
+ }
+
+ // Set default values
+ if i == nil {
+ i = s.constInt(types.Types[types.TINT], 0)
+ }
+ if j == nil {
+ j = len
+ }
+ three := true
+ if k == nil {
+ three = false
+ k = cap
+ }
+
+ // Panic if slice indices are not in bounds.
+ // Make sure we check these in reverse order so that we're always
+ // comparing against a value known to be nonnegative. See issue 28797.
+ if three {
+ if k != cap {
+ kind := ssa.BoundsSlice3Alen
+ if t.IsSlice() {
+ kind = ssa.BoundsSlice3Acap
+ }
+ k = s.boundsCheck(k, cap, kind, bounded)
+ }
+ if j != k {
+ j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
+ }
+ i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
+ } else {
+ if j != k {
+ kind := ssa.BoundsSliceAlen
+ if t.IsSlice() {
+ kind = ssa.BoundsSliceAcap
+ }
+ j = s.boundsCheck(j, k, kind, bounded)
+ }
+ i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
+ }
+
+ // Word-sized integer operations.
+ subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
+ mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
+ andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
+
+ // Calculate the length (rlen) and capacity (rcap) of the new slice.
+ // For strings the capacity of the result is unimportant. However,
+ // we use rcap to test if we've generated a zero-length slice.
+ // Use length of strings for that.
+ rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
+ rcap := rlen
+ if j != k && !t.IsString() {
+ rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
+ }
+
+ if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
+ // No pointer arithmetic necessary.
+ return ptr, rlen, rcap
+ }
+
+ // Calculate the base pointer (rptr) for the new slice.
+ //
+ // Generate the following code assuming that indexes are in bounds.
+ // The masking is to make sure that we don't generate a slice
+ // that points to the next object in memory. We cannot just set
+ // the pointer to nil because then we would create a nil slice or
+ // string.
+ //
+ // rcap = k - i
+ // rlen = j - i
+ // rptr = ptr + (mask(rcap) & (i * stride))
+ //
+ // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
+ // of the element type.
+ stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size())
+
+ // The delta is the number of bytes to offset ptr by.
+ delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
+
+ // If we're slicing to the point where the capacity is zero,
+ // zero out the delta.
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
+
+ // Compute rptr = ptr + delta.
+ rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
+
+ return rptr, rlen, rcap
+}
+
+type u642fcvtTab struct {
+ leq, cvt2F, and, rsh, or, add ssa.Op
+ one func(*state, *types.Type, int64) *ssa.Value
+}
+
+var u64_f64 = u642fcvtTab{
+ leq: ssa.OpLeq64,
+ cvt2F: ssa.OpCvt64to64F,
+ and: ssa.OpAnd64,
+ rsh: ssa.OpRsh64Ux64,
+ or: ssa.OpOr64,
+ add: ssa.OpAdd64F,
+ one: (*state).constInt64,
+}
+
+var u64_f32 = u642fcvtTab{
+ leq: ssa.OpLeq64,
+ cvt2F: ssa.OpCvt64to32F,
+ and: ssa.OpAnd64,
+ rsh: ssa.OpRsh64Ux64,
+ or: ssa.OpOr64,
+ add: ssa.OpAdd32F,
+ one: (*state).constInt64,
+}
+
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // if x >= 0 {
+ // result = (floatY) x
+ // } else {
+ // y = uintX(x) ; y = x & 1
+ // z = uintX(x) ; z = z >> 1
+ // z = z | y
+ // result = floatY(z)
+ // result = result + result
+ // }
+ //
+ // Code borrowed from old code generator.
+ // What's going on: large 64-bit "unsigned" looks like
+ // negative number to hardware's integer-to-float
+ // conversion. However, because the mantissa is only
+ // 63 bits, we don't need the LSB, so instead we do an
+ // unsigned right shift (divide by two), convert, and
+ // double. However, before we do that, we need to be
+ // sure that we do not lose a "1" if that made the
+ // difference in the resulting rounding. Therefore, we
+ // preserve it, and OR (not ADD) it back in. The case
+ // that matters is when the eleven discarded bits are
+ // equal to 10000000001; that rounds up, and the 1 cannot
+ // be lost else it would round down if the LSB of the
+ // candidate mantissa is 0.
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvt2F, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ one := cvttab.one(s, ft, 1)
+ y := s.newValue2(cvttab.and, ft, x, one)
+ z := s.newValue2(cvttab.rsh, ft, x, one)
+ z = s.newValue2(cvttab.or, ft, z, y)
+ a := s.newValue1(cvttab.cvt2F, tt, z)
+ a1 := s.newValue2(cvttab.add, tt, a, a)
+ s.vars[n] = a1
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type())
+}
+
+type u322fcvtTab struct {
+ cvtI2F, cvtF2F ssa.Op
+}
+
+var u32_f64 = u322fcvtTab{
+ cvtI2F: ssa.OpCvt32to64F,
+ cvtF2F: ssa.OpCopy,
+}
+
+var u32_f32 = u322fcvtTab{
+ cvtI2F: ssa.OpCvt32to32F,
+ cvtF2F: ssa.OpCvt64Fto32F,
+}
+
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // if x >= 0 {
+ // result = floatY(x)
+ // } else {
+ // result = floatY(float64(x) + (1<<32))
+ // }
+ cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvtI2F, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
+ a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
+
+ s.vars[n] = a3
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type())
+}
+
+// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
+func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
+ if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
+ s.Fatalf("node must be a map or a channel")
+ }
+ // if n == nil {
+ // return 0
+ // } else {
+ // // len
+ // return *((*int)n)
+ // // cap
+ // return *(((*int)n)+1)
+ // }
+ lenType := n.Type()
+ nilValue := s.constNil(types.Types[types.TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchUnlikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ // length/capacity of a nil map/chan is zero
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ s.vars[n] = s.zeroVal(lenType)
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ switch n.Op() {
+ case ir.OLEN:
+ // length is stored in the first word for map/chan
+ s.vars[n] = s.load(lenType, x)
+ case ir.OCAP:
+ // capacity is stored in the second word for chan
+ sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
+ s.vars[n] = s.load(lenType, sw)
+ default:
+ s.Fatalf("op must be OLEN or OCAP")
+ }
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, lenType)
+}
+
+type f2uCvtTab struct {
+ ltf, cvt2U, subf, or ssa.Op
+ floatValue func(*state, *types.Type, float64) *ssa.Value
+ intValue func(*state, *types.Type, int64) *ssa.Value
+ cutoff uint64
+}
+
+var f32_u64 = f2uCvtTab{
+ ltf: ssa.OpLess32F,
+ cvt2U: ssa.OpCvt32Fto64,
+ subf: ssa.OpSub32F,
+ or: ssa.OpOr64,
+ floatValue: (*state).constFloat32,
+ intValue: (*state).constInt64,
+ cutoff: 1 << 63,
+}
+
+var f64_u64 = f2uCvtTab{
+ ltf: ssa.OpLess64F,
+ cvt2U: ssa.OpCvt64Fto64,
+ subf: ssa.OpSub64F,
+ or: ssa.OpOr64,
+ floatValue: (*state).constFloat64,
+ intValue: (*state).constInt64,
+ cutoff: 1 << 63,
+}
+
+var f32_u32 = f2uCvtTab{
+ ltf: ssa.OpLess32F,
+ cvt2U: ssa.OpCvt32Fto32,
+ subf: ssa.OpSub32F,
+ or: ssa.OpOr32,
+ floatValue: (*state).constFloat32,
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ cutoff: 1 << 31,
+}
+
+var f64_u32 = f2uCvtTab{
+ ltf: ssa.OpLess64F,
+ cvt2U: ssa.OpCvt64Fto32,
+ subf: ssa.OpSub64F,
+ or: ssa.OpOr32,
+ floatValue: (*state).constFloat64,
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ cutoff: 1 << 31,
+}
+
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f32_u64, n, x, ft, tt)
+}
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f64_u64, n, x, ft, tt)
+}
+
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f32_u32, n, x, ft, tt)
+}
+
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f64_u32, n, x, ft, tt)
+}
+
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // cutoff:=1<<(intY_Size-1)
+ // if x < floatX(cutoff) {
+ // result = uintY(x)
+ // } else {
+ // y = x - floatX(cutoff)
+ // z = uintY(y)
+ // result = z | -(cutoff)
+ // }
+ cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
+ cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvt2U, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ y := s.newValue2(cvttab.subf, ft, x, cutoff)
+ y = s.newValue1(cvttab.cvt2U, tt, y)
+ z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
+ a1 := s.newValue2(cvttab.or, tt, y, z)
+ s.vars[n] = a1
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type())
+}
+
+// dottype generates SSA for a type assertion node.
+// commaok indicates whether to panic or return a bool.
+// If commaok is false, resok will be nil.
+func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.X) // input interface
+ target := s.reflectType(n.Type()) // target type
+ var targetItab *ssa.Value
+ if n.ITab != nil {
+ targetItab = s.expr(n.ITab)
+ }
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, nil, target, targetItab, commaok, n.Descriptor)
+}
+
+func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.X)
+ var source, target, targetItab *ssa.Value
+ if n.SrcRType != nil {
+ source = s.expr(n.SrcRType)
+ }
+ if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
+ byteptr := s.f.Config.Types.BytePtr
+ targetItab = s.expr(n.ITab)
+ // TODO(mdempsky): Investigate whether compiling n.RType could be
+ // better than loading itab.typ.
+ target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), targetItab)) // itab.typ
+ } else {
+ target = s.expr(n.RType)
+ }
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, source, target, targetItab, commaok, nil)
+}
+
+// dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
+// and src is the type we're asserting from.
+// source is the *runtime._type of src
+// target is the *runtime._type of dst.
+// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
+// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
+// descriptor is a compiler-allocated internal/abi.TypeAssert whose address is passed to runtime.typeAssert when
+// the target type is a compile-time-known non-empty interface. It may be nil.
+func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, target, targetItab *ssa.Value, commaok bool, descriptor *obj.LSym) (res, resok *ssa.Value) {
+ typs := s.f.Config.Types
+ byteptr := typs.BytePtr
+ if dst.IsInterface() {
+ if dst.IsEmptyInterface() {
+ // Converting to an empty interface.
+ // Input could be an empty or nonempty interface.
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion inlined")
+ }
+
+ // Get itab/type field from input.
+ itab := s.newValue1(ssa.OpITab, byteptr, iface)
+ // Conversion succeeds iff that field is not nil.
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+
+ if src.IsEmptyInterface() && commaok {
+ // Converting empty interface to empty interface with ,ok is just a nil check.
+ return iface, cond
+ }
+
+ // Branch on nilness.
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+ bOk := s.f.NewBlock(ssa.BlockPlain)
+ bFail := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bOk)
+ b.AddEdgeTo(bFail)
+
+ if !commaok {
+ // On failure, panic by calling panicnildottype.
+ s.startBlock(bFail)
+ s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
+
+ // On success, return (perhaps modified) input interface.
+ s.startBlock(bOk)
+ if src.IsEmptyInterface() {
+ res = iface // Use input interface unchanged.
+ return
+ }
+ // Load type out of itab, build interface with existing idata.
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
+ typ := s.load(byteptr, off)
+ idata := s.newValue1(ssa.OpIData, byteptr, iface)
+ res = s.newValue2(ssa.OpIMake, dst, typ, idata)
+ return
+ }
+
+ s.startBlock(bOk)
+ // nonempty -> empty
+ // Need to load type from itab
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
+ s.vars[typVar] = s.load(byteptr, off)
+ s.endBlock()
+
+ // itab is nil, might as well use that as the nil result.
+ s.startBlock(bFail)
+ s.vars[typVar] = itab
+ s.endBlock()
+
+ // Merge point.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ bOk.AddEdgeTo(bEnd)
+ bFail.AddEdgeTo(bEnd)
+ s.startBlock(bEnd)
+ idata := s.newValue1(ssa.OpIData, byteptr, iface)
+ res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
+ resok = cond
+ delete(s.vars, typVar) // no practical effect, just to indicate typVar is no longer live.
+ return
+ }
+ // converting to a nonempty interface needs a runtime call.
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion not inlined")
+ }
+
+ itab := s.newValue1(ssa.OpITab, byteptr, iface)
+ data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
+
+ // First, check for nil.
+ bNil := s.f.NewBlock(ssa.BlockPlain)
+ bNonNil := s.f.NewBlock(ssa.BlockPlain)
+ bMerge := s.f.NewBlock(ssa.BlockPlain)
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNonNil)
+ b.AddEdgeTo(bNil)
+
+ s.startBlock(bNil)
+ if commaok {
+ s.vars[typVar] = itab // which will be nil
+ b := s.endBlock()
+ b.AddEdgeTo(bMerge)
+ } else {
+ // Panic if input is nil.
+ s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
+ }
+
+ // Get typ, possibly by loading out of itab.
+ s.startBlock(bNonNil)
+ typ := itab
+ if !src.IsEmptyInterface() {
+ typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab))
+ }
+
+ // Check the cache first.
+ var d *ssa.Value
+ if descriptor != nil {
+ d = s.newValue1A(ssa.OpAddr, byteptr, descriptor, s.sb)
+ if base.Flag.N == 0 && rtabi.UseInterfaceSwitchCache(Arch.LinkArch.Name) {
+ // Note: we can only use the cache if we have the right atomic load instruction.
+ // Double-check that here.
+ if _, ok := intrinsics[intrinsicKey{Arch.LinkArch.Arch, "runtime/internal/atomic", "Loadp"}]; !ok {
+ s.Fatalf("atomic load not available")
+ }
+ // Pick right size ops.
+ var mul, and, add, zext ssa.Op
+ if s.config.PtrSize == 4 {
+ mul = ssa.OpMul32
+ and = ssa.OpAnd32
+ add = ssa.OpAdd32
+ zext = ssa.OpCopy
+ } else {
+ mul = ssa.OpMul64
+ and = ssa.OpAnd64
+ add = ssa.OpAdd64
+ zext = ssa.OpZeroExt32to64
+ }
+
+ loopHead := s.f.NewBlock(ssa.BlockPlain)
+ loopBody := s.f.NewBlock(ssa.BlockPlain)
+ cacheHit := s.f.NewBlock(ssa.BlockPlain)
+ cacheMiss := s.f.NewBlock(ssa.BlockPlain)
+
+ // Load cache pointer out of descriptor, with an atomic load so
+ // we ensure that we see a fully written cache.
+ atomicLoad := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(typs.BytePtr, types.TypeMem), d, s.mem())
+ cache := s.newValue1(ssa.OpSelect0, typs.BytePtr, atomicLoad)
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, atomicLoad)
+
+ // Load hash from type or itab.
+ var hash *ssa.Value
+ if src.IsEmptyInterface() {
+ hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, typ), s.mem())
+ } else {
+ hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, itab), s.mem())
+ }
+ hash = s.newValue1(zext, typs.Uintptr, hash)
+ s.vars[hashVar] = hash
+ // Load mask from cache.
+ mask := s.newValue2(ssa.OpLoad, typs.Uintptr, cache, s.mem())
+ // Jump to loop head.
+ b := s.endBlock()
+ b.AddEdgeTo(loopHead)
+
+ // At loop head, get pointer to the cache entry.
+ // e := &cache.Entries[hash&mask]
+ s.startBlock(loopHead)
+ idx := s.newValue2(and, typs.Uintptr, s.variable(hashVar, typs.Uintptr), mask)
+ idx = s.newValue2(mul, typs.Uintptr, idx, s.uintptrConstant(uint64(2*s.config.PtrSize)))
+ idx = s.newValue2(add, typs.Uintptr, idx, s.uintptrConstant(uint64(s.config.PtrSize)))
+ e := s.newValue2(ssa.OpAddPtr, typs.UintptrPtr, cache, idx)
+ // hash++
+ s.vars[hashVar] = s.newValue2(add, typs.Uintptr, s.variable(hashVar, typs.Uintptr), s.uintptrConstant(1))
+
+ // Look for a cache hit.
+ // if e.Typ == typ { goto hit }
+ eTyp := s.newValue2(ssa.OpLoad, typs.Uintptr, e, s.mem())
+ cmp1 := s.newValue2(ssa.OpEqPtr, typs.Bool, typ, eTyp)
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp1)
+ b.AddEdgeTo(cacheHit)
+ b.AddEdgeTo(loopBody)
+
+ // Look for an empty entry, the tombstone for this hash table.
+ // if e.Typ == nil { goto miss }
+ s.startBlock(loopBody)
+ cmp2 := s.newValue2(ssa.OpEqPtr, typs.Bool, eTyp, s.constNil(typs.BytePtr))
+ b = s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp2)
+ b.AddEdgeTo(cacheMiss)
+ b.AddEdgeTo(loopHead)
+
+ // On a hit, load the data fields of the cache entry.
+ // Itab = e.Itab
+ s.startBlock(cacheHit)
+ eItab := s.newValue2(ssa.OpLoad, typs.BytePtr, s.newValue1I(ssa.OpOffPtr, typs.BytePtrPtr, s.config.PtrSize, e), s.mem())
+ s.vars[typVar] = eItab
+ b = s.endBlock()
+ b.AddEdgeTo(bMerge)
+
+ // On a miss, call into the runtime to get the answer.
+ s.startBlock(cacheMiss)
+ }
+ }
+
+ // Call into runtime to get itab for result.
+ if descriptor != nil {
+ itab = s.rtcall(ir.Syms.TypeAssert, true, []*types.Type{byteptr}, d, typ)[0]
+ } else {
+ var fn *obj.LSym
+ if commaok {
+ fn = ir.Syms.AssertE2I2
+ } else {
+ fn = ir.Syms.AssertE2I
+ }
+ itab = s.rtcall(fn, true, []*types.Type{byteptr}, target, typ)[0]
+ }
+ s.vars[typVar] = itab
+ b = s.endBlock()
+ b.AddEdgeTo(bMerge)
+
+ // Build resulting interface.
+ s.startBlock(bMerge)
+ itab = s.variable(typVar, byteptr)
+ var ok *ssa.Value
+ if commaok {
+ ok = s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+ }
+ return s.newValue2(ssa.OpIMake, dst, itab, data), ok
+ }
+
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion inlined")
+ }
+
+ // Converting to a concrete type.
+ direct := types.IsDirectIface(dst)
+ itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion inlined")
+ }
+ var wantedFirstWord *ssa.Value
+ if src.IsEmptyInterface() {
+ // Looking for pointer to target type.
+ wantedFirstWord = target
+ } else {
+ // Looking for pointer to itab for target type and source interface.
+ wantedFirstWord = targetItab
+ }
+
+ var tmp ir.Node // temporary for use with large types
+ var addr *ssa.Value // address of tmp
+ if commaok && !ssa.CanSSA(dst) {
+ // unSSAable type, use temporary.
+ // TODO: get rid of some of these temporaries.
+ tmp, addr = s.temp(pos, dst)
+ }
+
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+
+ bOk := s.f.NewBlock(ssa.BlockPlain)
+ bFail := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bOk)
+ b.AddEdgeTo(bFail)
+
+ if !commaok {
+ // on failure, panic by calling panicdottype
+ s.startBlock(bFail)
+ taddr := source
+ if taddr == nil {
+ taddr = s.reflectType(src)
+ }
+ if src.IsEmptyInterface() {
+ s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
+ } else {
+ s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
+ }
+
+ // on success, return data from interface
+ s.startBlock(bOk)
+ if direct {
+ return s.newValue1(ssa.OpIData, dst, iface), nil
+ }
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ return s.load(dst, p), nil
+ }
+
+ // commaok is the more complicated case because we have
+ // a control flow merge point.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ // Note that we need a new valVar each time (unlike okVar where we can
+ // reuse the variable) because it might have a different type every time.
+ valVar := ssaMarker("val")
+
+ // type assertion succeeded
+ s.startBlock(bOk)
+ if tmp == nil {
+ if direct {
+ s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
+ } else {
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ s.vars[valVar] = s.load(dst, p)
+ }
+ } else {
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ s.move(dst, addr, p)
+ }
+ s.vars[okVar] = s.constBool(true)
+ s.endBlock()
+ bOk.AddEdgeTo(bEnd)
+
+ // type assertion failed
+ s.startBlock(bFail)
+ if tmp == nil {
+ s.vars[valVar] = s.zeroVal(dst)
+ } else {
+ s.zero(dst, addr)
+ }
+ s.vars[okVar] = s.constBool(false)
+ s.endBlock()
+ bFail.AddEdgeTo(bEnd)
+
+ // merge point
+ s.startBlock(bEnd)
+ if tmp == nil {
+ res = s.variable(valVar, dst)
+ delete(s.vars, valVar) // no practical effect, just to indicate typVar is no longer live.
+ } else {
+ res = s.load(dst, addr)
+ }
+ resok = s.variable(okVar, types.Types[types.TBOOL])
+ delete(s.vars, okVar) // ditto
+ return res, resok
+}
+
+// temp allocates a temp of type t at position pos
+func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
+ tmp := typecheck.TempAt(pos, s.curfn, t)
+ if t.HasPointers() {
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
+ }
+ addr := s.addr(tmp)
+ return tmp, addr
+}
+
+// variable returns the value of a variable at the current location.
+func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
+ v := s.vars[n]
+ if v != nil {
+ return v
+ }
+ v = s.fwdVars[n]
+ if v != nil {
+ return v
+ }
+
+ if s.curBlock == s.f.Entry {
+ // No variable should be live at entry.
+ s.f.Fatalf("value %v (%v) incorrectly live at entry", n, v)
+ }
+ // Make a FwdRef, which records a value that's live on block input.
+ // We'll find the matching definition as part of insertPhis.
+ v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
+ s.fwdVars[n] = v
+ if n.Op() == ir.ONAME {
+ s.addNamedValue(n.(*ir.Name), v)
+ }
+ return v
+}
+
+func (s *state) mem() *ssa.Value {
+ return s.variable(memVar, types.TypeMem)
+}
+
+func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
+ if n.Class == ir.Pxxx {
+ // Don't track our marker nodes (memVar etc.).
+ return
+ }
+ if ir.IsAutoTmp(n) {
+ // Don't track temporary variables.
+ return
+ }
+ if n.Class == ir.PPARAMOUT {
+ // Don't track named output values. This prevents return values
+ // from being assigned too early. See #14591 and #14762. TODO: allow this.
+ return
+ }
+ loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
+ values, ok := s.f.NamedValues[loc]
+ if !ok {
+ s.f.Names = append(s.f.Names, &loc)
+ s.f.CanonicalLocalSlots[loc] = &loc
+ }
+ s.f.NamedValues[loc] = append(values, v)
+}
+
+// Branch is an unresolved branch.
+type Branch struct {
+ P *obj.Prog // branch instruction
+ B *ssa.Block // target
+}
+
+// State contains state needed during Prog generation.
+type State struct {
+ ABI obj.ABI
+
+ pp *objw.Progs
+
+ // Branches remembers all the branch instructions we've seen
+ // and where they would like to go.
+ Branches []Branch
+
+ // JumpTables remembers all the jump tables we've seen.
+ JumpTables []*ssa.Block
+
+ // bstart remembers where each block starts (indexed by block ID)
+ bstart []*obj.Prog
+
+ maxarg int64 // largest frame size for arguments to calls made by the function
+
+ // Map from GC safe points to liveness index, generated by
+ // liveness analysis.
+ livenessMap liveness.Map
+
+ // partLiveArgs includes arguments that may be partially live, for which we
+ // need to generate instructions that spill the argument registers.
+ partLiveArgs map[*ir.Name]bool
+
+ // lineRunStart records the beginning of the current run of instructions
+ // within a single block sharing the same line number
+ // Used to move statement marks to the beginning of such runs.
+ lineRunStart *obj.Prog
+
+ // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
+ OnWasmStackSkipped int
+}
+
+func (s *State) FuncInfo() *obj.FuncInfo {
+ return s.pp.CurFunc.LSym.Func()
+}
+
+// Prog appends a new Prog.
+func (s *State) Prog(as obj.As) *obj.Prog {
+ p := s.pp.Prog(as)
+ if objw.LosesStmtMark(as) {
+ return p
+ }
+ // Float a statement start to the beginning of any same-line run.
+ // lineRunStart is reset at block boundaries, which appears to work well.
+ if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
+ s.lineRunStart = p
+ } else if p.Pos.IsStmt() == src.PosIsStmt {
+ s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
+ p.Pos = p.Pos.WithNotStmt()
+ }
+ return p
+}
+
+// Pc returns the current Prog.
+func (s *State) Pc() *obj.Prog {
+ return s.pp.Next
+}
+
+// SetPos sets the current source position.
+func (s *State) SetPos(pos src.XPos) {
+ s.pp.Pos = pos
+}
+
+// Br emits a single branch instruction and returns the instruction.
+// Not all architectures need the returned instruction, but otherwise
+// the boilerplate is common to all.
+func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, Branch{P: p, B: target})
+ return p
+}
+
+// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
+// that reduce "jumpy" line number churn when debugging.
+// Spill/fill/copy instructions from the register allocator,
+// phi functions, and instructions with a no-pos position
+// are examples of instructions that can cause churn.
+func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
+ // These are not statements
+ s.SetPos(v.Pos.WithNotStmt())
+ default:
+ p := v.Pos
+ if p != src.NoXPos {
+ // If the position is defined, update the position.
+ // Also convert default IsStmt to NotStmt; only
+ // explicit statement boundaries should appear
+ // in the generated code.
+ if p.IsStmt() != src.PosIsStmt {
+ if s.pp.Pos.IsStmt() == src.PosIsStmt && s.pp.Pos.SameFileAndLine(p) {
+ // If s.pp.Pos already has a statement mark, then it was set here (below) for
+ // the previous value. If an actual instruction had been emitted for that
+ // value, then the statement mark would have been reset. Since the statement
+ // mark of s.pp.Pos was not reset, this position (file/line) still needs a
+ // statement mark on an instruction. If file and line for this value are
+ // the same as the previous value, then the first instruction for this
+ // value will work to take the statement mark. Return early to avoid
+ // resetting the statement mark.
+ //
+ // The reset of s.pp.Pos occurs in (*Progs).Prog() -- if it emits
+ // an instruction, and the instruction's statement mark was set,
+ // and it is not one of the LosesStmtMark instructions,
+ // then Prog() resets the statement mark on the (*Progs).Pos.
+ return
+ }
+ p = p.WithNotStmt()
+ // Calls use the pos attached to v, but copy the statement mark from State
+ }
+ s.SetPos(p)
+ } else {
+ s.SetPos(s.pp.Pos.WithNotStmt())
+ }
+ }
+}
+
+// emit argument info (locations on stack) for traceback.
+func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
+ ft := e.curfn.Type()
+ if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
+ return
+ }
+
+ x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
+ x.Set(obj.AttrContentAddressable, true)
+ e.curfn.LSym.Func().ArgInfo = x
+
+ // Emit a funcdata pointing at the arg info data.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(rtabi.FUNCDATA_ArgInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+}
+
+// emit argument info (locations on stack) of f for traceback.
+func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
+ x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
+ // NOTE: do not set ContentAddressable here. This may be referenced from
+ // assembly code by name (in this case f is a declaration).
+ // Instead, set it in emitArgInfo above.
+
+ PtrSize := int64(types.PtrSize)
+ uintptrTyp := types.Types[types.TUINTPTR]
+
+ isAggregate := func(t *types.Type) bool {
+ return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
+ }
+
+ // Populate the data.
+ // The data is a stream of bytes, which contains the offsets and sizes of the
+ // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
+ // arguments, along with special "operators". Specifically,
+ // - for each non-aggrgate arg/field/element, its offset from FP (1 byte) and
+ // size (1 byte)
+ // - special operators:
+ // - 0xff - end of sequence
+ // - 0xfe - print { (at the start of an aggregate-typed argument)
+ // - 0xfd - print } (at the end of an aggregate-typed argument)
+ // - 0xfc - print ... (more args/fields/elements)
+ // - 0xfb - print _ (offset too large)
+ // These constants need to be in sync with runtime.traceback.go:printArgs.
+ const (
+ _endSeq = 0xff
+ _startAgg = 0xfe
+ _endAgg = 0xfd
+ _dotdotdot = 0xfc
+ _offsetTooLarge = 0xfb
+ _special = 0xf0 // above this are operators, below this are ordinary offsets
+ )
+
+ const (
+ limit = 10 // print no more than 10 args/components
+ maxDepth = 5 // no more than 5 layers of nesting
+
+ // maxLen is a (conservative) upper bound of the byte stream length. For
+ // each arg/component, it has no more than 2 bytes of data (size, offset),
+ // and no more than one {, }, ... at each level (it cannot have both the
+ // data and ... unless it is the last one, just be conservative). Plus 1
+ // for _endSeq.
+ maxLen = (maxDepth*3+2)*limit + 1
+ )
+
+ wOff := 0
+ n := 0
+ writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
+
+ // Write one non-aggregate arg/field/element.
+ write1 := func(sz, offset int64) {
+ if offset >= _special {
+ writebyte(_offsetTooLarge)
+ } else {
+ writebyte(uint8(offset))
+ writebyte(uint8(sz))
+ }
+ n++
+ }
+
+ // Visit t recursively and write it out.
+ // Returns whether to continue visiting.
+ var visitType func(baseOffset int64, t *types.Type, depth int) bool
+ visitType = func(baseOffset int64, t *types.Type, depth int) bool {
+ if n >= limit {
+ writebyte(_dotdotdot)
+ return false
+ }
+ if !isAggregate(t) {
+ write1(t.Size(), baseOffset)
+ return true
+ }
+ writebyte(_startAgg)
+ depth++
+ if depth >= maxDepth {
+ writebyte(_dotdotdot)
+ writebyte(_endAgg)
+ n++
+ return true
+ }
+ switch {
+ case t.IsInterface(), t.IsString():
+ _ = visitType(baseOffset, uintptrTyp, depth) &&
+ visitType(baseOffset+PtrSize, uintptrTyp, depth)
+ case t.IsSlice():
+ _ = visitType(baseOffset, uintptrTyp, depth) &&
+ visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
+ visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
+ case t.IsComplex():
+ _ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
+ visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
+ case t.IsArray():
+ if t.NumElem() == 0 {
+ n++ // {} counts as a component
+ break
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ if !visitType(baseOffset, t.Elem(), depth) {
+ break
+ }
+ baseOffset += t.Elem().Size()
+ }
+ case t.IsStruct():
+ if t.NumFields() == 0 {
+ n++ // {} counts as a component
+ break
+ }
+ for _, field := range t.Fields() {
+ if !visitType(baseOffset+field.Offset, field.Type, depth) {
+ break
+ }
+ }
+ }
+ writebyte(_endAgg)
+ return true
+ }
+
+ start := 0
+ if strings.Contains(f.LSym.Name, "[") {
+ // Skip the dictionary argument - it is implicit and the user doesn't need to see it.
+ start = 1
+ }
+
+ for _, a := range abiInfo.InParams()[start:] {
+ if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
+ break
+ }
+ }
+ writebyte(_endSeq)
+ if wOff > maxLen {
+ base.Fatalf("ArgInfo too large")
+ }
+
+ return x
+}
+
+// for wrapper, emit info of wrapped function.
+func emitWrappedFuncInfo(e *ssafn, pp *objw.Progs) {
+ if base.Ctxt.Flag_linkshared {
+ // Relative reference (SymPtrOff) to another shared object doesn't work.
+ // Unfortunate.
+ return
+ }
+
+ wfn := e.curfn.WrappedFunc
+ if wfn == nil {
+ return
+ }
+
+ wsym := wfn.Linksym()
+ x := base.Ctxt.LookupInit(fmt.Sprintf("%s.wrapinfo", wsym.Name), func(x *obj.LSym) {
+ objw.SymPtrOff(x, 0, wsym)
+ x.Set(obj.AttrContentAddressable, true)
+ })
+ e.curfn.LSym.Func().WrapInfo = x
+
+ // Emit a funcdata pointing at the wrap info data.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(rtabi.FUNCDATA_WrapInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+}
+
+// genssa appends entries to pp for each instruction in f.
+func genssa(f *ssa.Func, pp *objw.Progs) {
+ var s State
+ s.ABI = f.OwnAux.Fn.ABI()
+
+ e := f.Frontend().(*ssafn)
+
+ s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
+ emitArgInfo(e, f, pp)
+ argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp)
+
+ openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
+ if openDeferInfo != nil {
+ // This function uses open-coded defers -- write out the funcdata
+ // info that we computed at the end of genssa.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(rtabi.FUNCDATA_OpenCodedDeferInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = openDeferInfo
+ }
+
+ emitWrappedFuncInfo(e, pp)
+
+ // Remember where each block starts.
+ s.bstart = make([]*obj.Prog, f.NumBlocks())
+ s.pp = pp
+ var progToValue map[*obj.Prog]*ssa.Value
+ var progToBlock map[*obj.Prog]*ssa.Block
+ var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
+ gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name]
+ if gatherPrintInfo {
+ progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
+ progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
+ f.Logf("genssa %s\n", f.Name)
+ progToBlock[s.pp.Next] = f.Blocks[0]
+ }
+
+ if base.Ctxt.Flag_locationlists {
+ if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
+ f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
+ }
+ valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
+ for i := range valueToProgAfter {
+ valueToProgAfter[i] = nil
+ }
+ }
+
+ // If the very first instruction is not tagged as a statement,
+ // debuggers may attribute it to previous function in program.
+ firstPos := src.NoXPos
+ for _, v := range f.Entry.Values {
+ if v.Pos.IsStmt() == src.PosIsStmt && v.Op != ssa.OpArg && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
+ firstPos = v.Pos
+ v.Pos = firstPos.WithDefaultStmt()
+ break
+ }
+ }
+
+ // inlMarks has an entry for each Prog that implements an inline mark.
+ // It maps from that Prog to the global inlining id of the inlined body
+ // which should unwind to this Prog's location.
+ var inlMarks map[*obj.Prog]int32
+ var inlMarkList []*obj.Prog
+
+ // inlMarksByPos maps from a (column 1) source position to the set of
+ // Progs that are in the set above and have that source position.
+ var inlMarksByPos map[src.XPos][]*obj.Prog
+
+ var argLiveIdx int = -1 // argument liveness info index
+
+ // Emit basic blocks
+ for i, b := range f.Blocks {
+ s.bstart[b.ID] = s.pp.Next
+ s.lineRunStart = nil
+ s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
+
+ if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
+ argLiveIdx = idx
+ p := s.pp.Prog(obj.APCDATA)
+ p.From.SetConst(rtabi.PCDATA_ArgLiveIndex)
+ p.To.SetConst(int64(idx))
+ }
+
+ // Emit values in block
+ Arch.SSAMarkMoves(&s, b)
+ for _, v := range b.Values {
+ x := s.pp.Next
+ s.DebugFriendlySetPosFrom(v)
+
+ if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB:
+ // nothing to do
+ case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
+ // nothing to do
+ case ssa.OpGetG:
+ // nothing to do when there's a g register,
+ // and checkLower complains if there's not
+ case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpWBend:
+ // nothing to do; already used by liveness
+ case ssa.OpPhi:
+ CheckLoweredPhi(v)
+ case ssa.OpConvert:
+ // nothing to do; no-op conversion for liveness
+ if v.Args[0].Reg() != v.Reg() {
+ v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
+ }
+ case ssa.OpInlMark:
+ p := Arch.Ginsnop(s.pp)
+ if inlMarks == nil {
+ inlMarks = map[*obj.Prog]int32{}
+ inlMarksByPos = map[src.XPos][]*obj.Prog{}
+ }
+ inlMarks[p] = v.AuxInt32()
+ inlMarkList = append(inlMarkList, p)
+ pos := v.Pos.AtColumn1()
+ inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
+ firstPos = src.NoXPos
+
+ default:
+ // Special case for first line in function; move it to the start (which cannot be a register-valued instruction)
+ if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
+ s.SetPos(firstPos)
+ firstPos = src.NoXPos
+ }
+ // Attach this safe point to the next
+ // instruction.
+ s.pp.NextLive = s.livenessMap.Get(v)
+ s.pp.NextUnsafe = s.livenessMap.GetUnsafe(v)
+
+ // let the backend handle it
+ Arch.SSAGenValue(&s, v)
+ }
+
+ if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx {
+ argLiveIdx = idx
+ p := s.pp.Prog(obj.APCDATA)
+ p.From.SetConst(rtabi.PCDATA_ArgLiveIndex)
+ p.To.SetConst(int64(idx))
+ }
+
+ if base.Ctxt.Flag_locationlists {
+ valueToProgAfter[v.ID] = s.pp.Next
+ }
+
+ if gatherPrintInfo {
+ for ; x != s.pp.Next; x = x.Link {
+ progToValue[x] = v
+ }
+ }
+ }
+ // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
+ if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
+ p := Arch.Ginsnop(s.pp)
+ p.Pos = p.Pos.WithIsStmt()
+ if b.Pos == src.NoXPos {
+ b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
+ if b.Pos == src.NoXPos {
+ b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
+ }
+ }
+ b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
+ }
+
+ // Set unsafe mark for any end-of-block generated instructions
+ // (normally, conditional or unconditional branches).
+ // This is particularly important for empty blocks, as there
+ // are no values to inherit the unsafe mark from.
+ s.pp.NextUnsafe = s.livenessMap.GetUnsafeBlock(b)
+
+ // Emit control flow instructions for block
+ var next *ssa.Block
+ if i < len(f.Blocks)-1 && base.Flag.N == 0 {
+ // If -N, leave next==nil so every block with successors
+ // ends in a JMP (except call blocks - plive doesn't like
+ // select{send,recv} followed by a JMP call). Helps keep
+ // line numbers for otherwise empty blocks.
+ next = f.Blocks[i+1]
+ }
+ x := s.pp.Next
+ s.SetPos(b.Pos)
+ Arch.SSAGenBlock(&s, b, next)
+ if gatherPrintInfo {
+ for ; x != s.pp.Next; x = x.Link {
+ progToBlock[x] = b
+ }
+ }
+ }
+ if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
+ // We need the return address of a panic call to
+ // still be inside the function in question. So if
+ // it ends in a call which doesn't return, add a
+ // nop (which will never execute) after the call.
+ Arch.Ginsnop(pp)
+ }
+ if openDeferInfo != nil {
+ // When doing open-coded defers, generate a disconnected call to
+ // deferreturn and a return. This will be used to during panic
+ // recovery to unwind the stack and return back to the runtime.
+ s.pp.NextLive = s.livenessMap.DeferReturn
+ p := pp.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Deferreturn
+
+ // Load results into registers. So when a deferred function
+ // recovers a panic, it will return to caller with right results.
+ // The results are already in memory, because they are not SSA'd
+ // when the function has defers (see canSSAName).
+ for _, o := range f.OwnAux.ABIInfo().OutParams() {
+ n := o.Name
+ rts, offs := o.RegisterTypesAndOffsets()
+ for i := range o.Registers {
+ Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
+ }
+ }
+
+ pp.Prog(obj.ARET)
+ }
+
+ if inlMarks != nil {
+ hasCall := false
+
+ // We have some inline marks. Try to find other instructions we're
+ // going to emit anyway, and use those instructions instead of the
+ // inline marks.
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
+ // Don't use 0-sized instructions as inline marks, because we need
+ // to identify inline mark instructions by pc offset.
+ // (Some of these instructions are sometimes zero-sized, sometimes not.
+ // We must not use anything that even might be zero-sized.)
+ // TODO: are there others?
+ continue
+ }
+ if _, ok := inlMarks[p]; ok {
+ // Don't use inline marks themselves. We don't know
+ // whether they will be zero-sized or not yet.
+ continue
+ }
+ if p.As == obj.ACALL || p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
+ hasCall = true
+ }
+ pos := p.Pos.AtColumn1()
+ s := inlMarksByPos[pos]
+ if len(s) == 0 {
+ continue
+ }
+ for _, m := range s {
+ // We found an instruction with the same source position as
+ // some of the inline marks.
+ // Use this instruction instead.
+ p.Pos = p.Pos.WithIsStmt() // promote position to a statement
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
+ // Make the inline mark a real nop, so it doesn't generate any code.
+ m.As = obj.ANOP
+ m.Pos = src.NoXPos
+ m.From = obj.Addr{}
+ m.To = obj.Addr{}
+ }
+ delete(inlMarksByPos, pos)
+ }
+ // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
+ for _, p := range inlMarkList {
+ if p.As != obj.ANOP {
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
+ }
+ }
+
+ if e.stksize == 0 && !hasCall {
+ // Frameless leaf function. It doesn't need any preamble,
+ // so make sure its first instruction isn't from an inlined callee.
+ // If it is, add a nop at the start of the function with a position
+ // equal to the start of the function.
+ // This ensures that runtime.FuncForPC(uintptr(reflect.ValueOf(fn).Pointer())).Name()
+ // returns the right answer. See issue 58300.
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.ANOP {
+ continue
+ }
+ if base.Ctxt.PosTable.Pos(p.Pos).Base().InliningIndex() >= 0 {
+ // Make a real (not 0-sized) nop.
+ nop := Arch.Ginsnop(pp)
+ nop.Pos = e.curfn.Pos().WithIsStmt()
+
+ // Unfortunately, Ginsnop puts the instruction at the
+ // end of the list. Move it up to just before p.
+
+ // Unlink from the current list.
+ for x := pp.Text; x != nil; x = x.Link {
+ if x.Link == nop {
+ x.Link = nop.Link
+ break
+ }
+ }
+ // Splice in right before p.
+ for x := pp.Text; x != nil; x = x.Link {
+ if x.Link == p {
+ nop.Link = p
+ x.Link = nop
+ break
+ }
+ }
+ }
+ break
+ }
+ }
+ }
+
+ if base.Ctxt.Flag_locationlists {
+ var debugInfo *ssa.FuncDebug
+ debugInfo = e.curfn.DebugInfo.(*ssa.FuncDebug)
+ if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
+ ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
+ } else {
+ ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists, StackOffset, debugInfo)
+ }
+ bstart := s.bstart
+ idToIdx := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ idToIdx[b.ID] = i
+ }
+ // Register a callback that will be used later to fill in PCs into location
+ // lists. At the moment, Prog.Pc is a sequence number; it's not a real PC
+ // until after assembly, so the translation needs to be deferred.
+ debugInfo.GetPC = func(b, v ssa.ID) int64 {
+ switch v {
+ case ssa.BlockStart.ID:
+ if b == f.Entry.ID {
+ return 0 // Start at the very beginning, at the assembler-generated prologue.
+ // this should only happen for function args (ssa.OpArg)
+ }
+ return bstart[b].Pc
+ case ssa.BlockEnd.ID:
+ blk := f.Blocks[idToIdx[b]]
+ nv := len(blk.Values)
+ return valueToProgAfter[blk.Values[nv-1].ID].Pc
+ case ssa.FuncEnd.ID:
+ return e.curfn.LSym.Size
+ default:
+ return valueToProgAfter[v].Pc
+ }
+ }
+ }
+
+ // Resolve branches, and relax DefaultStmt into NotStmt
+ for _, br := range s.Branches {
+ br.P.To.SetTarget(s.bstart[br.B.ID])
+ if br.P.Pos.IsStmt() != src.PosIsStmt {
+ br.P.Pos = br.P.Pos.WithNotStmt()
+ } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
+ br.P.Pos = br.P.Pos.WithNotStmt()
+ }
+
+ }
+
+ // Resolve jump table destinations.
+ for _, jt := range s.JumpTables {
+ // Convert from *Block targets to *Prog targets.
+ targets := make([]*obj.Prog, len(jt.Succs))
+ for i, e := range jt.Succs {
+ targets[i] = s.bstart[e.Block().ID]
+ }
+ // Add to list of jump tables to be resolved at assembly time.
+ // The assembler converts from *Prog entries to absolute addresses
+ // once it knows instruction byte offsets.
+ fi := pp.CurFunc.LSym.Func()
+ fi.JumpTables = append(fi.JumpTables, obj.JumpTable{Sym: jt.Aux.(*obj.LSym), Targets: targets})
+ }
+
+ if e.log { // spew to stdout
+ filename := ""
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ f.Logf("# %s\n", filename)
+ }
+
+ var s string
+ if v, ok := progToValue[p]; ok {
+ s = v.String()
+ } else if b, ok := progToBlock[p]; ok {
+ s = b.String()
+ } else {
+ s = " " // most value and branch strings are 2-3 characters long
+ }
+ f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
+ }
+ }
+ if f.HTMLWriter != nil { // spew to ssa.html
+ var buf strings.Builder
+ buf.WriteString("<code>")
+ buf.WriteString("<dl class=\"ssa-gen\">")
+ filename := ""
+ for p := pp.Text; p != nil; p = p.Link {
+ // Don't spam every line with the file name, which is often huge.
+ // Only print changes, and "unknown" is not a change.
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
+ buf.WriteString(html.EscapeString("# " + filename))
+ buf.WriteString("</dd>")
+ }
+
+ buf.WriteString("<dt class=\"ssa-prog-src\">")
+ if v, ok := progToValue[p]; ok {
+ buf.WriteString(v.HTML())
+ } else if b, ok := progToBlock[p]; ok {
+ buf.WriteString("<b>" + b.HTML() + "</b>")
+ }
+ buf.WriteString("</dt>")
+ buf.WriteString("<dd class=\"ssa-prog\">")
+ fmt.Fprintf(&buf, "%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString()))
+ buf.WriteString("</dd>")
+ }
+ buf.WriteString("</dl>")
+ buf.WriteString("</code>")
+ f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
+ }
+ if ssa.GenssaDump[f.Name] {
+ fi := f.DumpFileForPhase("genssa")
+ if fi != nil {
+
+ // inliningDiffers if any filename changes or if any line number except the innermost (last index) changes.
+ inliningDiffers := func(a, b []src.Pos) bool {
+ if len(a) != len(b) {
+ return true
+ }
+ for i := range a {
+ if a[i].Filename() != b[i].Filename() {
+ return true
+ }
+ if i != len(a)-1 && a[i].Line() != b[i].Line() {
+ return true
+ }
+ }
+ return false
+ }
+
+ var allPosOld []src.Pos
+ var allPos []src.Pos
+
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() {
+ allPos = allPos[:0]
+ p.Ctxt.AllPos(p.Pos, func(pos src.Pos) { allPos = append(allPos, pos) })
+ if inliningDiffers(allPos, allPosOld) {
+ for _, pos := range allPos {
+ fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line())
+ }
+ allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage.
+ }
+ }
+
+ var s string
+ if v, ok := progToValue[p]; ok {
+ s = v.String()
+ } else if b, ok := progToBlock[p]; ok {
+ s = b.String()
+ } else {
+ s = " " // most value and branch strings are 2-3 characters long
+ }
+ fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString())
+ }
+ fi.Close()
+ }
+ }
+
+ defframe(&s, e, f)
+
+ f.HTMLWriter.Close()
+ f.HTMLWriter = nil
+}
+
+func defframe(s *State, e *ssafn, f *ssa.Func) {
+ pp := s.pp
+
+ s.maxarg = types.RoundUp(s.maxarg, e.stkalign)
+ frame := s.maxarg + e.stksize
+ if Arch.PadFrame != nil {
+ frame = Arch.PadFrame(frame)
+ }
+
+ // Fill in argument and frame size.
+ pp.Text.To.Type = obj.TYPE_TEXTSIZE
+ pp.Text.To.Val = int32(types.RoundUp(f.OwnAux.ArgWidth(), int64(types.RegSize)))
+ pp.Text.To.Offset = frame
+
+ p := pp.Text
+
+ // Insert code to spill argument registers if the named slot may be partially
+ // live. That is, the named slot is considered live by liveness analysis,
+ // (because a part of it is live), but we may not spill all parts into the
+ // slot. This can only happen with aggregate-typed arguments that are SSA-able
+ // and not address-taken (for non-SSA-able or address-taken arguments we always
+ // spill upfront).
+ // Note: spilling is unnecessary in the -N/no-optimize case, since all values
+ // will be considered non-SSAable and spilled up front.
+ // TODO(register args) Make liveness more fine-grained to that partial spilling is okay.
+ if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
+ // First, see if it is already spilled before it may be live. Look for a spill
+ // in the entry block up to the first safepoint.
+ type nameOff struct {
+ n *ir.Name
+ off int64
+ }
+ partLiveArgsSpilled := make(map[nameOff]bool)
+ for _, v := range f.Entry.Values {
+ if v.Op.IsCall() {
+ break
+ }
+ if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
+ continue
+ }
+ n, off := ssa.AutoVar(v)
+ if n.Class != ir.PPARAM || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] {
+ continue
+ }
+ partLiveArgsSpilled[nameOff{n, off}] = true
+ }
+
+ // Then, insert code to spill registers if not already.
+ for _, a := range f.OwnAux.ABIInfo().InParams() {
+ n := a.Name
+ if n == nil || n.Addrtaken() || !ssa.CanSSA(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
+ continue
+ }
+ rts, offs := a.RegisterTypesAndOffsets()
+ for i := range a.Registers {
+ if !rts[i].HasPointers() {
+ continue
+ }
+ if partLiveArgsSpilled[nameOff{n, offs[i]}] {
+ continue // already spilled
+ }
+ reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
+ p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
+ }
+ }
+ }
+
+ // Insert code to zero ambiguously live variables so that the
+ // garbage collector only sees initialized values when it
+ // looks for pointers.
+ var lo, hi int64
+
+ // Opaque state for backend to use. Current backends use it to
+ // keep track of which helper registers have been zeroed.
+ var state uint32
+
+ // Iterate through declarations. Autos are sorted in decreasing
+ // frame offset order.
+ for _, n := range e.curfn.Dcl {
+ if !n.Needzero() {
+ continue
+ }
+ if n.Class != ir.PAUTO {
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class)
+ }
+ if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
+ }
+
+ if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
+ // Merge with range we already have.
+ lo = n.FrameOffset()
+ continue
+ }
+
+ // Zero old range
+ p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+
+ // Set new range.
+ lo = n.FrameOffset()
+ hi = lo + n.Type().Size()
+ }
+
+ // Zero final range.
+ Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+}
+
+// For generating consecutive jump instructions to model a specific branching
+type IndexJump struct {
+ Jump obj.As
+ Index int
+}
+
+func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
+ p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
+ p.Pos = b.Pos
+}
+
+// CombJump generates combinational instructions (2 at present) for a block jump,
+// thereby the behaviour of non-standard condition codes could be simulated
+func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
+ switch next {
+ case b.Succs[0].Block():
+ s.oneJump(b, &jumps[0][0])
+ s.oneJump(b, &jumps[0][1])
+ case b.Succs[1].Block():
+ s.oneJump(b, &jumps[1][0])
+ s.oneJump(b, &jumps[1][1])
+ default:
+ var q *obj.Prog
+ if b.Likely != ssa.BranchUnlikely {
+ s.oneJump(b, &jumps[1][0])
+ s.oneJump(b, &jumps[1][1])
+ q = s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.oneJump(b, &jumps[0][0])
+ s.oneJump(b, &jumps[0][1])
+ q = s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ q.Pos = b.Pos
+ }
+}
+
+// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
+func AddAux(a *obj.Addr, v *ssa.Value) {
+ AddAux2(a, v, v.AuxInt)
+}
+func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
+ if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
+ v.Fatalf("bad AddAux addr %v", a)
+ }
+ // add integer offset
+ a.Offset += offset
+
+ // If no additional symbol offset, we're done.
+ if v.Aux == nil {
+ return
+ }
+ // Add symbol's offset from its base register.
+ switch n := v.Aux.(type) {
+ case *ssa.AuxCall:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = n.Fn
+ case *obj.LSym:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = n
+ case *ir.Name:
+ if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
+ a.Name = obj.NAME_PARAM
+ } else {
+ a.Name = obj.NAME_AUTO
+ }
+ a.Sym = n.Linksym()
+ a.Offset += n.FrameOffset()
+ default:
+ v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
+ }
+}
+
+// extendIndex extends v to a full int width.
+// panic with the given kind if v does not fit in an int (only on 32-bit archs).
+func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+ size := idx.Type.Size()
+ if size == s.config.PtrSize {
+ return idx
+ }
+ if size > s.config.PtrSize {
+ // truncate 64-bit indexes on 32-bit pointer archs. Test the
+ // high word and branch to out-of-bounds failure if it is not 0.
+ var lo *ssa.Value
+ if idx.Type.IsSigned() {
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
+ } else {
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
+ }
+ if bounded || base.Flag.B != 0 {
+ return lo
+ }
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ bPanic := s.f.NewBlock(ssa.BlockExit)
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
+ if !idx.Type.IsSigned() {
+ switch kind {
+ case ssa.BoundsIndex:
+ kind = ssa.BoundsIndexU
+ case ssa.BoundsSliceAlen:
+ kind = ssa.BoundsSliceAlenU
+ case ssa.BoundsSliceAcap:
+ kind = ssa.BoundsSliceAcapU
+ case ssa.BoundsSliceB:
+ kind = ssa.BoundsSliceBU
+ case ssa.BoundsSlice3Alen:
+ kind = ssa.BoundsSlice3AlenU
+ case ssa.BoundsSlice3Acap:
+ kind = ssa.BoundsSlice3AcapU
+ case ssa.BoundsSlice3B:
+ kind = ssa.BoundsSlice3BU
+ case ssa.BoundsSlice3C:
+ kind = ssa.BoundsSlice3CU
+ }
+ }
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+
+ s.startBlock(bPanic)
+ mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
+ s.endBlock().SetControl(mem)
+ s.startBlock(bNext)
+
+ return lo
+ }
+
+ // Extend value to the required size
+ var op ssa.Op
+ if idx.Type.IsSigned() {
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad signed index extension %s", idx.Type)
+ }
+ } else {
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("bad unsigned index extension %s", idx.Type)
+ }
+ }
+ return s.newValue1(op, types.Types[types.TINT], idx)
+}
+
+// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
+// Called during ssaGenValue.
+func CheckLoweredPhi(v *ssa.Value) {
+ if v.Op != ssa.OpPhi {
+ v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
+ }
+ if v.Type.IsMemory() {
+ return
+ }
+ f := v.Block.Func
+ loc := f.RegAlloc[v.ID]
+ for _, a := range v.Args {
+ if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
+ v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
+ }
+ }
+}
+
+// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
+// except for incoming in-register arguments.
+// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
+// That register contains the closure pointer on closure entry.
+func CheckLoweredGetClosurePtr(v *ssa.Value) {
+ entry := v.Block.Func.Entry
+ if entry != v.Block {
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+ for _, w := range entry.Values {
+ if w == v {
+ break
+ }
+ switch w.Op {
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // okay
+ default:
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+ }
+}
+
+// CheckArgReg ensures that v is in the function's entry block.
+func CheckArgReg(v *ssa.Value) {
+ entry := v.Block.Func.Entry
+ if entry != v.Block {
+ base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+}
+
+func AddrAuto(a *obj.Addr, v *ssa.Value) {
+ n, off := ssa.AutoVar(v)
+ a.Type = obj.TYPE_MEM
+ a.Sym = n.Linksym()
+ a.Reg = int16(Arch.REGSP)
+ a.Offset = n.FrameOffset() + off
+ if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
+ a.Name = obj.NAME_PARAM
+ } else {
+ a.Name = obj.NAME_AUTO
+ }
+}
+
+// Call returns a new CALL instruction for the SSA value v.
+// It uses PrepareCall to prepare the call.
+func (s *State) Call(v *ssa.Value) *obj.Prog {
+ pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness fo the call comes from ssaGenState
+ s.PrepareCall(v)
+
+ p := s.Prog(obj.ACALL)
+ if pPosIsStmt == src.PosIsStmt {
+ p.Pos = v.Pos.WithIsStmt()
+ } else {
+ p.Pos = v.Pos.WithNotStmt()
+ }
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = sym.Fn
+ } else {
+ // TODO(mdempsky): Can these differences be eliminated?
+ switch Arch.LinkArch.Family {
+ case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
+ p.To.Type = obj.TYPE_REG
+ case sys.ARM, sys.ARM64, sys.Loong64, sys.MIPS, sys.MIPS64:
+ p.To.Type = obj.TYPE_MEM
+ default:
+ base.Fatalf("unknown indirect call family")
+ }
+ p.To.Reg = v.Args[0].Reg()
+ }
+ return p
+}
+
+// TailCall returns a new tail call instruction for the SSA value v.
+// It is like Call, but for a tail call.
+func (s *State) TailCall(v *ssa.Value) *obj.Prog {
+ p := s.Call(v)
+ p.As = obj.ARET
+ return p
+}
+
+// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
+// It must be called immediately before emitting the actual CALL instruction,
+// since it emits PCDATA for the stack map at the call (calls are safe points).
+func (s *State) PrepareCall(v *ssa.Value) {
+ idx := s.livenessMap.Get(v)
+ if !idx.StackMapValid() {
+ // See Liveness.hasStackMap.
+ if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.WBZero || sym.Fn == ir.Syms.WBMove) {
+ base.Fatalf("missing stack map index for %v", v.LongString())
+ }
+ }
+
+ call, ok := v.Aux.(*ssa.AuxCall)
+
+ if ok {
+ // Record call graph information for nowritebarrierrec
+ // analysis.
+ if nowritebarrierrecCheck != nil {
+ nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
+ }
+ }
+
+ if s.maxarg < v.AuxInt {
+ s.maxarg = v.AuxInt
+ }
+}
+
+// UseArgs records the fact that an instruction needs a certain amount of
+// callee args space for its use.
+func (s *State) UseArgs(n int64) {
+ if s.maxarg < n {
+ s.maxarg = n
+ }
+}
+
+// fieldIdx finds the index of the field referred to by the ODOT node n.
+func fieldIdx(n *ir.SelectorExpr) int {
+ t := n.X.Type()
+ if !t.IsStruct() {
+ panic("ODOT's LHS is not a struct")
+ }
+
+ for i, f := range t.Fields() {
+ if f.Sym == n.Sel {
+ if f.Offset != n.Offset() {
+ panic("field offset doesn't match")
+ }
+ return i
+ }
+ }
+ panic(fmt.Sprintf("can't find field in expr %v\n", n))
+
+ // TODO: keep the result of this function somewhere in the ODOT Node
+ // so we don't have to recompute it each time we need it.
+}
+
+// ssafn holds frontend information about a function that the backend is processing.
+// It also exports a bunch of compiler services for the ssa backend.
+type ssafn struct {
+ curfn *ir.Func
+ strings map[string]*obj.LSym // map from constant string to data symbols
+ stksize int64 // stack size for current frame
+ stkptrsize int64 // prefix of stack containing pointers
+
+ // alignment for current frame.
+ // NOTE: when stkalign > PtrSize, currently this only ensures the offsets of
+ // objects in the stack frame are aligned. The stack pointer is still aligned
+ // only PtrSize.
+ stkalign int64
+
+ log bool // print ssa debug to the stdout
+}
+
+// StringData returns a symbol which
+// is the data component of a global string constant containing s.
+func (e *ssafn) StringData(s string) *obj.LSym {
+ if aux, ok := e.strings[s]; ok {
+ return aux
+ }
+ if e.strings == nil {
+ e.strings = make(map[string]*obj.LSym)
+ }
+ data := staticdata.StringSym(e.curfn.Pos(), s)
+ e.strings[s] = data
+ return data
+}
+
+// SplitSlot returns a slot representing the data of parent starting at offset.
+func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
+ node := parent.N
+
+ if node.Class != ir.PAUTO || node.Addrtaken() {
+ // addressed things and non-autos retain their parents (i.e., cannot truly be split)
+ return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
+ }
+
+ sym := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
+ n := e.curfn.NewLocal(parent.N.Pos(), sym, t)
+ n.SetUsed(true)
+ n.SetEsc(ir.EscNever)
+ types.CalcSize(t)
+ return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
+}
+
+// Logf logs a message from the compiler.
+func (e *ssafn) Logf(msg string, args ...interface{}) {
+ if e.log {
+ fmt.Printf(msg, args...)
+ }
+}
+
+func (e *ssafn) Log() bool {
+ return e.log
+}
+
+// Fatalf reports a compiler error and exits.
+func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
+ base.Pos = pos
+ nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
+ base.Fatalf("'%s': "+msg, nargs...)
+}
+
+// Warnl reports a "warning", which is usually flag-triggered
+// logging output for the benefit of tests.
+func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
+ base.WarnfAt(pos, fmt_, args...)
+}
+
+func (e *ssafn) Debug_checknil() bool {
+ return base.Debug.Nil != 0
+}
+
+func (e *ssafn) UseWriteBarrier() bool {
+ return base.Flag.WB
+}
+
+func (e *ssafn) Syslook(name string) *obj.LSym {
+ switch name {
+ case "goschedguarded":
+ return ir.Syms.Goschedguarded
+ case "writeBarrier":
+ return ir.Syms.WriteBarrier
+ case "wbZero":
+ return ir.Syms.WBZero
+ case "wbMove":
+ return ir.Syms.WBMove
+ case "cgoCheckMemmove":
+ return ir.Syms.CgoCheckMemmove
+ case "cgoCheckPtrWrite":
+ return ir.Syms.CgoCheckPtrWrite
+ }
+ e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
+ return nil
+}
+
+func (e *ssafn) Func() *ir.Func {
+ return e.curfn
+}
+
+func clobberBase(n ir.Node) ir.Node {
+ if n.Op() == ir.ODOT {
+ n := n.(*ir.SelectorExpr)
+ if n.X.Type().NumFields() == 1 {
+ return clobberBase(n.X)
+ }
+ }
+ if n.Op() == ir.OINDEX {
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
+ return clobberBase(n.X)
+ }
+ }
+ return n
+}
+
+// callTargetLSym returns the correct LSym to call 'callee' using its ABI.
+func callTargetLSym(callee *ir.Name) *obj.LSym {
+ if callee.Func == nil {
+ // TODO(austin): This happens in case of interface method I.M from imported package.
+ // It's ABIInternal, and would be better if callee.Func was never nil and we didn't
+ // need this case.
+ return callee.Linksym()
+ }
+
+ return callee.LinksymABI(callee.Func.ABI)
+}
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// deferStructFnField is the field index of _defer.fn.
+const deferStructFnField = 4
+
+var deferType *types.Type
+
+// deferstruct returns a type interchangeable with runtime._defer.
+// Make sure this stays in sync with runtime/runtime2.go:_defer.
+func deferstruct() *types.Type {
+ if deferType != nil {
+ return deferType
+ }
+
+ makefield := func(name string, t *types.Type) *types.Field {
+ sym := (*types.Pkg)(nil).Lookup(name)
+ return types.NewField(src.NoXPos, sym, t)
+ }
+
+ fields := []*types.Field{
+ makefield("heap", types.Types[types.TBOOL]),
+ makefield("rangefunc", types.Types[types.TBOOL]),
+ makefield("sp", types.Types[types.TUINTPTR]),
+ makefield("pc", types.Types[types.TUINTPTR]),
+ // Note: the types here don't really matter. Defer structures
+ // are always scanned explicitly during stack copying and GC,
+ // so we make them uintptr type even though they are real pointers.
+ makefield("fn", types.Types[types.TUINTPTR]),
+ makefield("link", types.Types[types.TUINTPTR]),
+ makefield("head", types.Types[types.TUINTPTR]),
+ }
+ if name := fields[deferStructFnField].Sym.Name; name != "fn" {
+ base.Fatalf("deferStructFnField is %q, not fn", name)
+ }
+
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("_defer"))
+ typ := types.NewNamed(n)
+ n.SetType(typ)
+ n.SetTypecheck(1)
+
+ // build struct holding the above fields
+ typ.SetUnderlying(types.NewStruct(fields))
+ types.CalcStructSize(typ)
+
+ deferType = typ
+ return typ
+}
+
+// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr
+// The resulting addr is used in a non-standard context -- in the prologue
+// of a function, before the frame has been constructed, so the standard
+// addressing for the parameters will be wrong.
+func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
+ return obj.Addr{
+ Name: obj.NAME_NONE,
+ Type: obj.TYPE_MEM,
+ Reg: baseReg,
+ Offset: spill.Offset + extraOffset,
+ }
+}
+
+var (
+ BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
+ ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
+)
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
new file mode 100644
index 0000000..78c332e
--- /dev/null
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -0,0 +1,346 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+ "encoding/base64"
+ "fmt"
+ "go/constant"
+ "io"
+ "os"
+ "sort"
+ "strconv"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/notsha256"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// InitAddrOffset writes the static name symbol lsym to n, it does not modify n.
+// It's the caller responsibility to make sure lsym is from ONAME/PEXTERN node.
+func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("InitAddr n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("InitAddr nil n sym")
+ }
+ s := n.Linksym()
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off)
+}
+
+// InitAddr is InitAddrOffset, with offset fixed to 0.
+func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) {
+ InitAddrOffset(n, noff, lsym, 0)
+}
+
+// InitSlice writes a static slice symbol {lsym, lencap, lencap} to n+noff, it does not modify n.
+// It's the caller responsibility to make sure lsym is from ONAME node.
+func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) {
+ s := n.Linksym()
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0)
+ s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
+ s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
+}
+
+func InitSliceBytes(nam *ir.Name, off int64, s string) {
+ if nam.Op() != ir.ONAME {
+ base.Fatalf("InitSliceBytes %v", nam)
+ }
+ InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
+}
+
+const (
+ stringSymPrefix = "go:string."
+ stringSymPattern = ".gostring.%d.%s"
+)
+
+// shortHashString converts the hash to a string for use with stringSymPattern.
+// We cut it to 16 bytes and then base64-encode to make it even smaller.
+func shortHashString(hash []byte) string {
+ return base64.StdEncoding.EncodeToString(hash[:16])
+}
+
+// StringSym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
+func StringSym(pos src.XPos, s string) (data *obj.LSym) {
+ var symname string
+ if len(s) > 100 {
+ // Huge strings are hashed to avoid long names in object files.
+ // Indulge in some paranoia by writing the length of s, too,
+ // as protection against length extension attacks.
+ // Same pattern is known to fileStringSym below.
+ h := notsha256.New()
+ io.WriteString(h, s)
+ symname = fmt.Sprintf(stringSymPattern, len(s), shortHashString(h.Sum(nil)))
+ } else {
+ // Small strings get named directly by their contents.
+ symname = strconv.Quote(s)
+ }
+
+ symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ off := dstringdata(symdata, 0, s, pos, "string")
+ objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ symdata.Set(obj.AttrContentAddressable, true)
+ }
+
+ return symdata
+}
+
+// StringSymNoCommon is like StringSym, but produces a symbol that is not content-
+// addressable. This symbol is not supposed to appear in the final binary, it is
+// only used to pass string arguments to the linker like R_USENAMEDMETHOD does.
+func StringSymNoCommon(s string) (data *obj.LSym) {
+ var nameSym obj.LSym
+ nameSym.WriteString(base.Ctxt, 0, len(s), s)
+ objw.Global(&nameSym, int32(len(s)), obj.RODATA)
+ return &nameSym
+}
+
+// maxFileSize is the maximum file size permitted by the linker
+// (see issue #9862).
+const maxFileSize = int64(2e9)
+
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ if !info.Mode().IsRegular() {
+ return nil, 0, fmt.Errorf("not a regular file")
+ }
+ size := info.Size()
+ if size <= 1*1024 {
+ data, err := io.ReadAll(f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if int64(len(data)) != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ var sym *obj.LSym
+ if readonly {
+ sym = StringSym(pos, string(data))
+ } else {
+ sym = slicedata(pos, string(data))
+ }
+ if len(hash) > 0 {
+ sum := notsha256.Sum256(data)
+ copy(hash, sum[:])
+ }
+ return sym, size, nil
+ }
+ if size > maxFileSize {
+ // ggloblsym takes an int32,
+ // and probably the rest of the toolchain
+ // can't handle such big symbols either.
+ // See golang.org/issue/9862.
+ return nil, 0, fmt.Errorf("file too large (%d bytes > %d bytes)", size, maxFileSize)
+ }
+
+ // File is too big to read and keep in memory.
+ // Compute hash if needed for read-only content hashing or if the caller wants it.
+ var sum []byte
+ if readonly || len(hash) > 0 {
+ h := notsha256.New()
+ n, err := io.Copy(h, f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ sum = h.Sum(nil)
+ copy(hash, sum)
+ }
+
+ var symdata *obj.LSym
+ if readonly {
+ symname := fmt.Sprintf(stringSymPattern, size, shortHashString(sum))
+ symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ // Note: AttrContentAddressable cannot be set here,
+ // because the content-addressable-handling code
+ // does not know about file symbols.
+ }
+ } else {
+ // Emit a zero-length data symbol
+ // and then fix up length and content to use file.
+ symdata = slicedata(pos, "")
+ symdata.Size = size
+ symdata.Type = objabi.SNOPTRDATA
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ }
+
+ return symdata, size, nil
+}
+
+var slicedataGen int
+
+func slicedata(pos src.XPos, s string) *obj.LSym {
+ slicedataGen++
+ symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
+ lsym := types.LocalPkg.Lookup(symname).LinksymABI(obj.ABI0)
+ off := dstringdata(lsym, 0, s, pos, "slice")
+ objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+
+ return lsym
+}
+
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+ // Objects that are too large will cause the data section to overflow right away,
+ // causing a cryptic error message by the linker. Check for oversize objects here
+ // and provide a useful error message instead.
+ if int64(len(t)) > 2e9 {
+ base.ErrorfAt(pos, 0, "%v with length %v is too big", what, len(t))
+ return 0
+ }
+
+ s.WriteString(base.Ctxt, int64(off), len(t), t)
+ return off + len(t)
+}
+
+var (
+ funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
+ funcsyms []*ir.Name // functions that need function value symbols
+)
+
+// FuncLinksym returns n·f, the function value symbol for n.
+func FuncLinksym(n *ir.Name) *obj.LSym {
+ if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+ base.Fatalf("expected func name: %v", n)
+ }
+ s := n.Sym()
+
+ // funcsymsmu here serves to protect not just mutations of funcsyms (below),
+ // but also the package lookup of the func sym name,
+ // since this function gets called concurrently from the backend.
+ // There are no other concurrent package lookups in the backend,
+ // except for the types package, which is protected separately.
+ // Reusing funcsymsmu to also cover this package lookup
+ // avoids a general, broader, expensive package lookup mutex.
+ funcsymsmu.Lock()
+ sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
+ if !existed {
+ funcsyms = append(funcsyms, n)
+ }
+ funcsymsmu.Unlock()
+
+ return sf.Linksym()
+}
+
+func GlobalLinksym(n *ir.Name) *obj.LSym {
+ if n.Op() != ir.ONAME || n.Class != ir.PEXTERN {
+ base.Fatalf("expected global variable: %v", n)
+ }
+ return n.Linksym()
+}
+
+func WriteFuncSyms() {
+ sort.Slice(funcsyms, func(i, j int) bool {
+ return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name
+ })
+ for _, nam := range funcsyms {
+ s := nam.Sym()
+ sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
+
+ // While compiling package runtime, we might try to create
+ // funcsyms for functions from both types.LocalPkg and
+ // ir.Pkgs.Runtime.
+ if base.Flag.CompilingRuntime && sf.OnList() {
+ continue
+ }
+
+ // Function values must always reference ABIInternal
+ // entry points.
+ target := s.Linksym()
+ if target.ABI() != obj.ABIInternal {
+ base.Fatalf("expected ABIInternal: %v has %v", target, target.ABI())
+ }
+ objw.SymPtr(sf, 0, target, 0)
+ objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ }
+}
+
+// InitConst writes the static literal c to n.
+// Neither n nor c is modified.
+func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("InitConst n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("InitConst nil n sym")
+ }
+ if c.Op() == ir.ONIL {
+ return
+ }
+ if c.Op() != ir.OLITERAL {
+ base.Fatalf("InitConst c op %v", c.Op())
+ }
+ s := n.Linksym()
+ switch u := c.Val(); u.Kind() {
+ case constant.Bool:
+ i := int64(obj.Bool2int(constant.BoolVal(u)))
+ s.WriteInt(base.Ctxt, noff, wid, i)
+
+ case constant.Int:
+ s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
+
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch c.Type().Kind() {
+ case types.TFLOAT32:
+ s.WriteFloat32(base.Ctxt, noff, float32(f))
+ case types.TFLOAT64:
+ s.WriteFloat64(base.Ctxt, noff, f)
+ }
+
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch c.Type().Kind() {
+ case types.TCOMPLEX64:
+ s.WriteFloat32(base.Ctxt, noff, float32(re))
+ s.WriteFloat32(base.Ctxt, noff+4, float32(im))
+ case types.TCOMPLEX128:
+ s.WriteFloat64(base.Ctxt, noff, re)
+ s.WriteFloat64(base.Ctxt, noff+8, im)
+ }
+
+ case constant.String:
+ i := constant.StringVal(u)
+ symdata := StringSym(n.Pos(), i)
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
+ s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
+
+ default:
+ base.Fatalf("InitConst unhandled OLITERAL %v", c)
+ }
+}
diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go
new file mode 100644
index 0000000..a4d493c
--- /dev/null
+++ b/src/cmd/compile/internal/staticdata/embed.go
@@ -0,0 +1,174 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+ "path"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+const (
+ embedUnknown = iota
+ embedBytes
+ embedString
+ embedFiles
+)
+
+func embedFileList(v *ir.Name, kind int) []string {
+ // Build list of files to store.
+ have := make(map[string]bool)
+ var list []string
+ for _, e := range *v.Embed {
+ for _, pattern := range e.Patterns {
+ files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
+ if !ok {
+ base.ErrorfAt(e.Pos, 0, "invalid go:embed: build system did not map pattern: %s", pattern)
+ }
+ for _, file := range files {
+ if base.Flag.Cfg.Embed.Files[file] == "" {
+ base.ErrorfAt(e.Pos, 0, "invalid go:embed: build system did not map file: %s", file)
+ continue
+ }
+ if !have[file] {
+ have[file] = true
+ list = append(list, file)
+ }
+ if kind == embedFiles {
+ for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
+ have[dir] = true
+ list = append(list, dir+"/")
+ }
+ }
+ }
+ }
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return embedFileLess(list[i], list[j])
+ })
+
+ if kind == embedString || kind == embedBytes {
+ if len(list) > 1 {
+ base.ErrorfAt(v.Pos(), 0, "invalid go:embed: multiple files for type %v", v.Type())
+ return nil
+ }
+ }
+
+ return list
+}
+
+// embedKind determines the kind of embedding variable.
+func embedKind(typ *types.Type) int {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && typ.Sym().Pkg.Path == "embed" {
+ return embedFiles
+ }
+ if typ.Kind() == types.TSTRING {
+ return embedString
+ }
+ if typ.IsSlice() && typ.Elem().Kind() == types.TUINT8 {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
+func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
+ if name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
+
+// embedFileLess implements the sort order for a list of embedded files.
+// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
+func embedFileLess(x, y string) bool {
+ xdir, xelem, _ := embedFileNameSplit(x)
+ ydir, yelem, _ := embedFileNameSplit(y)
+ return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+// WriteEmbed emits the init data for a //go:embed variable,
+// which is either a string, a []byte, or an embed.FS.
+func WriteEmbed(v *ir.Name) {
+ // TODO(mdempsky): User errors should be reported by the frontend.
+
+ commentPos := (*v.Embed)[0].Pos
+ if base.Flag.Cfg.Embed.Patterns == nil {
+ base.ErrorfAt(commentPos, 0, "invalid go:embed: build system did not supply embed configuration")
+ return
+ }
+ kind := embedKind(v.Type())
+ if kind == embedUnknown {
+ base.ErrorfAt(v.Pos(), 0, "go:embed cannot apply to var of type %v", v.Type())
+ return
+ }
+
+ files := embedFileList(v, kind)
+ switch kind {
+ case embedString, embedBytes:
+ file := files[0]
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
+ if err != nil {
+ base.ErrorfAt(v.Pos(), 0, "embed %s: %v", file, err)
+ }
+ sym := v.Linksym()
+ off := 0
+ off = objw.SymPtr(sym, off, fsym, 0) // data string
+ off = objw.Uintptr(sym, off, uint64(size)) // len
+ if kind == embedBytes {
+ objw.Uintptr(sym, off, uint64(size)) // cap for slice
+ }
+
+ case embedFiles:
+ slicedata := v.Sym().Pkg.Lookup(v.Sym().Name + `.files`).Linksym()
+ off := 0
+ // []files pointed at by Files
+ off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
+
+ // embed/embed.go type file is:
+ // name string
+ // data string
+ // hash [16]byte
+ // Emit one of these per file in the set.
+ const hashSize = 16
+ hash := make([]byte, hashSize)
+ for _, file := range files {
+ off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string
+ off = objw.Uintptr(slicedata, off, uint64(len(file)))
+ if strings.HasSuffix(file, "/") {
+ // entry for directory - no data
+ off = objw.Uintptr(slicedata, off, 0)
+ off = objw.Uintptr(slicedata, off, 0)
+ off += hashSize
+ } else {
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
+ if err != nil {
+ base.ErrorfAt(v.Pos(), 0, "embed %s: %v", file, err)
+ }
+ off = objw.SymPtr(slicedata, off, fsym, 0) // data string
+ off = objw.Uintptr(slicedata, off, uint64(size))
+ off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
+ }
+ }
+ objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ sym := v.Linksym()
+ objw.SymPtr(sym, 0, slicedata, 0)
+ }
+}
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
new file mode 100644
index 0000000..4191f69
--- /dev/null
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -0,0 +1,1210 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticinit
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "os"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+type Entry struct {
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
+}
+
+type Plan struct {
+ E []Entry
+}
+
+// An Schedule is used to decompose assignment statements into
+// static and dynamic initialization parts. Static initializations are
+// handled by populating variables' linker symbol data, while dynamic
+// initializations are accumulated to be executed in order.
+type Schedule struct {
+ // Out is the ordered list of dynamic initialization
+ // statements.
+ Out []ir.Node
+
+ Plans map[ir.Node]*Plan
+ Temps map[ir.Node]*ir.Name
+
+ // seenMutation tracks whether we've seen an initialization
+ // expression that may have modified other package-scope variables
+ // within this package.
+ seenMutation bool
+}
+
+func (s *Schedule) append(n ir.Node) {
+ s.Out = append(s.Out, n)
+}
+
+// StaticInit adds an initialization statement n to the schedule.
+func (s *Schedule) StaticInit(n ir.Node) {
+ if !s.tryStaticInit(n) {
+ if base.Flag.Percent != 0 {
+ ir.Dump("StaticInit failed", n)
+ }
+ s.append(n)
+ }
+}
+
+// varToMapInit holds book-keeping state for global map initialization;
+// it records the init function created by the compiler to host the
+// initialization code for the map in question.
+var varToMapInit map[*ir.Name]*ir.Func
+
+// MapInitToVar is the inverse of VarToMapInit; it maintains a mapping
+// from a compiler-generated init function to the map the function is
+// initializing.
+var MapInitToVar map[*ir.Func]*ir.Name
+
+// recordFuncForVar establishes a mapping between global map var "v" and
+// outlined init function "fn" (and vice versa); so that we can use
+// the mappings later on to update relocations.
+func recordFuncForVar(v *ir.Name, fn *ir.Func) {
+ if varToMapInit == nil {
+ varToMapInit = make(map[*ir.Name]*ir.Func)
+ MapInitToVar = make(map[*ir.Func]*ir.Name)
+ }
+ varToMapInit[v] = fn
+ MapInitToVar[fn] = v
+}
+
+// allBlank reports whether every node in exprs is blank.
+func allBlank(exprs []ir.Node) bool {
+ for _, expr := range exprs {
+ if !ir.IsBlank(expr) {
+ return false
+ }
+ }
+ return true
+}
+
+// tryStaticInit attempts to statically execute an initialization
+// statement and reports whether it succeeded.
+func (s *Schedule) tryStaticInit(n ir.Node) bool {
+ var lhs []ir.Node
+ var rhs ir.Node
+
+ switch n.Op() {
+ default:
+ base.FatalfAt(n.Pos(), "unexpected initialization statement: %v", n)
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ lhs, rhs = []ir.Node{n.X}, n.Y
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ if len(n.Lhs) < 2 || len(n.Rhs) != 1 {
+ base.FatalfAt(n.Pos(), "unexpected shape for %v: %v", n.Op(), n)
+ }
+ lhs, rhs = n.Lhs, n.Rhs[0]
+ case ir.OCALLFUNC:
+ return false // outlined map init call; no mutations
+ }
+
+ if !s.seenMutation {
+ s.seenMutation = mayModifyPkgVar(rhs)
+ }
+
+ if allBlank(lhs) && !AnySideEffects(rhs) {
+ return true // discard
+ }
+
+ // Only worry about simple "l = r" assignments. The OAS2*
+ // assignments mostly necessitate dynamic execution anyway.
+ if len(lhs) > 1 {
+ return false
+ }
+
+ lno := ir.SetPos(n)
+ defer func() { base.Pos = lno }()
+
+ nam := lhs[0].(*ir.Name)
+ return s.StaticAssign(nam, 0, rhs, nam.Type())
+}
+
+// like staticassign but we are copying an already
+// initialized value r.
+func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
+ if rn.Class == ir.PFUNC {
+ // TODO if roff != 0 { panic }
+ staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn))
+ return true
+ }
+ if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
+ return false
+ }
+ if rn.Defn == nil {
+ // No explicit initialization value. Probably zeroed but perhaps
+ // supplied externally and of unknown value.
+ return false
+ }
+ if rn.Defn.Op() != ir.OAS {
+ return false
+ }
+ if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ return false
+ }
+ if rn.Embed != nil {
+ return false
+ }
+ orig := rn
+ r := rn.Defn.(*ir.AssignStmt).Y
+ if r == nil {
+ // types2.InitOrder doesn't include default initializers.
+ base.Fatalf("unexpected initializer: %v", rn.Defn)
+ }
+
+ // Variable may have been reassigned by a user-written function call
+ // that was invoked to initialize another global variable (#51913).
+ if s.seenMutation {
+ if base.Debug.StaticCopy != 0 {
+ base.WarnfAt(l.Pos(), "skipping static copy of %v+%v with %v", l, loff, r)
+ }
+ return false
+ }
+
+ for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
+ r = r.(*ir.ConvExpr).X
+ }
+
+ switch r.Op() {
+ case ir.OMETHEXPR:
+ r = r.(*ir.SelectorExpr).FuncName()
+ fallthrough
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if s.staticcopy(l, loff, r, typ) {
+ return true
+ }
+ // We may have skipped past one or more OCONVNOPs, so
+ // use conv to ensure r is assignable to l (#13263).
+ dst := ir.Node(l)
+ if loff != 0 || !types.Identical(typ, l.Type()) {
+ dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
+ }
+ s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ)))
+ return true
+
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
+ if ir.IsZero(r) {
+ return true
+ }
+ staticdata.InitConst(l, loff, r, int(typ.Size()))
+ return true
+
+ case ir.OADDR:
+ r := r.(*ir.AddrExpr)
+ if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME {
+ staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a))
+ return true
+ }
+
+ case ir.OPTRLIT:
+ r := r.(*ir.AddrExpr)
+ switch r.X.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
+ // copy pointer
+ staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r]))
+ return true
+ }
+
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
+ // copy slice
+ staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len)
+ return true
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ r := r.(*ir.CompLitExpr)
+ p := s.Plans[r]
+ for i := range p.E {
+ e := &p.E[i]
+ typ := e.Expr.Type()
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Size()))
+ continue
+ }
+ x := e.Expr
+ if x.Op() == ir.OMETHEXPR {
+ x = x.(*ir.SelectorExpr).FuncName()
+ }
+ if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
+ continue
+ }
+ // Requires computation, but we're
+ // copying someone else's computation.
+ ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
+ rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
+ ir.SetPos(rr)
+ s.append(ir.NewAssignStmt(base.Pos, ll, rr))
+ }
+
+ return true
+ }
+
+ return false
+}
+
+func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
+ if r == nil {
+ // No explicit initialization value. Either zero or supplied
+ // externally.
+ return true
+ }
+ for r.Op() == ir.OCONVNOP {
+ r = r.(*ir.ConvExpr).X
+ }
+
+ assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) {
+ if s.StaticAssign(a, aoff, v, v.Type()) {
+ return
+ }
+ var lhs ir.Node
+ if ir.IsBlank(a) {
+ // Don't use NameOffsetExpr with blank (#43677).
+ lhs = ir.BlankNode
+ } else {
+ lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type())
+ }
+ s.append(ir.NewAssignStmt(pos, lhs, v))
+ }
+
+ switch r.Op() {
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ return s.staticcopy(l, loff, r, typ)
+
+ case ir.OMETHEXPR:
+ r := r.(*ir.SelectorExpr)
+ return s.staticcopy(l, loff, r.FuncName(), typ)
+
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
+ if ir.IsZero(r) {
+ return true
+ }
+ staticdata.InitConst(l, loff, r, int(typ.Size()))
+ return true
+
+ case ir.OADDR:
+ r := r.(*ir.AddrExpr)
+ if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN {
+ staticdata.InitAddrOffset(l, loff, name.Linksym(), offset)
+ return true
+ }
+ fallthrough
+
+ case ir.OPTRLIT:
+ r := r.(*ir.AddrExpr)
+ switch r.X.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
+ // Init pointer.
+ a := StaticName(r.X.Type())
+
+ s.Temps[r] = a
+ staticdata.InitAddr(l, loff, a.Linksym())
+
+ // Init underlying literal.
+ assign(base.Pos, a, 0, r.X)
+ return true
+ }
+ //dump("not static ptrlit", r);
+
+ case ir.OSTR2BYTES:
+ r := r.(*ir.ConvExpr)
+ if l.Class == ir.PEXTERN && r.X.Op() == ir.OLITERAL {
+ sval := ir.StringVal(r.X)
+ staticdata.InitSliceBytes(l, loff, sval)
+ return true
+ }
+
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
+ s.initplan(r)
+ // Init slice.
+ ta := types.NewArray(r.Type().Elem(), r.Len)
+ ta.SetNoalg(true)
+ a := StaticName(ta)
+ s.Temps[r] = a
+ staticdata.InitSlice(l, loff, a.Linksym(), r.Len)
+ // Fall through to init underlying array.
+ l = a
+ loff = 0
+ fallthrough
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ r := r.(*ir.CompLitExpr)
+ s.initplan(r)
+
+ p := s.Plans[r]
+ for i := range p.E {
+ e := &p.E[i]
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Size()))
+ continue
+ }
+ ir.SetPos(e.Expr)
+ assign(base.Pos, l, loff+e.Xoffset, e.Expr)
+ }
+
+ return true
+
+ case ir.OMAPLIT:
+ break
+
+ case ir.OCLOSURE:
+ r := r.(*ir.ClosureExpr)
+ if ir.IsTrivialClosure(r) {
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(r.Pos(), "closure converted to global")
+ }
+ // Issue 59680: if the closure we're looking at was produced
+ // by inlining, it could be marked as hidden, which we don't
+ // want (moving the func to a static init will effectively
+ // hide it from escape analysis). Mark as non-hidden here.
+ // so that it will participated in escape analysis.
+ r.Func.SetIsHiddenClosure(false)
+ // Closures with no captured variables are globals,
+ // so the assignment can be done at link time.
+ // TODO if roff != 0 { panic }
+ staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname))
+ return true
+ }
+ ir.ClosureDebugRuntimeCheck(r)
+
+ case ir.OCONVIFACE:
+ // This logic is mirrored in isStaticCompositeLiteral.
+ // If you change something here, change it there, and vice versa.
+
+ // Determine the underlying concrete type and value we are converting from.
+ r := r.(*ir.ConvExpr)
+ val := ir.Node(r)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).X
+ }
+
+ if val.Type().IsInterface() {
+ // val is an interface type.
+ // If val is nil, we can statically initialize l;
+ // both words are zero and so there no work to do, so report success.
+ // If val is non-nil, we have no concrete type to record,
+ // and we won't be able to statically initialize its value, so report failure.
+ return val.Op() == ir.ONIL
+ }
+
+ if val.Type().HasShape() {
+ // See comment in cmd/compile/internal/walk/convert.go:walkConvInterface
+ return false
+ }
+
+ reflectdata.MarkTypeUsedInInterface(val.Type(), l.Linksym())
+
+ var itab *ir.AddrExpr
+ if typ.IsEmptyInterface() {
+ itab = reflectdata.TypePtrAt(base.Pos, val.Type())
+ } else {
+ itab = reflectdata.ITabAddrAt(base.Pos, val.Type(), typ)
+ }
+
+ // Create a copy of l to modify while we emit data.
+
+ // Emit itab, advance offset.
+ staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym)
+
+ // Emit data.
+ if types.IsDirectIface(val.Type()) {
+ if val.Op() == ir.ONIL {
+ // Nil is zero, nothing to do.
+ return true
+ }
+ // Copy val directly into n.
+ ir.SetPos(val)
+ assign(base.Pos, l, loff+int64(types.PtrSize), val)
+ } else {
+ // Construct temp to hold val, write pointer to temp into n.
+ a := StaticName(val.Type())
+ s.Temps[val] = a
+ assign(base.Pos, a, 0, val)
+ staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym())
+ }
+
+ return true
+
+ case ir.OINLCALL:
+ r := r.(*ir.InlinedCallExpr)
+ return s.staticAssignInlinedCall(l, loff, r, typ)
+ }
+
+ if base.Flag.Percent != 0 {
+ ir.Dump("not static", r)
+ }
+ return false
+}
+
+func (s *Schedule) initplan(n ir.Node) {
+ if s.Plans[n] != nil {
+ return
+ }
+ p := new(Plan)
+ s.Plans[n] = p
+ switch n.Op() {
+ default:
+ base.Fatalf("initplan")
+
+ case ir.OARRAYLIT, ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ var k int64
+ for _, a := range n.List {
+ if a.Op() == ir.OKEY {
+ kv := a.(*ir.KeyExpr)
+ k = typecheck.IndexConst(kv.Key)
+ if k < 0 {
+ base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
+ }
+ a = kv.Value
+ }
+ s.addvalue(p, k*n.Type().Elem().Size(), a)
+ k++
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, a := range n.List {
+ if a.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("initplan structlit")
+ }
+ a := a.(*ir.StructKeyExpr)
+ if a.Sym().IsBlank() {
+ continue
+ }
+ s.addvalue(p, a.Field.Offset, a.Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, a := range n.List {
+ if a.Op() != ir.OKEY {
+ base.Fatalf("initplan maplit")
+ }
+ a := a.(*ir.KeyExpr)
+ s.addvalue(p, -1, a.Value)
+ }
+ }
+}
+
+func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) {
+ // special case: zero can be dropped entirely
+ if ir.IsZero(n) {
+ return
+ }
+
+ // special case: inline struct and array (not slice) literals
+ if isvaluelit(n) {
+ s.initplan(n)
+ q := s.Plans[n]
+ for _, qe := range q.E {
+ // qe is a copy; we are not modifying entries in q.E
+ qe.Xoffset += xoffset
+ p.E = append(p.E, qe)
+ }
+ return
+ }
+
+ // add to plan
+ p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n})
+}
+
+func (s *Schedule) staticAssignInlinedCall(l *ir.Name, loff int64, call *ir.InlinedCallExpr, typ *types.Type) bool {
+ if base.Debug.InlStaticInit == 0 {
+ return false
+ }
+
+ // Handle the special case of an inlined call of
+ // a function body with a single return statement,
+ // which turns into a single assignment plus a goto.
+ //
+ // For example code like this:
+ //
+ // type T struct{ x int }
+ // func F(x int) *T { return &T{x} }
+ // var Global = F(400)
+ //
+ // turns into IR like this:
+ //
+ // INLCALL-init
+ // . AS2-init
+ // . . DCL # x.go:18:13
+ // . . . NAME-p.x Class:PAUTO Offset:0 InlFormal OnStack Used int tc(1) # x.go:14:9,x.go:18:13
+ // . AS2 Def tc(1) # x.go:18:13
+ // . AS2-Lhs
+ // . . NAME-p.x Class:PAUTO Offset:0 InlFormal OnStack Used int tc(1) # x.go:14:9,x.go:18:13
+ // . AS2-Rhs
+ // . . LITERAL-400 int tc(1) # x.go:18:14
+ // . INLMARK Index:1 # +x.go:18:13
+ // INLCALL PTR-*T tc(1) # x.go:18:13
+ // INLCALL-Body
+ // . BLOCK tc(1) # x.go:18:13
+ // . BLOCK-List
+ // . . DCL tc(1) # x.go:18:13
+ // . . . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13
+ // . . AS2 tc(1) # x.go:18:13
+ // . . AS2-Lhs
+ // . . . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13
+ // . . AS2-Rhs
+ // . . . INLINED RETURN ARGUMENT HERE
+ // . . GOTO p..i1 tc(1) # x.go:18:13
+ // . LABEL p..i1 # x.go:18:13
+ // INLCALL-ReturnVars
+ // . NAME-p.~R0 Class:PAUTO Offset:0 OnStack Used PTR-*T tc(1) # x.go:18:13
+ //
+ // In non-unified IR, the tree is slightly different:
+ // - if there are no arguments to the inlined function,
+ // the INLCALL-init omits the AS2.
+ // - the DCL inside BLOCK is on the AS2's init list,
+ // not its own statement in the top level of the BLOCK.
+ //
+ // If the init values are side-effect-free and each either only
+ // appears once in the function body or is safely repeatable,
+ // then we inline the value expressions into the return argument
+ // and then call StaticAssign to handle that copy.
+ //
+ // This handles simple cases like
+ //
+ // var myError = errors.New("mine")
+ //
+ // where errors.New is
+ //
+ // func New(text string) error {
+ // return &errorString{text}
+ // }
+ //
+ // We could make things more sophisticated but this kind of initializer
+ // is the most important case for us to get right.
+
+ init := call.Init()
+ var as2init *ir.AssignListStmt
+ if len(init) == 2 && init[0].Op() == ir.OAS2 && init[1].Op() == ir.OINLMARK {
+ as2init = init[0].(*ir.AssignListStmt)
+ } else if len(init) == 1 && init[0].Op() == ir.OINLMARK {
+ as2init = new(ir.AssignListStmt)
+ } else {
+ return false
+ }
+ if len(call.Body) != 2 || call.Body[0].Op() != ir.OBLOCK || call.Body[1].Op() != ir.OLABEL {
+ return false
+ }
+ label := call.Body[1].(*ir.LabelStmt).Label
+ block := call.Body[0].(*ir.BlockStmt)
+ list := block.List
+ var dcl *ir.Decl
+ if len(list) == 3 && list[0].Op() == ir.ODCL {
+ dcl = list[0].(*ir.Decl)
+ list = list[1:]
+ }
+ if len(list) != 2 ||
+ list[0].Op() != ir.OAS2 ||
+ list[1].Op() != ir.OGOTO ||
+ list[1].(*ir.BranchStmt).Label != label {
+ return false
+ }
+ as2body := list[0].(*ir.AssignListStmt)
+ if dcl == nil {
+ ainit := as2body.Init()
+ if len(ainit) != 1 || ainit[0].Op() != ir.ODCL {
+ return false
+ }
+ dcl = ainit[0].(*ir.Decl)
+ }
+ if len(as2body.Lhs) != 1 || as2body.Lhs[0] != dcl.X {
+ return false
+ }
+
+ // Can't remove the parameter variables if an address is taken.
+ for _, v := range as2init.Lhs {
+ if v.(*ir.Name).Addrtaken() {
+ return false
+ }
+ }
+ // Can't move the computation of the args if they have side effects.
+ for _, r := range as2init.Rhs {
+ if AnySideEffects(r) {
+ return false
+ }
+ }
+
+ // Can only substitute arg for param if param is used
+ // at most once or is repeatable.
+ count := make(map[*ir.Name]int)
+ for _, x := range as2init.Lhs {
+ count[x.(*ir.Name)] = 0
+ }
+
+ hasNonTrivialClosure := false
+ ir.Visit(as2body.Rhs[0], func(n ir.Node) {
+ if name, ok := n.(*ir.Name); ok {
+ if c, ok := count[name]; ok {
+ count[name] = c + 1
+ }
+ }
+ if clo, ok := n.(*ir.ClosureExpr); ok {
+ hasNonTrivialClosure = hasNonTrivialClosure || !ir.IsTrivialClosure(clo)
+ }
+ })
+
+ // If there's a non-trivial closure, it has captured the param,
+ // so we can't substitute arg for param.
+ if hasNonTrivialClosure {
+ return false
+ }
+
+ for name, c := range count {
+ if c > 1 {
+ // Check whether corresponding initializer can be repeated.
+ // Something like 1 can be; make(chan int) or &T{} cannot,
+ // because they need to evaluate to the same result in each use.
+ for i, n := range as2init.Lhs {
+ if n == name && !canRepeat(as2init.Rhs[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ // Possible static init.
+ // Build tree with args substituted for params and try it.
+ args := make(map[*ir.Name]ir.Node)
+ for i, v := range as2init.Lhs {
+ if ir.IsBlank(v) {
+ continue
+ }
+ args[v.(*ir.Name)] = as2init.Rhs[i]
+ }
+ r, ok := subst(as2body.Rhs[0], args)
+ if !ok {
+ return false
+ }
+ ok = s.StaticAssign(l, loff, r, typ)
+
+ if ok && base.Flag.Percent != 0 {
+ ir.Dump("static inlined-LEFT", l)
+ ir.Dump("static inlined-ORIG", call)
+ ir.Dump("static inlined-RIGHT", r)
+ }
+ return ok
+}
+
+// from here down is the walk analysis
+// of composite literals.
+// most of the work is to generate
+// data statements for the constant
+// part of the composite literal.
+
+var statuniqgen int // name generator for static temps
+
+// StaticName returns a name backed by a (writable) static data symbol.
+// Use readonlystaticname for read-only node.
+func StaticName(t *types.Type) *ir.Name {
+ // Don't use LookupNum; it interns the resulting string, but these are all unique.
+ sym := typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen))
+ statuniqgen++
+
+ n := ir.NewNameAt(base.Pos, sym, t)
+ sym.Def = n
+
+ n.Class = ir.PEXTERN
+ typecheck.Target.Externs = append(typecheck.Target.Externs, n)
+
+ n.Linksym().Set(obj.AttrStatic, true)
+ return n
+}
+
+// StaticLoc returns the static address of n, if n has one, or else nil.
+func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) {
+ if n == nil {
+ return nil, 0, false
+ }
+
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ return n, 0, true
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.SelectorExpr)
+ return StaticLoc(n.FuncName())
+
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ if name, offset, ok = StaticLoc(n.X); !ok {
+ break
+ }
+ offset += n.Offset()
+ return name, offset, true
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsSlice() {
+ break
+ }
+ if name, offset, ok = StaticLoc(n.X); !ok {
+ break
+ }
+ l := getlit(n.Index)
+ if l < 0 {
+ break
+ }
+
+ // Check for overflow.
+ if n.Type().Size() != 0 && types.MaxWidth/n.Type().Size() <= int64(l) {
+ break
+ }
+ offset += int64(l) * n.Type().Size()
+ return name, offset, true
+ }
+
+ return nil, 0, false
+}
+
+func isSideEffect(n ir.Node) bool {
+ switch n.Op() {
+ // Assume side effects unless we know otherwise.
+ default:
+ return true
+
+ // No side effects here (arguments are checked separately).
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OTYPE,
+ ir.OLITERAL,
+ ir.ONIL,
+ ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OADDSTR,
+ ir.OADDR,
+ ir.OANDAND,
+ ir.OBYTES2STR,
+ ir.ORUNES2STR,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES,
+ ir.OCAP,
+ ir.OCOMPLIT,
+ ir.OMAPLIT,
+ ir.OSTRUCTLIT,
+ ir.OARRAYLIT,
+ ir.OSLICELIT,
+ ir.OPTRLIT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODOT,
+ ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OLE,
+ ir.OGT,
+ ir.OGE,
+ ir.OKEY,
+ ir.OSTRUCTKEY,
+ ir.OLEN,
+ ir.OMUL,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.ONEW,
+ ir.ONOT,
+ ir.OBITNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.ORUNESTR,
+ ir.OREAL,
+ ir.OIMAG,
+ ir.OCOMPLEX:
+ return false
+
+ // Only possible side effect is division by zero.
+ case ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
+ return true
+ }
+
+ // Only possible side effect is panic on invalid size,
+ // but many makechan and makemap use size zero, which is definitely OK.
+ case ir.OMAKECHAN, ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
+ return true
+ }
+
+ // Only possible side effect is panic on invalid size.
+ // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
+ case ir.OMAKESLICE, ir.OMAKESLICECOPY:
+ return true
+ }
+ return false
+}
+
+// AnySideEffects reports whether n contains any operations that could have observable side effects.
+func AnySideEffects(n ir.Node) bool {
+ return ir.Any(n, isSideEffect)
+}
+
+// mayModifyPkgVar reports whether expression n may modify any
+// package-scope variables declared within the current package.
+func mayModifyPkgVar(n ir.Node) bool {
+ // safeLHS reports whether the assigned-to variable lhs is either a
+ // local variable or a global from another package.
+ safeLHS := func(lhs ir.Node) bool {
+ v, ok := ir.OuterValue(lhs).(*ir.Name)
+ return ok && v.Op() == ir.ONAME && !(v.Class == ir.PEXTERN && v.Sym().Pkg == types.LocalPkg)
+ }
+
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ return !ir.IsFuncPCIntrinsic(n.(*ir.CallExpr))
+
+ case ir.OAPPEND, ir.OCLEAR, ir.OCOPY:
+ return true // could mutate a global array
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if !safeLHS(n.X) {
+ return true
+ }
+
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ for _, lhs := range n.Lhs {
+ if !safeLHS(lhs) {
+ return true
+ }
+ }
+ }
+
+ return false
+ })
+}
+
+// canRepeat reports whether executing n multiple times has the same effect as
+// assigning n to a single variable and using that variable multiple times.
+func canRepeat(n ir.Node) bool {
+ bad := func(n ir.Node) bool {
+ if isSideEffect(n) {
+ return true
+ }
+ switch n.Op() {
+ case ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.OMAKESLICE,
+ ir.OMAKESLICECOPY,
+ ir.OMAPLIT,
+ ir.ONEW,
+ ir.OPTRLIT,
+ ir.OSLICELIT,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES:
+ return true
+ }
+ return false
+ }
+ return !ir.Any(n, bad)
+}
+
+func getlit(lit ir.Node) int {
+ if ir.IsSmallIntConst(lit) {
+ return int(ir.Int64Val(lit))
+ }
+ return -1
+}
+
+func isvaluelit(n ir.Node) bool {
+ return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
+}
+
+func subst(n ir.Node, m map[*ir.Name]ir.Node) (ir.Node, bool) {
+ valid := true
+ var edit func(ir.Node) ir.Node
+ edit = func(x ir.Node) ir.Node {
+ switch x.Op() {
+ case ir.ONAME:
+ x := x.(*ir.Name)
+ if v, ok := m[x]; ok {
+ return ir.DeepCopy(v.Pos(), v)
+ }
+ return x
+ case ir.ONONAME, ir.OLITERAL, ir.ONIL, ir.OTYPE:
+ return x
+ }
+ x = ir.Copy(x)
+ ir.EditChildrenWithHidden(x, edit)
+
+ // TODO: handle more operations, see details discussion in go.dev/cl/466277.
+ switch x.Op() {
+ case ir.OCONV:
+ x := x.(*ir.ConvExpr)
+ if x.X.Op() == ir.OLITERAL {
+ if x, ok := truncate(x.X, x.Type()); ok {
+ return x
+ }
+ valid = false
+ return x
+ }
+ case ir.OADDSTR:
+ return addStr(x.(*ir.AddStringExpr))
+ }
+ return x
+ }
+ n = edit(n)
+ return n, valid
+}
+
+// truncate returns the result of force converting c to type t,
+// truncating its value as needed, like a conversion of a variable.
+// If the conversion is too difficult, truncate returns nil, false.
+func truncate(c ir.Node, t *types.Type) (ir.Node, bool) {
+ ct := c.Type()
+ cv := c.Val()
+ if ct.Kind() != t.Kind() {
+ switch {
+ default:
+ // Note: float -> float/integer and complex -> complex are valid but subtle.
+ // For example a float32(float64 1e300) evaluates to +Inf at runtime
+ // and the compiler doesn't have any concept of +Inf, so that would
+ // have to be left for runtime code evaluation.
+ // For now
+ return nil, false
+
+ case ct.IsInteger() && t.IsInteger():
+ // truncate or sign extend
+ bits := t.Size() * 8
+ cv = constant.BinaryOp(cv, token.AND, constant.MakeUint64(1<<bits-1))
+ if t.IsSigned() && constant.Compare(cv, token.GEQ, constant.MakeUint64(1<<(bits-1))) {
+ cv = constant.BinaryOp(cv, token.OR, constant.MakeInt64(-1<<(bits-1)))
+ }
+ }
+ }
+ c = ir.NewConstExpr(cv, c)
+ c.SetType(t)
+ return c, true
+}
+
+func addStr(n *ir.AddStringExpr) ir.Node {
+ // Merge adjacent constants in the argument list.
+ s := n.List
+ need := 0
+ for i := 0; i < len(s); i++ {
+ if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
+ // Can't merge s[i] into s[i-1]; need a slot in the list.
+ need++
+ }
+ }
+ if need == len(s) {
+ return n
+ }
+ if need == 1 {
+ var strs []string
+ for _, c := range s {
+ strs = append(strs, ir.StringVal(c))
+ }
+ return ir.NewConstExpr(constant.MakeString(strings.Join(strs, "")), n)
+ }
+ newList := make([]ir.Node, 0, need)
+ for i := 0; i < len(s); i++ {
+ if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
+ // merge from i up to but not including i2
+ var strs []string
+ i2 := i
+ for i2 < len(s) && ir.IsConst(s[i2], constant.String) {
+ strs = append(strs, ir.StringVal(s[i2]))
+ i2++
+ }
+
+ newList = append(newList, ir.NewConstExpr(constant.MakeString(strings.Join(strs, "")), s[i]))
+ i = i2 - 1
+ } else {
+ newList = append(newList, s[i])
+ }
+ }
+
+ nn := ir.Copy(n).(*ir.AddStringExpr)
+ nn.List = newList
+ return nn
+}
+
+const wrapGlobalMapInitSizeThreshold = 20
+
+// tryWrapGlobalInit returns a new outlined function to contain global
+// initializer statement n, if possible and worthwhile. Otherwise, it
+// returns nil.
+//
+// Currently, it outlines map assignment statements with large,
+// side-effect-free RHS expressions.
+func tryWrapGlobalInit(n ir.Node) *ir.Func {
+ // Look for "X = ..." where X has map type.
+ // FIXME: might also be worth trying to look for cases where
+ // the LHS is of interface type but RHS is map type.
+ if n.Op() != ir.OAS {
+ return nil
+ }
+ as := n.(*ir.AssignStmt)
+ if ir.IsBlank(as.X) || as.X.Op() != ir.ONAME {
+ return nil
+ }
+ nm := as.X.(*ir.Name)
+ if !nm.Type().IsMap() {
+ return nil
+ }
+
+ // Determine size of RHS.
+ rsiz := 0
+ ir.Any(as.Y, func(n ir.Node) bool {
+ rsiz++
+ return false
+ })
+ if base.Debug.WrapGlobalMapDbg > 0 {
+ fmt.Fprintf(os.Stderr, "=-= mapassign %s %v rhs size %d\n",
+ base.Ctxt.Pkgpath, n, rsiz)
+ }
+
+ // Reject smaller candidates if not in stress mode.
+ if rsiz < wrapGlobalMapInitSizeThreshold && base.Debug.WrapGlobalMapCtl != 2 {
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= skipping %v size too small at %d\n",
+ nm, rsiz)
+ }
+ return nil
+ }
+
+ // Reject right hand sides with side effects.
+ if AnySideEffects(as.Y) {
+ if base.Debug.WrapGlobalMapDbg > 0 {
+ fmt.Fprintf(os.Stderr, "=-= rejected %v due to side effects\n", nm)
+ }
+ return nil
+ }
+
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= committed for: %+v\n", n)
+ }
+
+ // Create a new function that will (eventually) have this form:
+ //
+ // func map.init.%d() {
+ // globmapvar = <map initialization>
+ // }
+ //
+ // Note: cmd/link expects the function name to contain "map.init".
+ minitsym := typecheck.LookupNum("map.init.", mapinitgen)
+ mapinitgen++
+
+ fn := ir.NewFunc(n.Pos(), n.Pos(), minitsym, types.NewSignature(nil, nil, nil))
+ fn.SetInlinabilityChecked(true) // suppress inlining (which would defeat the point)
+ typecheck.DeclFunc(fn)
+ if base.Debug.WrapGlobalMapDbg > 0 {
+ fmt.Fprintf(os.Stderr, "=-= generated func is %v\n", fn)
+ }
+
+ // NB: we're relying on this phase being run before inlining;
+ // if for some reason we need to move it after inlining, we'll
+ // need code here that relocates or duplicates inline temps.
+
+ // Insert assignment into function body; mark body finished.
+ fn.Body = []ir.Node{as}
+ typecheck.FinishFuncBody()
+
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= mapvar is %v\n", nm)
+ fmt.Fprintf(os.Stderr, "=-= newfunc is %+v\n", fn)
+ }
+
+ recordFuncForVar(nm, fn)
+
+ return fn
+}
+
+// mapinitgen is a counter used to uniquify compiler-generated
+// map init functions.
+var mapinitgen int
+
+// AddKeepRelocations adds a dummy "R_KEEP" relocation from each
+// global map variable V to its associated outlined init function.
+// These relocation ensure that if the map var itself is determined to
+// be reachable at link time, we also mark the init function as
+// reachable.
+func AddKeepRelocations() {
+ if varToMapInit == nil {
+ return
+ }
+ for k, v := range varToMapInit {
+ // Add R_KEEP relocation from map to init function.
+ fs := v.Linksym()
+ if fs == nil {
+ base.Fatalf("bad: func %v has no linksym", v)
+ }
+ vs := k.Linksym()
+ if vs == nil {
+ base.Fatalf("bad: mapvar %v has no linksym", k)
+ }
+ r := obj.Addrel(vs)
+ r.Sym = fs
+ r.Type = objabi.R_KEEP
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= add R_KEEP relo from %s to %s\n",
+ vs.Name, fs.Name)
+ }
+ }
+ varToMapInit = nil
+}
+
+// OutlineMapInits replaces global map initializers with outlined
+// calls to separate "map init" functions (where possible and
+// profitable), to facilitate better dead-code elimination by the
+// linker.
+func OutlineMapInits(fn *ir.Func) {
+ if base.Debug.WrapGlobalMapCtl == 1 {
+ return
+ }
+
+ outlined := 0
+ for i, stmt := range fn.Body {
+ // Attempt to outline stmt. If successful, replace it with a call
+ // to the returned wrapper function.
+ if wrapperFn := tryWrapGlobalInit(stmt); wrapperFn != nil {
+ ir.WithFunc(fn, func() {
+ fn.Body[i] = typecheck.Call(stmt.Pos(), wrapperFn.Nname, nil, false)
+ })
+ outlined++
+ }
+ }
+
+ if base.Debug.WrapGlobalMapDbg > 1 {
+ fmt.Fprintf(os.Stderr, "=-= outlined %v map initializations\n", outlined)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go
new file mode 100644
index 0000000..3d7ffed
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/branches.go
@@ -0,0 +1,339 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "fmt"
+
+// checkBranches checks correct use of labels and branch
+// statements (break, continue, fallthrough, goto) in a function body.
+// It catches:
+// - misplaced breaks, continues, and fallthroughs
+// - bad labeled breaks and continues
+// - invalid, unused, duplicate, and missing labels
+// - gotos jumping over variable declarations and into blocks
+func checkBranches(body *BlockStmt, errh ErrorHandler) {
+ if body == nil {
+ return
+ }
+
+ // scope of all labels in this body
+ ls := &labelScope{errh: errh}
+ fwdGotos := ls.blockBranches(nil, targets{}, nil, body.Pos(), body.List)
+
+ // If there are any forward gotos left, no matching label was
+ // found for them. Either those labels were never defined, or
+ // they are inside blocks and not reachable from the gotos.
+ for _, fwd := range fwdGotos {
+ name := fwd.Label.Value
+ if l := ls.labels[name]; l != nil {
+ l.used = true // avoid "defined and not used" error
+ ls.err(fwd.Label.Pos(), "goto %s jumps into block starting at %s", name, l.parent.start)
+ } else {
+ ls.err(fwd.Label.Pos(), "label %s not defined", name)
+ }
+ }
+
+ // spec: "It is illegal to define a label that is never used."
+ for _, l := range ls.labels {
+ if !l.used {
+ l := l.lstmt.Label
+ ls.err(l.Pos(), "label %s defined and not used", l.Value)
+ }
+ }
+}
+
+type labelScope struct {
+ errh ErrorHandler
+ labels map[string]*label // all label declarations inside the function; allocated lazily
+}
+
+type label struct {
+ parent *block // block containing this label declaration
+ lstmt *LabeledStmt // statement declaring the label
+ used bool // whether the label is used or not
+}
+
+type block struct {
+ parent *block // immediately enclosing block, or nil
+ start Pos // start of block
+ lstmt *LabeledStmt // labeled statement associated with this block, or nil
+}
+
+func (ls *labelScope) err(pos Pos, format string, args ...interface{}) {
+ ls.errh(Error{pos, fmt.Sprintf(format, args...)})
+}
+
+// declare declares the label introduced by s in block b and returns
+// the new label. If the label was already declared, declare reports
+// and error and the existing label is returned instead.
+func (ls *labelScope) declare(b *block, s *LabeledStmt) *label {
+ name := s.Label.Value
+ labels := ls.labels
+ if labels == nil {
+ labels = make(map[string]*label)
+ ls.labels = labels
+ } else if alt := labels[name]; alt != nil {
+ ls.err(s.Label.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String())
+ return alt
+ }
+ l := &label{b, s, false}
+ labels[name] = l
+ return l
+}
+
+// gotoTarget returns the labeled statement matching the given name and
+// declared in block b or any of its enclosing blocks. The result is nil
+// if the label is not defined, or doesn't match a valid labeled statement.
+func (ls *labelScope) gotoTarget(b *block, name string) *LabeledStmt {
+ if l := ls.labels[name]; l != nil {
+ l.used = true // even if it's not a valid target
+ for ; b != nil; b = b.parent {
+ if l.parent == b {
+ return l.lstmt
+ }
+ }
+ }
+ return nil
+}
+
+var invalid = new(LabeledStmt) // singleton to signal invalid enclosing target
+
+// enclosingTarget returns the innermost enclosing labeled statement matching
+// the given name. The result is nil if the label is not defined, and invalid
+// if the label is defined but doesn't label a valid labeled statement.
+func (ls *labelScope) enclosingTarget(b *block, name string) *LabeledStmt {
+ if l := ls.labels[name]; l != nil {
+ l.used = true // even if it's not a valid target (see e.g., test/fixedbugs/bug136.go)
+ for ; b != nil; b = b.parent {
+ if l.lstmt == b.lstmt {
+ return l.lstmt
+ }
+ }
+ return invalid
+ }
+ return nil
+}
+
+// targets describes the target statements within which break
+// or continue statements are valid.
+type targets struct {
+ breaks Stmt // *ForStmt, *SwitchStmt, *SelectStmt, or nil
+ continues *ForStmt // or nil
+ caseIndex int // case index of immediately enclosing switch statement, or < 0
+}
+
+// blockBranches processes a block's body starting at start and returns the
+// list of unresolved (forward) gotos. parent is the immediately enclosing
+// block (or nil), ctxt provides information about the enclosing statements,
+// and lstmt is the labeled statement associated with this block, or nil.
+func (ls *labelScope) blockBranches(parent *block, ctxt targets, lstmt *LabeledStmt, start Pos, body []Stmt) []*BranchStmt {
+ b := &block{parent: parent, start: start, lstmt: lstmt}
+
+ var varPos Pos
+ var varName Expr
+ var fwdGotos, badGotos []*BranchStmt
+
+ recordVarDecl := func(pos Pos, name Expr) {
+ varPos = pos
+ varName = name
+ // Any existing forward goto jumping over the variable
+ // declaration is invalid. The goto may still jump out
+ // of the block and be ok, but we don't know that yet.
+ // Remember all forward gotos as potential bad gotos.
+ badGotos = append(badGotos[:0], fwdGotos...)
+ }
+
+ jumpsOverVarDecl := func(fwd *BranchStmt) bool {
+ if varPos.IsKnown() {
+ for _, bad := range badGotos {
+ if fwd == bad {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ innerBlock := func(ctxt targets, start Pos, body []Stmt) {
+ // Unresolved forward gotos from the inner block
+ // become forward gotos for the current block.
+ fwdGotos = append(fwdGotos, ls.blockBranches(b, ctxt, lstmt, start, body)...)
+ }
+
+ // A fallthrough statement counts as last statement in a statement
+ // list even if there are trailing empty statements; remove them.
+ stmtList := trimTrailingEmptyStmts(body)
+ for stmtIndex, stmt := range stmtList {
+ lstmt = nil
+ L:
+ switch s := stmt.(type) {
+ case *DeclStmt:
+ for _, d := range s.DeclList {
+ if v, ok := d.(*VarDecl); ok {
+ recordVarDecl(v.Pos(), v.NameList[0])
+ break // the first VarDecl will do
+ }
+ }
+
+ case *LabeledStmt:
+ // declare non-blank label
+ if name := s.Label.Value; name != "_" {
+ l := ls.declare(b, s)
+ // resolve matching forward gotos
+ i := 0
+ for _, fwd := range fwdGotos {
+ if fwd.Label.Value == name {
+ fwd.Target = s
+ l.used = true
+ if jumpsOverVarDecl(fwd) {
+ ls.err(
+ fwd.Label.Pos(),
+ "goto %s jumps over declaration of %s at %s",
+ name, String(varName), varPos,
+ )
+ }
+ } else {
+ // no match - keep forward goto
+ fwdGotos[i] = fwd
+ i++
+ }
+ }
+ fwdGotos = fwdGotos[:i]
+ lstmt = s
+ }
+ // process labeled statement
+ stmt = s.Stmt
+ goto L
+
+ case *BranchStmt:
+ // unlabeled branch statement
+ if s.Label == nil {
+ switch s.Tok {
+ case _Break:
+ if t := ctxt.breaks; t != nil {
+ s.Target = t
+ } else {
+ ls.err(s.Pos(), "break is not in a loop, switch, or select")
+ }
+ case _Continue:
+ if t := ctxt.continues; t != nil {
+ s.Target = t
+ } else {
+ ls.err(s.Pos(), "continue is not in a loop")
+ }
+ case _Fallthrough:
+ msg := "fallthrough statement out of place"
+ if t, _ := ctxt.breaks.(*SwitchStmt); t != nil {
+ if _, ok := t.Tag.(*TypeSwitchGuard); ok {
+ msg = "cannot fallthrough in type switch"
+ } else if ctxt.caseIndex < 0 || stmtIndex+1 < len(stmtList) {
+ // fallthrough nested in a block or not the last statement
+ // use msg as is
+ } else if ctxt.caseIndex+1 == len(t.Body) {
+ msg = "cannot fallthrough final case in switch"
+ } else {
+ break // fallthrough ok
+ }
+ }
+ ls.err(s.Pos(), msg)
+ case _Goto:
+ fallthrough // should always have a label
+ default:
+ panic("invalid BranchStmt")
+ }
+ break
+ }
+
+ // labeled branch statement
+ name := s.Label.Value
+ switch s.Tok {
+ case _Break:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for", "switch", or "select" statement, and that is the one
+ // whose execution terminates."
+ if t := ls.enclosingTarget(b, name); t != nil {
+ switch t := t.Stmt.(type) {
+ case *SwitchStmt, *SelectStmt, *ForStmt:
+ s.Target = t
+ default:
+ ls.err(s.Label.Pos(), "invalid break label %s", name)
+ }
+ } else {
+ ls.err(s.Label.Pos(), "break label not defined: %s", name)
+ }
+
+ case _Continue:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for" statement, and that is the one whose execution advances."
+ if t := ls.enclosingTarget(b, name); t != nil {
+ if t, ok := t.Stmt.(*ForStmt); ok {
+ s.Target = t
+ } else {
+ ls.err(s.Label.Pos(), "invalid continue label %s", name)
+ }
+ } else {
+ ls.err(s.Label.Pos(), "continue label not defined: %s", name)
+ }
+
+ case _Goto:
+ if t := ls.gotoTarget(b, name); t != nil {
+ s.Target = t
+ } else {
+ // label may be declared later - add goto to forward gotos
+ fwdGotos = append(fwdGotos, s)
+ }
+
+ case _Fallthrough:
+ fallthrough // should never have a label
+ default:
+ panic("invalid BranchStmt")
+ }
+
+ case *AssignStmt:
+ if s.Op == Def {
+ recordVarDecl(s.Pos(), s.Lhs)
+ }
+
+ case *BlockStmt:
+ inner := targets{ctxt.breaks, ctxt.continues, -1}
+ innerBlock(inner, s.Pos(), s.List)
+
+ case *IfStmt:
+ inner := targets{ctxt.breaks, ctxt.continues, -1}
+ innerBlock(inner, s.Then.Pos(), s.Then.List)
+ if s.Else != nil {
+ innerBlock(inner, s.Else.Pos(), []Stmt{s.Else})
+ }
+
+ case *ForStmt:
+ inner := targets{s, s, -1}
+ innerBlock(inner, s.Body.Pos(), s.Body.List)
+
+ case *SwitchStmt:
+ inner := targets{s, ctxt.continues, -1}
+ for i, cc := range s.Body {
+ inner.caseIndex = i
+ innerBlock(inner, cc.Pos(), cc.Body)
+ }
+
+ case *SelectStmt:
+ inner := targets{s, ctxt.continues, -1}
+ for _, cc := range s.Body {
+ innerBlock(inner, cc.Pos(), cc.Body)
+ }
+ }
+ }
+
+ return fwdGotos
+}
+
+func trimTrailingEmptyStmts(list []Stmt) []Stmt {
+ for i := len(list); i > 0; i-- {
+ if _, ok := list[i-1].(*EmptyStmt); !ok {
+ return list[:i]
+ }
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/syntax/dumper.go b/src/cmd/compile/internal/syntax/dumper.go
new file mode 100644
index 0000000..d524788
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/dumper.go
@@ -0,0 +1,212 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of syntax tree structures.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Fdump dumps the structure of the syntax tree rooted at n to w.
+// It is intended for debugging purposes; no specific output format
+// is guaranteed.
+func Fdump(w io.Writer, n Node) (err error) {
+ p := dumper{
+ output: w,
+ ptrmap: make(map[Node]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(writeError).err // re-panics if it's not a writeError
+ }
+ }()
+
+ if n == nil {
+ p.printf("nil\n")
+ return
+ }
+ p.dump(reflect.ValueOf(n), n)
+ p.printf("\n")
+
+ return
+}
+
+type dumper struct {
+ output io.Writer
+ ptrmap map[Node]int // node -> dump line number
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// writeError wraps locally caught write errors so we can distinguish
+// them from genuine panics which we don't want to return as errors.
+type writeError struct {
+ err error
+}
+
+// printf is a convenience wrapper that takes care of print errors.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(writeError{err})
+ }
+}
+
+// dump prints the contents of x.
+// If x is the reflect.Value of a struct s, where &s
+// implements Node, then &s should be passed for n -
+// this permits printing of the unexported span and
+// comments fields of the embedded isNode field by
+// calling the Span() and Comment() instead of using
+// reflection.
+func (p *dumper) dump(x reflect.Value, n Node) {
+ switch x.Kind() {
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), nil)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ // special cases for identifiers w/o attached comments (common case)
+ if x, ok := x.Interface().(*Name); ok {
+ p.printf("%s @ %v", x.Value, x.Pos())
+ return
+ }
+
+ p.printf("*")
+ // Fields may share type expressions, and declarations
+ // may share the same group - use ptrmap to keep track
+ // of nodes that have been printed already.
+ if ptr, ok := x.Interface().(Node); ok {
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(Node @ %d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ n = ptr
+ }
+ p.dump(x.Elem(), n)
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), nil)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ // if span, ok := x.Interface().(lexical.Span); ok {
+ // p.printf("%s", &span)
+ // return
+ // }
+
+ p.printf("%s {", typ)
+ p.indent++
+
+ first := true
+ if n != nil {
+ p.printf("\n")
+ first = false
+ // p.printf("Span: %s\n", n.Span())
+ // if c := *n.Comments(); c != nil {
+ // p.printf("Comments: ")
+ // p.dump(reflect.ValueOf(c), nil) // a Comment is not a Node
+ // p.printf("\n")
+ // }
+ }
+
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; isExported(name) {
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x.Field(i), nil)
+ p.printf("\n")
+ }
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ switch x := x.Interface().(type) {
+ case string:
+ // print strings in quotes
+ p.printf("%q", x)
+ default:
+ p.printf("%v", x)
+ }
+ }
+}
+
+func isExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
diff --git a/src/cmd/compile/internal/syntax/dumper_test.go b/src/cmd/compile/internal/syntax/dumper_test.go
new file mode 100644
index 0000000..1ba85cc
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/dumper_test.go
@@ -0,0 +1,21 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "testing"
+)
+
+func TestDump(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ ast, _ := ParseFile(*src_, func(err error) { t.Error(err) }, nil, CheckBranches)
+
+ if ast != nil {
+ Fdump(testOut(), ast)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/error_test.go b/src/cmd/compile/internal/syntax/error_test.go
new file mode 100644
index 0000000..55ea634
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/error_test.go
@@ -0,0 +1,190 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a regression test harness for syntax errors.
+// The files in the testdata directory are parsed and the reported
+// errors are compared against the errors declared in those files.
+//
+// Errors are declared in place in the form of "error comments",
+// just before (or on the same line as) the offending token.
+//
+// Error comments must be of the form // ERROR rx or /* ERROR rx */
+// where rx is a regular expression that matches the reported error
+// message. The rx text comprises the comment text after "ERROR ",
+// with any white space around it stripped.
+//
+// If the line comment form is used, the reported error's line must
+// match the line of the error comment.
+//
+// If the regular comment form is used, the reported error's position
+// must match the position of the token immediately following the
+// error comment. Thus, /* ERROR ... */ comments should appear
+// immediately before the position where the error is reported.
+//
+// Currently, the test harness only supports one error comment per
+// token. If multiple error comments appear before a token, only
+// the last one is considered.
+
+package syntax
+
+import (
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+)
+
+const testdata = "testdata" // directory containing test files
+
+var print = flag.Bool("print", false, "only print errors")
+
+// A position represents a source position in the current file.
+type position struct {
+ line, col uint
+}
+
+func (pos position) String() string {
+ return fmt.Sprintf("%d:%d", pos.line, pos.col)
+}
+
+func sortedPositions(m map[position]string) []position {
+ list := make([]position, len(m))
+ i := 0
+ for pos := range m {
+ list[i] = pos
+ i++
+ }
+ sort.Slice(list, func(i, j int) bool {
+ a, b := list[i], list[j]
+ return a.line < b.line || a.line == b.line && a.col < b.col
+ })
+ return list
+}
+
+// declaredErrors returns a map of source positions to error
+// patterns, extracted from error comments in the given file.
+// Error comments in the form of line comments use col = 0
+// in their position.
+func declaredErrors(t *testing.T, filename string) map[position]string {
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ declared := make(map[position]string)
+
+ var s scanner
+ var pattern string
+ s.init(f, func(line, col uint, msg string) {
+ // errors never start with '/' so they are automatically excluded here
+ switch {
+ case strings.HasPrefix(msg, "// ERROR "):
+ // we can't have another comment on the same line - just add it
+ declared[position{s.line, 0}] = strings.TrimSpace(msg[9:])
+ case strings.HasPrefix(msg, "/* ERROR "):
+ // we may have more comments before the next token - collect them
+ pattern = strings.TrimSpace(msg[9 : len(msg)-2])
+ }
+ }, comments)
+
+ // consume file
+ for {
+ s.next()
+ if pattern != "" {
+ declared[position{s.line, s.col}] = pattern
+ pattern = ""
+ }
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ return declared
+}
+
+func testSyntaxErrors(t *testing.T, filename string) {
+ declared := declaredErrors(t, filename)
+ if *print {
+ fmt.Println("Declared errors:")
+ for _, pos := range sortedPositions(declared) {
+ fmt.Printf("%s:%s: %s\n", filename, pos, declared[pos])
+ }
+
+ fmt.Println()
+ fmt.Println("Reported errors:")
+ }
+
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ ParseFile(filename, func(err error) {
+ e, ok := err.(Error)
+ if !ok {
+ return
+ }
+
+ if *print {
+ fmt.Println(err)
+ return
+ }
+
+ orig := position{e.Pos.Line(), e.Pos.Col()}
+ pos := orig
+ pattern, found := declared[pos]
+ if !found {
+ // try line comment (only line must match)
+ pos = position{e.Pos.Line(), 0}
+ pattern, found = declared[pos]
+ }
+ if found {
+ rx, err := regexp.Compile(pattern)
+ if err != nil {
+ t.Errorf("%s:%s: %v", filename, pos, err)
+ return
+ }
+ if match := rx.MatchString(e.Msg); !match {
+ t.Errorf("%s:%s: %q does not match %q", filename, pos, e.Msg, pattern)
+ return
+ }
+ // we have a match - eliminate this error
+ delete(declared, pos)
+ } else {
+ t.Errorf("%s:%s: unexpected error: %s", filename, orig, e.Msg)
+ }
+ }, nil, CheckBranches)
+
+ if *print {
+ fmt.Println()
+ return // we're done
+ }
+
+ // report expected but not reported errors
+ for pos, pattern := range declared {
+ t.Errorf("%s:%s: missing error: %s", filename, pos, pattern)
+ }
+}
+
+func TestSyntaxErrors(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // we need access to source (testdata)
+
+ list, err := os.ReadDir(testdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, fi := range list {
+ name := fi.Name()
+ if !fi.IsDir() && !strings.HasPrefix(name, ".") {
+ testSyntaxErrors(t, filepath.Join(testdata, name))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
new file mode 100644
index 0000000..de277fc
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -0,0 +1,487 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// ----------------------------------------------------------------------------
+// Nodes
+
+type Node interface {
+ // Pos() returns the position associated with the node as follows:
+ // 1) The position of a node representing a terminal syntax production
+ // (Name, BasicLit, etc.) is the position of the respective production
+ // in the source.
+ // 2) The position of a node representing a non-terminal production
+ // (IndexExpr, IfStmt, etc.) is the position of a token uniquely
+ // associated with that production; usually the left-most one
+ // ('[' for IndexExpr, 'if' for IfStmt, etc.)
+ Pos() Pos
+ SetPos(Pos)
+ aNode()
+}
+
+type node struct {
+ // commented out for now since not yet used
+ // doc *Comment // nil means no comment(s) attached
+ pos Pos
+}
+
+func (n *node) Pos() Pos { return n.pos }
+func (n *node) SetPos(pos Pos) { n.pos = pos }
+func (*node) aNode() {}
+
+// ----------------------------------------------------------------------------
+// Files
+
+// package PkgName; DeclList[0], DeclList[1], ...
+type File struct {
+ Pragma Pragma
+ PkgName *Name
+ DeclList []Decl
+ EOF Pos
+ GoVersion string
+ node
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type (
+ Decl interface {
+ Node
+ aDecl()
+ }
+
+ // Path
+ // LocalPkgName Path
+ ImportDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ LocalPkgName *Name // including "."; nil means no rename present
+ Path *BasicLit // Path.Bad || Path.Kind == StringLit; nil means no path
+ decl
+ }
+
+ // NameList
+ // NameList = Values
+ // NameList Type = Values
+ ConstDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ NameList []*Name
+ Type Expr // nil means no type
+ Values Expr // nil means no values
+ decl
+ }
+
+ // Name Type
+ TypeDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ Name *Name
+ TParamList []*Field // nil means no type parameters
+ Alias bool
+ Type Expr
+ decl
+ }
+
+ // NameList Type
+ // NameList Type = Values
+ // NameList = Values
+ VarDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ NameList []*Name
+ Type Expr // nil means no type
+ Values Expr // nil means no values
+ decl
+ }
+
+ // func Name Type { Body }
+ // func Name Type
+ // func Receiver Name Type { Body }
+ // func Receiver Name Type
+ FuncDecl struct {
+ Pragma Pragma
+ Recv *Field // nil means regular function
+ Name *Name
+ TParamList []*Field // nil means no type parameters
+ Type *FuncType
+ Body *BlockStmt // nil means no body (forward declaration)
+ decl
+ }
+)
+
+type decl struct{ node }
+
+func (*decl) aDecl() {}
+
+// All declarations belonging to the same group point to the same Group node.
+type Group struct {
+ _ int // not empty so we are guaranteed different Group instances
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func NewName(pos Pos, value string) *Name {
+ n := new(Name)
+ n.pos = pos
+ n.Value = value
+ return n
+}
+
+type (
+ Expr interface {
+ Node
+ typeInfo
+ aExpr()
+ }
+
+ // Placeholder for an expression that failed to parse
+ // correctly and where we can't provide a better node.
+ BadExpr struct {
+ expr
+ }
+
+ // Value
+ Name struct {
+ Value string
+ expr
+ }
+
+ // Value
+ BasicLit struct {
+ Value string
+ Kind LitKind
+ Bad bool // true means the literal Value has syntax errors
+ expr
+ }
+
+ // Type { ElemList[0], ElemList[1], ... }
+ CompositeLit struct {
+ Type Expr // nil means no literal type
+ ElemList []Expr
+ NKeys int // number of elements with keys
+ Rbrace Pos
+ expr
+ }
+
+ // Key: Value
+ KeyValueExpr struct {
+ Key, Value Expr
+ expr
+ }
+
+ // func Type { Body }
+ FuncLit struct {
+ Type *FuncType
+ Body *BlockStmt
+ expr
+ }
+
+ // (X)
+ ParenExpr struct {
+ X Expr
+ expr
+ }
+
+ // X.Sel
+ SelectorExpr struct {
+ X Expr
+ Sel *Name
+ expr
+ }
+
+ // X[Index]
+ // X[T1, T2, ...] (with Ti = Index.(*ListExpr).ElemList[i])
+ IndexExpr struct {
+ X Expr
+ Index Expr
+ expr
+ }
+
+ // X[Index[0] : Index[1] : Index[2]]
+ SliceExpr struct {
+ X Expr
+ Index [3]Expr
+ // Full indicates whether this is a simple or full slice expression.
+ // In a valid AST, this is equivalent to Index[2] != nil.
+ // TODO(mdempsky): This is only needed to report the "3-index
+ // slice of string" error when Index[2] is missing.
+ Full bool
+ expr
+ }
+
+ // X.(Type)
+ AssertExpr struct {
+ X Expr
+ Type Expr
+ expr
+ }
+
+ // X.(type)
+ // Lhs := X.(type)
+ TypeSwitchGuard struct {
+ Lhs *Name // nil means no Lhs :=
+ X Expr // X.(type)
+ expr
+ }
+
+ Operation struct {
+ Op Operator
+ X, Y Expr // Y == nil means unary expression
+ expr
+ }
+
+ // Fun(ArgList[0], ArgList[1], ...)
+ CallExpr struct {
+ Fun Expr
+ ArgList []Expr // nil means no arguments
+ HasDots bool // last argument is followed by ...
+ expr
+ }
+
+ // ElemList[0], ElemList[1], ...
+ ListExpr struct {
+ ElemList []Expr
+ expr
+ }
+
+ // [Len]Elem
+ ArrayType struct {
+ // TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments)
+ Len Expr // nil means Len is ...
+ Elem Expr
+ expr
+ }
+
+ // []Elem
+ SliceType struct {
+ Elem Expr
+ expr
+ }
+
+ // ...Elem
+ DotsType struct {
+ Elem Expr
+ expr
+ }
+
+ // struct { FieldList[0] TagList[0]; FieldList[1] TagList[1]; ... }
+ StructType struct {
+ FieldList []*Field
+ TagList []*BasicLit // i >= len(TagList) || TagList[i] == nil means no tag for field i
+ expr
+ }
+
+ // Name Type
+ // Type
+ Field struct {
+ Name *Name // nil means anonymous field/parameter (structs/parameters), or embedded element (interfaces)
+ Type Expr // field names declared in a list share the same Type (identical pointers)
+ node
+ }
+
+ // interface { MethodList[0]; MethodList[1]; ... }
+ InterfaceType struct {
+ MethodList []*Field
+ expr
+ }
+
+ FuncType struct {
+ ParamList []*Field
+ ResultList []*Field
+ expr
+ }
+
+ // map[Key]Value
+ MapType struct {
+ Key, Value Expr
+ expr
+ }
+
+ // chan Elem
+ // <-chan Elem
+ // chan<- Elem
+ ChanType struct {
+ Dir ChanDir // 0 means no direction
+ Elem Expr
+ expr
+ }
+)
+
+type expr struct {
+ node
+ typeAndValue // After typechecking, contains the results of typechecking this expression.
+}
+
+func (*expr) aExpr() {}
+
+type ChanDir uint
+
+const (
+ _ ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// ----------------------------------------------------------------------------
+// Statements
+
+type (
+ Stmt interface {
+ Node
+ aStmt()
+ }
+
+ SimpleStmt interface {
+ Stmt
+ aSimpleStmt()
+ }
+
+ EmptyStmt struct {
+ simpleStmt
+ }
+
+ LabeledStmt struct {
+ Label *Name
+ Stmt Stmt
+ stmt
+ }
+
+ BlockStmt struct {
+ List []Stmt
+ Rbrace Pos
+ stmt
+ }
+
+ ExprStmt struct {
+ X Expr
+ simpleStmt
+ }
+
+ SendStmt struct {
+ Chan, Value Expr // Chan <- Value
+ simpleStmt
+ }
+
+ DeclStmt struct {
+ DeclList []Decl
+ stmt
+ }
+
+ AssignStmt struct {
+ Op Operator // 0 means no operation
+ Lhs, Rhs Expr // Rhs == nil means Lhs++ (Op == Add) or Lhs-- (Op == Sub)
+ simpleStmt
+ }
+
+ BranchStmt struct {
+ Tok token // Break, Continue, Fallthrough, or Goto
+ Label *Name
+ // Target is the continuation of the control flow after executing
+ // the branch; it is computed by the parser if CheckBranches is set.
+ // Target is a *LabeledStmt for gotos, and a *SwitchStmt, *SelectStmt,
+ // or *ForStmt for breaks and continues, depending on the context of
+ // the branch. Target is not set for fallthroughs.
+ Target Stmt
+ stmt
+ }
+
+ CallStmt struct {
+ Tok token // Go or Defer
+ Call Expr
+ DeferAt Expr // argument to runtime.deferprocat
+ stmt
+ }
+
+ ReturnStmt struct {
+ Results Expr // nil means no explicit return values
+ stmt
+ }
+
+ IfStmt struct {
+ Init SimpleStmt
+ Cond Expr
+ Then *BlockStmt
+ Else Stmt // either nil, *IfStmt, or *BlockStmt
+ stmt
+ }
+
+ ForStmt struct {
+ Init SimpleStmt // incl. *RangeClause
+ Cond Expr
+ Post SimpleStmt
+ Body *BlockStmt
+ stmt
+ }
+
+ SwitchStmt struct {
+ Init SimpleStmt
+ Tag Expr // incl. *TypeSwitchGuard
+ Body []*CaseClause
+ Rbrace Pos
+ stmt
+ }
+
+ SelectStmt struct {
+ Body []*CommClause
+ Rbrace Pos
+ stmt
+ }
+)
+
+type (
+ RangeClause struct {
+ Lhs Expr // nil means no Lhs = or Lhs :=
+ Def bool // means :=
+ X Expr // range X
+ simpleStmt
+ }
+
+ CaseClause struct {
+ Cases Expr // nil means default clause
+ Body []Stmt
+ Colon Pos
+ node
+ }
+
+ CommClause struct {
+ Comm SimpleStmt // send or receive stmt; nil means default clause
+ Body []Stmt
+ Colon Pos
+ node
+ }
+)
+
+type stmt struct{ node }
+
+func (stmt) aStmt() {}
+
+type simpleStmt struct {
+ stmt
+}
+
+func (simpleStmt) aSimpleStmt() {}
+
+// ----------------------------------------------------------------------------
+// Comments
+
+// TODO(gri) Consider renaming to CommentPos, CommentPlacement, etc.
+// Kind = Above doesn't make much sense.
+type CommentKind uint
+
+const (
+ Above CommentKind = iota
+ Below
+ Left
+ Right
+)
+
+type Comment struct {
+ Kind CommentKind
+ Text string
+ Next *Comment
+}
diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go
new file mode 100644
index 0000000..a86ae87
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/nodes_test.go
@@ -0,0 +1,326 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+// A test is a source code snippet of a particular node type.
+// In the snippet, a '@' indicates the position recorded by
+// the parser when creating the respective node.
+type test struct {
+ nodetyp string
+ snippet string
+}
+
+var decls = []test{
+ // The position of declarations is always the
+ // position of the first token of an individual
+ // declaration, independent of grouping.
+ {"ImportDecl", `import @"math"`},
+ {"ImportDecl", `import @mymath "math"`},
+ {"ImportDecl", `import @. "math"`},
+ {"ImportDecl", `import (@"math")`},
+ {"ImportDecl", `import (@mymath "math")`},
+ {"ImportDecl", `import (@. "math")`},
+
+ {"ConstDecl", `const @x`},
+ {"ConstDecl", `const @x = 0`},
+ {"ConstDecl", `const @x, y, z = 0, 1, 2`},
+ {"ConstDecl", `const (@x)`},
+ {"ConstDecl", `const (@x = 0)`},
+ {"ConstDecl", `const (@x, y, z = 0, 1, 2)`},
+
+ {"TypeDecl", `type @T int`},
+ {"TypeDecl", `type @T = int`},
+ {"TypeDecl", `type (@T int)`},
+ {"TypeDecl", `type (@T = int)`},
+
+ {"VarDecl", `var @x int`},
+ {"VarDecl", `var @x, y, z int`},
+ {"VarDecl", `var @x int = 0`},
+ {"VarDecl", `var @x, y, z int = 1, 2, 3`},
+ {"VarDecl", `var @x = 0`},
+ {"VarDecl", `var @x, y, z = 1, 2, 3`},
+ {"VarDecl", `var (@x int)`},
+ {"VarDecl", `var (@x, y, z int)`},
+ {"VarDecl", `var (@x int = 0)`},
+ {"VarDecl", `var (@x, y, z int = 1, 2, 3)`},
+ {"VarDecl", `var (@x = 0)`},
+ {"VarDecl", `var (@x, y, z = 1, 2, 3)`},
+
+ {"FuncDecl", `func @f() {}`},
+ {"FuncDecl", `func @(T) f() {}`},
+ {"FuncDecl", `func @(x T) f() {}`},
+}
+
+var exprs = []test{
+ // The position of an expression is the position
+ // of the left-most token that identifies the
+ // kind of expression.
+ {"Name", `@x`},
+
+ {"BasicLit", `@0`},
+ {"BasicLit", `@0x123`},
+ {"BasicLit", `@3.1415`},
+ {"BasicLit", `@.2718`},
+ {"BasicLit", `@1i`},
+ {"BasicLit", `@'a'`},
+ {"BasicLit", `@"abc"`},
+ {"BasicLit", "@`abc`"},
+
+ {"CompositeLit", `@{}`},
+ {"CompositeLit", `T@{}`},
+ {"CompositeLit", `struct{x, y int}@{}`},
+
+ {"KeyValueExpr", `"foo"@: true`},
+ {"KeyValueExpr", `"a"@: b`},
+
+ {"FuncLit", `@func (){}`},
+ {"ParenExpr", `@(x)`},
+ {"SelectorExpr", `a@.b`},
+ {"IndexExpr", `a@[i]`},
+
+ {"SliceExpr", `a@[:]`},
+ {"SliceExpr", `a@[i:]`},
+ {"SliceExpr", `a@[:j]`},
+ {"SliceExpr", `a@[i:j]`},
+ {"SliceExpr", `a@[i:j:k]`},
+
+ {"AssertExpr", `x@.(T)`},
+
+ {"Operation", `@*b`},
+ {"Operation", `@+b`},
+ {"Operation", `@-b`},
+ {"Operation", `@!b`},
+ {"Operation", `@^b`},
+ {"Operation", `@&b`},
+ {"Operation", `@<-b`},
+
+ {"Operation", `a @|| b`},
+ {"Operation", `a @&& b`},
+ {"Operation", `a @== b`},
+ {"Operation", `a @+ b`},
+ {"Operation", `a @* b`},
+
+ {"CallExpr", `f@()`},
+ {"CallExpr", `f@(x, y, z)`},
+ {"CallExpr", `obj.f@(1, 2, 3)`},
+ {"CallExpr", `func(x int) int { return x + 1 }@(y)`},
+
+ // ListExpr: tested via multi-value const/var declarations
+}
+
+var types = []test{
+ {"Operation", `@*T`},
+ {"Operation", `@*struct{}`},
+
+ {"ArrayType", `@[10]T`},
+ {"ArrayType", `@[...]T`},
+
+ {"SliceType", `@[]T`},
+ {"DotsType", `@...T`},
+ {"StructType", `@struct{}`},
+ {"InterfaceType", `@interface{}`},
+ {"FuncType", `func@()`},
+ {"MapType", `@map[T]T`},
+
+ {"ChanType", `@chan T`},
+ {"ChanType", `@chan<- T`},
+ {"ChanType", `@<-chan T`},
+}
+
+var fields = []test{
+ {"Field", `@T`},
+ {"Field", `@(T)`},
+ {"Field", `@x T`},
+ {"Field", `@x *(T)`},
+ {"Field", `@x, y, z T`},
+ {"Field", `@x, y, z (*T)`},
+}
+
+var stmts = []test{
+ {"EmptyStmt", `@`},
+
+ {"LabeledStmt", `L@:`},
+ {"LabeledStmt", `L@: ;`},
+ {"LabeledStmt", `L@: f()`},
+
+ {"BlockStmt", `@{}`},
+
+ // The position of an ExprStmt is the position of the expression.
+ {"ExprStmt", `@<-ch`},
+ {"ExprStmt", `f@()`},
+ {"ExprStmt", `append@(s, 1, 2, 3)`},
+
+ {"SendStmt", `ch @<- x`},
+
+ {"DeclStmt", `@const x = 0`},
+ {"DeclStmt", `@const (x = 0)`},
+ {"DeclStmt", `@type T int`},
+ {"DeclStmt", `@type T = int`},
+ {"DeclStmt", `@type (T1 = int; T2 = float32)`},
+ {"DeclStmt", `@var x = 0`},
+ {"DeclStmt", `@var x, y, z int`},
+ {"DeclStmt", `@var (a, b = 1, 2)`},
+
+ {"AssignStmt", `x @= y`},
+ {"AssignStmt", `a, b, x @= 1, 2, 3`},
+ {"AssignStmt", `x @+= y`},
+ {"AssignStmt", `x @:= y`},
+ {"AssignStmt", `x, ok @:= f()`},
+ {"AssignStmt", `x@++`},
+ {"AssignStmt", `a[i]@--`},
+
+ {"BranchStmt", `@break`},
+ {"BranchStmt", `@break L`},
+ {"BranchStmt", `@continue`},
+ {"BranchStmt", `@continue L`},
+ {"BranchStmt", `@fallthrough`},
+ {"BranchStmt", `@goto L`},
+
+ {"CallStmt", `@defer f()`},
+ {"CallStmt", `@go f()`},
+
+ {"ReturnStmt", `@return`},
+ {"ReturnStmt", `@return x`},
+ {"ReturnStmt", `@return a, b, a + b*f(1, 2, 3)`},
+
+ {"IfStmt", `@if cond {}`},
+ {"IfStmt", `@if cond { f() } else {}`},
+ {"IfStmt", `@if cond { f() } else { g(); h() }`},
+ {"ForStmt", `@for {}`},
+ {"ForStmt", `@for { f() }`},
+ {"SwitchStmt", `@switch {}`},
+ {"SwitchStmt", `@switch { default: }`},
+ {"SwitchStmt", `@switch { default: x++ }`},
+ {"SelectStmt", `@select {}`},
+ {"SelectStmt", `@select { default: }`},
+ {"SelectStmt", `@select { default: ch <- false }`},
+}
+
+var ranges = []test{
+ {"RangeClause", `@range s`},
+ {"RangeClause", `i = @range s`},
+ {"RangeClause", `i := @range s`},
+ {"RangeClause", `_, x = @range s`},
+ {"RangeClause", `i, x = @range s`},
+ {"RangeClause", `_, x := @range s.f`},
+ {"RangeClause", `i, x := @range f(i)`},
+}
+
+var guards = []test{
+ {"TypeSwitchGuard", `x@.(type)`},
+ {"TypeSwitchGuard", `x := x@.(type)`},
+}
+
+var cases = []test{
+ {"CaseClause", `@case x:`},
+ {"CaseClause", `@case x, y, z:`},
+ {"CaseClause", `@case x == 1, y == 2:`},
+ {"CaseClause", `@default:`},
+}
+
+var comms = []test{
+ {"CommClause", `@case <-ch:`},
+ {"CommClause", `@case x <- ch:`},
+ {"CommClause", `@case x = <-ch:`},
+ {"CommClause", `@case x := <-ch:`},
+ {"CommClause", `@case x, ok = <-ch: f(1, 2, 3)`},
+ {"CommClause", `@case x, ok := <-ch: x++`},
+ {"CommClause", `@default:`},
+ {"CommClause", `@default: ch <- true`},
+}
+
+func TestPos(t *testing.T) {
+ // TODO(gri) Once we have a general tree walker, we can use that to find
+ // the first occurrence of the respective node and we don't need to hand-
+ // extract the node for each specific kind of construct.
+
+ testPos(t, decls, "package p; ", "",
+ func(f *File) Node { return f.DeclList[0] },
+ )
+
+ // embed expressions in a composite literal so we can test key:value and naked composite literals
+ testPos(t, exprs, "package p; var _ = T{ ", " }",
+ func(f *File) Node { return f.DeclList[0].(*VarDecl).Values.(*CompositeLit).ElemList[0] },
+ )
+
+ // embed types in a function signature so we can test ... types
+ testPos(t, types, "package p; func f(", ")",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0].Type },
+ )
+
+ testPos(t, fields, "package p; func f(", ")",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0] },
+ )
+
+ testPos(t, stmts, "package p; func _() { ", "; }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0] },
+ )
+
+ testPos(t, ranges, "package p; func _() { for ", " {} }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*ForStmt).Init.(*RangeClause) },
+ )
+
+ testPos(t, guards, "package p; func _() { switch ", " {} }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Tag.(*TypeSwitchGuard) },
+ )
+
+ testPos(t, cases, "package p; func _() { switch { ", " } }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Body[0] },
+ )
+
+ testPos(t, comms, "package p; func _() { select { ", " } }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SelectStmt).Body[0] },
+ )
+}
+
+func testPos(t *testing.T, list []test, prefix, suffix string, extract func(*File) Node) {
+ for _, test := range list {
+ // complete source, compute @ position, and strip @ from source
+ src, index := stripAt(prefix + test.snippet + suffix)
+ if index < 0 {
+ t.Errorf("missing @: %s (%s)", src, test.nodetyp)
+ continue
+ }
+
+ // build syntax tree
+ file, err := Parse(nil, strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp)
+ continue
+ }
+
+ // extract desired node
+ node := extract(file)
+ if typ := typeOf(node); typ != test.nodetyp {
+ t.Errorf("type error: %s: type = %s, want %s", src, typ, test.nodetyp)
+ continue
+ }
+
+ // verify node position with expected position as indicated by @
+ if pos := int(node.Pos().Col()); pos != index+colbase {
+ t.Errorf("pos error: %s: pos = %d, want %d (%s)", src, pos, index+colbase, test.nodetyp)
+ continue
+ }
+ }
+}
+
+func stripAt(s string) (string, int) {
+ if i := strings.Index(s, "@"); i >= 0 {
+ return s[:i] + s[i+1:], i
+ }
+ return s, -1
+}
+
+func typeOf(n Node) string {
+ const prefix = "*syntax."
+ k := fmt.Sprintf("%T", n)
+ return strings.TrimPrefix(k, prefix)
+}
diff --git a/src/cmd/compile/internal/syntax/operator_string.go b/src/cmd/compile/internal/syntax/operator_string.go
new file mode 100644
index 0000000..f045d8c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/operator_string.go
@@ -0,0 +1,46 @@
+// Code generated by "stringer -type Operator -linecomment tokens.go"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Def-1]
+ _ = x[Not-2]
+ _ = x[Recv-3]
+ _ = x[Tilde-4]
+ _ = x[OrOr-5]
+ _ = x[AndAnd-6]
+ _ = x[Eql-7]
+ _ = x[Neq-8]
+ _ = x[Lss-9]
+ _ = x[Leq-10]
+ _ = x[Gtr-11]
+ _ = x[Geq-12]
+ _ = x[Add-13]
+ _ = x[Sub-14]
+ _ = x[Or-15]
+ _ = x[Xor-16]
+ _ = x[Mul-17]
+ _ = x[Div-18]
+ _ = x[Rem-19]
+ _ = x[And-20]
+ _ = x[AndNot-21]
+ _ = x[Shl-22]
+ _ = x[Shr-23]
+}
+
+const _Operator_name = ":!<-~||&&==!=<<=>>=+-|^*/%&&^<<>>"
+
+var _Operator_index = [...]uint8{0, 1, 2, 4, 5, 7, 9, 11, 13, 14, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31, 33}
+
+func (i Operator) String() string {
+ i -= 1
+ if i >= Operator(len(_Operator_index)-1) {
+ return "Operator(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _Operator_name[_Operator_index[i]:_Operator_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
new file mode 100644
index 0000000..1569b5e
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -0,0 +1,2849 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "go/build/constraint"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const debug = false
+const trace = false
+
+type parser struct {
+ file *PosBase
+ errh ErrorHandler
+ mode Mode
+ pragh PragmaHandler
+ scanner
+
+ base *PosBase // current position base
+ first error // first error encountered
+ errcnt int // number of errors encountered
+ pragma Pragma // pragmas
+ goVersion string // Go version from //go:build line
+
+ top bool // in top of file (before package clause)
+ fnest int // function nesting level (for error handling)
+ xnest int // expression nesting level (for complit ambiguity resolution)
+ indent []byte // tracing support
+}
+
+func (p *parser) init(file *PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) {
+ p.top = true
+ p.file = file
+ p.errh = errh
+ p.mode = mode
+ p.pragh = pragh
+ p.scanner.init(
+ r,
+ // Error and directive handler for scanner.
+ // Because the (line, col) positions passed to the
+ // handler is always at or after the current reading
+ // position, it is safe to use the most recent position
+ // base to compute the corresponding Pos value.
+ func(line, col uint, msg string) {
+ if msg[0] != '/' {
+ p.errorAt(p.posAt(line, col), msg)
+ return
+ }
+
+ // otherwise it must be a comment containing a line or go: directive.
+ // //line directives must be at the start of the line (column colbase).
+ // /*line*/ directives can be anywhere in the line.
+ text := commentText(msg)
+ if (col == colbase || msg[1] == '*') && strings.HasPrefix(text, "line ") {
+ var pos Pos // position immediately following the comment
+ if msg[1] == '/' {
+ // line comment (newline is part of the comment)
+ pos = MakePos(p.file, line+1, colbase)
+ } else {
+ // regular comment
+ // (if the comment spans multiple lines it's not
+ // a valid line directive and will be discarded
+ // by updateBase)
+ pos = MakePos(p.file, line, col+uint(len(msg)))
+ }
+ p.updateBase(pos, line, col+2+5, text[5:]) // +2 to skip over // or /*
+ return
+ }
+
+ // go: directive (but be conservative and test)
+ if strings.HasPrefix(text, "go:") {
+ if p.top && strings.HasPrefix(msg, "//go:build") {
+ if x, err := constraint.Parse(msg); err == nil {
+ p.goVersion = constraint.GoVersion(x)
+ }
+ }
+ if pragh != nil {
+ p.pragma = pragh(p.posAt(line, col+2), p.scanner.blank, text, p.pragma) // +2 to skip over // or /*
+ }
+ }
+ },
+ directives,
+ )
+
+ p.base = file
+ p.first = nil
+ p.errcnt = 0
+ p.pragma = nil
+
+ p.fnest = 0
+ p.xnest = 0
+ p.indent = nil
+}
+
+// takePragma returns the current parsed pragmas
+// and clears them from the parser state.
+func (p *parser) takePragma() Pragma {
+ prag := p.pragma
+ p.pragma = nil
+ return prag
+}
+
+// clearPragma is called at the end of a statement or
+// other Go form that does NOT accept a pragma.
+// It sends the pragma back to the pragma handler
+// to be reported as unused.
+func (p *parser) clearPragma() {
+ if p.pragma != nil {
+ p.pragh(p.pos(), p.scanner.blank, "", p.pragma)
+ p.pragma = nil
+ }
+}
+
+// updateBase sets the current position base to a new line base at pos.
+// The base's filename, line, and column values are extracted from text
+// which is positioned at (tline, tcol) (only needed for error messages).
+func (p *parser) updateBase(pos Pos, tline, tcol uint, text string) {
+ i, n, ok := trailingDigits(text)
+ if i == 0 {
+ return // ignore (not a line directive)
+ }
+ // i > 0
+
+ if !ok {
+ // text has a suffix :xxx but xxx is not a number
+ p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:])
+ return
+ }
+
+ var line, col uint
+ i2, n2, ok2 := trailingDigits(text[:i-1])
+ if ok2 {
+ //line filename:line:col
+ i, i2 = i2, i
+ line, col = n2, n
+ if col == 0 || col > PosMax {
+ p.errorAt(p.posAt(tline, tcol+i2), "invalid column number: "+text[i2:])
+ return
+ }
+ text = text[:i2-1] // lop off ":col"
+ } else {
+ //line filename:line
+ line = n
+ }
+
+ if line == 0 || line > PosMax {
+ p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:])
+ return
+ }
+
+ // If we have a column (//line filename:line:col form),
+ // an empty filename means to use the previous filename.
+ filename := text[:i-1] // lop off ":line"
+ trimmed := false
+ if filename == "" && ok2 {
+ filename = p.base.Filename()
+ trimmed = p.base.Trimmed()
+ }
+
+ p.base = NewLineBase(pos, filename, trimmed, line, col)
+}
+
+func commentText(s string) string {
+ if s[:2] == "/*" {
+ return s[2 : len(s)-2] // lop off /* and */
+ }
+
+ // line comment (does not include newline)
+ // (on Windows, the line comment may end in \r\n)
+ i := len(s)
+ if s[i-1] == '\r' {
+ i--
+ }
+ return s[2:i] // lop off //, and \r at end, if any
+}
+
+func trailingDigits(text string) (uint, uint, bool) {
+ i := strings.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':')
+ if i < 0 {
+ return 0, 0, false // no ':'
+ }
+ // i >= 0
+ n, err := strconv.ParseUint(text[i+1:], 10, 0)
+ return uint(i + 1), uint(n), err == nil
+}
+
+func (p *parser) got(tok token) bool {
+ if p.tok == tok {
+ p.next()
+ return true
+ }
+ return false
+}
+
+func (p *parser) want(tok token) {
+ if !p.got(tok) {
+ p.syntaxError("expected " + tokstring(tok))
+ p.advance()
+ }
+}
+
+// gotAssign is like got(_Assign) but it also accepts ":="
+// (and reports an error) for better parser error recovery.
+func (p *parser) gotAssign() bool {
+ switch p.tok {
+ case _Define:
+ p.syntaxError("expected =")
+ fallthrough
+ case _Assign:
+ p.next()
+ return true
+ }
+ return false
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// posAt returns the Pos value for (line, col) and the current position base.
+func (p *parser) posAt(line, col uint) Pos {
+ return MakePos(p.base, line, col)
+}
+
+// errorAt reports an error at the given position.
+func (p *parser) errorAt(pos Pos, msg string) {
+ err := Error{pos, msg}
+ if p.first == nil {
+ p.first = err
+ }
+ p.errcnt++
+ if p.errh == nil {
+ panic(p.first)
+ }
+ p.errh(err)
+}
+
+// syntaxErrorAt reports a syntax error at the given position.
+func (p *parser) syntaxErrorAt(pos Pos, msg string) {
+ if trace {
+ p.print("syntax error: " + msg)
+ }
+
+ if p.tok == _EOF && p.first != nil {
+ return // avoid meaningless follow-up errors
+ }
+
+ // add punctuation etc. as needed to msg
+ switch {
+ case msg == "":
+ // nothing to do
+ case strings.HasPrefix(msg, "in "), strings.HasPrefix(msg, "at "), strings.HasPrefix(msg, "after "):
+ msg = " " + msg
+ case strings.HasPrefix(msg, "expected "):
+ msg = ", " + msg
+ default:
+ // plain error - we don't care about current token
+ p.errorAt(pos, "syntax error: "+msg)
+ return
+ }
+
+ // determine token string
+ var tok string
+ switch p.tok {
+ case _Name, _Semi:
+ tok = p.lit
+ case _Literal:
+ tok = "literal " + p.lit
+ case _Operator:
+ tok = p.op.String()
+ case _AssignOp:
+ tok = p.op.String() + "="
+ case _IncOp:
+ tok = p.op.String()
+ tok += tok
+ default:
+ tok = tokstring(p.tok)
+ }
+
+ // TODO(gri) This may print "unexpected X, expected Y".
+ // Consider "got X, expected Y" in this case.
+ p.errorAt(pos, "syntax error: unexpected "+tok+msg)
+}
+
+// tokstring returns the English word for selected punctuation tokens
+// for more readable error messages. Use tokstring (not tok.String())
+// for user-facing (error) messages; use tok.String() for debugging
+// output.
+func tokstring(tok token) string {
+ switch tok {
+ case _Comma:
+ return "comma"
+ case _Semi:
+ return "semicolon or newline"
+ }
+ return tok.String()
+}
+
+// Convenience methods using the current token position.
+func (p *parser) pos() Pos { return p.posAt(p.line, p.col) }
+func (p *parser) error(msg string) { p.errorAt(p.pos(), msg) }
+func (p *parser) syntaxError(msg string) { p.syntaxErrorAt(p.pos(), msg) }
+
+// The stopset contains keywords that start a statement.
+// They are good synchronization points in case of syntax
+// errors and (usually) shouldn't be skipped over.
+const stopset uint64 = 1<<_Break |
+ 1<<_Const |
+ 1<<_Continue |
+ 1<<_Defer |
+ 1<<_Fallthrough |
+ 1<<_For |
+ 1<<_Go |
+ 1<<_Goto |
+ 1<<_If |
+ 1<<_Return |
+ 1<<_Select |
+ 1<<_Switch |
+ 1<<_Type |
+ 1<<_Var
+
+// advance consumes tokens until it finds a token of the stopset or followlist.
+// The stopset is only considered if we are inside a function (p.fnest > 0).
+// The followlist is the list of valid tokens that can follow a production;
+// if it is empty, exactly one (non-EOF) token is consumed to ensure progress.
+func (p *parser) advance(followlist ...token) {
+ if trace {
+ p.print(fmt.Sprintf("advance %s", followlist))
+ }
+
+ // compute follow set
+ // (not speed critical, advance is only called in error situations)
+ var followset uint64 = 1 << _EOF // don't skip over EOF
+ if len(followlist) > 0 {
+ if p.fnest > 0 {
+ followset |= stopset
+ }
+ for _, tok := range followlist {
+ followset |= 1 << tok
+ }
+ }
+
+ for !contains(followset, p.tok) {
+ if trace {
+ p.print("skip " + p.tok.String())
+ }
+ p.next()
+ if len(followlist) == 0 {
+ break
+ }
+ }
+
+ if trace {
+ p.print("next " + p.tok.String())
+ }
+}
+
+// usage: defer p.trace(msg)()
+func (p *parser) trace(msg string) func() {
+ p.print(msg + " (")
+ const tab = ". "
+ p.indent = append(p.indent, tab...)
+ return func() {
+ p.indent = p.indent[:len(p.indent)-len(tab)]
+ if x := recover(); x != nil {
+ panic(x) // skip print_trace
+ }
+ p.print(")")
+ }
+}
+
+func (p *parser) print(msg string) {
+ fmt.Printf("%5d: %s%s\n", p.line, p.indent, msg)
+}
+
+// ----------------------------------------------------------------------------
+// Package files
+//
+// Parse methods are annotated with matching Go productions as appropriate.
+// The annotations are intended as guidelines only since a single Go grammar
+// rule may be covered by multiple parse methods and vice versa.
+//
+// Excluding methods returning slices, parse methods named xOrNil may return
+// nil; all others are expected to return a valid non-nil node.
+
+// SourceFile = PackageClause ";" { ImportDecl ";" } { TopLevelDecl ";" } .
+func (p *parser) fileOrNil() *File {
+ if trace {
+ defer p.trace("file")()
+ }
+
+ f := new(File)
+ f.pos = p.pos()
+
+ // PackageClause
+ f.GoVersion = p.goVersion
+ p.top = false
+ if !p.got(_Package) {
+ p.syntaxError("package statement must be first")
+ return nil
+ }
+ f.Pragma = p.takePragma()
+ f.PkgName = p.name()
+ p.want(_Semi)
+
+ // don't bother continuing if package clause has errors
+ if p.first != nil {
+ return nil
+ }
+
+ // Accept import declarations anywhere for error tolerance, but complain.
+ // { ( ImportDecl | TopLevelDecl ) ";" }
+ prev := _Import
+ for p.tok != _EOF {
+ if p.tok == _Import && prev != _Import {
+ p.syntaxError("imports must appear before other declarations")
+ }
+ prev = p.tok
+
+ switch p.tok {
+ case _Import:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.importDecl)
+
+ case _Const:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.constDecl)
+
+ case _Type:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.typeDecl)
+
+ case _Var:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.varDecl)
+
+ case _Func:
+ p.next()
+ if d := p.funcDeclOrNil(); d != nil {
+ f.DeclList = append(f.DeclList, d)
+ }
+
+ default:
+ if p.tok == _Lbrace && len(f.DeclList) > 0 && isEmptyFuncDecl(f.DeclList[len(f.DeclList)-1]) {
+ // opening { of function declaration on next line
+ p.syntaxError("unexpected semicolon or newline before {")
+ } else {
+ p.syntaxError("non-declaration statement outside function body")
+ }
+ p.advance(_Import, _Const, _Type, _Var, _Func)
+ continue
+ }
+
+ // Reset p.pragma BEFORE advancing to the next token (consuming ';')
+ // since comments before may set pragmas for the next function decl.
+ p.clearPragma()
+
+ if p.tok != _EOF && !p.got(_Semi) {
+ p.syntaxError("after top level declaration")
+ p.advance(_Import, _Const, _Type, _Var, _Func)
+ }
+ }
+ // p.tok == _EOF
+
+ p.clearPragma()
+ f.EOF = p.pos()
+
+ return f
+}
+
+func isEmptyFuncDecl(dcl Decl) bool {
+ f, ok := dcl.(*FuncDecl)
+ return ok && f.Body == nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// list parses a possibly empty, sep-separated list of elements, optionally
+// followed by sep, and closed by close (or EOF). sep must be one of _Comma
+// or _Semi, and close must be one of _Rparen, _Rbrace, or _Rbrack.
+//
+// For each list element, f is called. Specifically, unless we're at close
+// (or EOF), f is called at least once. After f returns true, no more list
+// elements are accepted. list returns the position of the closing token.
+//
+// list = [ f { sep f } [sep] ] close .
+func (p *parser) list(context string, sep, close token, f func() bool) Pos {
+ if debug && (sep != _Comma && sep != _Semi || close != _Rparen && close != _Rbrace && close != _Rbrack) {
+ panic("invalid sep or close argument for list")
+ }
+
+ done := false
+ for p.tok != _EOF && p.tok != close && !done {
+ done = f()
+ // sep is optional before close
+ if !p.got(sep) && p.tok != close {
+ p.syntaxError(fmt.Sprintf("in %s; possibly missing %s or %s", context, tokstring(sep), tokstring(close)))
+ p.advance(_Rparen, _Rbrack, _Rbrace)
+ if p.tok != close {
+ // position could be better but we had an error so we don't care
+ return p.pos()
+ }
+ }
+ }
+
+ pos := p.pos()
+ p.want(close)
+ return pos
+}
+
+// appendGroup(f) = f | "(" { f ";" } ")" . // ";" is optional before ")"
+func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl {
+ if p.tok == _Lparen {
+ g := new(Group)
+ p.clearPragma()
+ p.next() // must consume "(" after calling clearPragma!
+ p.list("grouped declaration", _Semi, _Rparen, func() bool {
+ if x := f(g); x != nil {
+ list = append(list, x)
+ }
+ return false
+ })
+ } else {
+ if x := f(nil); x != nil {
+ list = append(list, x)
+ }
+ }
+ return list
+}
+
+// ImportSpec = [ "." | PackageName ] ImportPath .
+// ImportPath = string_lit .
+func (p *parser) importDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("importDecl")()
+ }
+
+ d := new(ImportDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ switch p.tok {
+ case _Name:
+ d.LocalPkgName = p.name()
+ case _Dot:
+ d.LocalPkgName = NewName(p.pos(), ".")
+ p.next()
+ }
+ d.Path = p.oliteral()
+ if d.Path == nil {
+ p.syntaxError("missing import path")
+ p.advance(_Semi, _Rparen)
+ return d
+ }
+ if !d.Path.Bad && d.Path.Kind != StringLit {
+ p.syntaxErrorAt(d.Path.Pos(), "import path must be a string")
+ d.Path.Bad = true
+ }
+ // d.Path.Bad || d.Path.Kind == StringLit
+
+ return d
+}
+
+// ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] .
+func (p *parser) constDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("constDecl")()
+ }
+
+ d := new(ConstDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.NameList = p.nameList(p.name())
+ if p.tok != _EOF && p.tok != _Semi && p.tok != _Rparen {
+ d.Type = p.typeOrNil()
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ }
+ }
+
+ return d
+}
+
+// TypeSpec = identifier [ TypeParams ] [ "=" ] Type .
+func (p *parser) typeDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("typeDecl")()
+ }
+
+ d := new(TypeDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.Name = p.name()
+ if p.tok == _Lbrack {
+ // d.Name "[" ...
+ // array/slice type or type parameter list
+ pos := p.pos()
+ p.next()
+ switch p.tok {
+ case _Name:
+ // We may have an array type or a type parameter list.
+ // In either case we expect an expression x (which may
+ // just be a name, or a more complex expression) which
+ // we can analyze further.
+ //
+ // A type parameter list may have a type bound starting
+ // with a "[" as in: P []E. In that case, simply parsing
+ // an expression would lead to an error: P[] is invalid.
+ // But since index or slice expressions are never constant
+ // and thus invalid array length expressions, if the name
+ // is followed by "[" it must be the start of an array or
+ // slice constraint. Only if we don't see a "[" do we
+ // need to parse a full expression. Notably, name <- x
+ // is not a concern because name <- x is a statement and
+ // not an expression.
+ var x Expr = p.name()
+ if p.tok != _Lbrack {
+ // To parse the expression starting with name, expand
+ // the call sequence we would get by passing in name
+ // to parser.expr, and pass in name to parser.pexpr.
+ p.xnest++
+ x = p.binaryExpr(p.pexpr(x, false), 0)
+ p.xnest--
+ }
+ // Analyze expression x. If we can split x into a type parameter
+ // name, possibly followed by a type parameter type, we consider
+ // this the start of a type parameter list, with some caveats:
+ // a single name followed by "]" tilts the decision towards an
+ // array declaration; a type parameter type that could also be
+ // an ordinary expression but which is followed by a comma tilts
+ // the decision towards a type parameter list.
+ if pname, ptype := extractName(x, p.tok == _Comma); pname != nil && (ptype != nil || p.tok != _Rbrack) {
+ // d.Name "[" pname ...
+ // d.Name "[" pname ptype ...
+ // d.Name "[" pname ptype "," ...
+ d.TParamList = p.paramList(pname, ptype, _Rbrack, true) // ptype may be nil
+ d.Alias = p.gotAssign()
+ d.Type = p.typeOrNil()
+ } else {
+ // d.Name "[" pname "]" ...
+ // d.Name "[" x ...
+ d.Type = p.arrayType(pos, x)
+ }
+ case _Rbrack:
+ // d.Name "[" "]" ...
+ p.next()
+ d.Type = p.sliceType(pos)
+ default:
+ // d.Name "[" ...
+ d.Type = p.arrayType(pos, nil)
+ }
+ } else {
+ d.Alias = p.gotAssign()
+ d.Type = p.typeOrNil()
+ }
+
+ if d.Type == nil {
+ d.Type = p.badExpr()
+ p.syntaxError("in type declaration")
+ p.advance(_Semi, _Rparen)
+ }
+
+ return d
+}
+
+// extractName splits the expression x into (name, expr) if syntactically
+// x can be written as name expr. The split only happens if expr is a type
+// element (per the isTypeElem predicate) or if force is set.
+// If x is just a name, the result is (name, nil). If the split succeeds,
+// the result is (name, expr). Otherwise the result is (nil, x).
+// Examples:
+//
+// x force name expr
+// ------------------------------------
+// P*[]int T/F P *[]int
+// P*E T P *E
+// P*E F nil P*E
+// P([]int) T/F P []int
+// P(E) T P E
+// P(E) F nil P(E)
+// P*E|F|~G T/F P *E|F|~G
+// P*E|F|G T P *E|F|G
+// P*E|F|G F nil P*E|F|G
+func extractName(x Expr, force bool) (*Name, Expr) {
+ switch x := x.(type) {
+ case *Name:
+ return x, nil
+ case *Operation:
+ if x.Y == nil {
+ break // unary expr
+ }
+ switch x.Op {
+ case Mul:
+ if name, _ := x.X.(*Name); name != nil && (force || isTypeElem(x.Y)) {
+ // x = name *x.Y
+ op := *x
+ op.X, op.Y = op.Y, nil // change op into unary *op.Y
+ return name, &op
+ }
+ case Or:
+ if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil {
+ // x = name lhs|x.Y
+ op := *x
+ op.X = lhs
+ return name, &op
+ }
+ }
+ case *CallExpr:
+ if name, _ := x.Fun.(*Name); name != nil {
+ if len(x.ArgList) == 1 && !x.HasDots && (force || isTypeElem(x.ArgList[0])) {
+ // x = name "(" x.ArgList[0] ")"
+ return name, x.ArgList[0]
+ }
+ }
+ }
+ return nil, x
+}
+
+// isTypeElem reports whether x is a (possibly parenthesized) type element expression.
+// The result is false if x could be a type element OR an ordinary (value) expression.
+func isTypeElem(x Expr) bool {
+ switch x := x.(type) {
+ case *ArrayType, *StructType, *FuncType, *InterfaceType, *SliceType, *MapType, *ChanType:
+ return true
+ case *Operation:
+ return isTypeElem(x.X) || (x.Y != nil && isTypeElem(x.Y)) || x.Op == Tilde
+ case *ParenExpr:
+ return isTypeElem(x.X)
+ }
+ return false
+}
+
+// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
+func (p *parser) varDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("varDecl")()
+ }
+
+ d := new(VarDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.NameList = p.nameList(p.name())
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ } else {
+ d.Type = p.type_()
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ }
+ }
+
+ return d
+}
+
+// FunctionDecl = "func" FunctionName [ TypeParams ] ( Function | Signature ) .
+// FunctionName = identifier .
+// Function = Signature FunctionBody .
+// MethodDecl = "func" Receiver MethodName ( Function | Signature ) .
+// Receiver = Parameters .
+func (p *parser) funcDeclOrNil() *FuncDecl {
+ if trace {
+ defer p.trace("funcDecl")()
+ }
+
+ f := new(FuncDecl)
+ f.pos = p.pos()
+ f.Pragma = p.takePragma()
+
+ var context string
+ if p.got(_Lparen) {
+ context = "method"
+ rcvr := p.paramList(nil, nil, _Rparen, false)
+ switch len(rcvr) {
+ case 0:
+ p.error("method has no receiver")
+ default:
+ p.error("method has multiple receivers")
+ fallthrough
+ case 1:
+ f.Recv = rcvr[0]
+ }
+ }
+
+ if p.tok == _Name {
+ f.Name = p.name()
+ f.TParamList, f.Type = p.funcType(context)
+ } else {
+ f.Name = NewName(p.pos(), "_")
+ f.Type = new(FuncType)
+ f.Type.pos = p.pos()
+ msg := "expected name or ("
+ if context != "" {
+ msg = "expected name"
+ }
+ p.syntaxError(msg)
+ p.advance(_Lbrace, _Semi)
+ }
+
+ if p.tok == _Lbrace {
+ f.Body = p.funcBody()
+ }
+
+ return f
+}
+
+func (p *parser) funcBody() *BlockStmt {
+ p.fnest++
+ errcnt := p.errcnt
+ body := p.blockStmt("")
+ p.fnest--
+
+ // Don't check branches if there were syntax errors in the function
+ // as it may lead to spurious errors (e.g., see test/switch2.go) or
+ // possibly crashes due to incomplete syntax trees.
+ if p.mode&CheckBranches != 0 && errcnt == p.errcnt {
+ checkBranches(body, p.errh)
+ }
+
+ return body
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) expr() Expr {
+ if trace {
+ defer p.trace("expr")()
+ }
+
+ return p.binaryExpr(nil, 0)
+}
+
+// Expression = UnaryExpr | Expression binary_op Expression .
+func (p *parser) binaryExpr(x Expr, prec int) Expr {
+ // don't trace binaryExpr - only leads to overly nested trace output
+
+ if x == nil {
+ x = p.unaryExpr()
+ }
+ for (p.tok == _Operator || p.tok == _Star) && p.prec > prec {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = p.op
+ tprec := p.prec
+ p.next()
+ t.X = x
+ t.Y = p.binaryExpr(nil, tprec)
+ x = t
+ }
+ return x
+}
+
+// UnaryExpr = PrimaryExpr | unary_op UnaryExpr .
+func (p *parser) unaryExpr() Expr {
+ if trace {
+ defer p.trace("unaryExpr")()
+ }
+
+ switch p.tok {
+ case _Operator, _Star:
+ switch p.op {
+ case Mul, Add, Sub, Not, Xor, Tilde:
+ x := new(Operation)
+ x.pos = p.pos()
+ x.Op = p.op
+ p.next()
+ x.X = p.unaryExpr()
+ return x
+
+ case And:
+ x := new(Operation)
+ x.pos = p.pos()
+ x.Op = And
+ p.next()
+ // unaryExpr may have returned a parenthesized composite literal
+ // (see comment in operand) - remove parentheses if any
+ x.X = Unparen(p.unaryExpr())
+ return x
+ }
+
+ case _Arrow:
+ // receive op (<-x) or receive-only channel (<-chan E)
+ pos := p.pos()
+ p.next()
+
+ // If the next token is _Chan we still don't know if it is
+ // a channel (<-chan int) or a receive op (<-chan int(ch)).
+ // We only know once we have found the end of the unaryExpr.
+
+ x := p.unaryExpr()
+
+ // There are two cases:
+ //
+ // <-chan... => <-x is a channel type
+ // <-x => <-x is a receive operation
+ //
+ // In the first case, <- must be re-associated with
+ // the channel type parsed already:
+ //
+ // <-(chan E) => (<-chan E)
+ // <-(chan<-E) => (<-chan (<-E))
+
+ if _, ok := x.(*ChanType); ok {
+ // x is a channel type => re-associate <-
+ dir := SendOnly
+ t := x
+ for dir == SendOnly {
+ c, ok := t.(*ChanType)
+ if !ok {
+ break
+ }
+ dir = c.Dir
+ if dir == RecvOnly {
+ // t is type <-chan E but <-<-chan E is not permitted
+ // (report same error as for "type _ <-<-chan E")
+ p.syntaxError("unexpected <-, expected chan")
+ // already progressed, no need to advance
+ }
+ c.Dir = RecvOnly
+ t = c.Elem
+ }
+ if dir == SendOnly {
+ // channel dir is <- but channel element E is not a channel
+ // (report same error as for "type _ <-chan<-E")
+ p.syntaxError(fmt.Sprintf("unexpected %s, expected chan", String(t)))
+ // already progressed, no need to advance
+ }
+ return x
+ }
+
+ // x is not a channel type => we have a receive op
+ o := new(Operation)
+ o.pos = pos
+ o.Op = Recv
+ o.X = x
+ return o
+ }
+
+ // TODO(mdempsky): We need parens here so we can report an
+ // error for "(x) := true". It should be possible to detect
+ // and reject that more efficiently though.
+ return p.pexpr(nil, true)
+}
+
+// callStmt parses call-like statements that can be preceded by 'defer' and 'go'.
+func (p *parser) callStmt() *CallStmt {
+ if trace {
+ defer p.trace("callStmt")()
+ }
+
+ s := new(CallStmt)
+ s.pos = p.pos()
+ s.Tok = p.tok // _Defer or _Go
+ p.next()
+
+ x := p.pexpr(nil, p.tok == _Lparen) // keep_parens so we can report error below
+ if t := Unparen(x); t != x {
+ p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
+ // already progressed, no need to advance
+ x = t
+ }
+
+ s.Call = x
+ return s
+}
+
+// Operand = Literal | OperandName | MethodExpr | "(" Expression ")" .
+// Literal = BasicLit | CompositeLit | FunctionLit .
+// BasicLit = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
+// OperandName = identifier | QualifiedIdent.
+func (p *parser) operand(keep_parens bool) Expr {
+ if trace {
+ defer p.trace("operand " + p.tok.String())()
+ }
+
+ switch p.tok {
+ case _Name:
+ return p.name()
+
+ case _Literal:
+ return p.oliteral()
+
+ case _Lparen:
+ pos := p.pos()
+ p.next()
+ p.xnest++
+ x := p.expr()
+ p.xnest--
+ p.want(_Rparen)
+
+ // Optimization: Record presence of ()'s only where needed
+ // for error reporting. Don't bother in other cases; it is
+ // just a waste of memory and time.
+ //
+ // Parentheses are not permitted around T in a composite
+ // literal T{}. If the next token is a {, assume x is a
+ // composite literal type T (it may not be, { could be
+ // the opening brace of a block, but we don't know yet).
+ if p.tok == _Lbrace {
+ keep_parens = true
+ }
+
+ // Parentheses are also not permitted around the expression
+ // in a go/defer statement. In that case, operand is called
+ // with keep_parens set.
+ if keep_parens {
+ px := new(ParenExpr)
+ px.pos = pos
+ px.X = x
+ x = px
+ }
+ return x
+
+ case _Func:
+ pos := p.pos()
+ p.next()
+ _, ftyp := p.funcType("function type")
+ if p.tok == _Lbrace {
+ p.xnest++
+
+ f := new(FuncLit)
+ f.pos = pos
+ f.Type = ftyp
+ f.Body = p.funcBody()
+
+ p.xnest--
+ return f
+ }
+ return ftyp
+
+ case _Lbrack, _Chan, _Map, _Struct, _Interface:
+ return p.type_() // othertype
+
+ default:
+ x := p.badExpr()
+ p.syntaxError("expected expression")
+ p.advance(_Rparen, _Rbrack, _Rbrace)
+ return x
+ }
+
+ // Syntactically, composite literals are operands. Because a complit
+ // type may be a qualified identifier which is handled by pexpr
+ // (together with selector expressions), complits are parsed there
+ // as well (operand is only called from pexpr).
+}
+
+// pexpr parses a PrimaryExpr.
+//
+// PrimaryExpr =
+// Operand |
+// Conversion |
+// PrimaryExpr Selector |
+// PrimaryExpr Index |
+// PrimaryExpr Slice |
+// PrimaryExpr TypeAssertion |
+// PrimaryExpr Arguments .
+//
+// Selector = "." identifier .
+// Index = "[" Expression "]" .
+// Slice = "[" ( [ Expression ] ":" [ Expression ] ) |
+// ( [ Expression ] ":" Expression ":" Expression )
+// "]" .
+// TypeAssertion = "." "(" Type ")" .
+// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
+func (p *parser) pexpr(x Expr, keep_parens bool) Expr {
+ if trace {
+ defer p.trace("pexpr")()
+ }
+
+ if x == nil {
+ x = p.operand(keep_parens)
+ }
+
+loop:
+ for {
+ pos := p.pos()
+ switch p.tok {
+ case _Dot:
+ p.next()
+ switch p.tok {
+ case _Name:
+ // pexpr '.' sym
+ t := new(SelectorExpr)
+ t.pos = pos
+ t.X = x
+ t.Sel = p.name()
+ x = t
+
+ case _Lparen:
+ p.next()
+ if p.got(_Type) {
+ t := new(TypeSwitchGuard)
+ // t.Lhs is filled in by parser.simpleStmt
+ t.pos = pos
+ t.X = x
+ x = t
+ } else {
+ t := new(AssertExpr)
+ t.pos = pos
+ t.X = x
+ t.Type = p.type_()
+ x = t
+ }
+ p.want(_Rparen)
+
+ default:
+ p.syntaxError("expected name or (")
+ p.advance(_Semi, _Rparen)
+ }
+
+ case _Lbrack:
+ p.next()
+
+ var i Expr
+ if p.tok != _Colon {
+ var comma bool
+ if p.tok == _Rbrack {
+ // invalid empty instance, slice or index expression; accept but complain
+ p.syntaxError("expected operand")
+ i = p.badExpr()
+ } else {
+ i, comma = p.typeList(false)
+ }
+ if comma || p.tok == _Rbrack {
+ p.want(_Rbrack)
+ // x[], x[i,] or x[i, j, ...]
+ t := new(IndexExpr)
+ t.pos = pos
+ t.X = x
+ t.Index = i
+ x = t
+ break
+ }
+ }
+
+ // x[i:...
+ // For better error message, don't simply use p.want(_Colon) here (go.dev/issue/47704).
+ if !p.got(_Colon) {
+ p.syntaxError("expected comma, : or ]")
+ p.advance(_Comma, _Colon, _Rbrack)
+ }
+ p.xnest++
+ t := new(SliceExpr)
+ t.pos = pos
+ t.X = x
+ t.Index[0] = i
+ if p.tok != _Colon && p.tok != _Rbrack {
+ // x[i:j...
+ t.Index[1] = p.expr()
+ }
+ if p.tok == _Colon {
+ t.Full = true
+ // x[i:j:...]
+ if t.Index[1] == nil {
+ p.error("middle index required in 3-index slice")
+ t.Index[1] = p.badExpr()
+ }
+ p.next()
+ if p.tok != _Rbrack {
+ // x[i:j:k...
+ t.Index[2] = p.expr()
+ } else {
+ p.error("final index required in 3-index slice")
+ t.Index[2] = p.badExpr()
+ }
+ }
+ p.xnest--
+ p.want(_Rbrack)
+ x = t
+
+ case _Lparen:
+ t := new(CallExpr)
+ t.pos = pos
+ p.next()
+ t.Fun = x
+ t.ArgList, t.HasDots = p.argList()
+ x = t
+
+ case _Lbrace:
+ // operand may have returned a parenthesized complit
+ // type; accept it but complain if we have a complit
+ t := Unparen(x)
+ // determine if '{' belongs to a composite literal or a block statement
+ complit_ok := false
+ switch t.(type) {
+ case *Name, *SelectorExpr:
+ if p.xnest >= 0 {
+ // x is possibly a composite literal type
+ complit_ok = true
+ }
+ case *IndexExpr:
+ if p.xnest >= 0 && !isValue(t) {
+ // x is possibly a composite literal type
+ complit_ok = true
+ }
+ case *ArrayType, *SliceType, *StructType, *MapType:
+ // x is a comptype
+ complit_ok = true
+ }
+ if !complit_ok {
+ break loop
+ }
+ if t != x {
+ p.syntaxError("cannot parenthesize type in composite literal")
+ // already progressed, no need to advance
+ }
+ n := p.complitexpr()
+ n.Type = x
+ x = n
+
+ default:
+ break loop
+ }
+ }
+
+ return x
+}
+
+// isValue reports whether x syntactically must be a value (and not a type) expression.
+func isValue(x Expr) bool {
+ switch x := x.(type) {
+ case *BasicLit, *CompositeLit, *FuncLit, *SliceExpr, *AssertExpr, *TypeSwitchGuard, *CallExpr:
+ return true
+ case *Operation:
+ return x.Op != Mul || x.Y != nil // *T may be a type
+ case *ParenExpr:
+ return isValue(x.X)
+ case *IndexExpr:
+ return isValue(x.X) || isValue(x.Index)
+ }
+ return false
+}
+
+// Element = Expression | LiteralValue .
+func (p *parser) bare_complitexpr() Expr {
+ if trace {
+ defer p.trace("bare_complitexpr")()
+ }
+
+ if p.tok == _Lbrace {
+ // '{' start_complit braced_keyval_list '}'
+ return p.complitexpr()
+ }
+
+ return p.expr()
+}
+
+// LiteralValue = "{" [ ElementList [ "," ] ] "}" .
+func (p *parser) complitexpr() *CompositeLit {
+ if trace {
+ defer p.trace("complitexpr")()
+ }
+
+ x := new(CompositeLit)
+ x.pos = p.pos()
+
+ p.xnest++
+ p.want(_Lbrace)
+ x.Rbrace = p.list("composite literal", _Comma, _Rbrace, func() bool {
+ // value
+ e := p.bare_complitexpr()
+ if p.tok == _Colon {
+ // key ':' value
+ l := new(KeyValueExpr)
+ l.pos = p.pos()
+ p.next()
+ l.Key = e
+ l.Value = p.bare_complitexpr()
+ e = l
+ x.NKeys++
+ }
+ x.ElemList = append(x.ElemList, e)
+ return false
+ })
+ p.xnest--
+
+ return x
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) type_() Expr {
+ if trace {
+ defer p.trace("type_")()
+ }
+
+ typ := p.typeOrNil()
+ if typ == nil {
+ typ = p.badExpr()
+ p.syntaxError("expected type")
+ p.advance(_Comma, _Colon, _Semi, _Rparen, _Rbrack, _Rbrace)
+ }
+
+ return typ
+}
+
+func newIndirect(pos Pos, typ Expr) Expr {
+ o := new(Operation)
+ o.pos = pos
+ o.Op = Mul
+ o.X = typ
+ return o
+}
+
+// typeOrNil is like type_ but it returns nil if there was no type
+// instead of reporting an error.
+//
+// Type = TypeName | TypeLit | "(" Type ")" .
+// TypeName = identifier | QualifiedIdent .
+// TypeLit = ArrayType | StructType | PointerType | FunctionType | InterfaceType |
+// SliceType | MapType | Channel_Type .
+func (p *parser) typeOrNil() Expr {
+ if trace {
+ defer p.trace("typeOrNil")()
+ }
+
+ pos := p.pos()
+ switch p.tok {
+ case _Star:
+ // ptrtype
+ p.next()
+ return newIndirect(pos, p.type_())
+
+ case _Arrow:
+ // recvchantype
+ p.next()
+ p.want(_Chan)
+ t := new(ChanType)
+ t.pos = pos
+ t.Dir = RecvOnly
+ t.Elem = p.chanElem()
+ return t
+
+ case _Func:
+ // fntype
+ p.next()
+ _, t := p.funcType("function type")
+ return t
+
+ case _Lbrack:
+ // '[' oexpr ']' ntype
+ // '[' _DotDotDot ']' ntype
+ p.next()
+ if p.got(_Rbrack) {
+ return p.sliceType(pos)
+ }
+ return p.arrayType(pos, nil)
+
+ case _Chan:
+ // _Chan non_recvchantype
+ // _Chan _Comm ntype
+ p.next()
+ t := new(ChanType)
+ t.pos = pos
+ if p.got(_Arrow) {
+ t.Dir = SendOnly
+ }
+ t.Elem = p.chanElem()
+ return t
+
+ case _Map:
+ // _Map '[' ntype ']' ntype
+ p.next()
+ p.want(_Lbrack)
+ t := new(MapType)
+ t.pos = pos
+ t.Key = p.type_()
+ p.want(_Rbrack)
+ t.Value = p.type_()
+ return t
+
+ case _Struct:
+ return p.structType()
+
+ case _Interface:
+ return p.interfaceType()
+
+ case _Name:
+ return p.qualifiedName(nil)
+
+ case _Lparen:
+ p.next()
+ t := p.type_()
+ p.want(_Rparen)
+ return t
+ }
+
+ return nil
+}
+
+func (p *parser) typeInstance(typ Expr) Expr {
+ if trace {
+ defer p.trace("typeInstance")()
+ }
+
+ pos := p.pos()
+ p.want(_Lbrack)
+ x := new(IndexExpr)
+ x.pos = pos
+ x.X = typ
+ if p.tok == _Rbrack {
+ p.syntaxError("expected type argument list")
+ x.Index = p.badExpr()
+ } else {
+ x.Index, _ = p.typeList(true)
+ }
+ p.want(_Rbrack)
+ return x
+}
+
+// If context != "", type parameters are not permitted.
+func (p *parser) funcType(context string) ([]*Field, *FuncType) {
+ if trace {
+ defer p.trace("funcType")()
+ }
+
+ typ := new(FuncType)
+ typ.pos = p.pos()
+
+ var tparamList []*Field
+ if p.got(_Lbrack) {
+ if context != "" {
+ // accept but complain
+ p.syntaxErrorAt(typ.pos, context+" must have no type parameters")
+ }
+ if p.tok == _Rbrack {
+ p.syntaxError("empty type parameter list")
+ p.next()
+ } else {
+ tparamList = p.paramList(nil, nil, _Rbrack, true)
+ }
+ }
+
+ p.want(_Lparen)
+ typ.ParamList = p.paramList(nil, nil, _Rparen, false)
+ typ.ResultList = p.funcResult()
+
+ return tparamList, typ
+}
+
+// "[" has already been consumed, and pos is its position.
+// If len != nil it is the already consumed array length.
+func (p *parser) arrayType(pos Pos, len Expr) Expr {
+ if trace {
+ defer p.trace("arrayType")()
+ }
+
+ if len == nil && !p.got(_DotDotDot) {
+ p.xnest++
+ len = p.expr()
+ p.xnest--
+ }
+ if p.tok == _Comma {
+ // Trailing commas are accepted in type parameter
+ // lists but not in array type declarations.
+ // Accept for better error handling but complain.
+ p.syntaxError("unexpected comma; expected ]")
+ p.next()
+ }
+ p.want(_Rbrack)
+ t := new(ArrayType)
+ t.pos = pos
+ t.Len = len
+ t.Elem = p.type_()
+ return t
+}
+
+// "[" and "]" have already been consumed, and pos is the position of "[".
+func (p *parser) sliceType(pos Pos) Expr {
+ t := new(SliceType)
+ t.pos = pos
+ t.Elem = p.type_()
+ return t
+}
+
+func (p *parser) chanElem() Expr {
+ if trace {
+ defer p.trace("chanElem")()
+ }
+
+ typ := p.typeOrNil()
+ if typ == nil {
+ typ = p.badExpr()
+ p.syntaxError("missing channel element type")
+ // assume element type is simply absent - don't advance
+ }
+
+ return typ
+}
+
+// StructType = "struct" "{" { FieldDecl ";" } "}" .
+func (p *parser) structType() *StructType {
+ if trace {
+ defer p.trace("structType")()
+ }
+
+ typ := new(StructType)
+ typ.pos = p.pos()
+
+ p.want(_Struct)
+ p.want(_Lbrace)
+ p.list("struct type", _Semi, _Rbrace, func() bool {
+ p.fieldDecl(typ)
+ return false
+ })
+
+ return typ
+}
+
+// InterfaceType = "interface" "{" { ( MethodDecl | EmbeddedElem ) ";" } "}" .
+func (p *parser) interfaceType() *InterfaceType {
+ if trace {
+ defer p.trace("interfaceType")()
+ }
+
+ typ := new(InterfaceType)
+ typ.pos = p.pos()
+
+ p.want(_Interface)
+ p.want(_Lbrace)
+ p.list("interface type", _Semi, _Rbrace, func() bool {
+ var f *Field
+ if p.tok == _Name {
+ f = p.methodDecl()
+ }
+ if f == nil || f.Name == nil {
+ f = p.embeddedElem(f)
+ }
+ typ.MethodList = append(typ.MethodList, f)
+ return false
+ })
+
+ return typ
+}
+
+// Result = Parameters | Type .
+func (p *parser) funcResult() []*Field {
+ if trace {
+ defer p.trace("funcResult")()
+ }
+
+ if p.got(_Lparen) {
+ return p.paramList(nil, nil, _Rparen, false)
+ }
+
+ pos := p.pos()
+ if typ := p.typeOrNil(); typ != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = typ
+ return []*Field{f}
+ }
+
+ return nil
+}
+
+func (p *parser) addField(styp *StructType, pos Pos, name *Name, typ Expr, tag *BasicLit) {
+ if tag != nil {
+ for i := len(styp.FieldList) - len(styp.TagList); i > 0; i-- {
+ styp.TagList = append(styp.TagList, nil)
+ }
+ styp.TagList = append(styp.TagList, tag)
+ }
+
+ f := new(Field)
+ f.pos = pos
+ f.Name = name
+ f.Type = typ
+ styp.FieldList = append(styp.FieldList, f)
+
+ if debug && tag != nil && len(styp.FieldList) != len(styp.TagList) {
+ panic("inconsistent struct field list")
+ }
+}
+
+// FieldDecl = (IdentifierList Type | AnonymousField) [ Tag ] .
+// AnonymousField = [ "*" ] TypeName .
+// Tag = string_lit .
+func (p *parser) fieldDecl(styp *StructType) {
+ if trace {
+ defer p.trace("fieldDecl")()
+ }
+
+ pos := p.pos()
+ switch p.tok {
+ case _Name:
+ name := p.name()
+ if p.tok == _Dot || p.tok == _Literal || p.tok == _Semi || p.tok == _Rbrace {
+ // embedded type
+ typ := p.qualifiedName(name)
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ break
+ }
+
+ // name1, name2, ... Type [ tag ]
+ names := p.nameList(name)
+ var typ Expr
+
+ // Careful dance: We don't know if we have an embedded instantiated
+ // type T[P1, P2, ...] or a field T of array/slice type [P]E or []E.
+ if len(names) == 1 && p.tok == _Lbrack {
+ typ = p.arrayOrTArgs()
+ if typ, ok := typ.(*IndexExpr); ok {
+ // embedded type T[P1, P2, ...]
+ typ.X = name // name == names[0]
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ break
+ }
+ } else {
+ // T P
+ typ = p.type_()
+ }
+
+ tag := p.oliteral()
+
+ for _, name := range names {
+ p.addField(styp, name.Pos(), name, typ, tag)
+ }
+
+ case _Star:
+ p.next()
+ var typ Expr
+ if p.tok == _Lparen {
+ // *(T)
+ p.syntaxError("cannot parenthesize embedded type")
+ p.next()
+ typ = p.qualifiedName(nil)
+ p.got(_Rparen) // no need to complain if missing
+ } else {
+ // *T
+ typ = p.qualifiedName(nil)
+ }
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, newIndirect(pos, typ), tag)
+
+ case _Lparen:
+ p.syntaxError("cannot parenthesize embedded type")
+ p.next()
+ var typ Expr
+ if p.tok == _Star {
+ // (*T)
+ pos := p.pos()
+ p.next()
+ typ = newIndirect(pos, p.qualifiedName(nil))
+ } else {
+ // (T)
+ typ = p.qualifiedName(nil)
+ }
+ p.got(_Rparen) // no need to complain if missing
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+
+ default:
+ p.syntaxError("expected field name or embedded type")
+ p.advance(_Semi, _Rbrace)
+ }
+}
+
+func (p *parser) arrayOrTArgs() Expr {
+ if trace {
+ defer p.trace("arrayOrTArgs")()
+ }
+
+ pos := p.pos()
+ p.want(_Lbrack)
+ if p.got(_Rbrack) {
+ return p.sliceType(pos)
+ }
+
+ // x [n]E or x[n,], x[n1, n2], ...
+ n, comma := p.typeList(false)
+ p.want(_Rbrack)
+ if !comma {
+ if elem := p.typeOrNil(); elem != nil {
+ // x [n]E
+ t := new(ArrayType)
+ t.pos = pos
+ t.Len = n
+ t.Elem = elem
+ return t
+ }
+ }
+
+ // x[n,], x[n1, n2], ...
+ t := new(IndexExpr)
+ t.pos = pos
+ // t.X will be filled in by caller
+ t.Index = n
+ return t
+}
+
+func (p *parser) oliteral() *BasicLit {
+ if p.tok == _Literal {
+ b := new(BasicLit)
+ b.pos = p.pos()
+ b.Value = p.lit
+ b.Kind = p.kind
+ b.Bad = p.bad
+ p.next()
+ return b
+ }
+ return nil
+}
+
+// MethodSpec = MethodName Signature | InterfaceTypeName .
+// MethodName = identifier .
+// InterfaceTypeName = TypeName .
+func (p *parser) methodDecl() *Field {
+ if trace {
+ defer p.trace("methodDecl")()
+ }
+
+ f := new(Field)
+ f.pos = p.pos()
+ name := p.name()
+
+ const context = "interface method"
+
+ switch p.tok {
+ case _Lparen:
+ // method
+ f.Name = name
+ _, f.Type = p.funcType(context)
+
+ case _Lbrack:
+ // Careful dance: We don't know if we have a generic method m[T C](x T)
+ // or an embedded instantiated type T[P1, P2] (we accept generic methods
+ // for generality and robustness of parsing but complain with an error).
+ pos := p.pos()
+ p.next()
+
+ // Empty type parameter or argument lists are not permitted.
+ // Treat as if [] were absent.
+ if p.tok == _Rbrack {
+ // name[]
+ pos := p.pos()
+ p.next()
+ if p.tok == _Lparen {
+ // name[](
+ p.errorAt(pos, "empty type parameter list")
+ f.Name = name
+ _, f.Type = p.funcType(context)
+ } else {
+ p.errorAt(pos, "empty type argument list")
+ f.Type = name
+ }
+ break
+ }
+
+ // A type argument list looks like a parameter list with only
+ // types. Parse a parameter list and decide afterwards.
+ list := p.paramList(nil, nil, _Rbrack, false)
+ if len(list) == 0 {
+ // The type parameter list is not [] but we got nothing
+ // due to other errors (reported by paramList). Treat
+ // as if [] were absent.
+ if p.tok == _Lparen {
+ f.Name = name
+ _, f.Type = p.funcType(context)
+ } else {
+ f.Type = name
+ }
+ break
+ }
+
+ // len(list) > 0
+ if list[0].Name != nil {
+ // generic method
+ f.Name = name
+ _, f.Type = p.funcType(context)
+ p.errorAt(pos, "interface method must have no type parameters")
+ break
+ }
+
+ // embedded instantiated type
+ t := new(IndexExpr)
+ t.pos = pos
+ t.X = name
+ if len(list) == 1 {
+ t.Index = list[0].Type
+ } else {
+ // len(list) > 1
+ l := new(ListExpr)
+ l.pos = list[0].Pos()
+ l.ElemList = make([]Expr, len(list))
+ for i := range list {
+ l.ElemList[i] = list[i].Type
+ }
+ t.Index = l
+ }
+ f.Type = t
+
+ default:
+ // embedded type
+ f.Type = p.qualifiedName(name)
+ }
+
+ return f
+}
+
+// EmbeddedElem = MethodSpec | EmbeddedTerm { "|" EmbeddedTerm } .
+func (p *parser) embeddedElem(f *Field) *Field {
+ if trace {
+ defer p.trace("embeddedElem")()
+ }
+
+ if f == nil {
+ f = new(Field)
+ f.pos = p.pos()
+ f.Type = p.embeddedTerm()
+ }
+
+ for p.tok == _Operator && p.op == Or {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = Or
+ p.next()
+ t.X = f.Type
+ t.Y = p.embeddedTerm()
+ f.Type = t
+ }
+
+ return f
+}
+
+// EmbeddedTerm = [ "~" ] Type .
+func (p *parser) embeddedTerm() Expr {
+ if trace {
+ defer p.trace("embeddedTerm")()
+ }
+
+ if p.tok == _Operator && p.op == Tilde {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = Tilde
+ p.next()
+ t.X = p.type_()
+ return t
+ }
+
+ t := p.typeOrNil()
+ if t == nil {
+ t = p.badExpr()
+ p.syntaxError("expected ~ term or type")
+ p.advance(_Operator, _Semi, _Rparen, _Rbrack, _Rbrace)
+ }
+
+ return t
+}
+
+// ParameterDecl = [ IdentifierList ] [ "..." ] Type .
+func (p *parser) paramDeclOrNil(name *Name, follow token) *Field {
+ if trace {
+ defer p.trace("paramDeclOrNil")()
+ }
+
+ // type set notation is ok in type parameter lists
+ typeSetsOk := follow == _Rbrack
+
+ pos := p.pos()
+ if name != nil {
+ pos = name.pos
+ } else if typeSetsOk && p.tok == _Operator && p.op == Tilde {
+ // "~" ...
+ return p.embeddedElem(nil)
+ }
+
+ f := new(Field)
+ f.pos = pos
+
+ if p.tok == _Name || name != nil {
+ // name
+ if name == nil {
+ name = p.name()
+ }
+
+ if p.tok == _Lbrack {
+ // name "[" ...
+ f.Type = p.arrayOrTArgs()
+ if typ, ok := f.Type.(*IndexExpr); ok {
+ // name "[" ... "]"
+ typ.X = name
+ } else {
+ // name "[" n "]" E
+ f.Name = name
+ }
+ if typeSetsOk && p.tok == _Operator && p.op == Or {
+ // name "[" ... "]" "|" ...
+ // name "[" n "]" E "|" ...
+ f = p.embeddedElem(f)
+ }
+ return f
+ }
+
+ if p.tok == _Dot {
+ // name "." ...
+ f.Type = p.qualifiedName(name)
+ if typeSetsOk && p.tok == _Operator && p.op == Or {
+ // name "." name "|" ...
+ f = p.embeddedElem(f)
+ }
+ return f
+ }
+
+ if typeSetsOk && p.tok == _Operator && p.op == Or {
+ // name "|" ...
+ f.Type = name
+ return p.embeddedElem(f)
+ }
+
+ f.Name = name
+ }
+
+ if p.tok == _DotDotDot {
+ // [name] "..." ...
+ t := new(DotsType)
+ t.pos = p.pos()
+ p.next()
+ t.Elem = p.typeOrNil()
+ if t.Elem == nil {
+ t.Elem = p.badExpr()
+ p.syntaxError("... is missing type")
+ }
+ f.Type = t
+ return f
+ }
+
+ if typeSetsOk && p.tok == _Operator && p.op == Tilde {
+ // [name] "~" ...
+ f.Type = p.embeddedElem(nil).Type
+ return f
+ }
+
+ f.Type = p.typeOrNil()
+ if typeSetsOk && p.tok == _Operator && p.op == Or && f.Type != nil {
+ // [name] type "|"
+ f = p.embeddedElem(f)
+ }
+ if f.Name != nil || f.Type != nil {
+ return f
+ }
+
+ p.syntaxError("expected " + tokstring(follow))
+ p.advance(_Comma, follow)
+ return nil
+}
+
+// Parameters = "(" [ ParameterList [ "," ] ] ")" .
+// ParameterList = ParameterDecl { "," ParameterDecl } .
+// "(" or "[" has already been consumed.
+// If name != nil, it is the first name after "(" or "[".
+// If typ != nil, name must be != nil, and (name, typ) is the first field in the list.
+// In the result list, either all fields have a name, or no field has a name.
+func (p *parser) paramList(name *Name, typ Expr, close token, requireNames bool) (list []*Field) {
+ if trace {
+ defer p.trace("paramList")()
+ }
+
+ // p.list won't invoke its function argument if we're at the end of the
+ // parameter list. If we have a complete field, handle this case here.
+ if name != nil && typ != nil && p.tok == close {
+ p.next()
+ par := new(Field)
+ par.pos = name.pos
+ par.Name = name
+ par.Type = typ
+ return []*Field{par}
+ }
+
+ var named int // number of parameters that have an explicit name and type
+ var typed int // number of parameters that have an explicit type
+ end := p.list("parameter list", _Comma, close, func() bool {
+ var par *Field
+ if typ != nil {
+ if debug && name == nil {
+ panic("initial type provided without name")
+ }
+ par = new(Field)
+ par.pos = name.pos
+ par.Name = name
+ par.Type = typ
+ } else {
+ par = p.paramDeclOrNil(name, close)
+ }
+ name = nil // 1st name was consumed if present
+ typ = nil // 1st type was consumed if present
+ if par != nil {
+ if debug && par.Name == nil && par.Type == nil {
+ panic("parameter without name or type")
+ }
+ if par.Name != nil && par.Type != nil {
+ named++
+ }
+ if par.Type != nil {
+ typed++
+ }
+ list = append(list, par)
+ }
+ return false
+ })
+
+ if len(list) == 0 {
+ return
+ }
+
+ // distribute parameter types (len(list) > 0)
+ if named == 0 && !requireNames {
+ // all unnamed and we're not in a type parameter list => found names are named types
+ for _, par := range list {
+ if typ := par.Name; typ != nil {
+ par.Type = typ
+ par.Name = nil
+ }
+ }
+ } else if named != len(list) {
+ // some named or we're in a type parameter list => all must be named
+ var errPos Pos // left-most error position (or unknown)
+ var typ Expr // current type (from right to left)
+ for i := len(list) - 1; i >= 0; i-- {
+ par := list[i]
+ if par.Type != nil {
+ typ = par.Type
+ if par.Name == nil {
+ errPos = StartPos(typ)
+ par.Name = NewName(errPos, "_")
+ }
+ } else if typ != nil {
+ par.Type = typ
+ } else {
+ // par.Type == nil && typ == nil => we only have a par.Name
+ errPos = par.Name.Pos()
+ t := p.badExpr()
+ t.pos = errPos // correct position
+ par.Type = t
+ }
+ }
+ if errPos.IsKnown() {
+ var msg string
+ if requireNames {
+ // Not all parameters are named because named != len(list).
+ // If named == typed we must have parameters that have no types,
+ // and they must be at the end of the parameter list, otherwise
+ // the types would have been filled in by the right-to-left sweep
+ // above and we wouldn't have an error. Since we are in a type
+ // parameter list, the missing types are constraints.
+ if named == typed {
+ errPos = end // position error at closing ]
+ msg = "missing type constraint"
+ } else {
+ msg = "missing type parameter name"
+ // go.dev/issue/60812
+ if len(list) == 1 {
+ msg += " or invalid array length"
+ }
+ }
+ } else {
+ msg = "mixed named and unnamed parameters"
+ }
+ p.syntaxErrorAt(errPos, msg)
+ }
+ }
+
+ return
+}
+
+func (p *parser) badExpr() *BadExpr {
+ b := new(BadExpr)
+ b.pos = p.pos()
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | ShortVarDecl .
+func (p *parser) simpleStmt(lhs Expr, keyword token) SimpleStmt {
+ if trace {
+ defer p.trace("simpleStmt")()
+ }
+
+ if keyword == _For && p.tok == _Range {
+ // _Range expr
+ if debug && lhs != nil {
+ panic("invalid call of simpleStmt")
+ }
+ return p.newRangeClause(nil, false)
+ }
+
+ if lhs == nil {
+ lhs = p.exprList()
+ }
+
+ if _, ok := lhs.(*ListExpr); !ok && p.tok != _Assign && p.tok != _Define {
+ // expr
+ pos := p.pos()
+ switch p.tok {
+ case _AssignOp:
+ // lhs op= rhs
+ op := p.op
+ p.next()
+ return p.newAssignStmt(pos, op, lhs, p.expr())
+
+ case _IncOp:
+ // lhs++ or lhs--
+ op := p.op
+ p.next()
+ return p.newAssignStmt(pos, op, lhs, nil)
+
+ case _Arrow:
+ // lhs <- rhs
+ s := new(SendStmt)
+ s.pos = pos
+ p.next()
+ s.Chan = lhs
+ s.Value = p.expr()
+ return s
+
+ default:
+ // expr
+ s := new(ExprStmt)
+ s.pos = lhs.Pos()
+ s.X = lhs
+ return s
+ }
+ }
+
+ // expr_list
+ switch p.tok {
+ case _Assign, _Define:
+ pos := p.pos()
+ var op Operator
+ if p.tok == _Define {
+ op = Def
+ }
+ p.next()
+
+ if keyword == _For && p.tok == _Range {
+ // expr_list op= _Range expr
+ return p.newRangeClause(lhs, op == Def)
+ }
+
+ // expr_list op= expr_list
+ rhs := p.exprList()
+
+ if x, ok := rhs.(*TypeSwitchGuard); ok && keyword == _Switch && op == Def {
+ if lhs, ok := lhs.(*Name); ok {
+ // switch … lhs := rhs.(type)
+ x.Lhs = lhs
+ s := new(ExprStmt)
+ s.pos = x.Pos()
+ s.X = x
+ return s
+ }
+ }
+
+ return p.newAssignStmt(pos, op, lhs, rhs)
+
+ default:
+ p.syntaxError("expected := or = or comma")
+ p.advance(_Semi, _Rbrace)
+ // make the best of what we have
+ if x, ok := lhs.(*ListExpr); ok {
+ lhs = x.ElemList[0]
+ }
+ s := new(ExprStmt)
+ s.pos = lhs.Pos()
+ s.X = lhs
+ return s
+ }
+}
+
+func (p *parser) newRangeClause(lhs Expr, def bool) *RangeClause {
+ r := new(RangeClause)
+ r.pos = p.pos()
+ p.next() // consume _Range
+ r.Lhs = lhs
+ r.Def = def
+ r.X = p.expr()
+ return r
+}
+
+func (p *parser) newAssignStmt(pos Pos, op Operator, lhs, rhs Expr) *AssignStmt {
+ a := new(AssignStmt)
+ a.pos = pos
+ a.Op = op
+ a.Lhs = lhs
+ a.Rhs = rhs
+ return a
+}
+
+func (p *parser) labeledStmtOrNil(label *Name) Stmt {
+ if trace {
+ defer p.trace("labeledStmt")()
+ }
+
+ s := new(LabeledStmt)
+ s.pos = p.pos()
+ s.Label = label
+
+ p.want(_Colon)
+
+ if p.tok == _Rbrace {
+ // We expect a statement (incl. an empty statement), which must be
+ // terminated by a semicolon. Because semicolons may be omitted before
+ // an _Rbrace, seeing an _Rbrace implies an empty statement.
+ e := new(EmptyStmt)
+ e.pos = p.pos()
+ s.Stmt = e
+ return s
+ }
+
+ s.Stmt = p.stmtOrNil()
+ if s.Stmt != nil {
+ return s
+ }
+
+ // report error at line of ':' token
+ p.syntaxErrorAt(s.pos, "missing statement after label")
+ // we are already at the end of the labeled statement - no need to advance
+ return nil // avoids follow-on errors (see e.g., fixedbugs/bug274.go)
+}
+
+// context must be a non-empty string unless we know that p.tok == _Lbrace.
+func (p *parser) blockStmt(context string) *BlockStmt {
+ if trace {
+ defer p.trace("blockStmt")()
+ }
+
+ s := new(BlockStmt)
+ s.pos = p.pos()
+
+ // people coming from C may forget that braces are mandatory in Go
+ if !p.got(_Lbrace) {
+ p.syntaxError("expected { after " + context)
+ p.advance(_Name, _Rbrace)
+ s.Rbrace = p.pos() // in case we found "}"
+ if p.got(_Rbrace) {
+ return s
+ }
+ }
+
+ s.List = p.stmtList()
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) declStmt(f func(*Group) Decl) *DeclStmt {
+ if trace {
+ defer p.trace("declStmt")()
+ }
+
+ s := new(DeclStmt)
+ s.pos = p.pos()
+
+ p.next() // _Const, _Type, or _Var
+ s.DeclList = p.appendGroup(nil, f)
+
+ return s
+}
+
+func (p *parser) forStmt() Stmt {
+ if trace {
+ defer p.trace("forStmt")()
+ }
+
+ s := new(ForStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Cond, s.Post = p.header(_For)
+ s.Body = p.blockStmt("for clause")
+
+ return s
+}
+
+func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleStmt) {
+ p.want(keyword)
+
+ if p.tok == _Lbrace {
+ if keyword == _If {
+ p.syntaxError("missing condition in if statement")
+ cond = p.badExpr()
+ }
+ return
+ }
+ // p.tok != _Lbrace
+
+ outer := p.xnest
+ p.xnest = -1
+
+ if p.tok != _Semi {
+ // accept potential varDecl but complain
+ if p.got(_Var) {
+ p.syntaxError(fmt.Sprintf("var declaration not allowed in %s initializer", tokstring(keyword)))
+ }
+ init = p.simpleStmt(nil, keyword)
+ // If we have a range clause, we are done (can only happen for keyword == _For).
+ if _, ok := init.(*RangeClause); ok {
+ p.xnest = outer
+ return
+ }
+ }
+
+ var condStmt SimpleStmt
+ var semi struct {
+ pos Pos
+ lit string // valid if pos.IsKnown()
+ }
+ if p.tok != _Lbrace {
+ if p.tok == _Semi {
+ semi.pos = p.pos()
+ semi.lit = p.lit
+ p.next()
+ } else {
+ // asking for a '{' rather than a ';' here leads to a better error message
+ p.want(_Lbrace)
+ if p.tok != _Lbrace {
+ p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., go.dev/issue/22581)
+ }
+ }
+ if keyword == _For {
+ if p.tok != _Semi {
+ if p.tok == _Lbrace {
+ p.syntaxError("expected for loop condition")
+ goto done
+ }
+ condStmt = p.simpleStmt(nil, 0 /* range not permitted */)
+ }
+ p.want(_Semi)
+ if p.tok != _Lbrace {
+ post = p.simpleStmt(nil, 0 /* range not permitted */)
+ if a, _ := post.(*AssignStmt); a != nil && a.Op == Def {
+ p.syntaxErrorAt(a.Pos(), "cannot declare in post statement of for loop")
+ }
+ }
+ } else if p.tok != _Lbrace {
+ condStmt = p.simpleStmt(nil, keyword)
+ }
+ } else {
+ condStmt = init
+ init = nil
+ }
+
+done:
+ // unpack condStmt
+ switch s := condStmt.(type) {
+ case nil:
+ if keyword == _If && semi.pos.IsKnown() {
+ if semi.lit != "semicolon" {
+ p.syntaxErrorAt(semi.pos, fmt.Sprintf("unexpected %s, expected { after if clause", semi.lit))
+ } else {
+ p.syntaxErrorAt(semi.pos, "missing condition in if statement")
+ }
+ b := new(BadExpr)
+ b.pos = semi.pos
+ cond = b
+ }
+ case *ExprStmt:
+ cond = s.X
+ default:
+ // A common syntax error is to write '=' instead of '==',
+ // which turns an expression into an assignment. Provide
+ // a more explicit error message in that case to prevent
+ // further confusion.
+ var str string
+ if as, ok := s.(*AssignStmt); ok && as.Op == 0 {
+ // Emphasize complex Lhs and Rhs of assignment with parentheses to highlight '='.
+ str = "assignment " + emphasize(as.Lhs) + " = " + emphasize(as.Rhs)
+ } else {
+ str = String(s)
+ }
+ p.syntaxErrorAt(s.Pos(), fmt.Sprintf("cannot use %s as value", str))
+ }
+
+ p.xnest = outer
+ return
+}
+
+// emphasize returns a string representation of x, with (top-level)
+// binary expressions emphasized by enclosing them in parentheses.
+func emphasize(x Expr) string {
+ s := String(x)
+ if op, _ := x.(*Operation); op != nil && op.Y != nil {
+ // binary expression
+ return "(" + s + ")"
+ }
+ return s
+}
+
+func (p *parser) ifStmt() *IfStmt {
+ if trace {
+ defer p.trace("ifStmt")()
+ }
+
+ s := new(IfStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Cond, _ = p.header(_If)
+ s.Then = p.blockStmt("if clause")
+
+ if p.got(_Else) {
+ switch p.tok {
+ case _If:
+ s.Else = p.ifStmt()
+ case _Lbrace:
+ s.Else = p.blockStmt("")
+ default:
+ p.syntaxError("else must be followed by if or statement block")
+ p.advance(_Name, _Rbrace)
+ }
+ }
+
+ return s
+}
+
+func (p *parser) switchStmt() *SwitchStmt {
+ if trace {
+ defer p.trace("switchStmt")()
+ }
+
+ s := new(SwitchStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Tag, _ = p.header(_Switch)
+
+ if !p.got(_Lbrace) {
+ p.syntaxError("missing { after switch clause")
+ p.advance(_Case, _Default, _Rbrace)
+ }
+ for p.tok != _EOF && p.tok != _Rbrace {
+ s.Body = append(s.Body, p.caseClause())
+ }
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) selectStmt() *SelectStmt {
+ if trace {
+ defer p.trace("selectStmt")()
+ }
+
+ s := new(SelectStmt)
+ s.pos = p.pos()
+
+ p.want(_Select)
+ if !p.got(_Lbrace) {
+ p.syntaxError("missing { after select clause")
+ p.advance(_Case, _Default, _Rbrace)
+ }
+ for p.tok != _EOF && p.tok != _Rbrace {
+ s.Body = append(s.Body, p.commClause())
+ }
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) caseClause() *CaseClause {
+ if trace {
+ defer p.trace("caseClause")()
+ }
+
+ c := new(CaseClause)
+ c.pos = p.pos()
+
+ switch p.tok {
+ case _Case:
+ p.next()
+ c.Cases = p.exprList()
+
+ case _Default:
+ p.next()
+
+ default:
+ p.syntaxError("expected case or default or }")
+ p.advance(_Colon, _Case, _Default, _Rbrace)
+ }
+
+ c.Colon = p.pos()
+ p.want(_Colon)
+ c.Body = p.stmtList()
+
+ return c
+}
+
+func (p *parser) commClause() *CommClause {
+ if trace {
+ defer p.trace("commClause")()
+ }
+
+ c := new(CommClause)
+ c.pos = p.pos()
+
+ switch p.tok {
+ case _Case:
+ p.next()
+ c.Comm = p.simpleStmt(nil, 0)
+
+ // The syntax restricts the possible simple statements here to:
+ //
+ // lhs <- x (send statement)
+ // <-x
+ // lhs = <-x
+ // lhs := <-x
+ //
+ // All these (and more) are recognized by simpleStmt and invalid
+ // syntax trees are flagged later, during type checking.
+
+ case _Default:
+ p.next()
+
+ default:
+ p.syntaxError("expected case or default or }")
+ p.advance(_Colon, _Case, _Default, _Rbrace)
+ }
+
+ c.Colon = p.pos()
+ p.want(_Colon)
+ c.Body = p.stmtList()
+
+ return c
+}
+
+// stmtOrNil parses a statement if one is present, or else returns nil.
+//
+// Statement =
+// Declaration | LabeledStmt | SimpleStmt |
+// GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt |
+// FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt |
+// DeferStmt .
+func (p *parser) stmtOrNil() Stmt {
+ if trace {
+ defer p.trace("stmt " + p.tok.String())()
+ }
+
+ // Most statements (assignments) start with an identifier;
+ // look for it first before doing anything more expensive.
+ if p.tok == _Name {
+ p.clearPragma()
+ lhs := p.exprList()
+ if label, ok := lhs.(*Name); ok && p.tok == _Colon {
+ return p.labeledStmtOrNil(label)
+ }
+ return p.simpleStmt(lhs, 0)
+ }
+
+ switch p.tok {
+ case _Var:
+ return p.declStmt(p.varDecl)
+
+ case _Const:
+ return p.declStmt(p.constDecl)
+
+ case _Type:
+ return p.declStmt(p.typeDecl)
+ }
+
+ p.clearPragma()
+
+ switch p.tok {
+ case _Lbrace:
+ return p.blockStmt("")
+
+ case _Operator, _Star:
+ switch p.op {
+ case Add, Sub, Mul, And, Xor, Not:
+ return p.simpleStmt(nil, 0) // unary operators
+ }
+
+ case _Literal, _Func, _Lparen, // operands
+ _Lbrack, _Struct, _Map, _Chan, _Interface, // composite types
+ _Arrow: // receive operator
+ return p.simpleStmt(nil, 0)
+
+ case _For:
+ return p.forStmt()
+
+ case _Switch:
+ return p.switchStmt()
+
+ case _Select:
+ return p.selectStmt()
+
+ case _If:
+ return p.ifStmt()
+
+ case _Fallthrough:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ p.next()
+ s.Tok = _Fallthrough
+ return s
+
+ case _Break, _Continue:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ s.Tok = p.tok
+ p.next()
+ if p.tok == _Name {
+ s.Label = p.name()
+ }
+ return s
+
+ case _Go, _Defer:
+ return p.callStmt()
+
+ case _Goto:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ s.Tok = _Goto
+ p.next()
+ s.Label = p.name()
+ return s
+
+ case _Return:
+ s := new(ReturnStmt)
+ s.pos = p.pos()
+ p.next()
+ if p.tok != _Semi && p.tok != _Rbrace {
+ s.Results = p.exprList()
+ }
+ return s
+
+ case _Semi:
+ s := new(EmptyStmt)
+ s.pos = p.pos()
+ return s
+ }
+
+ return nil
+}
+
+// StatementList = { Statement ";" } .
+func (p *parser) stmtList() (l []Stmt) {
+ if trace {
+ defer p.trace("stmtList")()
+ }
+
+ for p.tok != _EOF && p.tok != _Rbrace && p.tok != _Case && p.tok != _Default {
+ s := p.stmtOrNil()
+ p.clearPragma()
+ if s == nil {
+ break
+ }
+ l = append(l, s)
+ // ";" is optional before "}"
+ if !p.got(_Semi) && p.tok != _Rbrace {
+ p.syntaxError("at end of statement")
+ p.advance(_Semi, _Rbrace, _Case, _Default)
+ p.got(_Semi) // avoid spurious empty statement
+ }
+ }
+ return
+}
+
+// argList parses a possibly empty, comma-separated list of arguments,
+// optionally followed by a comma (if not empty), and closed by ")".
+// The last argument may be followed by "...".
+//
+// argList = [ arg { "," arg } [ "..." ] [ "," ] ] ")" .
+func (p *parser) argList() (list []Expr, hasDots bool) {
+ if trace {
+ defer p.trace("argList")()
+ }
+
+ p.xnest++
+ p.list("argument list", _Comma, _Rparen, func() bool {
+ list = append(list, p.expr())
+ hasDots = p.got(_DotDotDot)
+ return hasDots
+ })
+ p.xnest--
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+func (p *parser) name() *Name {
+ // no tracing to avoid overly verbose output
+
+ if p.tok == _Name {
+ n := NewName(p.pos(), p.lit)
+ p.next()
+ return n
+ }
+
+ n := NewName(p.pos(), "_")
+ p.syntaxError("expected name")
+ p.advance()
+ return n
+}
+
+// IdentifierList = identifier { "," identifier } .
+// The first name must be provided.
+func (p *parser) nameList(first *Name) []*Name {
+ if trace {
+ defer p.trace("nameList")()
+ }
+
+ if debug && first == nil {
+ panic("first name not provided")
+ }
+
+ l := []*Name{first}
+ for p.got(_Comma) {
+ l = append(l, p.name())
+ }
+
+ return l
+}
+
+// The first name may be provided, or nil.
+func (p *parser) qualifiedName(name *Name) Expr {
+ if trace {
+ defer p.trace("qualifiedName")()
+ }
+
+ var x Expr
+ switch {
+ case name != nil:
+ x = name
+ case p.tok == _Name:
+ x = p.name()
+ default:
+ x = NewName(p.pos(), "_")
+ p.syntaxError("expected name")
+ p.advance(_Dot, _Semi, _Rbrace)
+ }
+
+ if p.tok == _Dot {
+ s := new(SelectorExpr)
+ s.pos = p.pos()
+ p.next()
+ s.X = x
+ s.Sel = p.name()
+ x = s
+ }
+
+ if p.tok == _Lbrack {
+ x = p.typeInstance(x)
+ }
+
+ return x
+}
+
+// ExpressionList = Expression { "," Expression } .
+func (p *parser) exprList() Expr {
+ if trace {
+ defer p.trace("exprList")()
+ }
+
+ x := p.expr()
+ if p.got(_Comma) {
+ list := []Expr{x, p.expr()}
+ for p.got(_Comma) {
+ list = append(list, p.expr())
+ }
+ t := new(ListExpr)
+ t.pos = x.Pos()
+ t.ElemList = list
+ x = t
+ }
+ return x
+}
+
+// typeList parses a non-empty, comma-separated list of types,
+// optionally followed by a comma. If strict is set to false,
+// the first element may also be a (non-type) expression.
+// If there is more than one argument, the result is a *ListExpr.
+// The comma result indicates whether there was a (separating or
+// trailing) comma.
+//
+// typeList = arg { "," arg } [ "," ] .
+func (p *parser) typeList(strict bool) (x Expr, comma bool) {
+ if trace {
+ defer p.trace("typeList")()
+ }
+
+ p.xnest++
+ if strict {
+ x = p.type_()
+ } else {
+ x = p.expr()
+ }
+ if p.got(_Comma) {
+ comma = true
+ if t := p.typeOrNil(); t != nil {
+ list := []Expr{x, t}
+ for p.got(_Comma) {
+ if t = p.typeOrNil(); t == nil {
+ break
+ }
+ list = append(list, t)
+ }
+ l := new(ListExpr)
+ l.pos = x.Pos() // == list[0].Pos()
+ l.ElemList = list
+ x = l
+ }
+ }
+ p.xnest--
+ return
+}
+
+// Unparen returns e with any enclosing parentheses stripped.
+func Unparen(x Expr) Expr {
+ for {
+ p, ok := x.(*ParenExpr)
+ if !ok {
+ break
+ }
+ x = p.X
+ }
+ return x
+}
+
+// UnpackListExpr unpacks a *ListExpr into a []Expr.
+func UnpackListExpr(x Expr) []Expr {
+ switch x := x.(type) {
+ case nil:
+ return nil
+ case *ListExpr:
+ return x.ElemList
+ default:
+ return []Expr{x}
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go
new file mode 100644
index 0000000..538278b
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/parser_test.go
@@ -0,0 +1,395 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ fast = flag.Bool("fast", false, "parse package files in parallel")
+ verify = flag.Bool("verify", false, "verify idempotent printing")
+ src_ = flag.String("src", "parser.go", "source file to parse")
+ skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib")
+)
+
+func TestParse(t *testing.T) {
+ ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0)
+}
+
+func TestVerify(t *testing.T) {
+ ast, err := ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0)
+ if err != nil {
+ return // error already reported
+ }
+ verifyPrint(t, *src_, ast)
+}
+
+func TestStdLib(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ var skipRx *regexp.Regexp
+ if *skip != "" {
+ var err error
+ skipRx, err = regexp.Compile(*skip)
+ if err != nil {
+ t.Fatalf("invalid argument for -skip (%v)", err)
+ }
+ }
+
+ var m1 runtime.MemStats
+ runtime.ReadMemStats(&m1)
+ start := time.Now()
+
+ type parseResult struct {
+ filename string
+ lines uint
+ }
+
+ goroot := testenv.GOROOT(t)
+
+ results := make(chan parseResult)
+ go func() {
+ defer close(results)
+ for _, dir := range []string{
+ filepath.Join(goroot, "src"),
+ filepath.Join(goroot, "misc"),
+ } {
+ if filepath.Base(dir) == "misc" {
+ // cmd/distpack deletes GOROOT/misc, so skip that directory if it isn't present.
+ // cmd/distpack also requires GOROOT/VERSION to exist, so use that to
+ // suppress false-positive skips.
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ if _, err := os.Stat(filepath.Join(testenv.GOROOT(t), "VERSION")); err == nil {
+ fmt.Printf("%s not present; skipping\n", dir)
+ continue
+ }
+ }
+ }
+
+ walkDirs(t, dir, func(filename string) {
+ if skipRx != nil && skipRx.MatchString(filename) {
+ // Always report skipped files since regexp
+ // typos can lead to surprising results.
+ fmt.Printf("skipping %s\n", filename)
+ return
+ }
+ if debug {
+ fmt.Printf("parsing %s\n", filename)
+ }
+ ast, err := ParseFile(filename, nil, nil, 0)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if *verify {
+ verifyPrint(t, filename, ast)
+ }
+ results <- parseResult{filename, ast.EOF.Line()}
+ })
+ }
+ }()
+
+ var count, lines uint
+ for res := range results {
+ count++
+ lines += res.lines
+ if testing.Verbose() {
+ fmt.Printf("%5d %s (%d lines)\n", count, res.filename, res.lines)
+ }
+ }
+
+ dt := time.Since(start)
+ var m2 runtime.MemStats
+ runtime.ReadMemStats(&m2)
+ dm := float64(m2.TotalAlloc-m1.TotalAlloc) / 1e6
+
+ fmt.Printf("parsed %d lines (%d files) in %v (%d lines/s)\n", lines, count, dt, int64(float64(lines)/dt.Seconds()))
+ fmt.Printf("allocated %.3fMb (%.3fMb/s)\n", dm, dm/dt.Seconds())
+}
+
+func walkDirs(t *testing.T, dir string, action func(string)) {
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var files, dirs []string
+ for _, entry := range entries {
+ if entry.Type().IsRegular() {
+ if strings.HasSuffix(entry.Name(), ".go") {
+ path := filepath.Join(dir, entry.Name())
+ files = append(files, path)
+ }
+ } else if entry.IsDir() && entry.Name() != "testdata" {
+ path := filepath.Join(dir, entry.Name())
+ if !strings.HasSuffix(path, string(filepath.Separator)+"test") {
+ dirs = append(dirs, path)
+ }
+ }
+ }
+
+ if *fast {
+ var wg sync.WaitGroup
+ wg.Add(len(files))
+ for _, filename := range files {
+ go func(filename string) {
+ defer wg.Done()
+ action(filename)
+ }(filename)
+ }
+ wg.Wait()
+ } else {
+ for _, filename := range files {
+ action(filename)
+ }
+ }
+
+ for _, dir := range dirs {
+ walkDirs(t, dir, action)
+ }
+}
+
+func verifyPrint(t *testing.T, filename string, ast1 *File) {
+ var buf1 bytes.Buffer
+ _, err := Fprint(&buf1, ast1, LineForm)
+ if err != nil {
+ panic(err)
+ }
+ bytes1 := buf1.Bytes()
+
+ ast2, err := Parse(NewFileBase(filename), &buf1, nil, nil, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ var buf2 bytes.Buffer
+ _, err = Fprint(&buf2, ast2, LineForm)
+ if err != nil {
+ panic(err)
+ }
+ bytes2 := buf2.Bytes()
+
+ if bytes.Compare(bytes1, bytes2) != 0 {
+ fmt.Printf("--- %s ---\n", filename)
+ fmt.Printf("%s\n", bytes1)
+ fmt.Println()
+
+ fmt.Printf("--- %s ---\n", filename)
+ fmt.Printf("%s\n", bytes2)
+ fmt.Println()
+
+ t.Error("printed syntax trees do not match")
+ }
+}
+
+func TestIssue17697(t *testing.T) {
+ _, err := Parse(nil, bytes.NewReader(nil), nil, nil, 0) // return with parser error, don't panic
+ if err == nil {
+ t.Errorf("no error reported")
+ }
+}
+
+func TestParseFile(t *testing.T) {
+ _, err := ParseFile("", nil, nil, 0)
+ if err == nil {
+ t.Error("missing io error")
+ }
+
+ var first error
+ _, err = ParseFile("", func(err error) {
+ if first == nil {
+ first = err
+ }
+ }, nil, 0)
+ if err == nil || first == nil {
+ t.Error("missing io error")
+ }
+ if err != first {
+ t.Errorf("got %v; want first error %v", err, first)
+ }
+}
+
+// Make sure (PosMax + 1) doesn't overflow when converted to default
+// type int (when passed as argument to fmt.Sprintf) on 32bit platforms
+// (see test cases below).
+var tooLarge int = PosMax + 1
+
+func TestLineDirectives(t *testing.T) {
+ // valid line directives lead to a syntax error after them
+ const valid = "syntax error: package statement must be first"
+ const filename = "directives.go"
+
+ for _, test := range []struct {
+ src, msg string
+ filename string
+ line, col uint // 1-based; 0 means unknown
+ }{
+ // ignored //line directives
+ {"//\n", valid, filename, 2, 1}, // no directive
+ {"//line\n", valid, filename, 2, 1}, // missing colon
+ {"//line foo\n", valid, filename, 2, 1}, // missing colon
+ {" //line foo:\n", valid, filename, 2, 1}, // not a line start
+ {"// line foo:\n", valid, filename, 2, 1}, // space between // and line
+
+ // invalid //line directives with one colon
+ {"//line :\n", "invalid line number: ", filename, 1, 9},
+ {"//line :x\n", "invalid line number: x", filename, 1, 9},
+ {"//line foo :\n", "invalid line number: ", filename, 1, 13},
+ {"//line foo:x\n", "invalid line number: x", filename, 1, 12},
+ {"//line foo:0\n", "invalid line number: 0", filename, 1, 12},
+ {"//line foo:1 \n", "invalid line number: 1 ", filename, 1, 12},
+ {"//line foo:-12\n", "invalid line number: -12", filename, 1, 12},
+ {"//line C:foo:0\n", "invalid line number: 0", filename, 1, 14},
+ {fmt.Sprintf("//line foo:%d\n", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12},
+
+ // invalid //line directives with two colons
+ {"//line ::\n", "invalid line number: ", filename, 1, 10},
+ {"//line ::x\n", "invalid line number: x", filename, 1, 10},
+ {"//line foo::123abc\n", "invalid line number: 123abc", filename, 1, 13},
+ {"//line foo::0\n", "invalid line number: 0", filename, 1, 13},
+ {"//line foo:0:1\n", "invalid line number: 0", filename, 1, 12},
+
+ {"//line :123:0\n", "invalid column number: 0", filename, 1, 13},
+ {"//line foo:123:0\n", "invalid column number: 0", filename, 1, 16},
+ {fmt.Sprintf("//line foo:10:%d\n", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15},
+
+ // effect of valid //line directives on lines
+ {"//line foo:123\n foo", valid, "foo", 123, 0},
+ {"//line foo:123\n foo", valid, " foo", 123, 0},
+ {"//line foo:123\n//line bar:345\nfoo", valid, "bar", 345, 0},
+ {"//line C:foo:123\n", valid, "C:foo", 123, 0},
+ {"//line /src/a/a.go:123\n foo", valid, "/src/a/a.go", 123, 0},
+ {"//line :x:1\n", valid, ":x", 1, 0},
+ {"//line foo ::1\n", valid, "foo :", 1, 0},
+ {"//line foo:123abc:1\n", valid, "foo:123abc", 1, 0},
+ {"//line foo :123:1\n", valid, "foo ", 123, 1},
+ {"//line ::123\n", valid, ":", 123, 0},
+
+ // effect of valid //line directives on columns
+ {"//line :x:1:10\n", valid, ":x", 1, 10},
+ {"//line foo ::1:2\n", valid, "foo :", 1, 2},
+ {"//line foo:123abc:1:1000\n", valid, "foo:123abc", 1, 1000},
+ {"//line foo :123:1000\n\n", valid, "foo ", 124, 1},
+ {"//line ::123:1234\n", valid, ":", 123, 1234},
+
+ // //line directives with omitted filenames lead to empty filenames
+ {"//line :10\n", valid, "", 10, 0},
+ {"//line :10:20\n", valid, filename, 10, 20},
+ {"//line bar:1\n//line :10\n", valid, "", 10, 0},
+ {"//line bar:1\n//line :10:20\n", valid, "bar", 10, 20},
+
+ // ignored /*line directives
+ {"/**/", valid, filename, 1, 5}, // no directive
+ {"/*line*/", valid, filename, 1, 9}, // missing colon
+ {"/*line foo*/", valid, filename, 1, 13}, // missing colon
+ {" //line foo:*/", valid, filename, 1, 16}, // not a line start
+ {"/* line foo:*/", valid, filename, 1, 16}, // space between // and line
+
+ // invalid /*line directives with one colon
+ {"/*line :*/", "invalid line number: ", filename, 1, 9},
+ {"/*line :x*/", "invalid line number: x", filename, 1, 9},
+ {"/*line foo :*/", "invalid line number: ", filename, 1, 13},
+ {"/*line foo:x*/", "invalid line number: x", filename, 1, 12},
+ {"/*line foo:0*/", "invalid line number: 0", filename, 1, 12},
+ {"/*line foo:1 */", "invalid line number: 1 ", filename, 1, 12},
+ {"/*line C:foo:0*/", "invalid line number: 0", filename, 1, 14},
+ {fmt.Sprintf("/*line foo:%d*/", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12},
+
+ // invalid /*line directives with two colons
+ {"/*line ::*/", "invalid line number: ", filename, 1, 10},
+ {"/*line ::x*/", "invalid line number: x", filename, 1, 10},
+ {"/*line foo::123abc*/", "invalid line number: 123abc", filename, 1, 13},
+ {"/*line foo::0*/", "invalid line number: 0", filename, 1, 13},
+ {"/*line foo:0:1*/", "invalid line number: 0", filename, 1, 12},
+
+ {"/*line :123:0*/", "invalid column number: 0", filename, 1, 13},
+ {"/*line foo:123:0*/", "invalid column number: 0", filename, 1, 16},
+ {fmt.Sprintf("/*line foo:10:%d*/", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15},
+
+ // effect of valid /*line directives on lines
+ {"/*line foo:123*/ foo", valid, "foo", 123, 0},
+ {"/*line foo:123*/\n//line bar:345\nfoo", valid, "bar", 345, 0},
+ {"/*line C:foo:123*/", valid, "C:foo", 123, 0},
+ {"/*line /src/a/a.go:123*/ foo", valid, "/src/a/a.go", 123, 0},
+ {"/*line :x:1*/", valid, ":x", 1, 0},
+ {"/*line foo ::1*/", valid, "foo :", 1, 0},
+ {"/*line foo:123abc:1*/", valid, "foo:123abc", 1, 0},
+ {"/*line foo :123:10*/", valid, "foo ", 123, 10},
+ {"/*line ::123*/", valid, ":", 123, 0},
+
+ // effect of valid /*line directives on columns
+ {"/*line :x:1:10*/", valid, ":x", 1, 10},
+ {"/*line foo ::1:2*/", valid, "foo :", 1, 2},
+ {"/*line foo:123abc:1:1000*/", valid, "foo:123abc", 1, 1000},
+ {"/*line foo :123:1000*/\n", valid, "foo ", 124, 1},
+ {"/*line ::123:1234*/", valid, ":", 123, 1234},
+
+ // /*line directives with omitted filenames lead to the previously used filenames
+ {"/*line :10*/", valid, "", 10, 0},
+ {"/*line :10:20*/", valid, filename, 10, 20},
+ {"//line bar:1\n/*line :10*/", valid, "", 10, 0},
+ {"//line bar:1\n/*line :10:20*/", valid, "bar", 10, 20},
+ } {
+ base := NewFileBase(filename)
+ _, err := Parse(base, strings.NewReader(test.src), nil, nil, 0)
+ if err == nil {
+ t.Errorf("%s: no error reported", test.src)
+ continue
+ }
+ perr, ok := err.(Error)
+ if !ok {
+ t.Errorf("%s: got %v; want parser error", test.src, err)
+ continue
+ }
+ if msg := perr.Msg; msg != test.msg {
+ t.Errorf("%s: got msg = %q; want %q", test.src, msg, test.msg)
+ }
+
+ pos := perr.Pos
+ if filename := pos.RelFilename(); filename != test.filename {
+ t.Errorf("%s: got filename = %q; want %q", test.src, filename, test.filename)
+ }
+ if line := pos.RelLine(); line != test.line {
+ t.Errorf("%s: got line = %d; want %d", test.src, line, test.line)
+ }
+ if col := pos.RelCol(); col != test.col {
+ t.Errorf("%s: got col = %d; want %d", test.src, col, test.col)
+ }
+ }
+}
+
+// Test that typical uses of UnpackListExpr don't allocate.
+func TestUnpackListExprAllocs(t *testing.T) {
+ var x Expr = NewName(Pos{}, "x")
+ allocs := testing.AllocsPerRun(1000, func() {
+ list := UnpackListExpr(x)
+ if len(list) != 1 || list[0] != x {
+ t.Fatalf("unexpected result")
+ }
+ })
+
+ if allocs > 0 {
+ errorf := t.Errorf
+ if testenv.OptimizationOff() {
+ errorf = t.Logf // noopt builder disables inlining
+ }
+ errorf("UnpackListExpr allocated %v times", allocs)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/pos.go b/src/cmd/compile/internal/syntax/pos.go
new file mode 100644
index 0000000..dd25d4f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/pos.go
@@ -0,0 +1,211 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "fmt"
+
+// PosMax is the largest line or column value that can be represented without loss.
+// Incoming values (arguments) larger than PosMax will be set to PosMax.
+//
+// Keep this consistent with maxLineCol in go/scanner.
+const PosMax = 1 << 30
+
+// A Pos represents an absolute (line, col) source position
+// with a reference to position base for computing relative
+// (to a file, or line directive) position information.
+// Pos values are intentionally light-weight so that they
+// can be created without too much concern about space use.
+type Pos struct {
+ base *PosBase
+ line, col uint32
+}
+
+// MakePos returns a new Pos for the given PosBase, line and column.
+func MakePos(base *PosBase, line, col uint) Pos { return Pos{base, sat32(line), sat32(col)} }
+
+// TODO(gri) IsKnown makes an assumption about linebase < 1.
+// Maybe we should check for Base() != nil instead.
+
+func (pos Pos) Pos() Pos { return pos }
+func (pos Pos) IsKnown() bool { return pos.line > 0 }
+func (pos Pos) Base() *PosBase { return pos.base }
+func (pos Pos) Line() uint { return uint(pos.line) }
+func (pos Pos) Col() uint { return uint(pos.col) }
+
+func (pos Pos) RelFilename() string { return pos.base.Filename() }
+
+func (pos Pos) RelLine() uint {
+ b := pos.base
+ if b.Line() == 0 {
+ // base line is unknown => relative line is unknown
+ return 0
+ }
+ return b.Line() + (pos.Line() - b.Pos().Line())
+}
+
+func (pos Pos) RelCol() uint {
+ b := pos.base
+ if b.Col() == 0 {
+ // base column is unknown => relative column is unknown
+ // (the current specification for line directives requires
+ // this to apply until the next PosBase/line directive,
+ // not just until the new newline)
+ return 0
+ }
+ if pos.Line() == b.Pos().Line() {
+ // pos on same line as pos base => column is relative to pos base
+ return b.Col() + (pos.Col() - b.Pos().Col())
+ }
+ return pos.Col()
+}
+
+// Cmp compares the positions p and q and returns a result r as follows:
+//
+// r < 0: p is before q
+// r == 0: p and q are the same position (but may not be identical)
+// r > 0: p is after q
+//
+// If p and q are in different files, p is before q if the filename
+// of p sorts lexicographically before the filename of q.
+func (p Pos) Cmp(q Pos) int {
+ pname := p.RelFilename()
+ qname := q.RelFilename()
+ switch {
+ case pname < qname:
+ return -1
+ case pname > qname:
+ return +1
+ }
+
+ pline := p.Line()
+ qline := q.Line()
+ switch {
+ case pline < qline:
+ return -1
+ case pline > qline:
+ return +1
+ }
+
+ pcol := p.Col()
+ qcol := q.Col()
+ switch {
+ case pcol < qcol:
+ return -1
+ case pcol > qcol:
+ return +1
+ }
+
+ return 0
+}
+
+func (pos Pos) String() string {
+ rel := position_{pos.RelFilename(), pos.RelLine(), pos.RelCol()}
+ abs := position_{pos.Base().Pos().RelFilename(), pos.Line(), pos.Col()}
+ s := rel.String()
+ if rel != abs {
+ s += "[" + abs.String() + "]"
+ }
+ return s
+}
+
+// TODO(gri) cleanup: find better name, avoid conflict with position in error_test.go
+type position_ struct {
+ filename string
+ line, col uint
+}
+
+func (p position_) String() string {
+ if p.line == 0 {
+ if p.filename == "" {
+ return "<unknown position>"
+ }
+ return p.filename
+ }
+ if p.col == 0 {
+ return fmt.Sprintf("%s:%d", p.filename, p.line)
+ }
+ return fmt.Sprintf("%s:%d:%d", p.filename, p.line, p.col)
+}
+
+// A PosBase represents the base for relative position information:
+// At position pos, the relative position is filename:line:col.
+type PosBase struct {
+ pos Pos
+ filename string
+ line, col uint32
+ trimmed bool // whether -trimpath has been applied
+}
+
+// NewFileBase returns a new PosBase for the given filename.
+// A file PosBase's position is relative to itself, with the
+// position being filename:1:1.
+func NewFileBase(filename string) *PosBase {
+ return NewTrimmedFileBase(filename, false)
+}
+
+// NewTrimmedFileBase is like NewFileBase, but allows specifying Trimmed.
+func NewTrimmedFileBase(filename string, trimmed bool) *PosBase {
+ base := &PosBase{MakePos(nil, linebase, colbase), filename, linebase, colbase, trimmed}
+ base.pos.base = base
+ return base
+}
+
+// NewLineBase returns a new PosBase for a line directive "line filename:line:col"
+// relative to pos, which is the position of the character immediately following
+// the comment containing the line directive. For a directive in a line comment,
+// that position is the beginning of the next line (i.e., the newline character
+// belongs to the line comment).
+func NewLineBase(pos Pos, filename string, trimmed bool, line, col uint) *PosBase {
+ return &PosBase{pos, filename, sat32(line), sat32(col), trimmed}
+}
+
+func (base *PosBase) IsFileBase() bool {
+ if base == nil {
+ return false
+ }
+ return base.pos.base == base
+}
+
+func (base *PosBase) Pos() (_ Pos) {
+ if base == nil {
+ return
+ }
+ return base.pos
+}
+
+func (base *PosBase) Filename() string {
+ if base == nil {
+ return ""
+ }
+ return base.filename
+}
+
+func (base *PosBase) Line() uint {
+ if base == nil {
+ return 0
+ }
+ return uint(base.line)
+}
+
+func (base *PosBase) Col() uint {
+ if base == nil {
+ return 0
+ }
+ return uint(base.col)
+}
+
+func (base *PosBase) Trimmed() bool {
+ if base == nil {
+ return false
+ }
+ return base.trimmed
+}
+
+func sat32(x uint) uint32 {
+ if x > PosMax {
+ return PosMax
+ }
+ return uint32(x)
+}
diff --git a/src/cmd/compile/internal/syntax/positions.go b/src/cmd/compile/internal/syntax/positions.go
new file mode 100644
index 0000000..9359655
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/positions.go
@@ -0,0 +1,364 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements helper functions for scope position computations.
+
+package syntax
+
+// StartPos returns the start position of n.
+func StartPos(n Node) Pos {
+ // Cases for nodes which don't need a correction are commented out.
+ for m := n; ; {
+ switch n := m.(type) {
+ case nil:
+ panic("nil node")
+
+ // packages
+ case *File:
+ // file block starts at the beginning of the file
+ return MakePos(n.Pos().Base(), 1, 1)
+
+ // declarations
+ // case *ImportDecl:
+ // case *ConstDecl:
+ // case *TypeDecl:
+ // case *VarDecl:
+ // case *FuncDecl:
+
+ // expressions
+ // case *BadExpr:
+ // case *Name:
+ // case *BasicLit:
+ case *CompositeLit:
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ return n.Pos()
+ // case *KeyValueExpr:
+ // case *FuncLit:
+ // case *ParenExpr:
+ case *SelectorExpr:
+ m = n.X
+ case *IndexExpr:
+ m = n.X
+ // case *SliceExpr:
+ case *AssertExpr:
+ m = n.X
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ m = n.Lhs
+ continue
+ }
+ m = n.X
+ case *Operation:
+ if n.Y != nil {
+ m = n.X
+ continue
+ }
+ return n.Pos()
+ case *CallExpr:
+ m = n.Fun
+ case *ListExpr:
+ if len(n.ElemList) > 0 {
+ m = n.ElemList[0]
+ continue
+ }
+ return n.Pos()
+ // types
+ // case *ArrayType:
+ // case *SliceType:
+ // case *DotsType:
+ // case *StructType:
+ // case *Field:
+ // case *InterfaceType:
+ // case *FuncType:
+ // case *MapType:
+ // case *ChanType:
+
+ // statements
+ // case *EmptyStmt:
+ // case *LabeledStmt:
+ // case *BlockStmt:
+ // case *ExprStmt:
+ case *SendStmt:
+ m = n.Chan
+ // case *DeclStmt:
+ case *AssignStmt:
+ m = n.Lhs
+ // case *BranchStmt:
+ // case *CallStmt:
+ // case *ReturnStmt:
+ // case *IfStmt:
+ // case *ForStmt:
+ // case *SwitchStmt:
+ // case *SelectStmt:
+
+ // helper nodes
+ case *RangeClause:
+ if n.Lhs != nil {
+ m = n.Lhs
+ continue
+ }
+ m = n.X
+ // case *CaseClause:
+ // case *CommClause:
+
+ default:
+ return n.Pos()
+ }
+ }
+}
+
+// EndPos returns the approximate end position of n in the source.
+// For some nodes (*Name, *BasicLit) it returns the position immediately
+// following the node; for others (*BlockStmt, *SwitchStmt, etc.) it
+// returns the position of the closing '}'; and for some (*ParenExpr)
+// the returned position is the end position of the last enclosed
+// expression.
+// Thus, EndPos should not be used for exact demarcation of the
+// end of a node in the source; it is mostly useful to determine
+// scope ranges where there is some leeway.
+func EndPos(n Node) Pos {
+ for m := n; ; {
+ switch n := m.(type) {
+ case nil:
+ panic("nil node")
+
+ // packages
+ case *File:
+ return n.EOF
+
+ // declarations
+ case *ImportDecl:
+ m = n.Path
+ case *ConstDecl:
+ if n.Values != nil {
+ m = n.Values
+ continue
+ }
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ if l := len(n.NameList); l > 0 {
+ m = n.NameList[l-1]
+ continue
+ }
+ return n.Pos()
+ case *TypeDecl:
+ m = n.Type
+ case *VarDecl:
+ if n.Values != nil {
+ m = n.Values
+ continue
+ }
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ if l := len(n.NameList); l > 0 {
+ m = n.NameList[l-1]
+ continue
+ }
+ return n.Pos()
+ case *FuncDecl:
+ if n.Body != nil {
+ m = n.Body
+ continue
+ }
+ m = n.Type
+
+ // expressions
+ case *BadExpr:
+ return n.Pos()
+ case *Name:
+ p := n.Pos()
+ return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value)))
+ case *BasicLit:
+ p := n.Pos()
+ return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value)))
+ case *CompositeLit:
+ return n.Rbrace
+ case *KeyValueExpr:
+ m = n.Value
+ case *FuncLit:
+ m = n.Body
+ case *ParenExpr:
+ m = n.X
+ case *SelectorExpr:
+ m = n.Sel
+ case *IndexExpr:
+ m = n.Index
+ case *SliceExpr:
+ for i := len(n.Index) - 1; i >= 0; i-- {
+ if x := n.Index[i]; x != nil {
+ m = x
+ continue
+ }
+ }
+ m = n.X
+ case *AssertExpr:
+ m = n.Type
+ case *TypeSwitchGuard:
+ m = n.X
+ case *Operation:
+ if n.Y != nil {
+ m = n.Y
+ continue
+ }
+ m = n.X
+ case *CallExpr:
+ if l := lastExpr(n.ArgList); l != nil {
+ m = l
+ continue
+ }
+ m = n.Fun
+ case *ListExpr:
+ if l := lastExpr(n.ElemList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+
+ // types
+ case *ArrayType:
+ m = n.Elem
+ case *SliceType:
+ m = n.Elem
+ case *DotsType:
+ m = n.Elem
+ case *StructType:
+ if l := lastField(n.FieldList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ // TODO(gri) need to take TagList into account
+ case *Field:
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ m = n.Name
+ case *InterfaceType:
+ if l := lastField(n.MethodList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ case *FuncType:
+ if l := lastField(n.ResultList); l != nil {
+ m = l
+ continue
+ }
+ if l := lastField(n.ParamList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ case *MapType:
+ m = n.Value
+ case *ChanType:
+ m = n.Elem
+
+ // statements
+ case *EmptyStmt:
+ return n.Pos()
+ case *LabeledStmt:
+ m = n.Stmt
+ case *BlockStmt:
+ return n.Rbrace
+ case *ExprStmt:
+ m = n.X
+ case *SendStmt:
+ m = n.Value
+ case *DeclStmt:
+ if l := lastDecl(n.DeclList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ case *AssignStmt:
+ m = n.Rhs
+ if m == nil {
+ p := EndPos(n.Lhs)
+ return MakePos(p.Base(), p.Line(), p.Col()+2)
+ }
+ case *BranchStmt:
+ if n.Label != nil {
+ m = n.Label
+ continue
+ }
+ return n.Pos()
+ case *CallStmt:
+ m = n.Call
+ case *ReturnStmt:
+ if n.Results != nil {
+ m = n.Results
+ continue
+ }
+ return n.Pos()
+ case *IfStmt:
+ if n.Else != nil {
+ m = n.Else
+ continue
+ }
+ m = n.Then
+ case *ForStmt:
+ m = n.Body
+ case *SwitchStmt:
+ return n.Rbrace
+ case *SelectStmt:
+ return n.Rbrace
+
+ // helper nodes
+ case *RangeClause:
+ m = n.X
+ case *CaseClause:
+ if l := lastStmt(n.Body); l != nil {
+ m = l
+ continue
+ }
+ return n.Colon
+ case *CommClause:
+ if l := lastStmt(n.Body); l != nil {
+ m = l
+ continue
+ }
+ return n.Colon
+
+ default:
+ return n.Pos()
+ }
+ }
+}
+
+func lastDecl(list []Decl) Decl {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
+
+func lastExpr(list []Expr) Expr {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
+
+func lastStmt(list []Stmt) Stmt {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
+
+func lastField(list []*Field) *Field {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
new file mode 100644
index 0000000..9f20db5
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -0,0 +1,1020 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of syntax trees in source format.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Form controls print formatting.
+type Form uint
+
+const (
+ _ Form = iota // default
+ LineForm // use spaces instead of linebreaks where possible
+ ShortForm // like LineForm but print "…" for non-empty function or composite literal bodies
+)
+
+// Fprint prints node x to w in the specified form.
+// It returns the number of bytes written, and whether there was an error.
+func Fprint(w io.Writer, x Node, form Form) (n int, err error) {
+ p := printer{
+ output: w,
+ form: form,
+ linebreaks: form == 0,
+ }
+
+ defer func() {
+ n = p.written
+ if e := recover(); e != nil {
+ err = e.(writeError).err // re-panics if it's not a writeError
+ }
+ }()
+
+ p.print(x)
+ p.flush(_EOF)
+
+ return
+}
+
+// String is a convenience function that prints n in ShortForm
+// and returns the printed string.
+func String(n Node) string {
+ var buf strings.Builder
+ _, err := Fprint(&buf, n, ShortForm)
+ if err != nil {
+ fmt.Fprintf(&buf, "<<< ERROR: %s", err)
+ }
+ return buf.String()
+}
+
+type ctrlSymbol int
+
+const (
+ none ctrlSymbol = iota
+ semi
+ blank
+ newline
+ indent
+ outdent
+ // comment
+ // eolComment
+)
+
+type whitespace struct {
+ last token
+ kind ctrlSymbol
+ //text string // comment text (possibly ""); valid if kind == comment
+}
+
+type printer struct {
+ output io.Writer
+ written int // number of bytes written
+ form Form
+ linebreaks bool // print linebreaks instead of semis
+
+ indent int // current indentation level
+ nlcount int // number of consecutive newlines
+
+ pending []whitespace // pending whitespace
+ lastTok token // last token (after any pending semi) processed by print
+}
+
+// write is a thin wrapper around p.output.Write
+// that takes care of accounting and error handling.
+func (p *printer) write(data []byte) {
+ n, err := p.output.Write(data)
+ p.written += n
+ if err != nil {
+ panic(writeError{err})
+ }
+}
+
+var (
+ tabBytes = []byte("\t\t\t\t\t\t\t\t")
+ newlineByte = []byte("\n")
+ blankByte = []byte(" ")
+)
+
+func (p *printer) writeBytes(data []byte) {
+ if len(data) == 0 {
+ panic("expected non-empty []byte")
+ }
+ if p.nlcount > 0 && p.indent > 0 {
+ // write indentation
+ n := p.indent
+ for n > len(tabBytes) {
+ p.write(tabBytes)
+ n -= len(tabBytes)
+ }
+ p.write(tabBytes[:n])
+ }
+ p.write(data)
+ p.nlcount = 0
+}
+
+func (p *printer) writeString(s string) {
+ p.writeBytes([]byte(s))
+}
+
+// If impliesSemi returns true for a non-blank line's final token tok,
+// a semicolon is automatically inserted. Vice versa, a semicolon may
+// be omitted in those cases.
+func impliesSemi(tok token) bool {
+ switch tok {
+ case _Name,
+ _Break, _Continue, _Fallthrough, _Return,
+ /*_Inc, _Dec,*/ _Rparen, _Rbrack, _Rbrace: // TODO(gri) fix this
+ return true
+ }
+ return false
+}
+
+// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion
+
+func lineComment(text string) bool {
+ return strings.HasPrefix(text, "//")
+}
+
+func (p *printer) addWhitespace(kind ctrlSymbol, text string) {
+ p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/})
+ switch kind {
+ case semi:
+ p.lastTok = _Semi
+ case newline:
+ p.lastTok = 0
+ // TODO(gri) do we need to handle /*-style comments containing newlines here?
+ }
+}
+
+func (p *printer) flush(next token) {
+ // eliminate semis and redundant whitespace
+ sawNewline := next == _EOF
+ sawParen := next == _Rparen || next == _Rbrace
+ for i := len(p.pending) - 1; i >= 0; i-- {
+ switch p.pending[i].kind {
+ case semi:
+ k := semi
+ if sawParen {
+ sawParen = false
+ k = none // eliminate semi
+ } else if sawNewline && impliesSemi(p.pending[i].last) {
+ sawNewline = false
+ k = none // eliminate semi
+ }
+ p.pending[i].kind = k
+ case newline:
+ sawNewline = true
+ case blank, indent, outdent:
+ // nothing to do
+ // case comment:
+ // // A multi-line comment acts like a newline; and a ""
+ // // comment implies by definition at least one newline.
+ // if text := p.pending[i].text; strings.HasPrefix(text, "/*") && strings.ContainsRune(text, '\n') {
+ // sawNewline = true
+ // }
+ // case eolComment:
+ // // TODO(gri) act depending on sawNewline
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // print pending
+ prev := none
+ for i := range p.pending {
+ switch p.pending[i].kind {
+ case none:
+ // nothing to do
+ case semi:
+ p.writeString(";")
+ p.nlcount = 0
+ prev = semi
+ case blank:
+ if prev != blank {
+ // at most one blank
+ p.writeBytes(blankByte)
+ p.nlcount = 0
+ prev = blank
+ }
+ case newline:
+ const maxEmptyLines = 1
+ if p.nlcount <= maxEmptyLines {
+ p.write(newlineByte)
+ p.nlcount++
+ prev = newline
+ }
+ case indent:
+ p.indent++
+ case outdent:
+ p.indent--
+ if p.indent < 0 {
+ panic("negative indentation")
+ }
+ // case comment:
+ // if text := p.pending[i].text; text != "" {
+ // p.writeString(text)
+ // p.nlcount = 0
+ // prev = comment
+ // }
+ // // TODO(gri) should check that line comments are always followed by newline
+ default:
+ panic("unreachable")
+ }
+ }
+
+ p.pending = p.pending[:0] // re-use underlying array
+}
+
+func mayCombine(prev token, next byte) (b bool) {
+ return // for now
+ // switch prev {
+ // case lexical.Int:
+ // b = next == '.' // 1.
+ // case lexical.Add:
+ // b = next == '+' // ++
+ // case lexical.Sub:
+ // b = next == '-' // --
+ // case lexical.Quo:
+ // b = next == '*' // /*
+ // case lexical.Lss:
+ // b = next == '-' || next == '<' // <- or <<
+ // case lexical.And:
+ // b = next == '&' || next == '^' // && or &^
+ // }
+ // return
+}
+
+func (p *printer) print(args ...interface{}) {
+ for i := 0; i < len(args); i++ {
+ switch x := args[i].(type) {
+ case nil:
+ // we should not reach here but don't crash
+
+ case Node:
+ p.printNode(x)
+
+ case token:
+ // _Name implies an immediately following string
+ // argument which is the actual value to print.
+ var s string
+ if x == _Name {
+ i++
+ if i >= len(args) {
+ panic("missing string argument after _Name")
+ }
+ s = args[i].(string)
+ } else {
+ s = x.String()
+ }
+
+ // TODO(gri) This check seems at the wrong place since it doesn't
+ // take into account pending white space.
+ if mayCombine(p.lastTok, s[0]) {
+ panic("adjacent tokens combine without whitespace")
+ }
+
+ if x == _Semi {
+ // delay printing of semi
+ p.addWhitespace(semi, "")
+ } else {
+ p.flush(x)
+ p.writeString(s)
+ p.nlcount = 0
+ p.lastTok = x
+ }
+
+ case Operator:
+ if x != 0 {
+ p.flush(_Operator)
+ p.writeString(x.String())
+ }
+
+ case ctrlSymbol:
+ switch x {
+ case none, semi /*, comment*/ :
+ panic("unreachable")
+ case newline:
+ // TODO(gri) need to handle mandatory newlines after a //-style comment
+ if !p.linebreaks {
+ x = blank
+ }
+ }
+ p.addWhitespace(x, "")
+
+ // case *Comment: // comments are not Nodes
+ // p.addWhitespace(comment, x.Text)
+
+ default:
+ panic(fmt.Sprintf("unexpected argument %v (%T)", x, x))
+ }
+ }
+}
+
+func (p *printer) printNode(n Node) {
+ // ncom := *n.Comments()
+ // if ncom != nil {
+ // // TODO(gri) in general we cannot make assumptions about whether
+ // // a comment is a /*- or a //-style comment since the syntax
+ // // tree may have been manipulated. Need to make sure the correct
+ // // whitespace is emitted.
+ // for _, c := range ncom.Alone {
+ // p.print(c, newline)
+ // }
+ // for _, c := range ncom.Before {
+ // if c.Text == "" || lineComment(c.Text) {
+ // panic("unexpected empty line or //-style 'before' comment")
+ // }
+ // p.print(c, blank)
+ // }
+ // }
+
+ p.printRawNode(n)
+
+ // if ncom != nil && len(ncom.After) > 0 {
+ // for i, c := range ncom.After {
+ // if i+1 < len(ncom.After) {
+ // if c.Text == "" || lineComment(c.Text) {
+ // panic("unexpected empty line or //-style non-final 'after' comment")
+ // }
+ // }
+ // p.print(blank, c)
+ // }
+ // //p.print(newline)
+ // }
+}
+
+func (p *printer) printRawNode(n Node) {
+ switch n := n.(type) {
+ case nil:
+ // we should not reach here but don't crash
+
+ // expressions and types
+ case *BadExpr:
+ p.print(_Name, "<bad expr>")
+
+ case *Name:
+ p.print(_Name, n.Value) // _Name requires actual value following immediately
+
+ case *BasicLit:
+ p.print(_Name, n.Value) // _Name requires actual value following immediately
+
+ case *FuncLit:
+ p.print(n.Type, blank)
+ if n.Body != nil {
+ if p.form == ShortForm {
+ p.print(_Lbrace)
+ if len(n.Body.List) > 0 {
+ p.print(_Name, "…")
+ }
+ p.print(_Rbrace)
+ } else {
+ p.print(n.Body)
+ }
+ }
+
+ case *CompositeLit:
+ if n.Type != nil {
+ p.print(n.Type)
+ }
+ p.print(_Lbrace)
+ if p.form == ShortForm {
+ if len(n.ElemList) > 0 {
+ p.print(_Name, "…")
+ }
+ } else {
+ if n.NKeys > 0 && n.NKeys == len(n.ElemList) {
+ p.printExprLines(n.ElemList)
+ } else {
+ p.printExprList(n.ElemList)
+ }
+ }
+ p.print(_Rbrace)
+
+ case *ParenExpr:
+ p.print(_Lparen, n.X, _Rparen)
+
+ case *SelectorExpr:
+ p.print(n.X, _Dot, n.Sel)
+
+ case *IndexExpr:
+ p.print(n.X, _Lbrack, n.Index, _Rbrack)
+
+ case *SliceExpr:
+ p.print(n.X, _Lbrack)
+ if i := n.Index[0]; i != nil {
+ p.printNode(i)
+ }
+ p.print(_Colon)
+ if j := n.Index[1]; j != nil {
+ p.printNode(j)
+ }
+ if k := n.Index[2]; k != nil {
+ p.print(_Colon, k)
+ }
+ p.print(_Rbrack)
+
+ case *AssertExpr:
+ p.print(n.X, _Dot, _Lparen, n.Type, _Rparen)
+
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ p.print(n.Lhs, blank, _Define, blank)
+ }
+ p.print(n.X, _Dot, _Lparen, _Type, _Rparen)
+
+ case *CallExpr:
+ p.print(n.Fun, _Lparen)
+ p.printExprList(n.ArgList)
+ if n.HasDots {
+ p.print(_DotDotDot)
+ }
+ p.print(_Rparen)
+
+ case *Operation:
+ if n.Y == nil {
+ // unary expr
+ p.print(n.Op)
+ // if n.Op == lexical.Range {
+ // p.print(blank)
+ // }
+ p.print(n.X)
+ } else {
+ // binary expr
+ // TODO(gri) eventually take precedence into account
+ // to control possibly missing parentheses
+ p.print(n.X, blank, n.Op, blank, n.Y)
+ }
+
+ case *KeyValueExpr:
+ p.print(n.Key, _Colon, blank, n.Value)
+
+ case *ListExpr:
+ p.printExprList(n.ElemList)
+
+ case *ArrayType:
+ var len interface{} = _DotDotDot
+ if n.Len != nil {
+ len = n.Len
+ }
+ p.print(_Lbrack, len, _Rbrack, n.Elem)
+
+ case *SliceType:
+ p.print(_Lbrack, _Rbrack, n.Elem)
+
+ case *DotsType:
+ p.print(_DotDotDot, n.Elem)
+
+ case *StructType:
+ p.print(_Struct)
+ if len(n.FieldList) > 0 && p.linebreaks {
+ p.print(blank)
+ }
+ p.print(_Lbrace)
+ if len(n.FieldList) > 0 {
+ if p.linebreaks {
+ p.print(newline, indent)
+ p.printFieldList(n.FieldList, n.TagList, _Semi)
+ p.print(outdent, newline)
+ } else {
+ p.printFieldList(n.FieldList, n.TagList, _Semi)
+ }
+ }
+ p.print(_Rbrace)
+
+ case *FuncType:
+ p.print(_Func)
+ p.printSignature(n)
+
+ case *InterfaceType:
+ p.print(_Interface)
+ if p.linebreaks && len(n.MethodList) > 1 {
+ p.print(blank)
+ p.print(_Lbrace)
+ p.print(newline, indent)
+ p.printMethodList(n.MethodList)
+ p.print(outdent, newline)
+ } else {
+ p.print(_Lbrace)
+ p.printMethodList(n.MethodList)
+ }
+ p.print(_Rbrace)
+
+ case *MapType:
+ p.print(_Map, _Lbrack, n.Key, _Rbrack, n.Value)
+
+ case *ChanType:
+ if n.Dir == RecvOnly {
+ p.print(_Arrow)
+ }
+ p.print(_Chan)
+ if n.Dir == SendOnly {
+ p.print(_Arrow)
+ }
+ p.print(blank)
+ if e, _ := n.Elem.(*ChanType); n.Dir == 0 && e != nil && e.Dir == RecvOnly {
+ // don't print chan (<-chan T) as chan <-chan T
+ p.print(_Lparen)
+ p.print(n.Elem)
+ p.print(_Rparen)
+ } else {
+ p.print(n.Elem)
+ }
+
+ // statements
+ case *DeclStmt:
+ p.printDecl(n.DeclList)
+
+ case *EmptyStmt:
+ // nothing to print
+
+ case *LabeledStmt:
+ p.print(outdent, n.Label, _Colon, indent, newline, n.Stmt)
+
+ case *ExprStmt:
+ p.print(n.X)
+
+ case *SendStmt:
+ p.print(n.Chan, blank, _Arrow, blank, n.Value)
+
+ case *AssignStmt:
+ p.print(n.Lhs)
+ if n.Rhs == nil {
+ // TODO(gri) This is going to break the mayCombine
+ // check once we enable that again.
+ p.print(n.Op, n.Op) // ++ or --
+ } else {
+ p.print(blank, n.Op, _Assign, blank)
+ p.print(n.Rhs)
+ }
+
+ case *CallStmt:
+ p.print(n.Tok, blank, n.Call)
+
+ case *ReturnStmt:
+ p.print(_Return)
+ if n.Results != nil {
+ p.print(blank, n.Results)
+ }
+
+ case *BranchStmt:
+ p.print(n.Tok)
+ if n.Label != nil {
+ p.print(blank, n.Label)
+ }
+
+ case *BlockStmt:
+ p.print(_Lbrace)
+ if len(n.List) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(n.List, true)
+ p.print(outdent, newline)
+ }
+ p.print(_Rbrace)
+
+ case *IfStmt:
+ p.print(_If, blank)
+ if n.Init != nil {
+ p.print(n.Init, _Semi, blank)
+ }
+ p.print(n.Cond, blank, n.Then)
+ if n.Else != nil {
+ p.print(blank, _Else, blank, n.Else)
+ }
+
+ case *SwitchStmt:
+ p.print(_Switch, blank)
+ if n.Init != nil {
+ p.print(n.Init, _Semi, blank)
+ }
+ if n.Tag != nil {
+ p.print(n.Tag, blank)
+ }
+ p.printSwitchBody(n.Body)
+
+ case *SelectStmt:
+ p.print(_Select, blank) // for now
+ p.printSelectBody(n.Body)
+
+ case *RangeClause:
+ if n.Lhs != nil {
+ tok := _Assign
+ if n.Def {
+ tok = _Define
+ }
+ p.print(n.Lhs, blank, tok, blank)
+ }
+ p.print(_Range, blank, n.X)
+
+ case *ForStmt:
+ p.print(_For, blank)
+ if n.Init == nil && n.Post == nil {
+ if n.Cond != nil {
+ p.print(n.Cond, blank)
+ }
+ } else {
+ if n.Init != nil {
+ p.print(n.Init)
+ // TODO(gri) clean this up
+ if _, ok := n.Init.(*RangeClause); ok {
+ p.print(blank, n.Body)
+ break
+ }
+ }
+ p.print(_Semi, blank)
+ if n.Cond != nil {
+ p.print(n.Cond)
+ }
+ p.print(_Semi, blank)
+ if n.Post != nil {
+ p.print(n.Post, blank)
+ }
+ }
+ p.print(n.Body)
+
+ case *ImportDecl:
+ if n.Group == nil {
+ p.print(_Import, blank)
+ }
+ if n.LocalPkgName != nil {
+ p.print(n.LocalPkgName, blank)
+ }
+ p.print(n.Path)
+
+ case *ConstDecl:
+ if n.Group == nil {
+ p.print(_Const, blank)
+ }
+ p.printNameList(n.NameList)
+ if n.Type != nil {
+ p.print(blank, n.Type)
+ }
+ if n.Values != nil {
+ p.print(blank, _Assign, blank, n.Values)
+ }
+
+ case *TypeDecl:
+ if n.Group == nil {
+ p.print(_Type, blank)
+ }
+ p.print(n.Name)
+ if n.TParamList != nil {
+ p.printParameterList(n.TParamList, _Type)
+ }
+ p.print(blank)
+ if n.Alias {
+ p.print(_Assign, blank)
+ }
+ p.print(n.Type)
+
+ case *VarDecl:
+ if n.Group == nil {
+ p.print(_Var, blank)
+ }
+ p.printNameList(n.NameList)
+ if n.Type != nil {
+ p.print(blank, n.Type)
+ }
+ if n.Values != nil {
+ p.print(blank, _Assign, blank, n.Values)
+ }
+
+ case *FuncDecl:
+ p.print(_Func, blank)
+ if r := n.Recv; r != nil {
+ p.print(_Lparen)
+ if r.Name != nil {
+ p.print(r.Name, blank)
+ }
+ p.printNode(r.Type)
+ p.print(_Rparen, blank)
+ }
+ p.print(n.Name)
+ if n.TParamList != nil {
+ p.printParameterList(n.TParamList, _Func)
+ }
+ p.printSignature(n.Type)
+ if n.Body != nil {
+ p.print(blank, n.Body)
+ }
+
+ case *printGroup:
+ p.print(n.Tok, blank, _Lparen)
+ if len(n.Decls) > 0 {
+ p.print(newline, indent)
+ for _, d := range n.Decls {
+ p.printNode(d)
+ p.print(_Semi, newline)
+ }
+ p.print(outdent)
+ }
+ p.print(_Rparen)
+
+ // files
+ case *File:
+ p.print(_Package, blank, n.PkgName)
+ if len(n.DeclList) > 0 {
+ p.print(_Semi, newline, newline)
+ p.printDeclList(n.DeclList)
+ }
+
+ default:
+ panic(fmt.Sprintf("syntax.Iterate: unexpected node type %T", n))
+ }
+}
+
+func (p *printer) printFields(fields []*Field, tags []*BasicLit, i, j int) {
+ if i+1 == j && fields[i].Name == nil {
+ // anonymous field
+ p.printNode(fields[i].Type)
+ } else {
+ for k, f := range fields[i:j] {
+ if k > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(f.Name)
+ }
+ p.print(blank)
+ p.printNode(fields[i].Type)
+ }
+ if i < len(tags) && tags[i] != nil {
+ p.print(blank)
+ p.printNode(tags[i])
+ }
+}
+
+func (p *printer) printFieldList(fields []*Field, tags []*BasicLit, sep token) {
+ i0 := 0
+ var typ Expr
+ for i, f := range fields {
+ if f.Name == nil || f.Type != typ {
+ if i0 < i {
+ p.printFields(fields, tags, i0, i)
+ p.print(sep, newline)
+ i0 = i
+ }
+ typ = f.Type
+ }
+ }
+ p.printFields(fields, tags, i0, len(fields))
+}
+
+func (p *printer) printMethodList(methods []*Field) {
+ for i, m := range methods {
+ if i > 0 {
+ p.print(_Semi, newline)
+ }
+ if m.Name != nil {
+ p.printNode(m.Name)
+ p.printSignature(m.Type.(*FuncType))
+ } else {
+ p.printNode(m.Type)
+ }
+ }
+}
+
+func (p *printer) printNameList(list []*Name) {
+ for i, x := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(x)
+ }
+}
+
+func (p *printer) printExprList(list []Expr) {
+ for i, x := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(x)
+ }
+}
+
+func (p *printer) printExprLines(list []Expr) {
+ if len(list) > 0 {
+ p.print(newline, indent)
+ for _, x := range list {
+ p.print(x, _Comma, newline)
+ }
+ p.print(outdent)
+ }
+}
+
+func groupFor(d Decl) (token, *Group) {
+ switch d := d.(type) {
+ case *ImportDecl:
+ return _Import, d.Group
+ case *ConstDecl:
+ return _Const, d.Group
+ case *TypeDecl:
+ return _Type, d.Group
+ case *VarDecl:
+ return _Var, d.Group
+ case *FuncDecl:
+ return _Func, nil
+ default:
+ panic("unreachable")
+ }
+}
+
+type printGroup struct {
+ node
+ Tok token
+ Decls []Decl
+}
+
+func (p *printer) printDecl(list []Decl) {
+ tok, group := groupFor(list[0])
+
+ if group == nil {
+ if len(list) != 1 {
+ panic("unreachable")
+ }
+ p.printNode(list[0])
+ return
+ }
+
+ // if _, ok := list[0].(*EmptyDecl); ok {
+ // if len(list) != 1 {
+ // panic("unreachable")
+ // }
+ // // TODO(gri) if there are comments inside the empty
+ // // group, we may need to keep the list non-nil
+ // list = nil
+ // }
+
+ // printGroup is here for consistent comment handling
+ // (this is not yet used)
+ var pg printGroup
+ // *pg.Comments() = *group.Comments()
+ pg.Tok = tok
+ pg.Decls = list
+ p.printNode(&pg)
+}
+
+func (p *printer) printDeclList(list []Decl) {
+ i0 := 0
+ var tok token
+ var group *Group
+ for i, x := range list {
+ if s, g := groupFor(x); g == nil || g != group {
+ if i0 < i {
+ p.printDecl(list[i0:i])
+ p.print(_Semi, newline)
+ // print empty line between different declaration groups,
+ // different kinds of declarations, or between functions
+ if g != group || s != tok || s == _Func {
+ p.print(newline)
+ }
+ i0 = i
+ }
+ tok, group = s, g
+ }
+ }
+ p.printDecl(list[i0:])
+}
+
+func (p *printer) printSignature(sig *FuncType) {
+ p.printParameterList(sig.ParamList, 0)
+ if list := sig.ResultList; list != nil {
+ p.print(blank)
+ if len(list) == 1 && list[0].Name == nil {
+ p.printNode(list[0].Type)
+ } else {
+ p.printParameterList(list, 0)
+ }
+ }
+}
+
+// If tok != 0 print a type parameter list: tok == _Type means
+// a type parameter list for a type, tok == _Func means a type
+// parameter list for a func.
+func (p *printer) printParameterList(list []*Field, tok token) {
+ open, close := _Lparen, _Rparen
+ if tok != 0 {
+ open, close = _Lbrack, _Rbrack
+ }
+ p.print(open)
+ for i, f := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ if f.Name != nil {
+ p.printNode(f.Name)
+ if i+1 < len(list) {
+ f1 := list[i+1]
+ if f1.Name != nil && f1.Type == f.Type {
+ continue // no need to print type
+ }
+ }
+ p.print(blank)
+ }
+ p.printNode(Unparen(f.Type)) // no need for (extra) parentheses around parameter types
+ }
+ // A type parameter list [P T] where the name P and the type expression T syntactically
+ // combine to another valid (value) expression requires a trailing comma, as in [P *T,]
+ // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list
+ // is not parsed as an array length [P*T].
+ if tok == _Type && len(list) == 1 && combinesWithName(list[0].Type) {
+ p.print(_Comma)
+ }
+ p.print(close)
+}
+
+// combinesWithName reports whether a name followed by the expression x
+// syntactically combines to another valid (value) expression. For instance
+// using *T for x, "name *T" syntactically appears as the expression x*T.
+// On the other hand, using P|Q or *P|~Q for x, "name P|Q" or name *P|~Q"
+// cannot be combined into a valid (value) expression.
+func combinesWithName(x Expr) bool {
+ switch x := x.(type) {
+ case *Operation:
+ if x.Y == nil {
+ // name *x.X combines to name*x.X if x.X is not a type element
+ return x.Op == Mul && !isTypeElem(x.X)
+ }
+ // binary expressions
+ return combinesWithName(x.X) && !isTypeElem(x.Y)
+ case *ParenExpr:
+ // name(x) combines but we are making sure at
+ // the call site that x is never parenthesized.
+ panic("unexpected parenthesized expression")
+ }
+ return false
+}
+
+func (p *printer) printStmtList(list []Stmt, braces bool) {
+ for i, x := range list {
+ p.print(x, _Semi)
+ if i+1 < len(list) {
+ p.print(newline)
+ } else if braces {
+ // Print an extra semicolon if the last statement is
+ // an empty statement and we are in a braced block
+ // because one semicolon is automatically removed.
+ if _, ok := x.(*EmptyStmt); ok {
+ p.print(x, _Semi)
+ }
+ }
+ }
+}
+
+func (p *printer) printSwitchBody(list []*CaseClause) {
+ p.print(_Lbrace)
+ if len(list) > 0 {
+ p.print(newline)
+ for i, c := range list {
+ p.printCaseClause(c, i+1 == len(list))
+ p.print(newline)
+ }
+ }
+ p.print(_Rbrace)
+}
+
+func (p *printer) printSelectBody(list []*CommClause) {
+ p.print(_Lbrace)
+ if len(list) > 0 {
+ p.print(newline)
+ for i, c := range list {
+ p.printCommClause(c, i+1 == len(list))
+ p.print(newline)
+ }
+ }
+ p.print(_Rbrace)
+}
+
+func (p *printer) printCaseClause(c *CaseClause, braces bool) {
+ if c.Cases != nil {
+ p.print(_Case, blank, c.Cases)
+ } else {
+ p.print(_Default)
+ }
+ p.print(_Colon)
+ if len(c.Body) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(c.Body, braces)
+ p.print(outdent)
+ }
+}
+
+func (p *printer) printCommClause(c *CommClause, braces bool) {
+ if c.Comm != nil {
+ p.print(_Case, blank)
+ p.print(c.Comm)
+ } else {
+ p.print(_Default)
+ }
+ p.print(_Colon)
+ if len(c.Body) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(c.Body, braces)
+ p.print(outdent)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
new file mode 100644
index 0000000..99baf7f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -0,0 +1,285 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestPrint(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ ast, _ := ParseFile(*src_, func(err error) { t.Error(err) }, nil, 0)
+
+ if ast != nil {
+ Fprint(testOut(), ast, LineForm)
+ fmt.Println()
+ }
+}
+
+type shortBuffer struct {
+ buf []byte
+}
+
+func (w *shortBuffer) Write(data []byte) (n int, err error) {
+ w.buf = append(w.buf, data...)
+ n = len(data)
+ if len(w.buf) > 10 {
+ err = io.ErrShortBuffer
+ }
+ return
+}
+
+func TestPrintError(t *testing.T) {
+ const src = "package p; var x int"
+ ast, err := Parse(nil, strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf shortBuffer
+ _, err = Fprint(&buf, ast, 0)
+ if err == nil || err != io.ErrShortBuffer {
+ t.Errorf("got err = %s, want %s", err, io.ErrShortBuffer)
+ }
+}
+
+var stringTests = [][2]string{
+ dup("package p"),
+ dup("package p; type _ int; type T1 = struct{}; type ( _ *struct{}; T2 = float32 )"),
+
+ // generic type declarations (given type separated with blank from LHS)
+ dup("package p; type _[T any] struct{}"),
+ dup("package p; type _[A, B, C interface{m()}] struct{}"),
+ dup("package p; type _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}] struct{}"),
+
+ dup("package p; type _[P *struct{}] struct{}"),
+ dup("package p; type _[P *T,] struct{}"),
+ dup("package p; type _[P *T, _ any] struct{}"),
+ {"package p; type _[P (*T),] struct{}", "package p; type _[P *T,] struct{}"},
+ {"package p; type _[P (*T), _ any] struct{}", "package p; type _[P *T, _ any] struct{}"},
+ {"package p; type _[P (T),] struct{}", "package p; type _[P T] struct{}"},
+ {"package p; type _[P (T), _ any] struct{}", "package p; type _[P T, _ any] struct{}"},
+
+ {"package p; type _[P (*struct{})] struct{}", "package p; type _[P *struct{}] struct{}"},
+ {"package p; type _[P ([]int)] struct{}", "package p; type _[P []int] struct{}"},
+ {"package p; type _[P ([]int) | int] struct{}", "package p; type _[P []int | int] struct{}"},
+
+ // a type literal in an |-expression indicates a type parameter list (blank after type parameter list and type)
+ dup("package p; type _[P *[]int] struct{}"),
+ dup("package p; type _[P T | T] struct{}"),
+ dup("package p; type _[P T | T | T | T] struct{}"),
+ dup("package p; type _[P *T | T, Q T] struct{}"),
+ dup("package p; type _[P *[]T | T] struct{}"),
+ dup("package p; type _[P *T | T | T | T | ~T] struct{}"),
+ dup("package p; type _[P *T | T | T | ~T | T] struct{}"),
+ dup("package p; type _[P *T | T | struct{} | T] struct{}"),
+ dup("package p; type _[P <-chan int] struct{}"),
+ dup("package p; type _[P *T | struct{} | T] struct{}"),
+
+ // a trailing comma always indicates a (possibly invalid) type parameter list (blank after type parameter list and type)
+ dup("package p; type _[P *T,] struct{}"),
+ dup("package p; type _[P *T | T,] struct{}"),
+ dup("package p; type _[P *T | <-T | T,] struct{}"),
+
+ // slice/array type declarations (no blank between array length and element type)
+ dup("package p; type _ []byte"),
+ dup("package p; type _ [n]byte"),
+ dup("package p; type _ [P(T)]byte"),
+ dup("package p; type _ [P((T))]byte"),
+ dup("package p; type _ [P * *T]byte"),
+ dup("package p; type _ [P * T]byte"),
+ dup("package p; type _ [P(*T)]byte"),
+ dup("package p; type _ [P(**T)]byte"),
+ dup("package p; type _ [P * T - T]byte"),
+ dup("package p; type _ [P * T - T]byte"),
+ dup("package p; type _ [P * T | T]byte"),
+ dup("package p; type _ [P * T | <-T | T]byte"),
+
+ // generic function declarations
+ dup("package p; func _[T any]()"),
+ dup("package p; func _[A, B, C interface{m()}]()"),
+ dup("package p; func _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}]()"),
+
+ // generic functions with elided interfaces in type constraints
+ dup("package p; func _[P *T]() {}"),
+ dup("package p; func _[P *T | T | T | T | ~T]() {}"),
+ dup("package p; func _[P *T | T | struct{} | T]() {}"),
+ dup("package p; func _[P ~int, Q int | string]() {}"),
+ dup("package p; func _[P struct{f int}, Q *P]() {}"),
+
+ // methods with generic receiver types
+ dup("package p; func (R[T]) _()"),
+ dup("package p; func (*R[A, B, C]) _()"),
+ dup("package p; func (_ *R[A, B, C]) _()"),
+
+ // channels
+ dup("package p; type _ chan chan int"),
+ dup("package p; type _ chan (<-chan int)"),
+ dup("package p; type _ chan chan<- int"),
+
+ dup("package p; type _ <-chan chan int"),
+ dup("package p; type _ <-chan <-chan int"),
+ dup("package p; type _ <-chan chan<- int"),
+
+ dup("package p; type _ chan<- chan int"),
+ dup("package p; type _ chan<- <-chan int"),
+ dup("package p; type _ chan<- chan<- int"),
+
+ // TODO(gri) expand
+}
+
+func TestPrintString(t *testing.T) {
+ for _, test := range stringTests {
+ ast, err := Parse(nil, strings.NewReader(test[0]), nil, nil, 0)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if got := String(ast); got != test[1] {
+ t.Errorf("%q: got %q", test[1], got)
+ }
+ }
+}
+
+func testOut() io.Writer {
+ if testing.Verbose() {
+ return os.Stdout
+ }
+ return io.Discard
+}
+
+func dup(s string) [2]string { return [2]string{s, s} }
+
+var exprTests = [][2]string{
+ // basic type literals
+ dup("x"),
+ dup("true"),
+ dup("42"),
+ dup("3.1415"),
+ dup("2.71828i"),
+ dup(`'a'`),
+ dup(`"foo"`),
+ dup("`bar`"),
+ dup("any"),
+
+ // func and composite literals
+ dup("func() {}"),
+ dup("[]int{}"),
+ {"func(x int) complex128 { return 0 }", "func(x int) complex128 {…}"},
+ {"[]int{1, 2, 3}", "[]int{…}"},
+
+ // type expressions
+ dup("[1 << 10]byte"),
+ dup("[]int"),
+ dup("*int"),
+ dup("struct{x int}"),
+ dup("func()"),
+ dup("func(int, float32) string"),
+ dup("interface{m()}"),
+ dup("interface{m() string; n(x int)}"),
+ dup("interface{~int}"),
+ dup("interface{~int | ~float64 | ~string}"),
+ dup("interface{~int; m()}"),
+ dup("interface{~int | ~float64 | ~string; m() string; n(x int)}"),
+ dup("map[string]int"),
+ dup("chan E"),
+ dup("<-chan E"),
+ dup("chan<- E"),
+
+ // new interfaces
+ dup("interface{int}"),
+ dup("interface{~int}"),
+
+ // generic constraints
+ dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
+ dup("interface{int | string}"),
+ dup("interface{~int | ~string; float64; m()}"),
+ dup("interface{~T[int, string] | string}"),
+
+ // generic types
+ dup("x[T]"),
+ dup("x[N | A | S]"),
+ dup("x[N, A]"),
+
+ // non-type expressions
+ dup("(x)"),
+ dup("x.f"),
+ dup("a[i]"),
+
+ dup("s[:]"),
+ dup("s[i:]"),
+ dup("s[:j]"),
+ dup("s[i:j]"),
+ dup("s[:j:k]"),
+ dup("s[i:j:k]"),
+
+ dup("x.(T)"),
+
+ dup("x.([10]int)"),
+ dup("x.([...]int)"),
+
+ dup("x.(struct{})"),
+ dup("x.(struct{x int; y, z float32; E})"),
+
+ dup("x.(func())"),
+ dup("x.(func(x int))"),
+ dup("x.(func() int)"),
+ dup("x.(func(x, y int, z float32) (r int))"),
+ dup("x.(func(a, b, c int))"),
+ dup("x.(func(x ...T))"),
+
+ dup("x.(interface{})"),
+ dup("x.(interface{m(); n(x int); E})"),
+ dup("x.(interface{m(); n(x int) T; E; F})"),
+
+ dup("x.(map[K]V)"),
+
+ dup("x.(chan E)"),
+ dup("x.(<-chan E)"),
+ dup("x.(chan<- chan int)"),
+ dup("x.(chan<- <-chan int)"),
+ dup("x.(<-chan chan int)"),
+ dup("x.(chan (<-chan int))"),
+
+ dup("f()"),
+ dup("f(x)"),
+ dup("int(x)"),
+ dup("f(x, x + y)"),
+ dup("f(s...)"),
+ dup("f(a, s...)"),
+
+ // generic functions
+ dup("f[T]()"),
+ dup("f[T](T)"),
+ dup("f[T, T1]()"),
+ dup("f[T, T1](T, T1)"),
+
+ dup("*x"),
+ dup("&x"),
+ dup("x + y"),
+ dup("x + y << (2 * s)"),
+}
+
+func TestShortString(t *testing.T) {
+ for _, test := range exprTests {
+ src := "package p; var _ = " + test[0]
+ ast, err := Parse(nil, strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Errorf("%s: %s", test[0], err)
+ continue
+ }
+ x := ast.DeclList[0].(*VarDecl).Values
+ if got := String(x); got != test[1] {
+ t.Errorf("%s: got %s, want %s", test[0], got, test[1])
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go
new file mode 100644
index 0000000..807d838
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/scanner.go
@@ -0,0 +1,881 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements scanner, a lexical tokenizer for
+// Go source. After initialization, consecutive calls of
+// next advance the scanner one token at a time.
+//
+// This file, source.go, tokens.go, and token_string.go are self-contained
+// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles)
+// and thus could be made into their own package.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "unicode"
+ "unicode/utf8"
+)
+
+// The mode flags below control which comments are reported
+// by calling the error handler. If no flag is set, comments
+// are ignored.
+const (
+ comments uint = 1 << iota // call handler for all comments
+ directives // call handler for directives only
+)
+
+type scanner struct {
+ source
+ mode uint
+ nlsemi bool // if set '\n' and EOF translate to ';'
+
+ // current token, valid after calling next()
+ line, col uint
+ blank bool // line is blank up to col
+ tok token
+ lit string // valid if tok is _Name, _Literal, or _Semi ("semicolon", "newline", or "EOF"); may be malformed if bad is true
+ bad bool // valid if tok is _Literal, true if a syntax error occurred, lit may be malformed
+ kind LitKind // valid if tok is _Literal
+ op Operator // valid if tok is _Operator, _Star, _AssignOp, or _IncOp
+ prec int // valid if tok is _Operator, _Star, _AssignOp, or _IncOp
+}
+
+func (s *scanner) init(src io.Reader, errh func(line, col uint, msg string), mode uint) {
+ s.source.init(src, errh)
+ s.mode = mode
+ s.nlsemi = false
+}
+
+// errorf reports an error at the most recently read character position.
+func (s *scanner) errorf(format string, args ...interface{}) {
+ s.error(fmt.Sprintf(format, args...))
+}
+
+// errorAtf reports an error at a byte column offset relative to the current token start.
+func (s *scanner) errorAtf(offset int, format string, args ...interface{}) {
+ s.errh(s.line, s.col+uint(offset), fmt.Sprintf(format, args...))
+}
+
+// setLit sets the scanner state for a recognized _Literal token.
+func (s *scanner) setLit(kind LitKind, ok bool) {
+ s.nlsemi = true
+ s.tok = _Literal
+ s.lit = string(s.segment())
+ s.bad = !ok
+ s.kind = kind
+}
+
+// next advances the scanner by reading the next token.
+//
+// If a read, source encoding, or lexical error occurs, next calls
+// the installed error handler with the respective error position
+// and message. The error message is guaranteed to be non-empty and
+// never starts with a '/'. The error handler must exist.
+//
+// If the scanner mode includes the comments flag and a comment
+// (including comments containing directives) is encountered, the
+// error handler is also called with each comment position and text
+// (including opening /* or // and closing */, but without a newline
+// at the end of line comments). Comment text always starts with a /
+// which can be used to distinguish these handler calls from errors.
+//
+// If the scanner mode includes the directives (but not the comments)
+// flag, only comments containing a //line, /*line, or //go: directive
+// are reported, in the same way as regular comments.
+func (s *scanner) next() {
+ nlsemi := s.nlsemi
+ s.nlsemi = false
+
+redo:
+ // skip white space
+ s.stop()
+ startLine, startCol := s.pos()
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' {
+ s.nextch()
+ }
+
+ // token start
+ s.line, s.col = s.pos()
+ s.blank = s.line > startLine || startCol == colbase
+ s.start()
+ if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) {
+ s.nextch()
+ s.ident()
+ return
+ }
+
+ switch s.ch {
+ case -1:
+ if nlsemi {
+ s.lit = "EOF"
+ s.tok = _Semi
+ break
+ }
+ s.tok = _EOF
+
+ case '\n':
+ s.nextch()
+ s.lit = "newline"
+ s.tok = _Semi
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ s.number(false)
+
+ case '"':
+ s.stdString()
+
+ case '`':
+ s.rawString()
+
+ case '\'':
+ s.rune()
+
+ case '(':
+ s.nextch()
+ s.tok = _Lparen
+
+ case '[':
+ s.nextch()
+ s.tok = _Lbrack
+
+ case '{':
+ s.nextch()
+ s.tok = _Lbrace
+
+ case ',':
+ s.nextch()
+ s.tok = _Comma
+
+ case ';':
+ s.nextch()
+ s.lit = "semicolon"
+ s.tok = _Semi
+
+ case ')':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rparen
+
+ case ']':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rbrack
+
+ case '}':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rbrace
+
+ case ':':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _Define
+ break
+ }
+ s.tok = _Colon
+
+ case '.':
+ s.nextch()
+ if isDecimal(s.ch) {
+ s.number(true)
+ break
+ }
+ if s.ch == '.' {
+ s.nextch()
+ if s.ch == '.' {
+ s.nextch()
+ s.tok = _DotDotDot
+ break
+ }
+ s.rewind() // now s.ch holds 1st '.'
+ s.nextch() // consume 1st '.' again
+ }
+ s.tok = _Dot
+
+ case '+':
+ s.nextch()
+ s.op, s.prec = Add, precAdd
+ if s.ch != '+' {
+ goto assignop
+ }
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _IncOp
+
+ case '-':
+ s.nextch()
+ s.op, s.prec = Sub, precAdd
+ if s.ch != '-' {
+ goto assignop
+ }
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _IncOp
+
+ case '*':
+ s.nextch()
+ s.op, s.prec = Mul, precMul
+ // don't goto assignop - want _Star token
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _AssignOp
+ break
+ }
+ s.tok = _Star
+
+ case '/':
+ s.nextch()
+ if s.ch == '/' {
+ s.nextch()
+ s.lineComment()
+ goto redo
+ }
+ if s.ch == '*' {
+ s.nextch()
+ s.fullComment()
+ if line, _ := s.pos(); line > s.line && nlsemi {
+ // A multi-line comment acts like a newline;
+ // it translates to a ';' if nlsemi is set.
+ s.lit = "newline"
+ s.tok = _Semi
+ break
+ }
+ goto redo
+ }
+ s.op, s.prec = Div, precMul
+ goto assignop
+
+ case '%':
+ s.nextch()
+ s.op, s.prec = Rem, precMul
+ goto assignop
+
+ case '&':
+ s.nextch()
+ if s.ch == '&' {
+ s.nextch()
+ s.op, s.prec = AndAnd, precAndAnd
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = And, precMul
+ if s.ch == '^' {
+ s.nextch()
+ s.op = AndNot
+ }
+ goto assignop
+
+ case '|':
+ s.nextch()
+ if s.ch == '|' {
+ s.nextch()
+ s.op, s.prec = OrOr, precOrOr
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = Or, precAdd
+ goto assignop
+
+ case '^':
+ s.nextch()
+ s.op, s.prec = Xor, precAdd
+ goto assignop
+
+ case '<':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Leq, precCmp
+ s.tok = _Operator
+ break
+ }
+ if s.ch == '<' {
+ s.nextch()
+ s.op, s.prec = Shl, precMul
+ goto assignop
+ }
+ if s.ch == '-' {
+ s.nextch()
+ s.tok = _Arrow
+ break
+ }
+ s.op, s.prec = Lss, precCmp
+ s.tok = _Operator
+
+ case '>':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Geq, precCmp
+ s.tok = _Operator
+ break
+ }
+ if s.ch == '>' {
+ s.nextch()
+ s.op, s.prec = Shr, precMul
+ goto assignop
+ }
+ s.op, s.prec = Gtr, precCmp
+ s.tok = _Operator
+
+ case '=':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Eql, precCmp
+ s.tok = _Operator
+ break
+ }
+ s.tok = _Assign
+
+ case '!':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Neq, precCmp
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = Not, 0
+ s.tok = _Operator
+
+ case '~':
+ s.nextch()
+ s.op, s.prec = Tilde, 0
+ s.tok = _Operator
+
+ default:
+ s.errorf("invalid character %#U", s.ch)
+ s.nextch()
+ goto redo
+ }
+
+ return
+
+assignop:
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _AssignOp
+ return
+ }
+ s.tok = _Operator
+}
+
+func (s *scanner) ident() {
+ // accelerate common case (7bit ASCII)
+ for isLetter(s.ch) || isDecimal(s.ch) {
+ s.nextch()
+ }
+
+ // general case
+ if s.ch >= utf8.RuneSelf {
+ for s.atIdentChar(false) {
+ s.nextch()
+ }
+ }
+
+ // possibly a keyword
+ lit := s.segment()
+ if len(lit) >= 2 {
+ if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) {
+ s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok)
+ s.tok = tok
+ return
+ }
+ }
+
+ s.nlsemi = true
+ s.lit = string(lit)
+ s.tok = _Name
+}
+
+// tokStrFast is a faster version of token.String, which assumes that tok
+// is one of the valid tokens - and can thus skip bounds checks.
+func tokStrFast(tok token) string {
+ return _token_name[_token_index[tok-1]:_token_index[tok]]
+}
+
+func (s *scanner) atIdentChar(first bool) bool {
+ switch {
+ case unicode.IsLetter(s.ch) || s.ch == '_':
+ // ok
+ case unicode.IsDigit(s.ch):
+ if first {
+ s.errorf("identifier cannot begin with digit %#U", s.ch)
+ }
+ case s.ch >= utf8.RuneSelf:
+ s.errorf("invalid character %#U in identifier", s.ch)
+ default:
+ return false
+ }
+ return true
+}
+
+// hash is a perfect hash function for keywords.
+// It assumes that s has at least length 2.
+func hash(s []byte) uint {
+ return (uint(s[0])<<4 ^ uint(s[1]) + uint(len(s))) & uint(len(keywordMap)-1)
+}
+
+var keywordMap [1 << 6]token // size must be power of two
+
+func init() {
+ // populate keywordMap
+ for tok := _Break; tok <= _Var; tok++ {
+ h := hash([]byte(tok.String()))
+ if keywordMap[h] != 0 {
+ panic("imperfect hash")
+ }
+ keywordMap[h] = tok
+ }
+}
+
+func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+func isLetter(ch rune) bool { return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' }
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
+
+// digits accepts the sequence { digit | '_' }.
+// If base <= 10, digits accepts any decimal digit but records
+// the index (relative to the literal start) of a digit >= base
+// in *invalid, if *invalid < 0.
+// digits returns a bitset describing whether the sequence contained
+// digits (bit 0 is set), or separators '_' (bit 1 is set).
+func (s *scanner) digits(base int, invalid *int) (digsep int) {
+ if base <= 10 {
+ max := rune('0' + base)
+ for isDecimal(s.ch) || s.ch == '_' {
+ ds := 1
+ if s.ch == '_' {
+ ds = 2
+ } else if s.ch >= max && *invalid < 0 {
+ _, col := s.pos()
+ *invalid = int(col - s.col) // record invalid rune index
+ }
+ digsep |= ds
+ s.nextch()
+ }
+ } else {
+ for isHex(s.ch) || s.ch == '_' {
+ ds := 1
+ if s.ch == '_' {
+ ds = 2
+ }
+ digsep |= ds
+ s.nextch()
+ }
+ }
+ return
+}
+
+func (s *scanner) number(seenPoint bool) {
+ ok := true
+ kind := IntLit
+ base := 10 // number base
+ prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
+ digsep := 0 // bit 0: digit present, bit 1: '_' present
+ invalid := -1 // index of invalid digit in literal, or < 0
+
+ // integer part
+ if !seenPoint {
+ if s.ch == '0' {
+ s.nextch()
+ switch lower(s.ch) {
+ case 'x':
+ s.nextch()
+ base, prefix = 16, 'x'
+ case 'o':
+ s.nextch()
+ base, prefix = 8, 'o'
+ case 'b':
+ s.nextch()
+ base, prefix = 2, 'b'
+ default:
+ base, prefix = 8, '0'
+ digsep = 1 // leading 0
+ }
+ }
+ digsep |= s.digits(base, &invalid)
+ if s.ch == '.' {
+ if prefix == 'o' || prefix == 'b' {
+ s.errorf("invalid radix point in %s literal", baseName(base))
+ ok = false
+ }
+ s.nextch()
+ seenPoint = true
+ }
+ }
+
+ // fractional part
+ if seenPoint {
+ kind = FloatLit
+ digsep |= s.digits(base, &invalid)
+ }
+
+ if digsep&1 == 0 && ok {
+ s.errorf("%s literal has no digits", baseName(base))
+ ok = false
+ }
+
+ // exponent
+ if e := lower(s.ch); e == 'e' || e == 'p' {
+ if ok {
+ switch {
+ case e == 'e' && prefix != 0 && prefix != '0':
+ s.errorf("%q exponent requires decimal mantissa", s.ch)
+ ok = false
+ case e == 'p' && prefix != 'x':
+ s.errorf("%q exponent requires hexadecimal mantissa", s.ch)
+ ok = false
+ }
+ }
+ s.nextch()
+ kind = FloatLit
+ if s.ch == '+' || s.ch == '-' {
+ s.nextch()
+ }
+ digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit
+ if digsep&1 == 0 && ok {
+ s.errorf("exponent has no digits")
+ ok = false
+ }
+ } else if prefix == 'x' && kind == FloatLit && ok {
+ s.errorf("hexadecimal mantissa requires a 'p' exponent")
+ ok = false
+ }
+
+ // suffix 'i'
+ if s.ch == 'i' {
+ kind = ImagLit
+ s.nextch()
+ }
+
+ s.setLit(kind, ok) // do this now so we can use s.lit below
+
+ if kind == IntLit && invalid >= 0 && ok {
+ s.errorAtf(invalid, "invalid digit %q in %s literal", s.lit[invalid], baseName(base))
+ ok = false
+ }
+
+ if digsep&2 != 0 && ok {
+ if i := invalidSep(s.lit); i >= 0 {
+ s.errorAtf(i, "'_' must separate successive digits")
+ ok = false
+ }
+ }
+
+ s.bad = !ok // correct s.bad
+}
+
+func baseName(base int) string {
+ switch base {
+ case 2:
+ return "binary"
+ case 8:
+ return "octal"
+ case 10:
+ return "decimal"
+ case 16:
+ return "hexadecimal"
+ }
+ panic("invalid base")
+}
+
+// invalidSep returns the index of the first invalid separator in x, or -1.
+func invalidSep(x string) int {
+ x1 := ' ' // prefix char, we only care if it's 'x'
+ d := '.' // digit, one of '_', '0' (a digit), or '.' (anything else)
+ i := 0
+
+ // a prefix counts as a digit
+ if len(x) >= 2 && x[0] == '0' {
+ x1 = lower(rune(x[1]))
+ if x1 == 'x' || x1 == 'o' || x1 == 'b' {
+ d = '0'
+ i = 2
+ }
+ }
+
+ // mantissa and exponent
+ for ; i < len(x); i++ {
+ p := d // previous digit
+ d = rune(x[i])
+ switch {
+ case d == '_':
+ if p != '0' {
+ return i
+ }
+ case isDecimal(d) || x1 == 'x' && isHex(d):
+ d = '0'
+ default:
+ if p == '_' {
+ return i - 1
+ }
+ d = '.'
+ }
+ }
+ if d == '_' {
+ return len(x) - 1
+ }
+
+ return -1
+}
+
+func (s *scanner) rune() {
+ ok := true
+ s.nextch()
+
+ n := 0
+ for ; ; n++ {
+ if s.ch == '\'' {
+ if ok {
+ if n == 0 {
+ s.errorf("empty rune literal or unescaped '")
+ ok = false
+ } else if n != 1 {
+ s.errorAtf(0, "more than one character in rune literal")
+ ok = false
+ }
+ }
+ s.nextch()
+ break
+ }
+ if s.ch == '\\' {
+ s.nextch()
+ if !s.escape('\'') {
+ ok = false
+ }
+ continue
+ }
+ if s.ch == '\n' {
+ if ok {
+ s.errorf("newline in rune literal")
+ ok = false
+ }
+ break
+ }
+ if s.ch < 0 {
+ if ok {
+ s.errorAtf(0, "rune literal not terminated")
+ ok = false
+ }
+ break
+ }
+ s.nextch()
+ }
+
+ s.setLit(RuneLit, ok)
+}
+
+func (s *scanner) stdString() {
+ ok := true
+ s.nextch()
+
+ for {
+ if s.ch == '"' {
+ s.nextch()
+ break
+ }
+ if s.ch == '\\' {
+ s.nextch()
+ if !s.escape('"') {
+ ok = false
+ }
+ continue
+ }
+ if s.ch == '\n' {
+ s.errorf("newline in string")
+ ok = false
+ break
+ }
+ if s.ch < 0 {
+ s.errorAtf(0, "string not terminated")
+ ok = false
+ break
+ }
+ s.nextch()
+ }
+
+ s.setLit(StringLit, ok)
+}
+
+func (s *scanner) rawString() {
+ ok := true
+ s.nextch()
+
+ for {
+ if s.ch == '`' {
+ s.nextch()
+ break
+ }
+ if s.ch < 0 {
+ s.errorAtf(0, "string not terminated")
+ ok = false
+ break
+ }
+ s.nextch()
+ }
+ // We leave CRs in the string since they are part of the
+ // literal (even though they are not part of the literal
+ // value).
+
+ s.setLit(StringLit, ok)
+}
+
+func (s *scanner) comment(text string) {
+ s.errorAtf(0, "%s", text)
+}
+
+func (s *scanner) skipLine() {
+ // don't consume '\n' - needed for nlsemi logic
+ for s.ch >= 0 && s.ch != '\n' {
+ s.nextch()
+ }
+}
+
+func (s *scanner) lineComment() {
+ // opening has already been consumed
+
+ if s.mode&comments != 0 {
+ s.skipLine()
+ s.comment(string(s.segment()))
+ return
+ }
+
+ // are we saving directives? or is this definitely not a directive?
+ if s.mode&directives == 0 || (s.ch != 'g' && s.ch != 'l') {
+ s.stop()
+ s.skipLine()
+ return
+ }
+
+ // recognize go: or line directives
+ prefix := "go:"
+ if s.ch == 'l' {
+ prefix = "line "
+ }
+ for _, m := range prefix {
+ if s.ch != m {
+ s.stop()
+ s.skipLine()
+ return
+ }
+ s.nextch()
+ }
+
+ // directive text
+ s.skipLine()
+ s.comment(string(s.segment()))
+}
+
+func (s *scanner) skipComment() bool {
+ for s.ch >= 0 {
+ for s.ch == '*' {
+ s.nextch()
+ if s.ch == '/' {
+ s.nextch()
+ return true
+ }
+ }
+ s.nextch()
+ }
+ s.errorAtf(0, "comment not terminated")
+ return false
+}
+
+func (s *scanner) fullComment() {
+ /* opening has already been consumed */
+
+ if s.mode&comments != 0 {
+ if s.skipComment() {
+ s.comment(string(s.segment()))
+ }
+ return
+ }
+
+ if s.mode&directives == 0 || s.ch != 'l' {
+ s.stop()
+ s.skipComment()
+ return
+ }
+
+ // recognize line directive
+ const prefix = "line "
+ for _, m := range prefix {
+ if s.ch != m {
+ s.stop()
+ s.skipComment()
+ return
+ }
+ s.nextch()
+ }
+
+ // directive text
+ if s.skipComment() {
+ s.comment(string(s.segment()))
+ }
+}
+
+func (s *scanner) escape(quote rune) bool {
+ var n int
+ var base, max uint32
+
+ switch s.ch {
+ case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\':
+ s.nextch()
+ return true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ n, base, max = 3, 8, 255
+ case 'x':
+ s.nextch()
+ n, base, max = 2, 16, 255
+ case 'u':
+ s.nextch()
+ n, base, max = 4, 16, unicode.MaxRune
+ case 'U':
+ s.nextch()
+ n, base, max = 8, 16, unicode.MaxRune
+ default:
+ if s.ch < 0 {
+ return true // complain in caller about EOF
+ }
+ s.errorf("unknown escape")
+ return false
+ }
+
+ var x uint32
+ for i := n; i > 0; i-- {
+ if s.ch < 0 {
+ return true // complain in caller about EOF
+ }
+ d := base
+ if isDecimal(s.ch) {
+ d = uint32(s.ch) - '0'
+ } else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' {
+ d = uint32(lower(s.ch)) - 'a' + 10
+ }
+ if d >= base {
+ s.errorf("invalid character %q in %s escape", s.ch, baseName(int(base)))
+ return false
+ }
+ // d < base
+ x = x*base + d
+ s.nextch()
+ }
+
+ if x > max && base == 8 {
+ s.errorf("octal escape value %d > 255", x)
+ return false
+ }
+
+ if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ {
+ s.errorf("escape is invalid Unicode code point %#U", x)
+ return false
+ }
+
+ return true
+}
diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go
new file mode 100644
index 0000000..450ec1f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/scanner_test.go
@@ -0,0 +1,767 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+)
+
+// errh is a default error handler for basic tests.
+func errh(line, col uint, msg string) {
+ panic(fmt.Sprintf("%d:%d: %s", line, col, msg))
+}
+
+// Don't bother with other tests if TestSmoke doesn't pass.
+func TestSmoke(t *testing.T) {
+ const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$"
+ tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF}
+
+ var got scanner
+ got.init(strings.NewReader(src), errh, 0)
+ for _, want := range tokens {
+ got.next()
+ if got.tok != want {
+ t.Errorf("%d:%d: got %s; want %s", got.line, got.col, got.tok, want)
+ continue
+ }
+ }
+}
+
+// Once TestSmoke passes, run TestTokens next.
+func TestTokens(t *testing.T) {
+ var got scanner
+ for _, want := range sampleTokens {
+ got.init(strings.NewReader(want.src), func(line, col uint, msg string) {
+ t.Errorf("%s:%d:%d: %s", want.src, line, col, msg)
+ }, 0)
+ got.next()
+ if got.tok != want.tok {
+ t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok)
+ continue
+ }
+ if (got.tok == _Name || got.tok == _Literal) && got.lit != want.src {
+ t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src)
+ }
+ }
+}
+
+func TestScanner(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ filename := *src_ // can be changed via -src flag
+ src, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ var s scanner
+ s.init(src, errh, 0)
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ if !testing.Verbose() {
+ continue
+ }
+ switch s.tok {
+ case _Name, _Literal:
+ fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit)
+ case _Operator:
+ fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec)
+ default:
+ fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok)
+ }
+ }
+}
+
+func TestEmbeddedTokens(t *testing.T) {
+ // make source
+ var buf bytes.Buffer
+ for i, s := range sampleTokens {
+ buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation
+ buf.WriteString(s.src) // token
+ buf.WriteString(" "[:i&7]) // trailing spaces
+ fmt.Fprintf(&buf, "/*line foo:%d */ // bar\n", i) // comments + newline (don't crash w/o directive handler)
+ }
+
+ // scan source
+ var got scanner
+ var src string
+ got.init(&buf, func(line, col uint, msg string) {
+ t.Fatalf("%s:%d:%d: %s", src, line, col, msg)
+ }, 0)
+ got.next()
+ for i, want := range sampleTokens {
+ src = want.src
+ nlsemi := false
+
+ if got.line-linebase != uint(i) {
+ t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i)
+ }
+
+ if got.tok != want.tok {
+ t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok)
+ continue
+ }
+
+ switch want.tok {
+ case _Semi:
+ if got.lit != "semicolon" {
+ t.Errorf("%s: got %s; want semicolon", src, got.lit)
+ }
+
+ case _Name, _Literal:
+ if got.lit != want.src {
+ t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src)
+ continue
+ }
+ nlsemi = true
+
+ case _Operator, _AssignOp, _IncOp:
+ if got.op != want.op {
+ t.Errorf("%s: got op %s; want %s", src, got.op, want.op)
+ continue
+ }
+ if got.prec != want.prec {
+ t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec)
+ continue
+ }
+ nlsemi = want.tok == _IncOp
+
+ case _Rparen, _Rbrack, _Rbrace, _Break, _Continue, _Fallthrough, _Return:
+ nlsemi = true
+ }
+
+ if nlsemi {
+ got.next()
+ if got.tok != _Semi {
+ t.Errorf("%s: got tok %s; want ;", src, got.tok)
+ continue
+ }
+ if got.lit != "newline" {
+ t.Errorf("%s: got %s; want newline", src, got.lit)
+ }
+ }
+
+ got.next()
+ }
+
+ if got.tok != _EOF {
+ t.Errorf("got %q; want _EOF", got.tok)
+ }
+}
+
+var sampleTokens = [...]struct {
+ tok token
+ src string
+ op Operator
+ prec int
+}{
+ // name samples
+ {_Name, "x", 0, 0},
+ {_Name, "X123", 0, 0},
+ {_Name, "foo", 0, 0},
+ {_Name, "Foo123", 0, 0},
+ {_Name, "foo_bar", 0, 0},
+ {_Name, "_", 0, 0},
+ {_Name, "_foobar", 0, 0},
+ {_Name, "a۰۱۸", 0, 0},
+ {_Name, "foo६४", 0, 0},
+ {_Name, "bar9876", 0, 0},
+ {_Name, "ŝ", 0, 0},
+ {_Name, "ŝfoo", 0, 0},
+
+ // literal samples
+ {_Literal, "0", 0, 0},
+ {_Literal, "1", 0, 0},
+ {_Literal, "12345", 0, 0},
+ {_Literal, "123456789012345678890123456789012345678890", 0, 0},
+ {_Literal, "01234567", 0, 0},
+ {_Literal, "0_1_234_567", 0, 0},
+ {_Literal, "0X0", 0, 0},
+ {_Literal, "0xcafebabe", 0, 0},
+ {_Literal, "0x_cafe_babe", 0, 0},
+ {_Literal, "0O0", 0, 0},
+ {_Literal, "0o000", 0, 0},
+ {_Literal, "0o_000", 0, 0},
+ {_Literal, "0B1", 0, 0},
+ {_Literal, "0b01100110", 0, 0},
+ {_Literal, "0b_0110_0110", 0, 0},
+ {_Literal, "0.", 0, 0},
+ {_Literal, "0.e0", 0, 0},
+ {_Literal, "0.e-1", 0, 0},
+ {_Literal, "0.e+123", 0, 0},
+ {_Literal, ".0", 0, 0},
+ {_Literal, ".0E00", 0, 0},
+ {_Literal, ".0E-0123", 0, 0},
+ {_Literal, ".0E+12345678901234567890", 0, 0},
+ {_Literal, ".45e1", 0, 0},
+ {_Literal, "3.14159265", 0, 0},
+ {_Literal, "1e0", 0, 0},
+ {_Literal, "1e+100", 0, 0},
+ {_Literal, "1e-100", 0, 0},
+ {_Literal, "2.71828e-1000", 0, 0},
+ {_Literal, "0i", 0, 0},
+ {_Literal, "1i", 0, 0},
+ {_Literal, "012345678901234567889i", 0, 0},
+ {_Literal, "123456789012345678890i", 0, 0},
+ {_Literal, "0.i", 0, 0},
+ {_Literal, ".0i", 0, 0},
+ {_Literal, "3.14159265i", 0, 0},
+ {_Literal, "1e0i", 0, 0},
+ {_Literal, "1e+100i", 0, 0},
+ {_Literal, "1e-100i", 0, 0},
+ {_Literal, "2.71828e-1000i", 0, 0},
+ {_Literal, "'a'", 0, 0},
+ {_Literal, "'\\000'", 0, 0},
+ {_Literal, "'\\xFF'", 0, 0},
+ {_Literal, "'\\uff16'", 0, 0},
+ {_Literal, "'\\U0000ff16'", 0, 0},
+ {_Literal, "`foobar`", 0, 0},
+ {_Literal, "`foo\tbar`", 0, 0},
+ {_Literal, "`\r`", 0, 0},
+
+ // operators
+ {_Operator, "!", Not, 0},
+ {_Operator, "~", Tilde, 0},
+
+ {_Operator, "||", OrOr, precOrOr},
+
+ {_Operator, "&&", AndAnd, precAndAnd},
+
+ {_Operator, "==", Eql, precCmp},
+ {_Operator, "!=", Neq, precCmp},
+ {_Operator, "<", Lss, precCmp},
+ {_Operator, "<=", Leq, precCmp},
+ {_Operator, ">", Gtr, precCmp},
+ {_Operator, ">=", Geq, precCmp},
+
+ {_Operator, "+", Add, precAdd},
+ {_Operator, "-", Sub, precAdd},
+ {_Operator, "|", Or, precAdd},
+ {_Operator, "^", Xor, precAdd},
+
+ {_Star, "*", Mul, precMul},
+ {_Operator, "/", Div, precMul},
+ {_Operator, "%", Rem, precMul},
+ {_Operator, "&", And, precMul},
+ {_Operator, "&^", AndNot, precMul},
+ {_Operator, "<<", Shl, precMul},
+ {_Operator, ">>", Shr, precMul},
+
+ // assignment operations
+ {_AssignOp, "+=", Add, precAdd},
+ {_AssignOp, "-=", Sub, precAdd},
+ {_AssignOp, "|=", Or, precAdd},
+ {_AssignOp, "^=", Xor, precAdd},
+
+ {_AssignOp, "*=", Mul, precMul},
+ {_AssignOp, "/=", Div, precMul},
+ {_AssignOp, "%=", Rem, precMul},
+ {_AssignOp, "&=", And, precMul},
+ {_AssignOp, "&^=", AndNot, precMul},
+ {_AssignOp, "<<=", Shl, precMul},
+ {_AssignOp, ">>=", Shr, precMul},
+
+ // other operations
+ {_IncOp, "++", Add, precAdd},
+ {_IncOp, "--", Sub, precAdd},
+ {_Assign, "=", 0, 0},
+ {_Define, ":=", 0, 0},
+ {_Arrow, "<-", 0, 0},
+
+ // delimiters
+ {_Lparen, "(", 0, 0},
+ {_Lbrack, "[", 0, 0},
+ {_Lbrace, "{", 0, 0},
+ {_Rparen, ")", 0, 0},
+ {_Rbrack, "]", 0, 0},
+ {_Rbrace, "}", 0, 0},
+ {_Comma, ",", 0, 0},
+ {_Semi, ";", 0, 0},
+ {_Colon, ":", 0, 0},
+ {_Dot, ".", 0, 0},
+ {_DotDotDot, "...", 0, 0},
+
+ // keywords
+ {_Break, "break", 0, 0},
+ {_Case, "case", 0, 0},
+ {_Chan, "chan", 0, 0},
+ {_Const, "const", 0, 0},
+ {_Continue, "continue", 0, 0},
+ {_Default, "default", 0, 0},
+ {_Defer, "defer", 0, 0},
+ {_Else, "else", 0, 0},
+ {_Fallthrough, "fallthrough", 0, 0},
+ {_For, "for", 0, 0},
+ {_Func, "func", 0, 0},
+ {_Go, "go", 0, 0},
+ {_Goto, "goto", 0, 0},
+ {_If, "if", 0, 0},
+ {_Import, "import", 0, 0},
+ {_Interface, "interface", 0, 0},
+ {_Map, "map", 0, 0},
+ {_Package, "package", 0, 0},
+ {_Range, "range", 0, 0},
+ {_Return, "return", 0, 0},
+ {_Select, "select", 0, 0},
+ {_Struct, "struct", 0, 0},
+ {_Switch, "switch", 0, 0},
+ {_Type, "type", 0, 0},
+ {_Var, "var", 0, 0},
+}
+
+func TestComments(t *testing.T) {
+ type comment struct {
+ line, col uint // 0-based
+ text string
+ }
+
+ for _, test := range []struct {
+ src string
+ want comment
+ }{
+ // no comments
+ {"no comment here", comment{0, 0, ""}},
+ {" /", comment{0, 0, ""}},
+ {"\n /*/", comment{0, 0, ""}},
+
+ //-style comments
+ {"// line comment\n", comment{0, 0, "// line comment"}},
+ {"package p // line comment\n", comment{0, 10, "// line comment"}},
+ {"//\n//\n\t// want this one\r\n", comment{2, 1, "// want this one\r"}},
+ {"\n\n//\n", comment{2, 0, "//"}},
+ {"//", comment{0, 0, "//"}},
+
+ /*-style comments */
+ {"123/* regular comment */", comment{0, 3, "/* regular comment */"}},
+ {"package p /* regular comment", comment{0, 0, ""}},
+ {"\n\n\n/*\n*//* want this one */", comment{4, 2, "/* want this one */"}},
+ {"\n\n/**/", comment{2, 0, "/**/"}},
+ {"/*", comment{0, 0, ""}},
+ } {
+ var s scanner
+ var got comment
+ s.init(strings.NewReader(test.src), func(line, col uint, msg string) {
+ if msg[0] != '/' {
+ // error
+ if msg != "comment not terminated" {
+ t.Errorf("%q: %s", test.src, msg)
+ }
+ return
+ }
+ got = comment{line - linebase, col - colbase, msg} // keep last one
+ }, comments)
+
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ want := test.want
+ if got.line != want.line || got.col != want.col {
+ t.Errorf("%q: got position %d:%d; want %d:%d", test.src, got.line, got.col, want.line, want.col)
+ }
+ if got.text != want.text {
+ t.Errorf("%q: got %q; want %q", test.src, got.text, want.text)
+ }
+ }
+}
+
+func TestNumbers(t *testing.T) {
+ for _, test := range []struct {
+ kind LitKind
+ src, tokens, err string
+ }{
+ // binaries
+ {IntLit, "0b0", "0b0", ""},
+ {IntLit, "0b1010", "0b1010", ""},
+ {IntLit, "0B1110", "0B1110", ""},
+
+ {IntLit, "0b", "0b", "binary literal has no digits"},
+ {IntLit, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
+ {IntLit, "0b01a0", "0b01 a0", ""}, // only accept 0-9
+
+ {FloatLit, "0b.", "0b.", "invalid radix point in binary literal"},
+ {FloatLit, "0b.1", "0b.1", "invalid radix point in binary literal"},
+ {FloatLit, "0b1.0", "0b1.0", "invalid radix point in binary literal"},
+ {FloatLit, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
+ {FloatLit, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0b10i", "0b10i", ""},
+ {ImagLit, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"},
+
+ // octals
+ {IntLit, "0o0", "0o0", ""},
+ {IntLit, "0o1234", "0o1234", ""},
+ {IntLit, "0O1234", "0O1234", ""},
+
+ {IntLit, "0o", "0o", "octal literal has no digits"},
+ {IntLit, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
+ {IntLit, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
+ {IntLit, "0o12a3", "0o12 a3", ""}, // only accept 0-9
+
+ {FloatLit, "0o.", "0o.", "invalid radix point in octal literal"},
+ {FloatLit, "0o.2", "0o.2", "invalid radix point in octal literal"},
+ {FloatLit, "0o1.2", "0o1.2", "invalid radix point in octal literal"},
+ {FloatLit, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
+ {FloatLit, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0o10i", "0o10i", ""},
+ {ImagLit, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"},
+
+ // 0-octals
+ {IntLit, "0", "0", ""},
+ {IntLit, "0123", "0123", ""},
+
+ {IntLit, "08123", "08123", "invalid digit '8' in octal literal"},
+ {IntLit, "01293", "01293", "invalid digit '9' in octal literal"},
+ {IntLit, "0F.", "0 F .", ""}, // only accept 0-9
+ {IntLit, "0123F.", "0123 F .", ""},
+ {IntLit, "0123456x", "0123456 x", ""},
+
+ // decimals
+ {IntLit, "1", "1", ""},
+ {IntLit, "1234", "1234", ""},
+
+ {IntLit, "1f", "1 f", ""}, // only accept 0-9
+
+ {ImagLit, "0i", "0i", ""},
+ {ImagLit, "0678i", "0678i", ""},
+
+ // decimal floats
+ {FloatLit, "0.", "0.", ""},
+ {FloatLit, "123.", "123.", ""},
+ {FloatLit, "0123.", "0123.", ""},
+
+ {FloatLit, ".0", ".0", ""},
+ {FloatLit, ".123", ".123", ""},
+ {FloatLit, ".0123", ".0123", ""},
+
+ {FloatLit, "0.0", "0.0", ""},
+ {FloatLit, "123.123", "123.123", ""},
+ {FloatLit, "0123.0123", "0123.0123", ""},
+
+ {FloatLit, "0e0", "0e0", ""},
+ {FloatLit, "123e+0", "123e+0", ""},
+ {FloatLit, "0123E-1", "0123E-1", ""},
+
+ {FloatLit, "0.e+1", "0.e+1", ""},
+ {FloatLit, "123.E-10", "123.E-10", ""},
+ {FloatLit, "0123.e123", "0123.e123", ""},
+
+ {FloatLit, ".0e-1", ".0e-1", ""},
+ {FloatLit, ".123E+10", ".123E+10", ""},
+ {FloatLit, ".0123E123", ".0123E123", ""},
+
+ {FloatLit, "0.0e1", "0.0e1", ""},
+ {FloatLit, "123.123E-10", "123.123E-10", ""},
+ {FloatLit, "0123.0123e+456", "0123.0123e+456", ""},
+
+ {FloatLit, "0e", "0e", "exponent has no digits"},
+ {FloatLit, "0E+", "0E+", "exponent has no digits"},
+ {FloatLit, "1e+f", "1e+ f", "exponent has no digits"},
+ {FloatLit, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"},
+ {FloatLit, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0.i", "0.i", ""},
+ {ImagLit, ".123i", ".123i", ""},
+ {ImagLit, "123.123i", "123.123i", ""},
+ {ImagLit, "123e+0i", "123e+0i", ""},
+ {ImagLit, "123.E-10i", "123.E-10i", ""},
+ {ImagLit, ".123E+10i", ".123E+10i", ""},
+
+ // hexadecimals
+ {IntLit, "0x0", "0x0", ""},
+ {IntLit, "0x1234", "0x1234", ""},
+ {IntLit, "0xcafef00d", "0xcafef00d", ""},
+ {IntLit, "0XCAFEF00D", "0XCAFEF00D", ""},
+
+ {IntLit, "0x", "0x", "hexadecimal literal has no digits"},
+ {IntLit, "0x1g", "0x1 g", ""},
+
+ {ImagLit, "0xf00i", "0xf00i", ""},
+
+ // hexadecimal floats
+ {FloatLit, "0x0p0", "0x0p0", ""},
+ {FloatLit, "0x12efp-123", "0x12efp-123", ""},
+ {FloatLit, "0xABCD.p+0", "0xABCD.p+0", ""},
+ {FloatLit, "0x.0189P-0", "0x.0189P-0", ""},
+ {FloatLit, "0x1.ffffp+1023", "0x1.ffffp+1023", ""},
+
+ {FloatLit, "0x.", "0x.", "hexadecimal literal has no digits"},
+ {FloatLit, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x0p", "0x0p", "exponent has no digits"},
+ {FloatLit, "0xeP-", "0xeP-", "exponent has no digits"},
+ {FloatLit, "0x1234PAB", "0x1234P AB", "exponent has no digits"},
+ {FloatLit, "0x1.2p1a", "0x1.2p1 a", ""},
+
+ {ImagLit, "0xf00.bap+12i", "0xf00.bap+12i", ""},
+
+ // separators
+ {IntLit, "0b_1000_0001", "0b_1000_0001", ""},
+ {IntLit, "0o_600", "0o_600", ""},
+ {IntLit, "0_466", "0_466", ""},
+ {IntLit, "1_000", "1_000", ""},
+ {FloatLit, "1_000.000_1", "1_000.000_1", ""},
+ {ImagLit, "10e+1_2_3i", "10e+1_2_3i", ""},
+ {IntLit, "0x_f00d", "0x_f00d", ""},
+ {FloatLit, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""},
+
+ {IntLit, "0b__1000", "0b__1000", "'_' must separate successive digits"},
+ {IntLit, "0o60___0", "0o60___0", "'_' must separate successive digits"},
+ {IntLit, "0466_", "0466_", "'_' must separate successive digits"},
+ {FloatLit, "1_.", "1_.", "'_' must separate successive digits"},
+ {FloatLit, "0._1", "0._1", "'_' must separate successive digits"},
+ {FloatLit, "2.7_e0", "2.7_e0", "'_' must separate successive digits"},
+ {ImagLit, "10e+12_i", "10e+12_i", "'_' must separate successive digits"},
+ {IntLit, "0x___0", "0x___0", "'_' must separate successive digits"},
+ {FloatLit, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"},
+ } {
+ var s scanner
+ var err string
+ s.init(strings.NewReader(test.src), func(_, _ uint, msg string) {
+ if err == "" {
+ err = msg
+ }
+ }, 0)
+
+ for i, want := range strings.Split(test.tokens, " ") {
+ err = ""
+ s.next()
+
+ if err != "" && !s.bad {
+ t.Errorf("%q: got error but bad not set", test.src)
+ }
+
+ // compute lit where s.lit is not defined
+ var lit string
+ switch s.tok {
+ case _Name, _Literal:
+ lit = s.lit
+ case _Dot:
+ lit = "."
+ }
+
+ if i == 0 {
+ if s.tok != _Literal || s.kind != test.kind {
+ t.Errorf("%q: got token %s (kind = %d); want literal (kind = %d)", test.src, s.tok, s.kind, test.kind)
+ }
+ if err != test.err {
+ t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
+ }
+ }
+
+ if lit != want {
+ t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, s.tok, want)
+ }
+ }
+
+ // make sure we read all
+ s.next()
+ if s.tok == _Semi {
+ s.next()
+ }
+ if s.tok != _EOF {
+ t.Errorf("%q: got %s; want EOF", test.src, s.tok)
+ }
+ }
+}
+
+func TestScanErrors(t *testing.T) {
+ for _, test := range []struct {
+ src, err string
+ line, col uint // 0-based
+ }{
+ // Note: Positions for lexical errors are the earliest position
+ // where the error is apparent, not the beginning of the respective
+ // token.
+
+ // rune-level errors
+ {"fo\x00o", "invalid NUL character", 0, 2},
+ {"foo\n\ufeff bar", "invalid BOM in the middle of the file", 1, 0},
+ {"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0},
+
+ // token-level errors
+ {"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0},
+ {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */},
+ {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0},
+ {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */},
+
+ {"x + #y", "invalid character U+0023 '#'", 0, 4},
+ {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3},
+ {"0123456789", "invalid digit '8' in octal literal", 0, 8},
+ {"0123456789. /* foobar", "comment not terminated", 0, 12}, // valid float constant
+ {"0123456789e0 /*\nfoobar", "comment not terminated", 0, 13}, // valid float constant
+ {"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12},
+
+ {`''`, "empty rune literal or unescaped '", 0, 1},
+ {"'\n", "newline in rune literal", 0, 1},
+ {`'\`, "rune literal not terminated", 0, 0},
+ {`'\'`, "rune literal not terminated", 0, 0},
+ {`'\x`, "rune literal not terminated", 0, 0},
+ {`'\x'`, "invalid character '\\'' in hexadecimal escape", 0, 3},
+ {`'\y'`, "unknown escape", 0, 2},
+ {`'\x0'`, "invalid character '\\'' in hexadecimal escape", 0, 4},
+ {`'\00'`, "invalid character '\\'' in octal escape", 0, 4},
+ {`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape
+ {`'\378`, "invalid character '8' in octal escape", 0, 4},
+ {`'\400'`, "octal escape value 256 > 255", 0, 5},
+ {`'xx`, "rune literal not terminated", 0, 0},
+ {`'xx'`, "more than one character in rune literal", 0, 0},
+
+ {"\n \"foo\n", "newline in string", 1, 7},
+ {`"`, "string not terminated", 0, 0},
+ {`"foo`, "string not terminated", 0, 0},
+ {"`", "string not terminated", 0, 0},
+ {"`foo", "string not terminated", 0, 0},
+ {"/*/", "comment not terminated", 0, 0},
+ {"/*\n\nfoo", "comment not terminated", 0, 0},
+ {`"\`, "string not terminated", 0, 0},
+ {`"\"`, "string not terminated", 0, 0},
+ {`"\x`, "string not terminated", 0, 0},
+ {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3},
+ {`"\y"`, "unknown escape", 0, 2},
+ {`"\x0"`, "invalid character '\"' in hexadecimal escape", 0, 4},
+ {`"\00"`, "invalid character '\"' in octal escape", 0, 4},
+ {`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape
+ {`"\378"`, "invalid character '8' in octal escape", 0, 4},
+ {`"\400"`, "octal escape value 256 > 255", 0, 5},
+
+ {`s := "foo\z"`, "unknown escape", 0, 10},
+ {`s := "foo\z00\nbar"`, "unknown escape", 0, 10},
+ {`"\x`, "string not terminated", 0, 0},
+ {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3},
+ {`var s string = "\x"`, "invalid character '\"' in hexadecimal escape", 0, 18},
+ {`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 18},
+
+ {"0b.0", "invalid radix point in binary literal", 0, 2},
+ {"0x.p0\n", "hexadecimal literal has no digits", 0, 3},
+
+ // former problem cases
+ {"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0},
+ } {
+ var s scanner
+ var line, col uint
+ var err string
+ s.init(strings.NewReader(test.src), func(l, c uint, msg string) {
+ if err == "" {
+ line, col = l-linebase, c-colbase
+ err = msg
+ }
+ }, 0)
+
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ if err != "" {
+ if err != test.err {
+ t.Errorf("%q: got err = %q; want %q", test.src, err, test.err)
+ }
+ if line != test.line {
+ t.Errorf("%q: got line = %d; want %d", test.src, line, test.line)
+ }
+ if col != test.col {
+ t.Errorf("%q: got col = %d; want %d", test.src, col, test.col)
+ }
+ } else {
+ t.Errorf("%q: got no error; want %q", test.src, test.err)
+ }
+ }
+}
+
+func TestDirectives(t *testing.T) {
+ for _, src := range []string{
+ "line",
+ "// line",
+ "//line",
+ "//line foo",
+ "//line foo%bar",
+
+ "go",
+ "// go:",
+ "//go:",
+ "//go :foo",
+ "//go:foo",
+ "//go:foo%bar",
+ } {
+ got := ""
+ var s scanner
+ s.init(strings.NewReader(src), func(_, col uint, msg string) {
+ if col != colbase {
+ t.Errorf("%s: got col = %d; want %d", src, col, colbase)
+ }
+ if msg == "" {
+ t.Errorf("%s: handler called with empty msg", src)
+ }
+ got = msg
+ }, directives)
+
+ s.next()
+ if strings.HasPrefix(src, "//line ") || strings.HasPrefix(src, "//go:") {
+ // handler should have been called
+ if got != src {
+ t.Errorf("got %s; want %s", got, src)
+ }
+ } else {
+ // handler should not have been called
+ if got != "" {
+ t.Errorf("got %s for %s", got, src)
+ }
+ }
+ }
+}
+
+func TestIssue21938(t *testing.T) {
+ s := "/*" + strings.Repeat(" ", 4089) + "*/ .5"
+
+ var got scanner
+ got.init(strings.NewReader(s), errh, 0)
+ got.next()
+
+ if got.tok != _Literal || got.lit != ".5" {
+ t.Errorf("got %s %q; want %s %q", got.tok, got.lit, _Literal, ".5")
+ }
+}
+
+func TestIssue33961(t *testing.T) {
+ literals := `08__ 0b.p 0b_._p 0x.e 0x.p`
+ for _, lit := range strings.Split(literals, " ") {
+ n := 0
+ var got scanner
+ got.init(strings.NewReader(lit), func(_, _ uint, msg string) {
+ // fmt.Printf("%s: %s\n", lit, msg) // uncomment for debugging
+ n++
+ }, 0)
+ got.next()
+
+ if n != 1 {
+ t.Errorf("%q: got %d errors; want 1", lit, n)
+ continue
+ }
+
+ if !got.bad {
+ t.Errorf("%q: got error but bad not set", lit)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go
new file mode 100644
index 0000000..01b5921
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/source.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements source, a buffered rune reader
+// specialized for scanning Go code: Reading
+// ASCII characters, maintaining current (line, col)
+// position information, and recording of the most
+// recently read source segment are highly optimized.
+// This file is self-contained (go tool compile source.go
+// compiles) and thus could be made into its own package.
+
+package syntax
+
+import (
+ "io"
+ "unicode/utf8"
+)
+
+// The source buffer is accessed using three indices b (begin),
+// r (read), and e (end):
+//
+// - If b >= 0, it points to the beginning of a segment of most
+// recently read characters (typically a Go literal).
+//
+// - r points to the byte immediately following the most recently
+// read character ch, which starts at r-chw.
+//
+// - e points to the byte immediately following the last byte that
+// was read into the buffer.
+//
+// The buffer content is terminated at buf[e] with the sentinel
+// character utf8.RuneSelf. This makes it possible to test for
+// the common case of ASCII characters with a single 'if' (see
+// nextch method).
+//
+// +------ content in use -------+
+// v v
+// buf [...read...|...segment...|ch|...unread...|s|...free...]
+// ^ ^ ^ ^
+// | | | |
+// b r-chw r e
+//
+// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel
+
+type source struct {
+ in io.Reader
+ errh func(line, col uint, msg string)
+
+ buf []byte // source buffer
+ ioerr error // pending I/O error, or nil
+ b, r, e int // buffer indices (see comment above)
+ line, col uint // source position of ch (0-based)
+ ch rune // most recently read character
+ chw int // width of ch
+}
+
+const sentinel = utf8.RuneSelf
+
+func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) {
+ s.in = in
+ s.errh = errh
+
+ if s.buf == nil {
+ s.buf = make([]byte, nextSize(0))
+ }
+ s.buf[0] = sentinel
+ s.ioerr = nil
+ s.b, s.r, s.e = -1, 0, 0
+ s.line, s.col = 0, 0
+ s.ch = ' '
+ s.chw = 0
+}
+
+// starting points for line and column numbers
+const linebase = 1
+const colbase = 1
+
+// pos returns the (line, col) source position of s.ch.
+func (s *source) pos() (line, col uint) {
+ return linebase + s.line, colbase + s.col
+}
+
+// error reports the error msg at source position s.pos().
+func (s *source) error(msg string) {
+ line, col := s.pos()
+ s.errh(line, col, msg)
+}
+
+// start starts a new active source segment (including s.ch).
+// As long as stop has not been called, the active segment's
+// bytes (excluding s.ch) may be retrieved by calling segment.
+func (s *source) start() { s.b = s.r - s.chw }
+func (s *source) stop() { s.b = -1 }
+func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] }
+
+// rewind rewinds the scanner's read position and character s.ch
+// to the start of the currently active segment, which must not
+// contain any newlines (otherwise position information will be
+// incorrect). Currently, rewind is only needed for handling the
+// source sequence ".."; it must not be called outside an active
+// segment.
+func (s *source) rewind() {
+ // ok to verify precondition - rewind is rarely called
+ if s.b < 0 {
+ panic("no active segment")
+ }
+ s.col -= uint(s.r - s.b)
+ s.r = s.b
+ s.nextch()
+}
+
+func (s *source) nextch() {
+redo:
+ s.col += uint(s.chw)
+ if s.ch == '\n' {
+ s.line++
+ s.col = 0
+ }
+
+ // fast common case: at least one ASCII character
+ if s.ch = rune(s.buf[s.r]); s.ch < sentinel {
+ s.r++
+ s.chw = 1
+ if s.ch == 0 {
+ s.error("invalid NUL character")
+ goto redo
+ }
+ return
+ }
+
+ // slower general case: add more bytes to buffer if we don't have a full rune
+ for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil {
+ s.fill()
+ }
+
+ // EOF
+ if s.r == s.e {
+ if s.ioerr != io.EOF {
+ // ensure we never start with a '/' (e.g., rooted path) in the error message
+ s.error("I/O error: " + s.ioerr.Error())
+ s.ioerr = nil
+ }
+ s.ch = -1
+ s.chw = 0
+ return
+ }
+
+ s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e])
+ s.r += s.chw
+
+ if s.ch == utf8.RuneError && s.chw == 1 {
+ s.error("invalid UTF-8 encoding")
+ goto redo
+ }
+
+ // BOM's are only allowed as the first character in a file
+ const BOM = 0xfeff
+ if s.ch == BOM {
+ if s.line > 0 || s.col > 0 {
+ s.error("invalid BOM in the middle of the file")
+ }
+ goto redo
+ }
+}
+
+// fill reads more source bytes into s.buf.
+// It returns with at least one more byte in the buffer, or with s.ioerr != nil.
+func (s *source) fill() {
+ // determine content to preserve
+ b := s.r
+ if s.b >= 0 {
+ b = s.b
+ s.b = 0 // after buffer has grown or content has been moved down
+ }
+ content := s.buf[b:s.e]
+
+ // grow buffer or move content down
+ if len(content)*2 > len(s.buf) {
+ s.buf = make([]byte, nextSize(len(s.buf)))
+ copy(s.buf, content)
+ } else if b > 0 {
+ copy(s.buf, content)
+ }
+ s.r -= b
+ s.e -= b
+
+ // read more data: try a limited number of times
+ for i := 0; i < 10; i++ {
+ var n int
+ n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel
+ if n < 0 {
+ panic("negative read") // incorrect underlying io.Reader implementation
+ }
+ if n > 0 || s.ioerr != nil {
+ s.e += n
+ s.buf[s.e] = sentinel
+ return
+ }
+ // n == 0
+ }
+
+ s.buf[s.e] = sentinel
+ s.ioerr = io.ErrNoProgress
+}
+
+// nextSize returns the next bigger size for a buffer of a given size.
+func nextSize(size int) int {
+ const min = 4 << 10 // 4K: minimum buffer size
+ const max = 1 << 20 // 1M: maximum buffer size which is still doubled
+ if size < min {
+ return min
+ }
+ if size <= max {
+ return size << 1
+ }
+ return size + max
+}
diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go
new file mode 100644
index 0000000..83b102d
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/syntax.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+// Mode describes the parser mode.
+type Mode uint
+
+// Modes supported by the parser.
+const (
+ CheckBranches Mode = 1 << iota // check correct use of labels, break, continue, and goto statements
+)
+
+// Error describes a syntax error. Error implements the error interface.
+type Error struct {
+ Pos Pos
+ Msg string
+}
+
+func (err Error) Error() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Msg)
+}
+
+var _ error = Error{} // verify that Error implements error
+
+// An ErrorHandler is called for each error encountered reading a .go file.
+type ErrorHandler func(err error)
+
+// A Pragma value augments a package, import, const, func, type, or var declaration.
+// Its meaning is entirely up to the PragmaHandler,
+// except that nil is used to mean “no pragma seen.”
+type Pragma interface{}
+
+// A PragmaHandler is used to process //go: directives while scanning.
+// It is passed the current pragma value, which starts out being nil,
+// and it returns an updated pragma value.
+// The text is the directive, with the "//" prefix stripped.
+// The current pragma is saved at each package, import, const, func, type, or var
+// declaration, into the File, ImportDecl, ConstDecl, FuncDecl, TypeDecl, or VarDecl node.
+//
+// If text is the empty string, the pragma is being returned
+// to the handler unused, meaning it appeared before a non-declaration.
+// The handler may wish to report an error. In this case, pos is the
+// current parser position, not the position of the pragma itself.
+// Blank specifies whether the line is blank before the pragma.
+type PragmaHandler func(pos Pos, blank bool, text string, current Pragma) Pragma
+
+// Parse parses a single Go source file from src and returns the corresponding
+// syntax tree. If there are errors, Parse will return the first error found,
+// and a possibly partially constructed syntax tree, or nil.
+//
+// If errh != nil, it is called with each error encountered, and Parse will
+// process as much source as possible. In this case, the returned syntax tree
+// is only nil if no correct package clause was found.
+// If errh is nil, Parse will terminate immediately upon encountering the first
+// error, and the returned syntax tree is nil.
+//
+// If pragh != nil, it is called with each pragma encountered.
+func Parse(base *PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, first error) {
+ defer func() {
+ if p := recover(); p != nil {
+ if err, ok := p.(Error); ok {
+ first = err
+ return
+ }
+ panic(p)
+ }
+ }()
+
+ var p parser
+ p.init(base, src, errh, pragh, mode)
+ p.next()
+ return p.fileOrNil(), p.first
+}
+
+// ParseFile behaves like Parse but it reads the source from the named file.
+func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ if errh != nil {
+ errh(err)
+ }
+ return nil, err
+ }
+ defer f.Close()
+ return Parse(NewFileBase(filename), f, errh, pragh, mode)
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/chans.go b/src/cmd/compile/internal/syntax/testdata/chans.go
new file mode 100644
index 0000000..d4c4207
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/chans.go
@@ -0,0 +1,66 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chans
+
+import "runtime"
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[T any]() (*Sender[T], *Receiver[T]) {
+ c := make(chan T)
+ d := make(chan bool)
+ s := &Sender[T]{values: c, done: d}
+ r := &Receiver[T]{values: c, done: d}
+ runtime.SetFinalizer(r, r.finalize)
+ return s, r
+}
+
+// A sender is used to send values to a Receiver.
+type Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+// Send sends a value to the receiver. It returns whether any more
+// values may be sent; if it returns false the value was not sent.
+func (s *Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[T]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+// Next returns the next value from the channel. The bool result
+// indicates whether the value is valid, or whether the Sender has
+// been closed and no more values will be received.
+func (r *Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[T]) finalize() {
+ close(r.done)
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/fallthrough.go b/src/cmd/compile/internal/syntax/testdata/fallthrough.go
new file mode 100644
index 0000000..851da81
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/fallthrough.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package fallthroughs
+
+func _() {
+ var x int
+ switch x {
+ case 0:
+ fallthrough
+
+ case 1:
+ fallthrough // ERROR fallthrough statement out of place
+ {
+ }
+
+ case 2:
+ {
+ fallthrough // ERROR fallthrough statement out of place
+ }
+
+ case 3:
+ for {
+ fallthrough // ERROR fallthrough statement out of place
+ }
+
+ case 4:
+ fallthrough // trailing empty statements are ok
+ ;
+ ;
+
+ case 5:
+ fallthrough
+
+ default:
+ fallthrough // ERROR cannot fallthrough final case in switch
+ }
+
+ fallthrough // ERROR fallthrough statement out of place
+
+ if true {
+ fallthrough // ERROR fallthrough statement out of place
+ }
+
+ for {
+ fallthrough // ERROR fallthrough statement out of place
+ }
+
+ var t any
+ switch t.(type) {
+ case int:
+ fallthrough // ERROR cannot fallthrough in type switch
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/interface.go b/src/cmd/compile/internal/syntax/testdata/interface.go
new file mode 100644
index 0000000..dbc4187
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/interface.go
@@ -0,0 +1,74 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for interfaces containing
+// constraint elements.
+
+package p
+
+type _ interface {
+ m()
+ E
+}
+
+type _ interface {
+ m()
+ ~int
+ int | string
+ int | ~string
+ ~int | ~string
+}
+
+type _ interface {
+ m()
+ ~int
+ T[int, string] | string
+ int | ~T[string, struct{}]
+ ~int | ~string
+}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue20789.go b/src/cmd/compile/internal/syntax/testdata/issue20789.go
new file mode 100644
index 0000000..0d5988b
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue20789.go
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure this doesn't crash the compiler.
+// Line 9 must end in EOF for this test (no newline).
+
+package e
+func([<-chan<-[func /* ERROR unexpected u */ u){go \ No newline at end of file
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23385.go b/src/cmd/compile/internal/syntax/testdata/issue23385.go
new file mode 100644
index 0000000..2459a73
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue23385.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check error message for use of = instead of == .
+
+package p
+
+func _() {
+ if true || 0 /* ERROR cannot use assignment .* as value */ = 1 {
+ }
+}
+
+func _(a, b string) {
+ if a == "a" && b /* ERROR cannot use assignment .* as value */ = "b" {
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23434.go b/src/cmd/compile/internal/syntax/testdata/issue23434.go
new file mode 100644
index 0000000..e436abf
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue23434.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for go.dev/issue/23434: Better synchronization of
+// parser after missing type. There should be exactly
+// one error each time, with now follow errors.
+
+package p
+
+type T /* ERROR unexpected newline */
+
+type Map map[int] /* ERROR unexpected newline */
+
+// Examples from go.dev/issue/23434:
+
+func g() {
+ m := make(map[string] /* ERROR unexpected ! */ !)
+ for {
+ x := 1
+ print(x)
+ }
+}
+
+func f() {
+ m := make(map[string] /* ERROR unexpected \) */ )
+ for {
+ x := 1
+ print(x)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue31092.go b/src/cmd/compile/internal/syntax/testdata/issue31092.go
new file mode 100644
index 0000000..0bd40bd
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue31092.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for go.dev/issue/31092: Better synchronization of
+// parser after seeing an := rather than an = in a const,
+// type, or variable declaration.
+
+package p
+
+const _ /* ERROR unexpected := */ := 0
+type _ /* ERROR unexpected := */ := int
+var _ /* ERROR unexpected := */ := 0
+
+const _ int /* ERROR unexpected := */ := 0
+var _ int /* ERROR unexpected := */ := 0
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43527.go b/src/cmd/compile/internal/syntax/testdata/issue43527.go
new file mode 100644
index 0000000..99a8c09
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue43527.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // 0 and 1-element []-lists are syntactically valid
+ _[A, B /* ERROR missing type constraint */ ] int
+ _[A, /* ERROR missing type parameter name */ interface{}] int
+ _[A, B, C /* ERROR missing type constraint */ ] int
+ _[A B, C /* ERROR missing type constraint */ ] int
+ _[A B, /* ERROR missing type parameter name */ interface{}] int
+ _[A B, /* ERROR missing type parameter name */ interface{}, C D] int
+ _[A B, /* ERROR missing type parameter name */ interface{}, C, D] int
+ _[A B, /* ERROR missing type parameter name */ interface{}, C, interface{}] int
+ _[A B, C interface{}, D, /* ERROR missing type parameter name */ interface{}] int
+)
+
+// function type parameters use the same parsing routine - just have a couple of tests
+
+func _[A, B /* ERROR missing type constraint */ ]() {}
+func _[A, /* ERROR missing type parameter name */ interface{}]() {}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43674.go b/src/cmd/compile/internal/syntax/testdata/issue43674.go
new file mode 100644
index 0000000..51c692a
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue43674.go
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(... /* ERROR [.][.][.] is missing type */ )
+func _(... /* ERROR [.][.][.] is missing type */ , int)
+
+func _(a, b ... /* ERROR [.][.][.] is missing type */ )
+func _(a, b ... /* ERROR [.][.][.] is missing type */ , x int)
+
+func _()(... /* ERROR [.][.][.] is missing type */ )
diff --git a/src/cmd/compile/internal/syntax/testdata/issue46558.go b/src/cmd/compile/internal/syntax/testdata/issue46558.go
new file mode 100644
index 0000000..a22b600
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue46558.go
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F(s string) {
+ switch s[0] {
+ case 'a':
+ case s[2] { // ERROR unexpected {
+ case 'b':
+ }
+ }
+} // ERROR non-declaration statement
diff --git a/src/cmd/compile/internal/syntax/testdata/issue47704.go b/src/cmd/compile/internal/syntax/testdata/issue47704.go
new file mode 100644
index 0000000..e4cdad1
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue47704.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ _ = m[] // ERROR expected operand
+ _ = m[x,]
+ _ = m[x /* ERROR unexpected a */ a b c d]
+}
+
+// test case from the issue
+func f(m map[int]int) int {
+ return m[0 // ERROR expected comma, \: or \]
+ ]
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue48382.go b/src/cmd/compile/internal/syntax/testdata/issue48382.go
new file mode 100644
index 0000000..7c024a0
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue48382.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type _ func /* ERROR function type must have no type parameters */ [ /* ERROR empty type parameter list */ ]()
+type _ func /* ERROR function type must have no type parameters */ [ x /* ERROR missing type constraint */ ]()
+type _ func /* ERROR function type must have no type parameters */ [P any]()
+
+var _ = (func /* ERROR function type must have no type parameters */ [P any]())(nil)
+var _ = func /* ERROR function type must have no type parameters */ [P any]() {}
+
+type _ interface{
+ m /* ERROR interface method must have no type parameters */ [P any]()
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue49205.go b/src/cmd/compile/internal/syntax/testdata/issue49205.go
new file mode 100644
index 0000000..bbcc950
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue49205.go
@@ -0,0 +1,27 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// test case from issue
+
+type _ interface{
+ m /* ERROR unexpected int in interface type; possibly missing semicolon or newline or } */ int
+}
+
+// other cases where the fix for this issue affects the error message
+
+const (
+ x int = 10 /* ERROR unexpected literal "foo" in grouped declaration; possibly missing semicolon or newline or \) */ "foo"
+)
+
+var _ = []int{1, 2, 3 /* ERROR unexpected int in composite literal; possibly missing comma or } */ int }
+
+type _ struct {
+ x y /* ERROR syntax error: unexpected comma in struct type; possibly missing semicolon or newline or } */ ,
+}
+
+func f(a, b c /* ERROR unexpected d in parameter list; possibly missing comma or \) */ d) {
+ f(a, b, c /* ERROR unexpected d in argument list; possibly missing comma or \) */ d)
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue49482.go b/src/cmd/compile/internal/syntax/testdata/issue49482.go
new file mode 100644
index 0000000..1fc303d
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue49482.go
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // these need a comma to disambiguate
+ _[P *T,] struct{}
+ _[P *T, _ any] struct{}
+ _[P (*T),] struct{}
+ _[P (*T), _ any] struct{}
+ _[P (T),] struct{}
+ _[P (T), _ any] struct{}
+
+ // these parse as name followed by type
+ _[P *struct{}] struct{}
+ _[P (*struct{})] struct{}
+ _[P ([]int)] struct{}
+
+ // array declarations
+ _ [P(T)]struct{}
+ _ [P((T))]struct{}
+ _ [P * *T] struct{} // this could be a name followed by a type but it makes the rules more complicated
+ _ [P * T]struct{}
+ _ [P(*T)]struct{}
+ _ [P(**T)]struct{}
+ _ [P * T - T]struct{}
+ _ [P*T-T /* ERROR unexpected comma */ ,]struct{}
+ _ [10 /* ERROR unexpected comma */ ,]struct{}
+)
diff --git a/src/cmd/compile/internal/syntax/testdata/issue52391.go b/src/cmd/compile/internal/syntax/testdata/issue52391.go
new file mode 100644
index 0000000..f2098ce
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue52391.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type _ interface {
+ int
+ (int)
+ (*int)
+ *([]byte)
+ ~(int)
+ (int) | (string)
+ (int) | ~(string)
+ (/* ERROR unexpected ~ */ ~int)
+ (int /* ERROR unexpected \| */ | /* ERROR unexpected string */ string /* ERROR unexpected \) */ )
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue56022.go b/src/cmd/compile/internal/syntax/testdata/issue56022.go
new file mode 100644
index 0000000..d28d35c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue56022.go
@@ -0,0 +1,10 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func /* ERROR unexpected {, expected name or \($ */ {}
+func (T) /* ERROR unexpected {, expected name$ */ {}
+func (T) /* ERROR unexpected \(, expected name$ */ () {}
+func (T) /* ERROR unexpected \(, expected name$ */ ()
diff --git a/src/cmd/compile/internal/syntax/testdata/issue60599.go b/src/cmd/compile/internal/syntax/testdata/issue60599.go
new file mode 100644
index 0000000..711d97b
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue60599.go
@@ -0,0 +1,11 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(x, y, z int) {
+ if x /* ERROR cannot use assignment x = y as value */ = y {}
+ if x || y /* ERROR cannot use assignment \(x || y\) = z as value */ = z {}
+ if x /* ERROR cannot use assignment x = \(y || z\) as value */ = y || z {}
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue63835.go b/src/cmd/compile/internal/syntax/testdata/issue63835.go
new file mode 100644
index 0000000..3d165c0
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue63835.go
@@ -0,0 +1,9 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func (x string) /* ERROR syntax error: unexpected \[, expected name */ []byte {
+ return []byte(x)
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/linalg.go b/src/cmd/compile/internal/syntax/testdata/linalg.go
new file mode 100644
index 0000000..822d028
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/linalg.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linalg
+
+import "math"
+
+// Numeric is type bound that matches any numeric type.
+// It would likely be in a constraints package in the standard library.
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ float32 | ~float64 |
+ complex64 | ~complex128
+}
+
+func DotProduct[T Numeric](s1, s2 []T) T {
+ if len(s1) != len(s2) {
+ panic("DotProduct: slices of unequal length")
+ }
+ var r T
+ for i := range s1 {
+ r += s1[i] * s2[i]
+ }
+ return r
+}
+
+// NumericAbs matches numeric types with an Abs method.
+type NumericAbs[T any] interface {
+ Numeric
+
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func AbsDifference[T NumericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// OrderedNumeric is a type bound that matches numeric types that support the < operator.
+type OrderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ float32 | ~float64
+}
+
+// Complex is a type bound that matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// OrderedAbs is a helper type that defines an Abs method for
+// ordered numeric types.
+type OrderedAbs[T OrderedNumeric] T
+
+func (a OrderedAbs[T]) Abs() OrderedAbs[T] {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
+
+// ComplexAbs is a helper type that defines an Abs method for
+// complex types.
+type ComplexAbs[T Complex] T
+
+func (a ComplexAbs[T]) Abs() ComplexAbs[T] {
+ r := float64(real(a))
+ i := float64(imag(a))
+ d := math.Sqrt(r * r + i * i)
+ return ComplexAbs[T](complex(d, 0))
+}
+
+func OrderedAbsDifference[T OrderedNumeric](a, b T) T {
+ return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b)))
+}
+
+func ComplexAbsDifference[T Complex](a, b T) T {
+ return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b)))
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/map.go b/src/cmd/compile/internal/syntax/testdata/map.go
new file mode 100644
index 0000000..a508d21
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/map.go
@@ -0,0 +1,112 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+import "chans"
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans.Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans.Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/map2.go b/src/cmd/compile/internal/syntax/testdata/map2.go
new file mode 100644
index 0000000..3d1cbfb
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/map2.go
@@ -0,0 +1,146 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is like map.go, but instead of importing chans, it contains
+// the necessary functionality at the end of the file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans_Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans_Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// chans
+
+func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T])
+
+// A sender is used to send values to a Receiver.
+type chans_Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+func (s *chans_Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+func (s *chans_Sender[T]) Close() {
+ close(s.values)
+}
+
+type chans_Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+func (r *chans_Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/sample.go b/src/cmd/compile/internal/syntax/testdata/sample.go
new file mode 100644
index 0000000..5a2b4bf
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/sample.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a sample test file illustrating the use
+// of error comments with the error test harness.
+
+package p
+
+// The following are invalid error comments; they are
+// silently ignored. The prefix must be exactly one of
+// "/* ERROR " or "// ERROR ".
+//
+/*ERROR*/
+/*ERROR foo*/
+/* ERRORfoo */
+/* ERROR foo */
+//ERROR
+// ERROR
+// ERRORfoo
+// ERROR foo
+
+// This is a valid error comment; it applies to the
+// immediately following token.
+import "math" /* ERROR unexpected comma */ ,
+
+// If there are multiple /*-style error comments before
+// the next token, only the last one is considered.
+type x = /* ERROR ignored */ /* ERROR literal 0 in type declaration */ 0
+
+// A //-style error comment matches any error position
+// on the same line.
+func () foo() // ERROR method has no receiver
diff --git a/src/cmd/compile/internal/syntax/testdata/slices.go b/src/cmd/compile/internal/syntax/testdata/slices.go
new file mode 100644
index 0000000..9265109
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/slices.go
@@ -0,0 +1,68 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices implements various slice algorithms.
+package slices
+
+// Map turns a []T1 to a []T2 using a mapping function.
+func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
+ r := make([]T2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []T1 to a single value using a reduction function.
+func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[T any](s []T, f func(T) bool) []T {
+ var r []T
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// Example uses
+
+func limiter(x int) byte {
+ switch {
+ case x < 0:
+ return 0
+ default:
+ return byte(x)
+ case x > 255:
+ return 255
+ }
+}
+
+var input = []int{-4, 68954, 7, 44, 0, -555, 6945}
+var limited1 = Map[int, byte](input, limiter)
+var limited2 = Map(input, limiter) // using type inference
+
+func reducer(x float64, y int) float64 {
+ return x + float64(y)
+}
+
+var reduced1 = Reduce[int, float64](input, 0, reducer)
+var reduced2 = Reduce(input, 1i, reducer) // using type inference
+var reduced3 = Reduce(input, 1, reducer) // using type inference
+
+func filter(x int) bool {
+ return x&1 != 0
+}
+
+var filtered1 = Filter[int](input, filter)
+var filtered2 = Filter(input, filter) // using type inference
+
diff --git a/src/cmd/compile/internal/syntax/testdata/smoketest.go b/src/cmd/compile/internal/syntax/testdata/smoketest.go
new file mode 100644
index 0000000..6b3593a
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/smoketest.go
@@ -0,0 +1,73 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains basic generic code snippets.
+
+package p
+
+// type parameter lists
+type B[P any] struct{}
+type _[P interface{}] struct{}
+type _[P B] struct{}
+type _[P B[P]] struct{}
+
+type _[A, B, C any] struct{}
+type _[A, B, C B] struct{}
+type _[A, B, C B[A, B, C]] struct{}
+type _[A1, A2 B1, A3 B2, A4, A5, A6 B3] struct{}
+
+type _[A interface{}] struct{}
+type _[A, B interface{ m() }] struct{}
+
+type _[A, B, C any] struct{}
+
+// in functions
+func _[P any]()
+func _[P interface{}]()
+func _[P B]()
+func _[P B[P]]()
+
+// type instantiations
+type _ T[int]
+
+// in expressions
+var _ = T[int]{}
+
+// in embedded types
+type _ struct{ T[int] }
+
+// interfaces
+type _ interface {
+ m()
+ ~int
+}
+
+type _ interface {
+ ~int | ~float | ~string
+ ~complex128
+ underlying(underlying underlying) underlying
+}
+
+type _ interface {
+ T
+ T[int]
+}
+
+// tricky cases
+func _(T[P], T[P1, P2])
+func _(a [N]T)
+
+type _ struct {
+ T[P]
+ T[P1, P2]
+ f[N]
+}
+type _ interface {
+ m()
+
+ // instantiated types
+ T[ /* ERROR empty type argument list */ ]
+ T[P]
+ T[P1, P2]
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/tparams.go b/src/cmd/compile/internal/syntax/testdata/tparams.go
new file mode 100644
index 0000000..4b68a15
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/tparams.go
@@ -0,0 +1,57 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type t[a, b /* ERROR missing type constraint */ ] struct{}
+type t[a t, b t, c /* ERROR missing type constraint */ ] struct{}
+type t struct {
+ t [n]byte
+ t[a]
+ t[a, b]
+}
+type t interface {
+ t[a]
+ m /* ERROR method must have no type parameters */ [_ _, /* ERROR mixed */ _]()
+ t[a, b]
+}
+
+func f[ /* ERROR empty type parameter list */ ]()
+func f[a, b /* ERROR missing type constraint */ ]()
+func f[a t, b t, c /* ERROR missing type constraint */ ]()
+
+func f[a b, /* ERROR expected ] */ 0] ()
+
+// go.dev/issue/49482
+type (
+ t[a *[]int] struct{}
+ t[a *t,] struct{}
+ t[a *t|[]int] struct{}
+ t[a *t|t,] struct{}
+ t[a *t|~t,] struct{}
+ t[a *struct{}|t] struct{}
+ t[a *t|struct{}] struct{}
+ t[a *struct{}|~t] struct{}
+)
+
+// go.dev/issue/51488
+type (
+ t[a *t|t,] struct{}
+ t[a *t|t, b t] struct{}
+ t[a *t|t] struct{}
+ t[a *[]t|t] struct{}
+ t[a ([]t)] struct{}
+ t[a ([]t)|t] struct{}
+)
+
+// go.dev/issue/60812
+type (
+ t [t]struct{}
+ t [[]t]struct{}
+ t [[t]t]struct{}
+ t [/* ERROR missing type parameter name or invalid array length */ t[t]]struct{}
+ t [t t[t], /* ERROR missing type parameter name */ t[t]]struct{}
+ t [/* ERROR missing type parameter name */ t[t], t t[t]]struct{}
+ t [/* ERROR missing type parameter name */ t[t], t[t]]struct{} // report only first error
+)
diff --git a/src/cmd/compile/internal/syntax/testdata/typeset.go b/src/cmd/compile/internal/syntax/testdata/typeset.go
new file mode 100644
index 0000000..819025c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/typeset.go
@@ -0,0 +1,91 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for typeset-only constraint elements.
+
+package p
+
+type (
+ _[_ t] t
+ _[_ ~t] t
+ _[_ t|t] t
+ _[_ ~t|t] t
+ _[_ t|~t] t
+ _[_ ~t|~t] t
+
+ _[_ t, _, _ t|t] t
+ _[_ t, _, _ ~t|t] t
+ _[_ t, _, _ t|~t] t
+ _[_ t, _, _ ~t|~t] t
+
+ _[_ t.t] t
+ _[_ ~t.t] t
+ _[_ t.t|t.t] t
+ _[_ ~t.t|t.t] t
+ _[_ t.t|~t.t] t
+ _[_ ~t.t|~t.t] t
+
+ _[_ t, _, _ t.t|t.t] t
+ _[_ t, _, _ ~t.t|t.t] t
+ _[_ t, _, _ t.t|~t.t] t
+ _[_ t, _, _ ~t.t|~t.t] t
+
+ _[_ struct{}] t
+ _[_ ~struct{}] t
+
+ _[_ struct{}|t] t
+ _[_ ~struct{}|t] t
+ _[_ struct{}|~t] t
+ _[_ ~struct{}|~t] t
+
+ _[_ t|struct{}] t
+ _[_ ~t|struct{}] t
+ _[_ t|~struct{}] t
+ _[_ ~t|~struct{}] t
+
+ // test cases for go.dev/issue/49175
+ _[_ []t]t
+ _[_ [1]t]t
+ _[_ ~[]t]t
+ _[_ ~[1]t]t
+ t [ /* ERROR missing type parameter name */ t[0]]t
+)
+
+// test cases for go.dev/issue/49174
+func _[_ t]() {}
+func _[_ []t]() {}
+func _[_ [1]t]() {}
+func _[_ []t | t]() {}
+func _[_ [1]t | t]() {}
+func _[_ t | []t]() {}
+func _[_ []t | []t]() {}
+func _[_ [1]t | [1]t]() {}
+func _[_ t[t] | t[t]]() {}
+
+// Single-expression type parameter lists and those that don't start
+// with a (type parameter) name are considered array sizes.
+// The term must be a valid expression (it could be a type incl. a
+// tilde term) but the type-checker will complain.
+type (
+ _[t] t
+ _[t|t] t
+
+ // These are invalid and the type-checker will complain.
+ _[~t] t
+ _[~t|t] t
+ _[t|~t] t
+ _[~t|~t] t
+)
+
+type (
+ _[_ t, t /* ERROR missing type constraint */ ] t
+ _[_ ~t, t /* ERROR missing type constraint */ ] t
+ _[_ t, /* ERROR missing type parameter name */ ~t] t
+ _[_ ~t, /* ERROR missing type parameter name */ ~t] t
+
+ _[_ t|t, /* ERROR missing type parameter name */ t|t] t
+ _[_ ~t|t, /* ERROR missing type parameter name */ t|t] t
+ _[_ t|t, /* ERROR missing type parameter name */ ~t|t] t
+ _[_ ~t|t, /* ERROR missing type parameter name */ ~t|t] t
+)
diff --git a/src/cmd/compile/internal/syntax/testing.go b/src/cmd/compile/internal/syntax/testing.go
new file mode 100644
index 0000000..202b2ef
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testing.go
@@ -0,0 +1,69 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements testing support.
+
+package syntax
+
+import (
+ "io"
+ "regexp"
+)
+
+// CommentsDo parses the given source and calls the provided handler for each
+// comment or error. If the text provided to handler starts with a '/' it is
+// the comment text; otherwise it is the error message.
+func CommentsDo(src io.Reader, handler func(line, col uint, text string)) {
+ var s scanner
+ s.init(src, handler, comments)
+ for s.tok != _EOF {
+ s.next()
+ }
+}
+
+// CommentMap collects all comments in the given src with comment text
+// that matches the supplied regular expression rx and returns them as
+// []Error lists in a map indexed by line number. The comment text is
+// the comment with any comment markers ("//", "/*", or "*/") stripped.
+// The position for each Error is the position of the token immediately
+// preceding the comment and the Error message is the comment text,
+// with all comments that are on the same line collected in a slice, in
+// source order. If there is no preceding token (the matching comment
+// appears at the beginning of the file), then the recorded position
+// is unknown (line, col = 0, 0). If there are no matching comments,
+// the result is nil.
+func CommentMap(src io.Reader, rx *regexp.Regexp) (res map[uint][]Error) {
+ // position of previous token
+ var base *PosBase
+ var prev struct{ line, col uint }
+
+ var s scanner
+ s.init(src, func(_, _ uint, text string) {
+ if text[0] != '/' {
+ return // not a comment, ignore
+ }
+ if text[1] == '*' {
+ text = text[:len(text)-2] // strip trailing */
+ }
+ text = text[2:] // strip leading // or /*
+ if rx.MatchString(text) {
+ pos := MakePos(base, prev.line, prev.col)
+ err := Error{pos, text}
+ if res == nil {
+ res = make(map[uint][]Error)
+ }
+ res[prev.line] = append(res[prev.line], err)
+ }
+ }, comments)
+
+ for s.tok != _EOF {
+ s.next()
+ if s.tok == _Semi && s.lit != "semicolon" {
+ continue // ignore automatically inserted semicolons
+ }
+ prev.line, prev.col = s.line, s.col
+ }
+
+ return
+}
diff --git a/src/cmd/compile/internal/syntax/testing_test.go b/src/cmd/compile/internal/syntax/testing_test.go
new file mode 100644
index 0000000..7e439c5
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testing_test.go
@@ -0,0 +1,48 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func TestCommentMap(t *testing.T) {
+ const src = `/* ERROR "0:0" */ /* ERROR "0:0" */ // ERROR "0:0"
+// ERROR "0:0"
+x /* ERROR "3:1" */ // ignore automatically inserted semicolon here
+/* ERROR "3:1" */ // position of x on previous line
+ x /* ERROR "5:4" */ ; // do not ignore this semicolon
+/* ERROR "5:24" */ // position of ; on previous line
+ package /* ERROR "7:2" */ // indented with tab
+ import /* ERROR "8:9" */ // indented with blanks
+`
+ m := CommentMap(strings.NewReader(src), regexp.MustCompile("^ ERROR "))
+ found := 0 // number of errors found
+ for line, errlist := range m {
+ for _, err := range errlist {
+ if err.Pos.Line() != line {
+ t.Errorf("%v: got map line %d; want %d", err, err.Pos.Line(), line)
+ continue
+ }
+ // err.Pos.Line() == line
+
+ got := strings.TrimSpace(err.Msg[len(" ERROR "):])
+ want := fmt.Sprintf(`"%d:%d"`, line, err.Pos.Col())
+ if got != want {
+ t.Errorf("%v: got msg %q; want %q", err, got, want)
+ continue
+ }
+ found++
+ }
+ }
+
+ want := strings.Count(src, " ERROR ")
+ if found != want {
+ t.Errorf("CommentMap got %d errors; want %d", found, want)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/token_string.go b/src/cmd/compile/internal/syntax/token_string.go
new file mode 100644
index 0000000..ef295eb
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/token_string.go
@@ -0,0 +1,70 @@
+// Code generated by "stringer -type token -linecomment tokens.go"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[_EOF-1]
+ _ = x[_Name-2]
+ _ = x[_Literal-3]
+ _ = x[_Operator-4]
+ _ = x[_AssignOp-5]
+ _ = x[_IncOp-6]
+ _ = x[_Assign-7]
+ _ = x[_Define-8]
+ _ = x[_Arrow-9]
+ _ = x[_Star-10]
+ _ = x[_Lparen-11]
+ _ = x[_Lbrack-12]
+ _ = x[_Lbrace-13]
+ _ = x[_Rparen-14]
+ _ = x[_Rbrack-15]
+ _ = x[_Rbrace-16]
+ _ = x[_Comma-17]
+ _ = x[_Semi-18]
+ _ = x[_Colon-19]
+ _ = x[_Dot-20]
+ _ = x[_DotDotDot-21]
+ _ = x[_Break-22]
+ _ = x[_Case-23]
+ _ = x[_Chan-24]
+ _ = x[_Const-25]
+ _ = x[_Continue-26]
+ _ = x[_Default-27]
+ _ = x[_Defer-28]
+ _ = x[_Else-29]
+ _ = x[_Fallthrough-30]
+ _ = x[_For-31]
+ _ = x[_Func-32]
+ _ = x[_Go-33]
+ _ = x[_Goto-34]
+ _ = x[_If-35]
+ _ = x[_Import-36]
+ _ = x[_Interface-37]
+ _ = x[_Map-38]
+ _ = x[_Package-39]
+ _ = x[_Range-40]
+ _ = x[_Return-41]
+ _ = x[_Select-42]
+ _ = x[_Struct-43]
+ _ = x[_Switch-44]
+ _ = x[_Type-45]
+ _ = x[_Var-46]
+ _ = x[tokenCount-47]
+}
+
+const _token_name = "EOFnameliteralopop=opop=:=<-*([{)]},;:....breakcasechanconstcontinuedefaultdeferelsefallthroughforfuncgogotoifimportinterfacemappackagerangereturnselectstructswitchtypevar"
+
+var _token_index = [...]uint8{0, 3, 7, 14, 16, 19, 23, 24, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 42, 47, 51, 55, 60, 68, 75, 80, 84, 95, 98, 102, 104, 108, 110, 116, 125, 128, 135, 140, 146, 152, 158, 164, 168, 171, 171}
+
+func (i token) String() string {
+ i -= 1
+ if i >= token(len(_token_index)-1) {
+ return "token(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _token_name[_token_index[i]:_token_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go
new file mode 100644
index 0000000..b08f699
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/tokens.go
@@ -0,0 +1,159 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+type Token uint
+
+type token = Token
+
+//go:generate stringer -type token -linecomment tokens.go
+
+const (
+ _ token = iota
+ _EOF // EOF
+
+ // names and literals
+ _Name // name
+ _Literal // literal
+
+ // operators and operations
+ // _Operator is excluding '*' (_Star)
+ _Operator // op
+ _AssignOp // op=
+ _IncOp // opop
+ _Assign // =
+ _Define // :=
+ _Arrow // <-
+ _Star // *
+
+ // delimiters
+ _Lparen // (
+ _Lbrack // [
+ _Lbrace // {
+ _Rparen // )
+ _Rbrack // ]
+ _Rbrace // }
+ _Comma // ,
+ _Semi // ;
+ _Colon // :
+ _Dot // .
+ _DotDotDot // ...
+
+ // keywords
+ _Break // break
+ _Case // case
+ _Chan // chan
+ _Const // const
+ _Continue // continue
+ _Default // default
+ _Defer // defer
+ _Else // else
+ _Fallthrough // fallthrough
+ _For // for
+ _Func // func
+ _Go // go
+ _Goto // goto
+ _If // if
+ _Import // import
+ _Interface // interface
+ _Map // map
+ _Package // package
+ _Range // range
+ _Return // return
+ _Select // select
+ _Struct // struct
+ _Switch // switch
+ _Type // type
+ _Var // var
+
+ // empty line comment to exclude it from .String
+ tokenCount //
+)
+
+const (
+ // for BranchStmt
+ Break = _Break
+ Continue = _Continue
+ Fallthrough = _Fallthrough
+ Goto = _Goto
+
+ // for CallStmt
+ Go = _Go
+ Defer = _Defer
+)
+
+// Make sure we have at most 64 tokens so we can use them in a set.
+const _ uint64 = 1 << (tokenCount - 1)
+
+// contains reports whether tok is in tokset.
+func contains(tokset uint64, tok token) bool {
+ return tokset&(1<<tok) != 0
+}
+
+type LitKind uint8
+
+// TODO(gri) With the 'i' (imaginary) suffix now permitted on integer
+// and floating-point numbers, having a single ImagLit does
+// not represent the literal kind well anymore. Remove it?
+const (
+ IntLit LitKind = iota
+ FloatLit
+ ImagLit
+ RuneLit
+ StringLit
+)
+
+type Operator uint
+
+//go:generate stringer -type Operator -linecomment tokens.go
+
+const (
+ _ Operator = iota
+
+ // Def is the : in :=
+ Def // :
+ Not // !
+ Recv // <-
+ Tilde // ~
+
+ // precOrOr
+ OrOr // ||
+
+ // precAndAnd
+ AndAnd // &&
+
+ // precCmp
+ Eql // ==
+ Neq // !=
+ Lss // <
+ Leq // <=
+ Gtr // >
+ Geq // >=
+
+ // precAdd
+ Add // +
+ Sub // -
+ Or // |
+ Xor // ^
+
+ // precMul
+ Mul // *
+ Div // /
+ Rem // %
+ And // &
+ AndNot // &^
+ Shl // <<
+ Shr // >>
+)
+
+// Operator precedences
+const (
+ _ = iota
+ precOrOr
+ precAndAnd
+ precCmp
+ precAdd
+ precMul
+)
diff --git a/src/cmd/compile/internal/syntax/type.go b/src/cmd/compile/internal/syntax/type.go
new file mode 100644
index 0000000..53132a4
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/type.go
@@ -0,0 +1,75 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "go/constant"
+
+// A Type represents a type of Go.
+// All types implement the Type interface.
+// (This type originally lived in types2. We moved it here
+// so we could depend on it from other packages without
+// introducing a circularity.)
+type Type interface {
+ // Underlying returns the underlying type of a type.
+ Underlying() Type
+
+ // String returns a string representation of a type.
+ String() string
+}
+
+// Expressions in the syntax package provide storage for
+// the typechecker to record its results. This interface
+// is the mechanism the typechecker uses to record results,
+// and clients use to retrieve those results.
+type typeInfo interface {
+ SetTypeInfo(TypeAndValue)
+ GetTypeInfo() TypeAndValue
+}
+
+// A TypeAndValue records the type information, constant
+// value if known, and various other flags associated with
+// an expression.
+// This type is similar to types2.TypeAndValue, but exposes
+// none of types2's internals.
+type TypeAndValue struct {
+ Type Type
+ Value constant.Value
+ exprFlags
+}
+
+type exprFlags uint16
+
+func (f exprFlags) IsVoid() bool { return f&1 != 0 }
+func (f exprFlags) IsType() bool { return f&2 != 0 }
+func (f exprFlags) IsBuiltin() bool { return f&4 != 0 } // a language builtin that resembles a function call, e.g., "make, append, new"
+func (f exprFlags) IsValue() bool { return f&8 != 0 }
+func (f exprFlags) IsNil() bool { return f&16 != 0 }
+func (f exprFlags) Addressable() bool { return f&32 != 0 }
+func (f exprFlags) Assignable() bool { return f&64 != 0 }
+func (f exprFlags) HasOk() bool { return f&128 != 0 }
+func (f exprFlags) IsRuntimeHelper() bool { return f&256 != 0 } // a runtime function called from transformed syntax
+
+func (f *exprFlags) SetIsVoid() { *f |= 1 }
+func (f *exprFlags) SetIsType() { *f |= 2 }
+func (f *exprFlags) SetIsBuiltin() { *f |= 4 }
+func (f *exprFlags) SetIsValue() { *f |= 8 }
+func (f *exprFlags) SetIsNil() { *f |= 16 }
+func (f *exprFlags) SetAddressable() { *f |= 32 }
+func (f *exprFlags) SetAssignable() { *f |= 64 }
+func (f *exprFlags) SetHasOk() { *f |= 128 }
+func (f *exprFlags) SetIsRuntimeHelper() { *f |= 256 }
+
+// a typeAndValue contains the results of typechecking an expression.
+// It is embedded in expression nodes.
+type typeAndValue struct {
+ tv TypeAndValue
+}
+
+func (x *typeAndValue) SetTypeInfo(tv TypeAndValue) {
+ x.tv = tv
+}
+func (x *typeAndValue) GetTypeInfo() TypeAndValue {
+ return x.tv
+}
diff --git a/src/cmd/compile/internal/syntax/walk.go b/src/cmd/compile/internal/syntax/walk.go
new file mode 100644
index 0000000..b03a7c1
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/walk.go
@@ -0,0 +1,346 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements syntax tree walking.
+
+package syntax
+
+import "fmt"
+
+// Inspect traverses an AST in pre-order: it starts by calling f(root);
+// root must not be nil. If f returns true, Inspect invokes f recursively
+// for each of the non-nil children of root, followed by a call of f(nil).
+//
+// See Walk for caveats about shared nodes.
+func Inspect(root Node, f func(Node) bool) {
+ Walk(root, inspector(f))
+}
+
+type inspector func(Node) bool
+
+func (v inspector) Visit(node Node) Visitor {
+ if v(node) {
+ return v
+ }
+ return nil
+}
+
+// Walk traverses an AST in pre-order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
+// Some nodes may be shared among multiple parent nodes (e.g., types in
+// field lists such as type T in "a, b, c T"). Such shared nodes are
+// walked multiple times.
+// TODO(gri) Revisit this design. It may make sense to walk those nodes
+// only once. A place where this matters is types2.TestResolveIdents.
+func Walk(root Node, v Visitor) {
+ walker{v}.node(root)
+}
+
+// A Visitor's Visit method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil).
+type Visitor interface {
+ Visit(node Node) (w Visitor)
+}
+
+type walker struct {
+ v Visitor
+}
+
+func (w walker) node(n Node) {
+ if n == nil {
+ panic("nil node")
+ }
+
+ w.v = w.v.Visit(n)
+ if w.v == nil {
+ return
+ }
+
+ switch n := n.(type) {
+ // packages
+ case *File:
+ w.node(n.PkgName)
+ w.declList(n.DeclList)
+
+ // declarations
+ case *ImportDecl:
+ if n.LocalPkgName != nil {
+ w.node(n.LocalPkgName)
+ }
+ w.node(n.Path)
+
+ case *ConstDecl:
+ w.nameList(n.NameList)
+ if n.Type != nil {
+ w.node(n.Type)
+ }
+ if n.Values != nil {
+ w.node(n.Values)
+ }
+
+ case *TypeDecl:
+ w.node(n.Name)
+ w.fieldList(n.TParamList)
+ w.node(n.Type)
+
+ case *VarDecl:
+ w.nameList(n.NameList)
+ if n.Type != nil {
+ w.node(n.Type)
+ }
+ if n.Values != nil {
+ w.node(n.Values)
+ }
+
+ case *FuncDecl:
+ if n.Recv != nil {
+ w.node(n.Recv)
+ }
+ w.node(n.Name)
+ w.fieldList(n.TParamList)
+ w.node(n.Type)
+ if n.Body != nil {
+ w.node(n.Body)
+ }
+
+ // expressions
+ case *BadExpr: // nothing to do
+ case *Name: // nothing to do
+ case *BasicLit: // nothing to do
+
+ case *CompositeLit:
+ if n.Type != nil {
+ w.node(n.Type)
+ }
+ w.exprList(n.ElemList)
+
+ case *KeyValueExpr:
+ w.node(n.Key)
+ w.node(n.Value)
+
+ case *FuncLit:
+ w.node(n.Type)
+ w.node(n.Body)
+
+ case *ParenExpr:
+ w.node(n.X)
+
+ case *SelectorExpr:
+ w.node(n.X)
+ w.node(n.Sel)
+
+ case *IndexExpr:
+ w.node(n.X)
+ w.node(n.Index)
+
+ case *SliceExpr:
+ w.node(n.X)
+ for _, x := range n.Index {
+ if x != nil {
+ w.node(x)
+ }
+ }
+
+ case *AssertExpr:
+ w.node(n.X)
+ w.node(n.Type)
+
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ w.node(n.Lhs)
+ }
+ w.node(n.X)
+
+ case *Operation:
+ w.node(n.X)
+ if n.Y != nil {
+ w.node(n.Y)
+ }
+
+ case *CallExpr:
+ w.node(n.Fun)
+ w.exprList(n.ArgList)
+
+ case *ListExpr:
+ w.exprList(n.ElemList)
+
+ // types
+ case *ArrayType:
+ if n.Len != nil {
+ w.node(n.Len)
+ }
+ w.node(n.Elem)
+
+ case *SliceType:
+ w.node(n.Elem)
+
+ case *DotsType:
+ w.node(n.Elem)
+
+ case *StructType:
+ w.fieldList(n.FieldList)
+ for _, t := range n.TagList {
+ if t != nil {
+ w.node(t)
+ }
+ }
+
+ case *Field:
+ if n.Name != nil {
+ w.node(n.Name)
+ }
+ w.node(n.Type)
+
+ case *InterfaceType:
+ w.fieldList(n.MethodList)
+
+ case *FuncType:
+ w.fieldList(n.ParamList)
+ w.fieldList(n.ResultList)
+
+ case *MapType:
+ w.node(n.Key)
+ w.node(n.Value)
+
+ case *ChanType:
+ w.node(n.Elem)
+
+ // statements
+ case *EmptyStmt: // nothing to do
+
+ case *LabeledStmt:
+ w.node(n.Label)
+ w.node(n.Stmt)
+
+ case *BlockStmt:
+ w.stmtList(n.List)
+
+ case *ExprStmt:
+ w.node(n.X)
+
+ case *SendStmt:
+ w.node(n.Chan)
+ w.node(n.Value)
+
+ case *DeclStmt:
+ w.declList(n.DeclList)
+
+ case *AssignStmt:
+ w.node(n.Lhs)
+ if n.Rhs != nil {
+ w.node(n.Rhs)
+ }
+
+ case *BranchStmt:
+ if n.Label != nil {
+ w.node(n.Label)
+ }
+ // Target points to nodes elsewhere in the syntax tree
+
+ case *CallStmt:
+ w.node(n.Call)
+
+ case *ReturnStmt:
+ if n.Results != nil {
+ w.node(n.Results)
+ }
+
+ case *IfStmt:
+ if n.Init != nil {
+ w.node(n.Init)
+ }
+ w.node(n.Cond)
+ w.node(n.Then)
+ if n.Else != nil {
+ w.node(n.Else)
+ }
+
+ case *ForStmt:
+ if n.Init != nil {
+ w.node(n.Init)
+ }
+ if n.Cond != nil {
+ w.node(n.Cond)
+ }
+ if n.Post != nil {
+ w.node(n.Post)
+ }
+ w.node(n.Body)
+
+ case *SwitchStmt:
+ if n.Init != nil {
+ w.node(n.Init)
+ }
+ if n.Tag != nil {
+ w.node(n.Tag)
+ }
+ for _, s := range n.Body {
+ w.node(s)
+ }
+
+ case *SelectStmt:
+ for _, s := range n.Body {
+ w.node(s)
+ }
+
+ // helper nodes
+ case *RangeClause:
+ if n.Lhs != nil {
+ w.node(n.Lhs)
+ }
+ w.node(n.X)
+
+ case *CaseClause:
+ if n.Cases != nil {
+ w.node(n.Cases)
+ }
+ w.stmtList(n.Body)
+
+ case *CommClause:
+ if n.Comm != nil {
+ w.node(n.Comm)
+ }
+ w.stmtList(n.Body)
+
+ default:
+ panic(fmt.Sprintf("internal error: unknown node type %T", n))
+ }
+
+ w.v.Visit(nil)
+}
+
+func (w walker) declList(list []Decl) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) exprList(list []Expr) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) stmtList(list []Stmt) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) nameList(list []*Name) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) fieldList(list []*Field) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
diff --git a/src/cmd/compile/internal/test/README b/src/cmd/compile/internal/test/README
new file mode 100644
index 0000000..242ff79
--- /dev/null
+++ b/src/cmd/compile/internal/test/README
@@ -0,0 +1,4 @@
+This directory holds small tests and benchmarks of code
+generated by the compiler. This code is not for importing,
+and the tests are intended to verify that specific optimzations
+are applied and correct.
diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
new file mode 100644
index 0000000..b500de9
--- /dev/null
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -0,0 +1,398 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bufio"
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "cmd/internal/src"
+ "fmt"
+ "os"
+ "testing"
+)
+
+// AMD64 registers available:
+// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, r10, R11
+// - floating point: X0 - X14
+var configAMD64 = abi.NewABIConfig(9, 15, 0, 1)
+
+func TestMain(m *testing.M) {
+ ssagen.Arch.LinkArch = &x86.Linkamd64
+ ssagen.Arch.REGSP = x86.REGSP
+ ssagen.Arch.MAXWIDTH = 1 << 50
+ types.MaxWidth = ssagen.Arch.MAXWIDTH
+ base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+ types.LocalPkg = types.NewPkg("p", "local")
+ types.LocalPkg.Prefix = "p"
+ types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+ types.RegSize = ssagen.Arch.LinkArch.RegSize
+ typecheck.InitUniverse()
+ os.Exit(m.Run())
+}
+
+func TestABIUtilsBasic1(t *testing.T) {
+
+ // func(x int32) int32
+ i32 := types.Types[types.TINT32]
+ ft := mkFuncType(nil, []*types.Type{i32}, []*types.Type{i32})
+
+ // expected results
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int32
+ OUT 0: R{ I0 } spilloffset: -1 typ: int32
+ offsetToSpillArea: 0 spillAreaSize: 8
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsBasic2(t *testing.T) {
+ // func(p1 int8, p2 int16, p3 int32, p4 int64,
+ // p5 float32, p6 float32, p7 float64, p8 float64,
+ // p9 int8, p10 int16, p11 int32, p12 int64,
+ // p13 float32, p14 float32, p15 float64, p16 float64,
+ // p17 complex128, p18 complex128, p19 complex12, p20 complex128,
+ // p21 complex64, p22 int8, p23 in16, p24 int32, p25 int64,
+ // p26 int8, p27 in16, p28 int32, p29 int64)
+ // (r1 int32, r2 float64, r3 float64) {
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ f32 := types.Types[types.TFLOAT32]
+ f64 := types.Types[types.TFLOAT64]
+ c64 := types.Types[types.TCOMPLEX64]
+ c128 := types.Types[types.TCOMPLEX128]
+ ft := mkFuncType(nil,
+ []*types.Type{
+ i8, i16, i32, i64,
+ f32, f32, f64, f64,
+ i8, i16, i32, i64,
+ f32, f32, f64, f64,
+ c128, c128, c128, c128, c64,
+ i8, i16, i32, i64,
+ i8, i16, i32, i64},
+ []*types.Type{i32, f64, f64})
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int8
+ IN 1: R{ I1 } spilloffset: 2 typ: int16
+ IN 2: R{ I2 } spilloffset: 4 typ: int32
+ IN 3: R{ I3 } spilloffset: 8 typ: int64
+ IN 4: R{ F0 } spilloffset: 16 typ: float32
+ IN 5: R{ F1 } spilloffset: 20 typ: float32
+ IN 6: R{ F2 } spilloffset: 24 typ: float64
+ IN 7: R{ F3 } spilloffset: 32 typ: float64
+ IN 8: R{ I4 } spilloffset: 40 typ: int8
+ IN 9: R{ I5 } spilloffset: 42 typ: int16
+ IN 10: R{ I6 } spilloffset: 44 typ: int32
+ IN 11: R{ I7 } spilloffset: 48 typ: int64
+ IN 12: R{ F4 } spilloffset: 56 typ: float32
+ IN 13: R{ F5 } spilloffset: 60 typ: float32
+ IN 14: R{ F6 } spilloffset: 64 typ: float64
+ IN 15: R{ F7 } spilloffset: 72 typ: float64
+ IN 16: R{ F8 F9 } spilloffset: 80 typ: complex128
+ IN 17: R{ F10 F11 } spilloffset: 96 typ: complex128
+ IN 18: R{ F12 F13 } spilloffset: 112 typ: complex128
+ IN 19: R{ } offset: 0 typ: complex128
+ IN 20: R{ } offset: 16 typ: complex64
+ IN 21: R{ I8 } spilloffset: 128 typ: int8
+ IN 22: R{ } offset: 24 typ: int16
+ IN 23: R{ } offset: 28 typ: int32
+ IN 24: R{ } offset: 32 typ: int64
+ IN 25: R{ } offset: 40 typ: int8
+ IN 26: R{ } offset: 42 typ: int16
+ IN 27: R{ } offset: 44 typ: int32
+ IN 28: R{ } offset: 48 typ: int64
+ OUT 0: R{ I0 } spilloffset: -1 typ: int32
+ OUT 1: R{ F0 } spilloffset: -1 typ: float64
+ OUT 2: R{ F1 } spilloffset: -1 typ: float64
+ offsetToSpillArea: 56 spillAreaSize: 136
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsArrays(t *testing.T) {
+ // func(p1 [1]int32, p2 [0]int32, p3 [1][1]int32, p4 [2]int32)
+ // (r1 [2]int32, r2 [1]int32, r3 [0]int32, r4 [1][1]int32) {
+ i32 := types.Types[types.TINT32]
+ ae := types.NewArray(i32, 0)
+ a1 := types.NewArray(i32, 1)
+ a2 := types.NewArray(i32, 2)
+ aa1 := types.NewArray(a1, 1)
+ ft := mkFuncType(nil, []*types.Type{a1, ae, aa1, a2},
+ []*types.Type{a2, a1, ae, aa1})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: [1]int32
+ IN 1: R{ } offset: 0 typ: [0]int32
+ IN 2: R{ I1 } spilloffset: 4 typ: [1][1]int32
+ IN 3: R{ } offset: 0 typ: [2]int32
+ OUT 0: R{ } offset: 8 typ: [2]int32
+ OUT 1: R{ I0 } spilloffset: -1 typ: [1]int32
+ OUT 2: R{ } offset: 16 typ: [0]int32
+ OUT 3: R{ I1 } spilloffset: -1 typ: [1][1]int32
+ offsetToSpillArea: 16 spillAreaSize: 8
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsStruct1(t *testing.T) {
+ // type s struct { f1 int8; f2 int8; f3 struct {}; f4 int8; f5 int16) }
+ // func(p1 int6, p2 s, p3 int64)
+ // (r1 s, r2 int8, r3 int32) {
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ s := mkstruct(i8, i8, mkstruct(), i8, i16)
+ ft := mkFuncType(nil, []*types.Type{i8, s, i64},
+ []*types.Type{s, i8, i32})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int8
+ IN 1: R{ I1 I2 I3 I4 } spilloffset: 2 typ: struct { int8; int8; struct {}; int8; int16 }
+ IN 2: R{ I5 } spilloffset: 8 typ: int64
+ OUT 0: R{ I0 I1 I2 I3 } spilloffset: -1 typ: struct { int8; int8; struct {}; int8; int16 }
+ OUT 1: R{ I4 } spilloffset: -1 typ: int8
+ OUT 2: R{ I5 } spilloffset: -1 typ: int32
+ offsetToSpillArea: 0 spillAreaSize: 16
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsStruct2(t *testing.T) {
+ // type s struct { f1 int64; f2 struct { } }
+ // type fs struct { f1 float64; f2 s; f3 struct { } }
+ // func(p1 s, p2 s, p3 fs)
+ // (r1 fs, r2 fs)
+ f64 := types.Types[types.TFLOAT64]
+ i64 := types.Types[types.TINT64]
+ s := mkstruct(i64, mkstruct())
+ fs := mkstruct(f64, s, mkstruct())
+ ft := mkFuncType(nil, []*types.Type{s, s, fs},
+ []*types.Type{fs, fs})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: struct { int64; struct {} }
+ IN 1: R{ I1 } spilloffset: 16 typ: struct { int64; struct {} }
+ IN 2: R{ F0 I2 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} }
+ OUT 0: R{ F0 I0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
+ OUT 1: R{ F1 I1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
+ offsetToSpillArea: 0 spillAreaSize: 64
+`)
+
+ abitest(t, ft, exp)
+}
+
+// TestABIUtilsEmptyFieldAtEndOfStruct is testing to make sure
+// the abi code is doing the right thing for struct types that have
+// a trailing zero-sized field (where the we need to add padding).
+func TestABIUtilsEmptyFieldAtEndOfStruct(t *testing.T) {
+ // type s struct { f1 [2]int64; f2 struct { } }
+ // type s2 struct { f1 [3]int16; f2 struct { } }
+ // type fs struct { f1 float64; f s; f3 struct { } }
+ // func(p1 s, p2 s, p3 fs) (r1 fs, r2 fs)
+ f64 := types.Types[types.TFLOAT64]
+ i64 := types.Types[types.TINT64]
+ i16 := types.Types[types.TINT16]
+ tb := types.Types[types.TBOOL]
+ ab2 := types.NewArray(tb, 2)
+ a2 := types.NewArray(i64, 2)
+ a3 := types.NewArray(i16, 3)
+ empty := mkstruct()
+ s := mkstruct(a2, empty)
+ s2 := mkstruct(a3, empty)
+ fs := mkstruct(f64, s, empty)
+ ft := mkFuncType(nil, []*types.Type{s, ab2, s2, fs, fs},
+ []*types.Type{fs, ab2, fs})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ } offset: 0 typ: struct { [2]int64; struct {} }
+ IN 1: R{ } offset: 24 typ: [2]bool
+ IN 2: R{ } offset: 26 typ: struct { [3]int16; struct {} }
+ IN 3: R{ } offset: 40 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ IN 4: R{ } offset: 80 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ OUT 0: R{ } offset: 120 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ OUT 1: R{ } offset: 160 typ: [2]bool
+ OUT 2: R{ } offset: 168 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ offsetToSpillArea: 208 spillAreaSize: 0
+`)
+
+ abitest(t, ft, exp)
+
+ // Test that NumParamRegs doesn't assign registers to trailing padding.
+ typ := mkstruct(i64, i64, mkstruct())
+ have := configAMD64.NumParamRegs(typ)
+ if have != 2 {
+ t.Errorf("NumParams(%v): have %v, want %v", typ, have, 2)
+ }
+}
+
+func TestABIUtilsSliceString(t *testing.T) {
+ // func(p1 []int32, p2 int8, p3 []int32, p4 int8, p5 string,
+ // p6 int64, p6 []intr32) (r1 string, r2 int64, r3 string, r4 []int32)
+ i32 := types.Types[types.TINT32]
+ sli32 := types.NewSlice(i32)
+ str := types.Types[types.TSTRING]
+ i8 := types.Types[types.TINT8]
+ i64 := types.Types[types.TINT64]
+ ft := mkFuncType(nil, []*types.Type{sli32, i8, sli32, i8, str, i8, i64, sli32},
+ []*types.Type{str, i64, str, sli32})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: []int32
+ IN 1: R{ I3 } spilloffset: 24 typ: int8
+ IN 2: R{ I4 I5 I6 } spilloffset: 32 typ: []int32
+ IN 3: R{ I7 } spilloffset: 56 typ: int8
+ IN 4: R{ } offset: 0 typ: string
+ IN 5: R{ I8 } spilloffset: 57 typ: int8
+ IN 6: R{ } offset: 16 typ: int64
+ IN 7: R{ } offset: 24 typ: []int32
+ OUT 0: R{ I0 I1 } spilloffset: -1 typ: string
+ OUT 1: R{ I2 } spilloffset: -1 typ: int64
+ OUT 2: R{ I3 I4 } spilloffset: -1 typ: string
+ OUT 3: R{ I5 I6 I7 } spilloffset: -1 typ: []int32
+ offsetToSpillArea: 48 spillAreaSize: 64
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsMethod(t *testing.T) {
+ // type s1 struct { f1 int16; f2 int16; f3 int16 }
+ // func(p1 *s1, p2 [7]*s1, p3 float64, p4 int16, p5 int16, p6 int16)
+ // (r1 [7]*s1, r2 float64, r3 int64)
+ i16 := types.Types[types.TINT16]
+ i64 := types.Types[types.TINT64]
+ f64 := types.Types[types.TFLOAT64]
+ s1 := mkstruct(i16, i16, i16)
+ ps1 := types.NewPtr(s1)
+ a7 := types.NewArray(ps1, 7)
+ ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16},
+ []*types.Type{a7, f64, i64})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; int16 }
+ IN 1: R{ I3 } spilloffset: 8 typ: *struct { int16; int16; int16 }
+ IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 }
+ IN 3: R{ F0 } spilloffset: 16 typ: float64
+ IN 4: R{ I4 } spilloffset: 24 typ: int16
+ IN 5: R{ I5 } spilloffset: 26 typ: int16
+ IN 6: R{ I6 } spilloffset: 28 typ: int16
+ OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 }
+ OUT 1: R{ F0 } spilloffset: -1 typ: float64
+ OUT 2: R{ I0 } spilloffset: -1 typ: int64
+ offsetToSpillArea: 112 spillAreaSize: 32
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsInterfaces(t *testing.T) {
+ // type s1 { f1 int16; f2 int16; f3 bool)
+ // type nei interface { ...() string }
+ // func(p1 s1, p2 interface{}, p3 interface{}, p4 nei,
+ // p5 *interface{}, p6 nei, p7 int64)
+ // (r1 interface{}, r2 nei, r3 bool)
+ ei := types.Types[types.TINTER] // interface{}
+ pei := types.NewPtr(ei) // *interface{}
+ fldt := mkFuncType(types.FakeRecvType(), []*types.Type{},
+ []*types.Type{types.Types[types.TSTRING]})
+ field := types.NewField(src.NoXPos, typecheck.Lookup("F"), fldt)
+ nei := types.NewInterface([]*types.Field{field})
+ i16 := types.Types[types.TINT16]
+ tb := types.Types[types.TBOOL]
+ s1 := mkstruct(i16, i16, tb)
+ ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16},
+ []*types.Type{ei, nei, pei})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool }
+ IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {}
+ IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {}
+ IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { F() string }
+ IN 4: R{ } offset: 0 typ: *interface {}
+ IN 5: R{ } offset: 8 typ: interface { F() string }
+ IN 6: R{ } offset: 24 typ: int16
+ OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {}
+ OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { F() string }
+ OUT 2: R{ I4 } spilloffset: -1 typ: *interface {}
+ offsetToSpillArea: 32 spillAreaSize: 56
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABINumParamRegs(t *testing.T) {
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ f32 := types.Types[types.TFLOAT32]
+ f64 := types.Types[types.TFLOAT64]
+ c64 := types.Types[types.TCOMPLEX64]
+ c128 := types.Types[types.TCOMPLEX128]
+
+ s := mkstruct(i8, i8, mkstruct(), i8, i16)
+ a := mkstruct(s, s, s)
+
+ nrtest(t, i8, 1)
+ nrtest(t, i16, 1)
+ nrtest(t, i32, 1)
+ nrtest(t, i64, 1)
+ nrtest(t, f32, 1)
+ nrtest(t, f64, 1)
+ nrtest(t, c64, 2)
+ nrtest(t, c128, 2)
+ nrtest(t, s, 4)
+ nrtest(t, a, 12)
+}
+
+func TestABIUtilsComputePadding(t *testing.T) {
+ // type s1 { f1 int8; f2 int16; f3 struct{}; f4 int32; f5 int64 }
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ emptys := mkstruct()
+ s1 := mkstruct(i8, i16, emptys, i32, i64)
+ // func (p1 int32, p2 s1, p3 emptys, p4 [1]int32)
+ a1 := types.NewArray(i32, 1)
+ ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, nil)
+
+ // Run abitest() just to document what we're expected to see.
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int32
+ IN 1: R{ I1 I2 I3 I4 } spilloffset: 8 typ: struct { int8; int16; struct {}; int32; int64 }
+ IN 2: R{ } offset: 0 typ: struct {}
+ IN 3: R{ I5 } spilloffset: 24 typ: [1]int32
+ offsetToSpillArea: 0 spillAreaSize: 32
+`)
+ abitest(t, ft, exp)
+
+ // Analyze with full set of registers, then call ComputePadding
+ // on the second param, verifying the results.
+ regRes := configAMD64.ABIAnalyze(ft, false)
+ padding := make([]uint64, 32)
+ parm := regRes.InParams()[1]
+ padding = parm.ComputePadding(padding)
+ want := "[1 1 1 0]"
+ got := fmt.Sprintf("%+v", padding)
+ if got != want {
+ t.Errorf("padding mismatch: wanted %q got %q\n", got, want)
+ }
+}
diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
new file mode 100644
index 0000000..fb1c398
--- /dev/null
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -0,0 +1,131 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+// This file contains utility routines and harness infrastructure used
+// by the ABI tests in "abiutils_test.go".
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+ "testing"
+ "text/scanner"
+)
+
+func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
+ field := types.NewField(src.NoXPos, s, t)
+ n := ir.NewNameAt(src.NoXPos, s, t)
+ n.Class = which
+ field.Nname = n
+ return field
+}
+
+// mkstruct is a helper routine to create a struct type with fields
+// of the types specified in 'fieldtypes'.
+func mkstruct(fieldtypes ...*types.Type) *types.Type {
+ fields := make([]*types.Field, len(fieldtypes))
+ for k, t := range fieldtypes {
+ if t == nil {
+ panic("bad -- field has no type")
+ }
+ f := types.NewField(src.NoXPos, nil, t)
+ fields[k] = f
+ }
+ s := types.NewStruct(fields)
+ return s
+}
+
+func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
+ q := typecheck.Lookup("?")
+ inf := []*types.Field{}
+ for _, it := range ins {
+ inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
+ }
+ outf := []*types.Field{}
+ for _, ot := range outs {
+ outf = append(outf, mkParamResultField(ot, q, ir.PPARAMOUT))
+ }
+ var rf *types.Field
+ if rcvr != nil {
+ rf = mkParamResultField(rcvr, q, ir.PPARAM)
+ }
+ return types.NewSignature(rf, inf, outf)
+}
+
+type expectedDump struct {
+ dump string
+ file string
+ line int
+}
+
+func tokenize(src string) []string {
+ var s scanner.Scanner
+ s.Init(strings.NewReader(src))
+ res := []string{}
+ for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+ res = append(res, s.TokenText())
+ }
+ return res
+}
+
+func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
+ n := f.Nname.(*ir.Name)
+ if n.FrameOffset() != int64(r.Offset()) {
+ t.Errorf("%s %d: got offset %d wanted %d t=%v",
+ which, idx, r.Offset(), n.Offset_, f.Type)
+ return 1
+ }
+ return 0
+}
+
+func makeExpectedDump(e string) expectedDump {
+ return expectedDump{dump: e}
+}
+
+func difftokens(atoks []string, etoks []string) string {
+ if len(atoks) != len(etoks) {
+ return fmt.Sprintf("expected %d tokens got %d",
+ len(etoks), len(atoks))
+ }
+ for i := 0; i < len(etoks); i++ {
+ if etoks[i] == atoks[i] {
+ continue
+ }
+
+ return fmt.Sprintf("diff at token %d: expected %q got %q",
+ i, etoks[i], atoks[i])
+ }
+ return ""
+}
+
+func nrtest(t *testing.T, ft *types.Type, expected int) {
+ types.CalcSize(ft)
+ got := configAMD64.NumParamRegs(ft)
+ if got != expected {
+ t.Errorf("]\nexpected num regs = %d, got %d, type %v", expected, got, ft)
+ }
+}
+
+func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
+
+ types.CalcSize(ft)
+
+ // Analyze with full set of registers.
+ regRes := configAMD64.ABIAnalyze(ft, false)
+ regResString := strings.TrimSpace(regRes.String())
+
+ // Check results.
+ reason := difftokens(tokenize(regResString), tokenize(exp.dump))
+ if reason != "" {
+ t.Errorf("\nexpected:\n%s\ngot:\n%s\nreason: %s",
+ strings.TrimSpace(exp.dump), regResString, reason)
+ }
+
+}
diff --git a/src/cmd/compile/internal/test/align_test.go b/src/cmd/compile/internal/test/align_test.go
new file mode 100644
index 0000000..32afc92
--- /dev/null
+++ b/src/cmd/compile/internal/test/align_test.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test to make sure that equality functions (and hash
+// functions) don't do unaligned reads on architectures
+// that can't do unaligned reads. See issue 46283.
+
+package test
+
+import "testing"
+
+type T1 struct {
+ x float32
+ a, b, c, d int16 // memequal64
+}
+type T2 struct {
+ x float32
+ a, b, c, d int32 // memequal128
+}
+
+type A2 [2]byte // eq uses a 2-byte load
+type A4 [4]byte // eq uses a 4-byte load
+type A8 [8]byte // eq uses an 8-byte load
+
+//go:noinline
+func cmpT1(p, q *T1) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpT2(p, q *T2) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpA2(p, q *A2) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpA4(p, q *A4) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpA8(p, q *A8) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+func TestAlignEqual(t *testing.T) {
+ cmpT1(&T1{}, &T1{})
+ cmpT2(&T2{}, &T2{})
+
+ m1 := map[T1]bool{}
+ m1[T1{}] = true
+ m1[T1{}] = false
+ if len(m1) != 1 {
+ t.Fatalf("len(m1)=%d, want 1", len(m1))
+ }
+ m2 := map[T2]bool{}
+ m2[T2{}] = true
+ m2[T2{}] = false
+ if len(m2) != 1 {
+ t.Fatalf("len(m2)=%d, want 1", len(m2))
+ }
+
+ type X2 struct {
+ y byte
+ z A2
+ }
+ var x2 X2
+ cmpA2(&x2.z, &A2{})
+ type X4 struct {
+ y byte
+ z A4
+ }
+ var x4 X4
+ cmpA4(&x4.z, &A4{})
+ type X8 struct {
+ y byte
+ z A8
+ }
+ var x8 X8
+ cmpA8(&x8.z, &A8{})
+}
diff --git a/src/cmd/compile/internal/test/bench_test.go b/src/cmd/compile/internal/test/bench_test.go
new file mode 100644
index 0000000..4724600
--- /dev/null
+++ b/src/cmd/compile/internal/test/bench_test.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+var globl int64
+var globl32 int32
+
+func BenchmarkLoadAdd(b *testing.B) {
+ x := make([]int64, 1024)
+ y := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s ^= x[i] + y[i]
+ }
+ globl = s
+ }
+}
+
+// Added for ppc64 extswsli on power9
+func BenchmarkExtShift(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s ^= int64(x[i]+32) * 8
+ }
+ globl = s
+ }
+}
+
+func BenchmarkModify(b *testing.B) {
+ a := make([]int64, 1024)
+ v := globl
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] += v
+ }
+ }
+}
+
+func BenchmarkMullImm(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int32
+ for i := range x {
+ s += x[i] * 100
+ }
+ globl32 = s
+ }
+}
+
+func BenchmarkConstModify(b *testing.B) {
+ a := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] += 3
+ }
+ }
+}
+
+func BenchmarkBitSet(b *testing.B) {
+ const N = 64 * 8
+ a := make([]uint64, N/64)
+ for i := 0; i < b.N; i++ {
+ for j := uint64(0); j < N; j++ {
+ a[j/64] |= 1 << (j % 64)
+ }
+ }
+}
+
+func BenchmarkBitClear(b *testing.B) {
+ const N = 64 * 8
+ a := make([]uint64, N/64)
+ for i := 0; i < b.N; i++ {
+ for j := uint64(0); j < N; j++ {
+ a[j/64] &^= 1 << (j % 64)
+ }
+ }
+}
+
+func BenchmarkBitToggle(b *testing.B) {
+ const N = 64 * 8
+ a := make([]uint64, N/64)
+ for i := 0; i < b.N; i++ {
+ for j := uint64(0); j < N; j++ {
+ a[j/64] ^= 1 << (j % 64)
+ }
+ }
+}
+
+func BenchmarkBitSetConst(b *testing.B) {
+ const N = 64
+ a := make([]uint64, N)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] |= 1 << 37
+ }
+ }
+}
+
+func BenchmarkBitClearConst(b *testing.B) {
+ const N = 64
+ a := make([]uint64, N)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] &^= 1 << 37
+ }
+ }
+}
+
+func BenchmarkBitToggleConst(b *testing.B) {
+ const N = 64
+ a := make([]uint64, N)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] ^= 1 << 37
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/clobberdead_test.go b/src/cmd/compile/internal/test/clobberdead_test.go
new file mode 100644
index 0000000..80d9678
--- /dev/null
+++ b/src/cmd/compile/internal/test/clobberdead_test.go
@@ -0,0 +1,54 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+const helloSrc = `
+package main
+import "fmt"
+func main() { fmt.Println("hello") }
+`
+
+func TestClobberDead(t *testing.T) {
+ // Test that clobberdead mode generates correct program.
+ runHello(t, "-clobberdead")
+}
+
+func TestClobberDeadReg(t *testing.T) {
+ // Test that clobberdeadreg mode generates correct program.
+ runHello(t, "-clobberdeadreg")
+}
+
+func runHello(t *testing.T, flag string) {
+ if testing.Short() {
+ // This test rebuilds the runtime with a special flag, which
+ // takes a while.
+ t.Skip("skip in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ tmpdir := t.TempDir()
+ src := filepath.Join(tmpdir, "x.go")
+ err := os.WriteFile(src, []byte(helloSrc), 0644)
+ if err != nil {
+ t.Fatalf("write file failed: %v", err)
+ }
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "run", "-gcflags=all="+flag, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("go run failed: %v\n%s", err, out)
+ }
+ if string(out) != "hello\n" {
+ t.Errorf("wrong output: got %q, want %q", out, "hello\n")
+ }
+}
diff --git a/src/cmd/compile/internal/test/constFold_test.go b/src/cmd/compile/internal/test/constFold_test.go
new file mode 100644
index 0000000..7159f0e
--- /dev/null
+++ b/src/cmd/compile/internal/test/constFold_test.go
@@ -0,0 +1,18111 @@
+// run
+// Code generated by gen/constFoldGen.go. DO NOT EDIT.
+
+package test
+
+import "testing"
+
+func TestConstFolduint64add(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 18446744073709551615 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 18446744073709551615", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "+", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967295", "+", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x + y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 18446744073709551614", "+", r)
+ }
+}
+func TestConstFolduint64sub(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 18446744073709551615 {
+ t.Errorf("0 %s 1 = %d, want 18446744073709551615", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584320 {
+ t.Errorf("0 %s 4294967296 = %d, want 18446744069414584320", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584321 {
+ t.Errorf("1 %s 4294967296 = %d, want 18446744069414584321", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 2", "-", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967297", "-", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x - y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584319 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584319", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint64div(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 4294967296
+ y = 1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 18446744073709551615
+ y = 1
+ r = x / y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 1 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint64mul(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 18446744073709551615 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 18446744073709551615", "*", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 18446744069414584320 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 18446744069414584320", "*", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 18446744069414584320 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584320", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 1 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint64mod(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 1", "%", r)
+ }
+ x = 4294967296
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967296", "%", r)
+ }
+ x = 18446744073709551615
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "%", r)
+ }
+}
+func TestConstFoldint64add(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x + y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 9223372032559808512", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775807", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -9223372032559808512", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "+", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x + y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 2", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808513 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 9223372032559808513", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775806", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808511 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -9223372032559808511", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "+", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x + y
+ if r != 9223372032559808512 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 9223372032559808513 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808513", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want -8589934592", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -4294967297 {
+ t.Errorf("-4294967296 %s -1 = %d, want -4294967297", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -4294967295 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967295", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372032559808510 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808510", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372032559808511 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808511", "+", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want -9223372036854775808", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967297 {
+ t.Errorf("-1 %s -4294967296 = %d, want -4294967297", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("-1 %s 4294967296 = %d, want 4294967295", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775805 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want 9223372036854775805", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want 9223372036854775806", "+", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want -9223372036854775807", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967296 {
+ t.Errorf("0 %s -4294967296 = %d, want -4294967296", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 9223372036854775807", "+", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775806 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775806", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967295 {
+ t.Errorf("1 %s -4294967296 = %d, want -4294967295", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775808", "+", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372032559808512 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372032559808511 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808511", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s -1 = %d, want 4294967295", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -9223372032559808514 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808514", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -9223372032559808513 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808513", "+", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x + y
+ if r != -2 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -1 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -1", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808510 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 9223372032559808510", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775805 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775805", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808514 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want -9223372032559808514", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want -4", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -3 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -3", "+", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x + y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808511 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 9223372032559808511", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808513 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want -9223372032559808513", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -3 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -3", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint64sub(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x - y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want -9223372032559808512", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 9223372032559808512", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 2", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want 1", "-", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x - y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808511 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -9223372032559808511", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775806", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808513 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 9223372032559808513", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 3 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 3", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 2", "-", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x - y
+ if r != 9223372032559808512 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372032559808511 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808511", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -4294967295 {
+ t.Errorf("-4294967296 %s -1 = %d, want -4294967295", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -4294967297 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967297", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -8589934592", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 9223372032559808514 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808514", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 9223372032559808513 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808513", "-", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775806", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("-1 %s -4294967296 = %d, want 4294967295", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967297 {
+ t.Errorf("-1 %s 4294967296 = %d, want -4294967297", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775808", "-", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 9223372036854775807", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("0 %s -4294967296 = %d, want 4294967296", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want -4294967296", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want -9223372036854775806", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want -9223372036854775807", "-", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775808", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("1 %s -4294967296 = %d, want 4294967297", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967295 {
+ t.Errorf("1 %s 4294967296 = %d, want -4294967295", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775805 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want -9223372036854775805", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775806", "-", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372032559808512 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -9223372032559808513 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808513", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 8589934592", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s -1 = %d, want 4294967297", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372032559808510 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808510", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372032559808511 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808511", "-", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x - y
+ if r != -2 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -3 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -3", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808514 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want -9223372032559808514", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775805 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775805", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808510 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 9223372032559808510", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -1 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -1", "-", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x - y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -2", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808513 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want -9223372032559808513", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775806", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808511 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 9223372032559808511", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint64div(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 1", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 2147483648 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 2147483648", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -2147483648", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "/", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "/", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -1 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s -1 = %d, want -4294967296", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint64mul(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -9223372036854775808", "*", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "*", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 8589934592 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 8589934592", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 4294967296", "*", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775807", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-1 %s -4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-1 %s 4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775807", "*", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775807", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("1 %s -4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 9223372036854775807", "*", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s -1 = %d, want -4294967296", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -8589934592 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -8589934592", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -4294967296", "*", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 8589934592 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 8589934592", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -8589934592 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want -8589934592", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 4 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 4", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -9223372036854775806", "*", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint64mod(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "%", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x % y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775807", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != -4294967295 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967295", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != -4294967295 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -4294967295", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want -4294967296", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want -4294967296", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want -4294967296", "%", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want -1", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want -1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -4294967296 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want 1", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want 1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -4294967296 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 1", "%", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 4294967296", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want 4294967296", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want 4294967296", "%", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 9223372036854775806", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 4294967294 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 4294967294", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967294 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 4294967294", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 9223372036854775806", "%", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x % y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 9223372036854775807", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+}
+func TestConstFolduint32add(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("0 %s 4294967295 = %d, want 4294967295", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "+", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("4294967295 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 4294967294", "+", r)
+ }
+}
+func TestConstFolduint32sub(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("0 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 4294967295 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 4294967295 = %d, want 2", "-", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint32div(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "/", r)
+ }
+ x = 4294967295
+ y = 1
+ r = x / y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967295", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint32mul(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 4294967295 {
+ t.Errorf("1 %s 4294967295 = %d, want 4294967295", "*", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967295 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967295", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 1 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint32mod(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967295 = %d, want 1", "%", r)
+ }
+ x = 4294967295
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967295 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "%", r)
+ }
+}
+func TestConstFoldint32add(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x + y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("-2147483648 %s -1 = %d, want 2147483647", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483647", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "+", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x + y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 2 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s -1 = %d, want -2147483648", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483646 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483646", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-1 %s -2147483647 = %d, want -2147483648", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 2147483646 {
+ t.Errorf("-1 %s 2147483647 = %d, want 2147483646", "+", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("0 %s -2147483648 = %d, want -2147483648", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("0 %s -2147483647 = %d, want -2147483647", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("0 %s 2147483647 = %d, want 2147483647", "+", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483647", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483646 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483646", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("1 %s 2147483647 = %d, want -2147483648", "+", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x + y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -1", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 2147483646 {
+ t.Errorf("2147483647 %s -1 = %d, want 2147483646", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s 1 = %d, want -2147483648", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -2 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint32sub(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x - y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483647", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("-2147483648 %s 1 = %d, want 2147483647", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want 1", "-", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x - y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483646 {
+ t.Errorf("-2147483647 %s -1 = %d, want -2147483646", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483648", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 2147483646 {
+ t.Errorf("-1 %s -2147483647 = %d, want 2147483646", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-1 %s 2147483647 = %d, want -2147483648", "-", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("0 %s -2147483648 = %d, want -2147483648", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("0 %s -2147483647 = %d, want 2147483647", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("0 %s 2147483647 = %d, want -2147483647", "-", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483647", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483648", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483646 {
+ t.Errorf("1 %s 2147483647 = %d, want -2147483646", "-", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x - y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -1", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -2 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483648", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 2147483646 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483646", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 0 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint32div(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 1", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "/", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483647", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483647", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint32mul(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -2147483648", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -2147483648", "*", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-1 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483647 = %d, want 2147483647", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("-1 %s 2147483647 = %d, want -2147483647", "*", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483647", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("1 %s 2147483647 = %d, want 2147483647", "*", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483647", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("2147483647 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483647", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 1 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint32mod(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "%", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x % y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483647", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -2147483648 = %d, want -1", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -2147483647 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 2147483647 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -2147483648 = %d, want 1", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -2147483647 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 2147483647 = %d, want 1", "%", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x % y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want 2147483647", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 0", "%", r)
+ }
+}
+func TestConstFolduint16add(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 65535 {
+ t.Errorf("0 %s 65535 = %d, want 65535", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "+", r)
+ }
+ x = 65535
+ y = 0
+ r = x + y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("65535 %s 1 = %d, want 0", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 65534 {
+ t.Errorf("65535 %s 65535 = %d, want 65534", "+", r)
+ }
+}
+func TestConstFolduint16sub(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 65535 {
+ t.Errorf("0 %s 1 = %d, want 65535", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 65535 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 65535 = %d, want 2", "-", r)
+ }
+ x = 65535
+ y = 0
+ r = x - y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint16div(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "/", r)
+ }
+ x = 65535
+ y = 1
+ r = x / y
+ if r != 65535 {
+ t.Errorf("65535 %s 1 = %d, want 65535", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 1 {
+ t.Errorf("65535 %s 65535 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint16mul(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 65535 {
+ t.Errorf("1 %s 65535 = %d, want 65535", "*", r)
+ }
+ x = 65535
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("65535 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 65535 {
+ t.Errorf("65535 %s 1 = %d, want 65535", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 1 {
+ t.Errorf("65535 %s 65535 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint16mod(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 65535 = %d, want 1", "%", r)
+ }
+ x = 65535
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("65535 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "%", r)
+ }
+}
+func TestConstFoldint16add(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x + y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 1 {
+ t.Errorf("-32768 %s -32767 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32767 {
+ t.Errorf("-32768 %s -1 = %d, want 32767", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32767 {
+ t.Errorf("-32768 %s 1 = %d, want -32767", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -2 {
+ t.Errorf("-32768 %s 32766 = %d, want -2", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "+", r)
+ }
+ x = -32767
+ y = -32768
+ r = x + y
+ if r != 1 {
+ t.Errorf("-32767 %s -32768 = %d, want 1", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 2 {
+ t.Errorf("-32767 %s -32767 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-32767 %s -1 = %d, want -32768", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32766 {
+ t.Errorf("-32767 %s 1 = %d, want -32766", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 0 {
+ t.Errorf("-32767 %s 32767 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -32768
+ r = x + y
+ if r != 32767 {
+ t.Errorf("-1 %s -32768 = %d, want 32767", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-1 %s -32767 = %d, want -32768", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32765 {
+ t.Errorf("-1 %s 32766 = %d, want 32765", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 32766 {
+ t.Errorf("-1 %s 32767 = %d, want 32766", "+", r)
+ }
+ x = 0
+ y = -32768
+ r = x + y
+ if r != -32768 {
+ t.Errorf("0 %s -32768 = %d, want -32768", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32767 {
+ t.Errorf("0 %s -32767 = %d, want -32767", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32766 {
+ t.Errorf("0 %s 32766 = %d, want 32766", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 32767 {
+ t.Errorf("0 %s 32767 = %d, want 32767", "+", r)
+ }
+ x = 1
+ y = -32768
+ r = x + y
+ if r != -32767 {
+ t.Errorf("1 %s -32768 = %d, want -32767", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32766 {
+ t.Errorf("1 %s -32767 = %d, want -32766", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32767 {
+ t.Errorf("1 %s 32766 = %d, want 32767", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -32768 {
+ t.Errorf("1 %s 32767 = %d, want -32768", "+", r)
+ }
+ x = 32766
+ y = -32768
+ r = x + y
+ if r != -2 {
+ t.Errorf("32766 %s -32768 = %d, want -2", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -1 {
+ t.Errorf("32766 %s -32767 = %d, want -1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32765 {
+ t.Errorf("32766 %s -1 = %d, want 32765", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 32767 {
+ t.Errorf("32766 %s 1 = %d, want 32767", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -4 {
+ t.Errorf("32766 %s 32766 = %d, want -4", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -3 {
+ t.Errorf("32766 %s 32767 = %d, want -3", "+", r)
+ }
+ x = 32767
+ y = -32768
+ r = x + y
+ if r != -1 {
+ t.Errorf("32767 %s -32768 = %d, want -1", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 0 {
+ t.Errorf("32767 %s -32767 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32766 {
+ t.Errorf("32767 %s -1 = %d, want 32766", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32768 {
+ t.Errorf("32767 %s 1 = %d, want -32768", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -3 {
+ t.Errorf("32767 %s 32766 = %d, want -3", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -2 {
+ t.Errorf("32767 %s 32767 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint16sub(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x - y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -1 {
+ t.Errorf("-32768 %s -32767 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-32768 %s -1 = %d, want -32767", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32767 {
+ t.Errorf("-32768 %s 1 = %d, want 32767", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 2 {
+ t.Errorf("-32768 %s 32766 = %d, want 2", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 1 {
+ t.Errorf("-32768 %s 32767 = %d, want 1", "-", r)
+ }
+ x = -32767
+ y = -32768
+ r = x - y
+ if r != 1 {
+ t.Errorf("-32767 %s -32768 = %d, want 1", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 0 {
+ t.Errorf("-32767 %s -32767 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32766 {
+ t.Errorf("-32767 %s -1 = %d, want -32766", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-32767 %s 1 = %d, want -32768", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 3 {
+ t.Errorf("-32767 %s 32766 = %d, want 3", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 2 {
+ t.Errorf("-32767 %s 32767 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -32768
+ r = x - y
+ if r != 32767 {
+ t.Errorf("-1 %s -32768 = %d, want 32767", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 32766 {
+ t.Errorf("-1 %s -32767 = %d, want 32766", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-1 %s 32766 = %d, want -32767", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-1 %s 32767 = %d, want -32768", "-", r)
+ }
+ x = 0
+ y = -32768
+ r = x - y
+ if r != -32768 {
+ t.Errorf("0 %s -32768 = %d, want -32768", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 32767 {
+ t.Errorf("0 %s -32767 = %d, want 32767", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32766 {
+ t.Errorf("0 %s 32766 = %d, want -32766", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32767 {
+ t.Errorf("0 %s 32767 = %d, want -32767", "-", r)
+ }
+ x = 1
+ y = -32768
+ r = x - y
+ if r != -32767 {
+ t.Errorf("1 %s -32768 = %d, want -32767", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -32768 {
+ t.Errorf("1 %s -32767 = %d, want -32768", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32765 {
+ t.Errorf("1 %s 32766 = %d, want -32765", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32766 {
+ t.Errorf("1 %s 32767 = %d, want -32766", "-", r)
+ }
+ x = 32766
+ y = -32768
+ r = x - y
+ if r != -2 {
+ t.Errorf("32766 %s -32768 = %d, want -2", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -3 {
+ t.Errorf("32766 %s -32767 = %d, want -3", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 32767 {
+ t.Errorf("32766 %s -1 = %d, want 32767", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32765 {
+ t.Errorf("32766 %s 1 = %d, want 32765", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 0 {
+ t.Errorf("32766 %s 32766 = %d, want 0", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -1 {
+ t.Errorf("32766 %s 32767 = %d, want -1", "-", r)
+ }
+ x = 32767
+ y = -32768
+ r = x - y
+ if r != -1 {
+ t.Errorf("32767 %s -32768 = %d, want -1", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -2 {
+ t.Errorf("32767 %s -32767 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32768 {
+ t.Errorf("32767 %s -1 = %d, want -32768", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32766 {
+ t.Errorf("32767 %s 1 = %d, want 32766", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 0 {
+ t.Errorf("32767 %s 32767 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint16div(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32768 %s -32768 = %d, want 1", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32768 %s -32767 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32768 {
+ t.Errorf("-32768 %s -1 = %d, want -32768", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -32768 {
+ t.Errorf("-32768 %s 1 = %d, want -32768", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32768 %s 32766 = %d, want -1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "/", r)
+ }
+ x = -32767
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("-32767 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32767 %s -32767 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 32767 {
+ t.Errorf("-32767 %s -1 = %d, want 32767", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -32767 {
+ t.Errorf("-32767 %s 1 = %d, want -32767", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32767 %s 32767 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 32766
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32766 {
+ t.Errorf("32766 %s -1 = %d, want -32766", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 32766 {
+ t.Errorf("32766 %s 1 = %d, want 32766", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 1 {
+ t.Errorf("32766 %s 32766 = %d, want 1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 32767
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("32767 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("32767 %s -32767 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32767 {
+ t.Errorf("32767 %s -1 = %d, want -32767", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 32767 {
+ t.Errorf("32767 %s 1 = %d, want 32767", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("32767 %s 32767 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint16mul(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s -32767 = %d, want -32768", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s -1 = %d, want -32768", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s 1 = %d, want -32768", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s 32766 = %d, want 0", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s 32767 = %d, want -32768", "*", r)
+ }
+ x = -32767
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32767 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 1 {
+ t.Errorf("-32767 %s -32767 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 32767 {
+ t.Errorf("-32767 %s -1 = %d, want 32767", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32767 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -32767 {
+ t.Errorf("-32767 %s 1 = %d, want -32767", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 32766 {
+ t.Errorf("-32767 %s 32766 = %d, want 32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -1 {
+ t.Errorf("-32767 %s 32767 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-1 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 32767 {
+ t.Errorf("-1 %s -32767 = %d, want 32767", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != -32766 {
+ t.Errorf("-1 %s 32766 = %d, want -32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32767 {
+ t.Errorf("-1 %s 32767 = %d, want -32767", "*", r)
+ }
+ x = 0
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("1 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -32767 {
+ t.Errorf("1 %s -32767 = %d, want -32767", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 32766 {
+ t.Errorf("1 %s 32766 = %d, want 32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 32767 {
+ t.Errorf("1 %s 32767 = %d, want 32767", "*", r)
+ }
+ x = 32766
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("32766 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 32766 {
+ t.Errorf("32766 %s -32767 = %d, want 32766", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32766 %s -1 = %d, want -32766", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("32766 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 32766 {
+ t.Errorf("32766 %s 1 = %d, want 32766", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 4 {
+ t.Errorf("32766 %s 32766 = %d, want 4", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32766 %s 32767 = %d, want -32766", "*", r)
+ }
+ x = 32767
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("32767 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -1 {
+ t.Errorf("32767 %s -32767 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32767 {
+ t.Errorf("32767 %s -1 = %d, want -32767", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("32767 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 32767 {
+ t.Errorf("32767 %s 1 = %d, want 32767", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32767 %s 32766 = %d, want -32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 1 {
+ t.Errorf("32767 %s 32767 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint16mod(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32768 %s -32767 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -2 {
+ t.Errorf("-32768 %s 32766 = %d, want -2", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "%", r)
+ }
+ x = -32767
+ y = -32768
+ r = x % y
+ if r != -32767 {
+ t.Errorf("-32767 %s -32768 = %d, want -32767", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s 32767 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -32768
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -32768 = %d, want -1", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -32767 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 32766 = %d, want -1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 32767 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -32768
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -32768
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -32768 = %d, want 1", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -32767 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 32766 = %d, want 1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 32767 = %d, want 1", "%", r)
+ }
+ x = 32766
+ y = -32768
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s -32768 = %d, want 32766", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s -32767 = %d, want 32766", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s 32766 = %d, want 0", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s 32767 = %d, want 32766", "%", r)
+ }
+ x = 32767
+ y = -32768
+ r = x % y
+ if r != 32767 {
+ t.Errorf("32767 %s -32768 = %d, want 32767", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s 32767 = %d, want 0", "%", r)
+ }
+}
+func TestConstFolduint8add(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 255 {
+ t.Errorf("0 %s 255 = %d, want 255", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "+", r)
+ }
+ x = 255
+ y = 0
+ r = x + y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("255 %s 1 = %d, want 0", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 254 {
+ t.Errorf("255 %s 255 = %d, want 254", "+", r)
+ }
+}
+func TestConstFolduint8sub(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 255 {
+ t.Errorf("0 %s 1 = %d, want 255", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 255 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 255 = %d, want 2", "-", r)
+ }
+ x = 255
+ y = 0
+ r = x - y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint8div(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "/", r)
+ }
+ x = 255
+ y = 1
+ r = x / y
+ if r != 255 {
+ t.Errorf("255 %s 1 = %d, want 255", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 1 {
+ t.Errorf("255 %s 255 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint8mul(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 255 {
+ t.Errorf("1 %s 255 = %d, want 255", "*", r)
+ }
+ x = 255
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("255 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 255 {
+ t.Errorf("255 %s 1 = %d, want 255", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 1 {
+ t.Errorf("255 %s 255 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint8mod(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 255 = %d, want 1", "%", r)
+ }
+ x = 255
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("255 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "%", r)
+ }
+}
+func TestConstFoldint8add(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x + y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 1 {
+ t.Errorf("-128 %s -127 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 127 {
+ t.Errorf("-128 %s -1 = %d, want 127", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -127 {
+ t.Errorf("-128 %s 1 = %d, want -127", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -2 {
+ t.Errorf("-128 %s 126 = %d, want -2", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "+", r)
+ }
+ x = -127
+ y = -128
+ r = x + y
+ if r != 1 {
+ t.Errorf("-127 %s -128 = %d, want 1", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 2 {
+ t.Errorf("-127 %s -127 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -128 {
+ t.Errorf("-127 %s -1 = %d, want -128", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -126 {
+ t.Errorf("-127 %s 1 = %d, want -126", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 0 {
+ t.Errorf("-127 %s 127 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -128
+ r = x + y
+ if r != 127 {
+ t.Errorf("-1 %s -128 = %d, want 127", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -128 {
+ t.Errorf("-1 %s -127 = %d, want -128", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 125 {
+ t.Errorf("-1 %s 126 = %d, want 125", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 126 {
+ t.Errorf("-1 %s 127 = %d, want 126", "+", r)
+ }
+ x = 0
+ y = -128
+ r = x + y
+ if r != -128 {
+ t.Errorf("0 %s -128 = %d, want -128", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -127 {
+ t.Errorf("0 %s -127 = %d, want -127", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 126 {
+ t.Errorf("0 %s 126 = %d, want 126", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 127 {
+ t.Errorf("0 %s 127 = %d, want 127", "+", r)
+ }
+ x = 1
+ y = -128
+ r = x + y
+ if r != -127 {
+ t.Errorf("1 %s -128 = %d, want -127", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -126 {
+ t.Errorf("1 %s -127 = %d, want -126", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 127 {
+ t.Errorf("1 %s 126 = %d, want 127", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -128 {
+ t.Errorf("1 %s 127 = %d, want -128", "+", r)
+ }
+ x = 126
+ y = -128
+ r = x + y
+ if r != -2 {
+ t.Errorf("126 %s -128 = %d, want -2", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -1 {
+ t.Errorf("126 %s -127 = %d, want -1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 125 {
+ t.Errorf("126 %s -1 = %d, want 125", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 127 {
+ t.Errorf("126 %s 1 = %d, want 127", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -4 {
+ t.Errorf("126 %s 126 = %d, want -4", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -3 {
+ t.Errorf("126 %s 127 = %d, want -3", "+", r)
+ }
+ x = 127
+ y = -128
+ r = x + y
+ if r != -1 {
+ t.Errorf("127 %s -128 = %d, want -1", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 0 {
+ t.Errorf("127 %s -127 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 126 {
+ t.Errorf("127 %s -1 = %d, want 126", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -128 {
+ t.Errorf("127 %s 1 = %d, want -128", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -3 {
+ t.Errorf("127 %s 126 = %d, want -3", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -2 {
+ t.Errorf("127 %s 127 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint8sub(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x - y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -1 {
+ t.Errorf("-128 %s -127 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -127 {
+ t.Errorf("-128 %s -1 = %d, want -127", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 127 {
+ t.Errorf("-128 %s 1 = %d, want 127", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 2 {
+ t.Errorf("-128 %s 126 = %d, want 2", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 1 {
+ t.Errorf("-128 %s 127 = %d, want 1", "-", r)
+ }
+ x = -127
+ y = -128
+ r = x - y
+ if r != 1 {
+ t.Errorf("-127 %s -128 = %d, want 1", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 0 {
+ t.Errorf("-127 %s -127 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -126 {
+ t.Errorf("-127 %s -1 = %d, want -126", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -128 {
+ t.Errorf("-127 %s 1 = %d, want -128", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 3 {
+ t.Errorf("-127 %s 126 = %d, want 3", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 2 {
+ t.Errorf("-127 %s 127 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -128
+ r = x - y
+ if r != 127 {
+ t.Errorf("-1 %s -128 = %d, want 127", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 126 {
+ t.Errorf("-1 %s -127 = %d, want 126", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -127 {
+ t.Errorf("-1 %s 126 = %d, want -127", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -128 {
+ t.Errorf("-1 %s 127 = %d, want -128", "-", r)
+ }
+ x = 0
+ y = -128
+ r = x - y
+ if r != -128 {
+ t.Errorf("0 %s -128 = %d, want -128", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 127 {
+ t.Errorf("0 %s -127 = %d, want 127", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -126 {
+ t.Errorf("0 %s 126 = %d, want -126", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -127 {
+ t.Errorf("0 %s 127 = %d, want -127", "-", r)
+ }
+ x = 1
+ y = -128
+ r = x - y
+ if r != -127 {
+ t.Errorf("1 %s -128 = %d, want -127", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -128 {
+ t.Errorf("1 %s -127 = %d, want -128", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -125 {
+ t.Errorf("1 %s 126 = %d, want -125", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -126 {
+ t.Errorf("1 %s 127 = %d, want -126", "-", r)
+ }
+ x = 126
+ y = -128
+ r = x - y
+ if r != -2 {
+ t.Errorf("126 %s -128 = %d, want -2", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -3 {
+ t.Errorf("126 %s -127 = %d, want -3", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 127 {
+ t.Errorf("126 %s -1 = %d, want 127", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 125 {
+ t.Errorf("126 %s 1 = %d, want 125", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 0 {
+ t.Errorf("126 %s 126 = %d, want 0", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -1 {
+ t.Errorf("126 %s 127 = %d, want -1", "-", r)
+ }
+ x = 127
+ y = -128
+ r = x - y
+ if r != -1 {
+ t.Errorf("127 %s -128 = %d, want -1", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -2 {
+ t.Errorf("127 %s -127 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -128 {
+ t.Errorf("127 %s -1 = %d, want -128", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 126 {
+ t.Errorf("127 %s 1 = %d, want 126", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 0 {
+ t.Errorf("127 %s 127 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint8div(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x / y
+ if r != 1 {
+ t.Errorf("-128 %s -128 = %d, want 1", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 1 {
+ t.Errorf("-128 %s -127 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -128 {
+ t.Errorf("-128 %s -1 = %d, want -128", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -128 {
+ t.Errorf("-128 %s 1 = %d, want -128", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != -1 {
+ t.Errorf("-128 %s 126 = %d, want -1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "/", r)
+ }
+ x = -127
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("-127 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 1 {
+ t.Errorf("-127 %s -127 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 127 {
+ t.Errorf("-127 %s -1 = %d, want 127", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -127 {
+ t.Errorf("-127 %s 1 = %d, want -127", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != -1 {
+ t.Errorf("-127 %s 127 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 127 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 127 = %d, want 0", "/", r)
+ }
+ x = 126
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -126 {
+ t.Errorf("126 %s -1 = %d, want -126", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 126 {
+ t.Errorf("126 %s 1 = %d, want 126", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 1 {
+ t.Errorf("126 %s 126 = %d, want 1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s 127 = %d, want 0", "/", r)
+ }
+ x = 127
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("127 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != -1 {
+ t.Errorf("127 %s -127 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -127 {
+ t.Errorf("127 %s -1 = %d, want -127", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 127 {
+ t.Errorf("127 %s 1 = %d, want 127", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 1 {
+ t.Errorf("127 %s 127 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint8mul(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s -127 = %d, want -128", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s -1 = %d, want -128", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s 1 = %d, want -128", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s 126 = %d, want 0", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s 127 = %d, want -128", "*", r)
+ }
+ x = -127
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("-127 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 1 {
+ t.Errorf("-127 %s -127 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 127 {
+ t.Errorf("-127 %s -1 = %d, want 127", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-127 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -127 {
+ t.Errorf("-127 %s 1 = %d, want -127", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 126 {
+ t.Errorf("-127 %s 126 = %d, want 126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -1 {
+ t.Errorf("-127 %s 127 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("-1 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 127 {
+ t.Errorf("-1 %s -127 = %d, want 127", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != -126 {
+ t.Errorf("-1 %s 126 = %d, want -126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -127 {
+ t.Errorf("-1 %s 127 = %d, want -127", "*", r)
+ }
+ x = 0
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("1 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -127 {
+ t.Errorf("1 %s -127 = %d, want -127", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 126 {
+ t.Errorf("1 %s 126 = %d, want 126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 127 {
+ t.Errorf("1 %s 127 = %d, want 127", "*", r)
+ }
+ x = 126
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("126 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 126 {
+ t.Errorf("126 %s -127 = %d, want 126", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -126 {
+ t.Errorf("126 %s -1 = %d, want -126", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("126 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 126 {
+ t.Errorf("126 %s 1 = %d, want 126", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 4 {
+ t.Errorf("126 %s 126 = %d, want 4", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -126 {
+ t.Errorf("126 %s 127 = %d, want -126", "*", r)
+ }
+ x = 127
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("127 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -1 {
+ t.Errorf("127 %s -127 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -127 {
+ t.Errorf("127 %s -1 = %d, want -127", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("127 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 127 {
+ t.Errorf("127 %s 1 = %d, want 127", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != -126 {
+ t.Errorf("127 %s 126 = %d, want -126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 1 {
+ t.Errorf("127 %s 127 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint8mod(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-128 %s -127 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -2 {
+ t.Errorf("-128 %s 126 = %d, want -2", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "%", r)
+ }
+ x = -127
+ y = -128
+ r = x % y
+ if r != -127 {
+ t.Errorf("-127 %s -128 = %d, want -127", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s 127 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -128
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -128 = %d, want -1", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -127 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 126 = %d, want -1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 127 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -128
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -128
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -128 = %d, want 1", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -127 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 126 = %d, want 1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 127 = %d, want 1", "%", r)
+ }
+ x = 126
+ y = -128
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s -128 = %d, want 126", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s -127 = %d, want 126", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s 126 = %d, want 0", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s 127 = %d, want 126", "%", r)
+ }
+ x = 127
+ y = -128
+ r = x % y
+ if r != 127 {
+ t.Errorf("127 %s -128 = %d, want 127", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s 127 = %d, want 0", "%", r)
+ }
+}
+func TestConstFolduint64uint64lsh(t *testing.T) {
+ var x, r uint64
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint64rsh(t *testing.T) {
+ var x, r uint64
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint32lsh(t *testing.T) {
+ var x, r uint64
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint32rsh(t *testing.T) {
+ var x, r uint64
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint16lsh(t *testing.T) {
+ var x, r uint64
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint16rsh(t *testing.T) {
+ var x, r uint64
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint8lsh(t *testing.T) {
+ var x, r uint64
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint8rsh(t *testing.T) {
+ var x, r uint64
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint64lsh(t *testing.T) {
+ var x, r int64
+ var y uint64
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint64rsh(t *testing.T) {
+ var x, r int64
+ var y uint64
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint32lsh(t *testing.T) {
+ var x, r int64
+ var y uint32
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint32rsh(t *testing.T) {
+ var x, r int64
+ var y uint32
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint16lsh(t *testing.T) {
+ var x, r int64
+ var y uint16
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint16rsh(t *testing.T) {
+ var x, r int64
+ var y uint16
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint8lsh(t *testing.T) {
+ var x, r int64
+ var y uint8
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint8rsh(t *testing.T) {
+ var x, r int64
+ var y uint8
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint64lsh(t *testing.T) {
+ var x, r uint32
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint64rsh(t *testing.T) {
+ var x, r uint32
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint32lsh(t *testing.T) {
+ var x, r uint32
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint32rsh(t *testing.T) {
+ var x, r uint32
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint16lsh(t *testing.T) {
+ var x, r uint32
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint16rsh(t *testing.T) {
+ var x, r uint32
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint8lsh(t *testing.T) {
+ var x, r uint32
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint8rsh(t *testing.T) {
+ var x, r uint32
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint64lsh(t *testing.T) {
+ var x, r int32
+ var y uint64
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint64rsh(t *testing.T) {
+ var x, r int32
+ var y uint64
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint32lsh(t *testing.T) {
+ var x, r int32
+ var y uint32
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint32rsh(t *testing.T) {
+ var x, r int32
+ var y uint32
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint16lsh(t *testing.T) {
+ var x, r int32
+ var y uint16
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint16rsh(t *testing.T) {
+ var x, r int32
+ var y uint16
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint8lsh(t *testing.T) {
+ var x, r int32
+ var y uint8
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint8rsh(t *testing.T) {
+ var x, r int32
+ var y uint8
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint64lsh(t *testing.T) {
+ var x, r uint16
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint64rsh(t *testing.T) {
+ var x, r uint16
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint32lsh(t *testing.T) {
+ var x, r uint16
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint32rsh(t *testing.T) {
+ var x, r uint16
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint16lsh(t *testing.T) {
+ var x, r uint16
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint16rsh(t *testing.T) {
+ var x, r uint16
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint8lsh(t *testing.T) {
+ var x, r uint16
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint8rsh(t *testing.T) {
+ var x, r uint16
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint64lsh(t *testing.T) {
+ var x, r int16
+ var y uint64
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint64rsh(t *testing.T) {
+ var x, r int16
+ var y uint64
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint32lsh(t *testing.T) {
+ var x, r int16
+ var y uint32
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint32rsh(t *testing.T) {
+ var x, r int16
+ var y uint32
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint16lsh(t *testing.T) {
+ var x, r int16
+ var y uint16
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint16rsh(t *testing.T) {
+ var x, r int16
+ var y uint16
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint8lsh(t *testing.T) {
+ var x, r int16
+ var y uint8
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint8rsh(t *testing.T) {
+ var x, r int16
+ var y uint8
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint64lsh(t *testing.T) {
+ var x, r uint8
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint64rsh(t *testing.T) {
+ var x, r uint8
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint32lsh(t *testing.T) {
+ var x, r uint8
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint32rsh(t *testing.T) {
+ var x, r uint8
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint16lsh(t *testing.T) {
+ var x, r uint8
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint16rsh(t *testing.T) {
+ var x, r uint8
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint8lsh(t *testing.T) {
+ var x, r uint8
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint8rsh(t *testing.T) {
+ var x, r uint8
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint64lsh(t *testing.T) {
+ var x, r int8
+ var y uint64
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint64rsh(t *testing.T) {
+ var x, r int8
+ var y uint64
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint32lsh(t *testing.T) {
+ var x, r int8
+ var y uint32
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint32rsh(t *testing.T) {
+ var x, r int8
+ var y uint32
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint16lsh(t *testing.T) {
+ var x, r int8
+ var y uint16
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint16rsh(t *testing.T) {
+ var x, r int8
+ var y uint16
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint8lsh(t *testing.T) {
+ var x, r int8
+ var y uint8
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint8rsh(t *testing.T) {
+ var x, r int8
+ var y uint8
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldCompareuint64(t *testing.T) {
+ {
+ var x uint64 = 0
+ var y uint64 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 18446744073709551615
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint64(t *testing.T) {
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -9223372036854775808
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -9223372036854775807
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 9223372036854775806
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 9223372036854775807
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint32(t *testing.T) {
+ {
+ var x uint32 = 0
+ var y uint32 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 0
+ var y uint32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 0
+ var y uint32 = 4294967295
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 4294967295
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 4294967295
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint32(t *testing.T) {
+ {
+ var x int32 = -2147483648
+ var y int32 = -2147483648
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -2147483647
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 2147483647
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint16(t *testing.T) {
+ {
+ var x uint16 = 0
+ var y uint16 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 0
+ var y uint16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 0
+ var y uint16 = 65535
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 65535
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 65535
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint16(t *testing.T) {
+ {
+ var x int16 = -32768
+ var y int16 = -32768
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -32767
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 32766
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 32767
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint8(t *testing.T) {
+ {
+ var x uint8 = 0
+ var y uint8 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 0
+ var y uint8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 0
+ var y uint8 = 255
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 255
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 255
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint8(t *testing.T) {
+ {
+ var x int8 = -128
+ var y int8 = -128
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -127
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 126
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 127
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/dep_test.go b/src/cmd/compile/internal/test/dep_test.go
new file mode 100644
index 0000000..d141f10
--- /dev/null
+++ b/src/cmd/compile/internal/test/dep_test.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "strings"
+ "testing"
+)
+
+func TestDeps(t *testing.T) {
+ out, err := testenv.Command(t, testenv.GoToolPath(t), "list", "-f", "{{.Deps}}", "cmd/compile/internal/gc").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
+ switch dep {
+ case "go/build", "go/scanner":
+ // cmd/compile/internal/importer introduces a dependency
+ // on go/build and go/token; cmd/compile/internal/ uses
+ // go/constant which uses go/token in its API. Once we
+ // got rid of those dependencies, enable this check again.
+ // TODO(gri) fix this
+ // t.Errorf("undesired dependency on %q", dep)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/divconst_test.go b/src/cmd/compile/internal/test/divconst_test.go
new file mode 100644
index 0000000..9358a60
--- /dev/null
+++ b/src/cmd/compile/internal/test/divconst_test.go
@@ -0,0 +1,325 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var boolres bool
+
+var i64res int64
+
+func BenchmarkDivconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) / 7
+ }
+}
+
+func BenchmarkModconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int64(i)%16 == 0
+ }
+}
+func BenchmarkDivisibleconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int64(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) / 7
+ boolres = int64(i)%7 == 0
+ }
+}
+
+var u64res uint64
+
+func TestDivmodConstU64(t *testing.T) {
+ // Test division by c. Function f must be func(n) { return n/c, n%c }
+ testdiv := func(c uint64, f func(uint64) (uint64, uint64)) func(*testing.T) {
+ return func(t *testing.T) {
+ x := uint64(12345)
+ for i := 0; i < 10000; i++ {
+ x += x << 2
+ q, r := f(x)
+ if r < 0 || r >= c || q*c+r != x {
+ t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
+ }
+ }
+ max := uint64(1<<64-1) / c * c
+ xs := []uint64{0, 1, c - 1, c, c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ c*c - 1, c * c, c*c + 1, max - 1, max, max + 1, 1<<64 - 1}
+ for _, x := range xs {
+ q, r := f(x)
+ if r < 0 || r >= c || q*c+r != x {
+ t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
+ }
+ }
+ }
+ }
+ t.Run("2", testdiv(2, func(n uint64) (uint64, uint64) { return n / 2, n % 2 }))
+ t.Run("3", testdiv(3, func(n uint64) (uint64, uint64) { return n / 3, n % 3 }))
+ t.Run("4", testdiv(4, func(n uint64) (uint64, uint64) { return n / 4, n % 4 }))
+ t.Run("5", testdiv(5, func(n uint64) (uint64, uint64) { return n / 5, n % 5 }))
+ t.Run("6", testdiv(6, func(n uint64) (uint64, uint64) { return n / 6, n % 6 }))
+ t.Run("7", testdiv(7, func(n uint64) (uint64, uint64) { return n / 7, n % 7 }))
+ t.Run("8", testdiv(8, func(n uint64) (uint64, uint64) { return n / 8, n % 8 }))
+ t.Run("9", testdiv(9, func(n uint64) (uint64, uint64) { return n / 9, n % 9 }))
+ t.Run("10", testdiv(10, func(n uint64) (uint64, uint64) { return n / 10, n % 10 }))
+ t.Run("11", testdiv(11, func(n uint64) (uint64, uint64) { return n / 11, n % 11 }))
+ t.Run("12", testdiv(12, func(n uint64) (uint64, uint64) { return n / 12, n % 12 }))
+ t.Run("13", testdiv(13, func(n uint64) (uint64, uint64) { return n / 13, n % 13 }))
+ t.Run("14", testdiv(14, func(n uint64) (uint64, uint64) { return n / 14, n % 14 }))
+ t.Run("15", testdiv(15, func(n uint64) (uint64, uint64) { return n / 15, n % 15 }))
+ t.Run("16", testdiv(16, func(n uint64) (uint64, uint64) { return n / 16, n % 16 }))
+ t.Run("17", testdiv(17, func(n uint64) (uint64, uint64) { return n / 17, n % 17 }))
+ t.Run("255", testdiv(255, func(n uint64) (uint64, uint64) { return n / 255, n % 255 }))
+ t.Run("256", testdiv(256, func(n uint64) (uint64, uint64) { return n / 256, n % 256 }))
+ t.Run("257", testdiv(257, func(n uint64) (uint64, uint64) { return n / 257, n % 257 }))
+ t.Run("65535", testdiv(65535, func(n uint64) (uint64, uint64) { return n / 65535, n % 65535 }))
+ t.Run("65536", testdiv(65536, func(n uint64) (uint64, uint64) { return n / 65536, n % 65536 }))
+ t.Run("65537", testdiv(65537, func(n uint64) (uint64, uint64) { return n / 65537, n % 65537 }))
+ t.Run("1<<32-1", testdiv(1<<32-1, func(n uint64) (uint64, uint64) { return n / (1<<32 - 1), n % (1<<32 - 1) }))
+ t.Run("1<<32+1", testdiv(1<<32+1, func(n uint64) (uint64, uint64) { return n / (1<<32 + 1), n % (1<<32 + 1) }))
+ t.Run("1<<64-1", testdiv(1<<64-1, func(n uint64) (uint64, uint64) { return n / (1<<64 - 1), n % (1<<64 - 1) }))
+}
+
+func BenchmarkDivconstU64(b *testing.B) {
+ b.Run("3", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 3
+ }
+ })
+ b.Run("5", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 5
+ }
+ })
+ b.Run("37", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 37
+ }
+ })
+ b.Run("1234567", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 1234567
+ }
+ })
+}
+
+func BenchmarkModconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64res = uint64(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint64(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64res = uint64(i) / 7
+ boolres = uint64(i)%7 == 0
+ }
+}
+
+var i32res int32
+
+func BenchmarkDivconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) / 7
+ }
+}
+
+func BenchmarkModconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int32(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int32(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) / 7
+ boolres = int32(i)%7 == 0
+ }
+}
+
+var u32res uint32
+
+func BenchmarkDivconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) / 7
+ }
+}
+
+func BenchmarkModconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint32(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) / 7
+ boolres = uint32(i)%7 == 0
+ }
+}
+
+var i16res int16
+
+func BenchmarkDivconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) / 7
+ }
+}
+
+func BenchmarkModconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int16(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int16(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) / 7
+ boolres = int16(i)%7 == 0
+ }
+}
+
+var u16res uint16
+
+func BenchmarkDivconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) / 7
+ }
+}
+
+func BenchmarkModconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint16(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) / 7
+ boolres = uint16(i)%7 == 0
+ }
+}
+
+var i8res int8
+
+func BenchmarkDivconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) / 7
+ }
+}
+
+func BenchmarkModconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int8(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int8(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) / 7
+ boolres = int8(i)%7 == 0
+ }
+}
+
+var u8res uint8
+
+func BenchmarkDivconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) / 7
+ }
+}
+
+func BenchmarkModconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint8(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) / 7
+ boolres = uint8(i)%7 == 0
+ }
+}
diff --git a/src/cmd/compile/internal/test/fixedbugs_test.go b/src/cmd/compile/internal/test/fixedbugs_test.go
new file mode 100644
index 0000000..8ff7a60
--- /dev/null
+++ b/src/cmd/compile/internal/test/fixedbugs_test.go
@@ -0,0 +1,86 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+type T struct {
+ x [2]int64 // field that will be clobbered. Also makes type not SSAable.
+ p *byte // has a pointer
+}
+
+//go:noinline
+func makeT() T {
+ return T{}
+}
+
+var g T
+
+var sink interface{}
+
+func TestIssue15854(t *testing.T) {
+ for i := 0; i < 10000; i++ {
+ if g.x[0] != 0 {
+ t.Fatalf("g.x[0] clobbered with %x\n", g.x[0])
+ }
+ // The bug was in the following assignment. The return
+ // value of makeT() is not copied out of the args area of
+ // stack frame in a timely fashion. So when write barriers
+ // are enabled, the marshaling of the args for the write
+ // barrier call clobbers the result of makeT() before it is
+ // read by the write barrier code.
+ g = makeT()
+ sink = make([]byte, 1000) // force write barriers to eventually happen
+ }
+}
+func TestIssue15854b(t *testing.T) {
+ const N = 10000
+ a := make([]T, N)
+ for i := 0; i < N; i++ {
+ a = append(a, makeT())
+ sink = make([]byte, 1000) // force write barriers to eventually happen
+ }
+ for i, v := range a {
+ if v.x[0] != 0 {
+ t.Fatalf("a[%d].x[0] clobbered with %x\n", i, v.x[0])
+ }
+ }
+}
+
+// Test that the generated assembly has line numbers (Issue #16214).
+func TestIssue16214(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ dir := t.TempDir()
+
+ src := filepath.Join(dir, "x.go")
+ err := os.WriteFile(src, []byte(issue16214src), 0644)
+ if err != nil {
+ t.Fatalf("could not write file: %v", err)
+ }
+
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=main", "-S", "-o", filepath.Join(dir, "out.o"), src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("go tool compile: %v\n%s", err, out)
+ }
+
+ if strings.Contains(string(out), "unknown line number") {
+ t.Errorf("line number missing in assembly:\n%s", out)
+ }
+}
+
+var issue16214src = `
+package main
+
+func Mod32(x uint32) uint32 {
+ return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has unknown Pos
+}
+`
diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go
new file mode 100644
index 0000000..c736f97
--- /dev/null
+++ b/src/cmd/compile/internal/test/float_test.go
@@ -0,0 +1,545 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "math"
+ "testing"
+)
+
+//go:noinline
+func compare1(a, b float64) bool {
+ return a < b
+}
+
+//go:noinline
+func compare2(a, b float32) bool {
+ return a < b
+}
+
+func TestFloatCompare(t *testing.T) {
+ if !compare1(3, 5) {
+ t.Errorf("compare1 returned false")
+ }
+ if !compare2(3, 5) {
+ t.Errorf("compare2 returned false")
+ }
+}
+
+func TestFloatCompareFolded(t *testing.T) {
+ // float64 comparisons
+ d1, d3, d5, d9 := float64(1), float64(3), float64(5), float64(9)
+ if d3 == d5 {
+ t.Errorf("d3 == d5 returned true")
+ }
+ if d3 != d3 {
+ t.Errorf("d3 != d3 returned true")
+ }
+ if d3 > d5 {
+ t.Errorf("d3 > d5 returned true")
+ }
+ if d3 >= d9 {
+ t.Errorf("d3 >= d9 returned true")
+ }
+ if d5 < d1 {
+ t.Errorf("d5 < d1 returned true")
+ }
+ if d9 <= d1 {
+ t.Errorf("d9 <= d1 returned true")
+ }
+ if math.NaN() == math.NaN() {
+ t.Errorf("math.NaN() == math.NaN() returned true")
+ }
+ if math.NaN() >= math.NaN() {
+ t.Errorf("math.NaN() >= math.NaN() returned true")
+ }
+ if math.NaN() <= math.NaN() {
+ t.Errorf("math.NaN() <= math.NaN() returned true")
+ }
+ if math.Copysign(math.NaN(), -1) < math.NaN() {
+ t.Errorf("math.Copysign(math.NaN(), -1) < math.NaN() returned true")
+ }
+ if math.Inf(1) != math.Inf(1) {
+ t.Errorf("math.Inf(1) != math.Inf(1) returned true")
+ }
+ if math.Inf(-1) != math.Inf(-1) {
+ t.Errorf("math.Inf(-1) != math.Inf(-1) returned true")
+ }
+ if math.Copysign(0, -1) != 0 {
+ t.Errorf("math.Copysign(0, -1) != 0 returned true")
+ }
+ if math.Copysign(0, -1) < 0 {
+ t.Errorf("math.Copysign(0, -1) < 0 returned true")
+ }
+ if 0 > math.Copysign(0, -1) {
+ t.Errorf("0 > math.Copysign(0, -1) returned true")
+ }
+
+ // float32 comparisons
+ s1, s3, s5, s9 := float32(1), float32(3), float32(5), float32(9)
+ if s3 == s5 {
+ t.Errorf("s3 == s5 returned true")
+ }
+ if s3 != s3 {
+ t.Errorf("s3 != s3 returned true")
+ }
+ if s3 > s5 {
+ t.Errorf("s3 > s5 returned true")
+ }
+ if s3 >= s9 {
+ t.Errorf("s3 >= s9 returned true")
+ }
+ if s5 < s1 {
+ t.Errorf("s5 < s1 returned true")
+ }
+ if s9 <= s1 {
+ t.Errorf("s9 <= s1 returned true")
+ }
+ sPosNaN, sNegNaN := float32(math.NaN()), float32(math.Copysign(math.NaN(), -1))
+ if sPosNaN == sPosNaN {
+ t.Errorf("sPosNaN == sPosNaN returned true")
+ }
+ if sPosNaN >= sPosNaN {
+ t.Errorf("sPosNaN >= sPosNaN returned true")
+ }
+ if sPosNaN <= sPosNaN {
+ t.Errorf("sPosNaN <= sPosNaN returned true")
+ }
+ if sNegNaN < sPosNaN {
+ t.Errorf("sNegNaN < sPosNaN returned true")
+ }
+ sPosInf, sNegInf := float32(math.Inf(1)), float32(math.Inf(-1))
+ if sPosInf != sPosInf {
+ t.Errorf("sPosInf != sPosInf returned true")
+ }
+ if sNegInf != sNegInf {
+ t.Errorf("sNegInf != sNegInf returned true")
+ }
+ sNegZero := float32(math.Copysign(0, -1))
+ if sNegZero != 0 {
+ t.Errorf("sNegZero != 0 returned true")
+ }
+ if sNegZero < 0 {
+ t.Errorf("sNegZero < 0 returned true")
+ }
+ if 0 > sNegZero {
+ t.Errorf("0 > sNegZero returned true")
+ }
+}
+
+//go:noinline
+func cvt1(a float64) uint64 {
+ return uint64(a)
+}
+
+//go:noinline
+func cvt2(a float64) uint32 {
+ return uint32(a)
+}
+
+//go:noinline
+func cvt3(a float32) uint64 {
+ return uint64(a)
+}
+
+//go:noinline
+func cvt4(a float32) uint32 {
+ return uint32(a)
+}
+
+//go:noinline
+func cvt5(a float64) int64 {
+ return int64(a)
+}
+
+//go:noinline
+func cvt6(a float64) int32 {
+ return int32(a)
+}
+
+//go:noinline
+func cvt7(a float32) int64 {
+ return int64(a)
+}
+
+//go:noinline
+func cvt8(a float32) int32 {
+ return int32(a)
+}
+
+// make sure to cover int, uint cases (issue #16738)
+//
+//go:noinline
+func cvt9(a float64) int {
+ return int(a)
+}
+
+//go:noinline
+func cvt10(a float64) uint {
+ return uint(a)
+}
+
+//go:noinline
+func cvt11(a float32) int {
+ return int(a)
+}
+
+//go:noinline
+func cvt12(a float32) uint {
+ return uint(a)
+}
+
+//go:noinline
+func f2i64p(v float64) *int64 {
+ return ip64(int64(v / 0.1))
+}
+
+//go:noinline
+func ip64(v int64) *int64 {
+ return &v
+}
+
+func TestFloatConvert(t *testing.T) {
+ if got := cvt1(3.5); got != 3 {
+ t.Errorf("cvt1 got %d, wanted 3", got)
+ }
+ if got := cvt2(3.5); got != 3 {
+ t.Errorf("cvt2 got %d, wanted 3", got)
+ }
+ if got := cvt3(3.5); got != 3 {
+ t.Errorf("cvt3 got %d, wanted 3", got)
+ }
+ if got := cvt4(3.5); got != 3 {
+ t.Errorf("cvt4 got %d, wanted 3", got)
+ }
+ if got := cvt5(3.5); got != 3 {
+ t.Errorf("cvt5 got %d, wanted 3", got)
+ }
+ if got := cvt6(3.5); got != 3 {
+ t.Errorf("cvt6 got %d, wanted 3", got)
+ }
+ if got := cvt7(3.5); got != 3 {
+ t.Errorf("cvt7 got %d, wanted 3", got)
+ }
+ if got := cvt8(3.5); got != 3 {
+ t.Errorf("cvt8 got %d, wanted 3", got)
+ }
+ if got := cvt9(3.5); got != 3 {
+ t.Errorf("cvt9 got %d, wanted 3", got)
+ }
+ if got := cvt10(3.5); got != 3 {
+ t.Errorf("cvt10 got %d, wanted 3", got)
+ }
+ if got := cvt11(3.5); got != 3 {
+ t.Errorf("cvt11 got %d, wanted 3", got)
+ }
+ if got := cvt12(3.5); got != 3 {
+ t.Errorf("cvt12 got %d, wanted 3", got)
+ }
+ if got := *f2i64p(10); got != 100 {
+ t.Errorf("f2i64p got %d, wanted 100", got)
+ }
+}
+
+func TestFloatConvertFolded(t *testing.T) {
+ // Assign constants to variables so that they are (hopefully) constant folded
+ // by the SSA backend rather than the frontend.
+ u64, u32, u16, u8 := uint64(1<<63), uint32(1<<31), uint16(1<<15), uint8(1<<7)
+ i64, i32, i16, i8 := int64(-1<<63), int32(-1<<31), int16(-1<<15), int8(-1<<7)
+ du64, du32, du16, du8 := float64(1<<63), float64(1<<31), float64(1<<15), float64(1<<7)
+ di64, di32, di16, di8 := float64(-1<<63), float64(-1<<31), float64(-1<<15), float64(-1<<7)
+ su64, su32, su16, su8 := float32(1<<63), float32(1<<31), float32(1<<15), float32(1<<7)
+ si64, si32, si16, si8 := float32(-1<<63), float32(-1<<31), float32(-1<<15), float32(-1<<7)
+
+ // integer to float
+ if float64(u64) != du64 {
+ t.Errorf("float64(u64) != du64")
+ }
+ if float64(u32) != du32 {
+ t.Errorf("float64(u32) != du32")
+ }
+ if float64(u16) != du16 {
+ t.Errorf("float64(u16) != du16")
+ }
+ if float64(u8) != du8 {
+ t.Errorf("float64(u8) != du8")
+ }
+ if float64(i64) != di64 {
+ t.Errorf("float64(i64) != di64")
+ }
+ if float64(i32) != di32 {
+ t.Errorf("float64(i32) != di32")
+ }
+ if float64(i16) != di16 {
+ t.Errorf("float64(i16) != di16")
+ }
+ if float64(i8) != di8 {
+ t.Errorf("float64(i8) != di8")
+ }
+ if float32(u64) != su64 {
+ t.Errorf("float32(u64) != su64")
+ }
+ if float32(u32) != su32 {
+ t.Errorf("float32(u32) != su32")
+ }
+ if float32(u16) != su16 {
+ t.Errorf("float32(u16) != su16")
+ }
+ if float32(u8) != su8 {
+ t.Errorf("float32(u8) != su8")
+ }
+ if float32(i64) != si64 {
+ t.Errorf("float32(i64) != si64")
+ }
+ if float32(i32) != si32 {
+ t.Errorf("float32(i32) != si32")
+ }
+ if float32(i16) != si16 {
+ t.Errorf("float32(i16) != si16")
+ }
+ if float32(i8) != si8 {
+ t.Errorf("float32(i8) != si8")
+ }
+
+ // float to integer
+ if uint64(du64) != u64 {
+ t.Errorf("uint64(du64) != u64")
+ }
+ if uint32(du32) != u32 {
+ t.Errorf("uint32(du32) != u32")
+ }
+ if uint16(du16) != u16 {
+ t.Errorf("uint16(du16) != u16")
+ }
+ if uint8(du8) != u8 {
+ t.Errorf("uint8(du8) != u8")
+ }
+ if int64(di64) != i64 {
+ t.Errorf("int64(di64) != i64")
+ }
+ if int32(di32) != i32 {
+ t.Errorf("int32(di32) != i32")
+ }
+ if int16(di16) != i16 {
+ t.Errorf("int16(di16) != i16")
+ }
+ if int8(di8) != i8 {
+ t.Errorf("int8(di8) != i8")
+ }
+ if uint64(su64) != u64 {
+ t.Errorf("uint64(su64) != u64")
+ }
+ if uint32(su32) != u32 {
+ t.Errorf("uint32(su32) != u32")
+ }
+ if uint16(su16) != u16 {
+ t.Errorf("uint16(su16) != u16")
+ }
+ if uint8(su8) != u8 {
+ t.Errorf("uint8(su8) != u8")
+ }
+ if int64(si64) != i64 {
+ t.Errorf("int64(si64) != i64")
+ }
+ if int32(si32) != i32 {
+ t.Errorf("int32(si32) != i32")
+ }
+ if int16(si16) != i16 {
+ t.Errorf("int16(si16) != i16")
+ }
+ if int8(si8) != i8 {
+ t.Errorf("int8(si8) != i8")
+ }
+}
+
+func TestFloat32StoreToLoadConstantFold(t *testing.T) {
+ // Test that math.Float32{,from}bits constant fold correctly.
+ // In particular we need to be careful that signaling NaN (sNaN) values
+ // are not converted to quiet NaN (qNaN) values during compilation.
+ // See issue #27193 for more information.
+
+ // signaling NaNs
+ {
+ const nan = uint32(0x7f800001) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x7fbfffff) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0xff800001) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0xffbfffff) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+
+ // quiet NaNs
+ {
+ const nan = uint32(0x7fc00000) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x7fffffff) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x8fc00000) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x8fffffff) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+
+ // infinities
+ {
+ const inf = uint32(0x7f800000) // +∞
+ if x := math.Float32bits(math.Float32frombits(inf)); x != inf {
+ t.Errorf("got %#x, want %#x", x, inf)
+ }
+ }
+ {
+ const negInf = uint32(0xff800000) // -∞
+ if x := math.Float32bits(math.Float32frombits(negInf)); x != negInf {
+ t.Errorf("got %#x, want %#x", x, negInf)
+ }
+ }
+
+ // numbers
+ {
+ const zero = uint32(0) // +0.0
+ if x := math.Float32bits(math.Float32frombits(zero)); x != zero {
+ t.Errorf("got %#x, want %#x", x, zero)
+ }
+ }
+ {
+ const negZero = uint32(1 << 31) // -0.0
+ if x := math.Float32bits(math.Float32frombits(negZero)); x != negZero {
+ t.Errorf("got %#x, want %#x", x, negZero)
+ }
+ }
+ {
+ const one = uint32(0x3f800000) // 1.0
+ if x := math.Float32bits(math.Float32frombits(one)); x != one {
+ t.Errorf("got %#x, want %#x", x, one)
+ }
+ }
+ {
+ const negOne = uint32(0xbf800000) // -1.0
+ if x := math.Float32bits(math.Float32frombits(negOne)); x != negOne {
+ t.Errorf("got %#x, want %#x", x, negOne)
+ }
+ }
+ {
+ const frac = uint32(0x3fc00000) // +1.5
+ if x := math.Float32bits(math.Float32frombits(frac)); x != frac {
+ t.Errorf("got %#x, want %#x", x, frac)
+ }
+ }
+ {
+ const negFrac = uint32(0xbfc00000) // -1.5
+ if x := math.Float32bits(math.Float32frombits(negFrac)); x != negFrac {
+ t.Errorf("got %#x, want %#x", x, negFrac)
+ }
+ }
+}
+
+// Signaling NaN values as constants.
+const (
+ snan32bits uint32 = 0x7f800001
+ snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+ // Make sure we generate a signaling NaN from a constant properly.
+ // See issue 36400.
+ f32 := math.Float32frombits(snan32bits)
+ g32 := math.Float32frombits(snan32bitsVar)
+ x32 := math.Float32bits(f32)
+ y32 := math.Float32bits(g32)
+ if x32 != y32 {
+ t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+ }
+
+ f64 := math.Float64frombits(snan64bits)
+ g64 := math.Float64frombits(snan64bitsVar)
+ x64 := math.Float64bits(f64)
+ y64 := math.Float64bits(g64)
+ if x64 != y64 {
+ t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+ }
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, we get a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399.
+ s32 := math.Float32frombits(snan32bitsVar)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bitsVar)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, it converts to a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399 and 36400.
+ s32 := math.Float32frombits(snan32bits)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bits)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+var sinkFloat float64
+
+func BenchmarkMul2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var m float64 = 1
+ for j := 0; j < 500; j++ {
+ m *= 2
+ }
+ sinkFloat = m
+ }
+}
+func BenchmarkMulNeg2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var m float64 = 1
+ for j := 0; j < 500; j++ {
+ m *= -2
+ }
+ sinkFloat = m
+ }
+}
diff --git a/src/cmd/compile/internal/test/global_test.go b/src/cmd/compile/internal/test/global_test.go
new file mode 100644
index 0000000..c8b3370
--- /dev/null
+++ b/src/cmd/compile/internal/test/global_test.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Make sure "hello world" does not link in all the
+// fmt.scanf routines. See issue 6853.
+func TestScanfRemoval(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Make a directory to work in.
+ dir := t.TempDir()
+
+ // Create source.
+ src := filepath.Join(dir, "test.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("could not create source file: %v", err)
+ }
+ f.Write([]byte(`
+package main
+import "fmt"
+func main() {
+ fmt.Println("hello world")
+}
+`))
+ f.Close()
+
+ // Name of destination.
+ dst := filepath.Join(dir, "test")
+
+ // Compile source.
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", dst, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not build target: %v\n%s", err, out)
+ }
+
+ // Check destination to see if scanf code was included.
+ cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dst)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not read target: %v", err)
+ }
+ if bytes.Contains(out, []byte("scanInt")) {
+ t.Fatalf("scanf code not removed from helloworld")
+ }
+}
+
+// Make sure -S prints assembly code. See issue 14515.
+func TestDashS(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Make a directory to work in.
+ dir := t.TempDir()
+
+ // Create source.
+ src := filepath.Join(dir, "test.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("could not create source file: %v", err)
+ }
+ f.Write([]byte(`
+package main
+import "fmt"
+func main() {
+ fmt.Println("hello world")
+}
+`))
+ f.Close()
+
+ // Compile source.
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-gcflags", "-S", "-o", filepath.Join(dir, "test"), src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not build target: %v\n%s", err, out)
+ }
+
+ patterns := []string{
+ // It is hard to look for actual instructions in an
+ // arch-independent way. So we'll just look for
+ // pseudo-ops that are arch-independent.
+ "\tTEXT\t",
+ "\tFUNCDATA\t",
+ "\tPCDATA\t",
+ }
+ outstr := string(out)
+ for _, p := range patterns {
+ if !strings.Contains(outstr, p) {
+ println(outstr)
+ panic("can't find pattern " + p)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/iface_test.go b/src/cmd/compile/internal/test/iface_test.go
new file mode 100644
index 0000000..db41eb8
--- /dev/null
+++ b/src/cmd/compile/internal/test/iface_test.go
@@ -0,0 +1,138 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+// Test to make sure we make copies of the values we
+// put in interfaces.
+
+var x int
+
+func TestEfaceConv1(t *testing.T) {
+ a := 5
+ i := interface{}(a)
+ a += 2
+ if got := i.(int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+func TestEfaceConv2(t *testing.T) {
+ a := 5
+ sink = &a
+ i := interface{}(a)
+ a += 2
+ if got := i.(int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+func TestEfaceConv3(t *testing.T) {
+ x = 5
+ if got := e2int3(x); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+//go:noinline
+func e2int3(i interface{}) int {
+ x = 7
+ return i.(int)
+}
+
+func TestEfaceConv4(t *testing.T) {
+ a := 5
+ if got := e2int4(a, &a); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+//go:noinline
+func e2int4(i interface{}, p *int) int {
+ *p = 7
+ return i.(int)
+}
+
+type Int int
+
+var y Int
+
+type I interface {
+ foo()
+}
+
+func (i Int) foo() {
+}
+
+func TestIfaceConv1(t *testing.T) {
+ a := Int(5)
+ i := interface{}(a)
+ a += 2
+ if got := i.(Int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+func TestIfaceConv2(t *testing.T) {
+ a := Int(5)
+ sink = &a
+ i := interface{}(a)
+ a += 2
+ if got := i.(Int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+func TestIfaceConv3(t *testing.T) {
+ y = 5
+ if got := i2Int3(y); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+//go:noinline
+func i2Int3(i I) Int {
+ y = 7
+ return i.(Int)
+}
+
+func TestIfaceConv4(t *testing.T) {
+ a := Int(5)
+ if got := i2Int4(a, &a); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+//go:noinline
+func i2Int4(i I, p *Int) Int {
+ *p = 7
+ return i.(Int)
+}
+
+func BenchmarkEfaceInteger(b *testing.B) {
+ sum := 0
+ for i := 0; i < b.N; i++ {
+ sum += i2int(i)
+ }
+ sink = sum
+}
+
+//go:noinline
+func i2int(i interface{}) int {
+ return i.(int)
+}
+
+func BenchmarkTypeAssert(b *testing.B) {
+ e := any(Int(0))
+ r := true
+ for i := 0; i < b.N; i++ {
+ _, ok := e.(I)
+ if !ok {
+ r = false
+ }
+ }
+ sink = r
+}
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
new file mode 100644
index 0000000..0ccc7b3
--- /dev/null
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -0,0 +1,411 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bufio"
+ "internal/goexperiment"
+ "internal/testenv"
+ "io"
+ "math/bits"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// TestIntendedInlining tests that specific functions are inlined.
+// This allows refactoring for code clarity and re-use without fear that
+// changes to the compiler will cause silent performance regressions.
+func TestIntendedInlining(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ // want is the list of function names (by package) that should
+ // be inlinable. If they have no callers in their packages, they
+ // might not actually be inlined anywhere.
+ want := map[string][]string{
+ "runtime": {
+ "add",
+ "acquirem",
+ "add1",
+ "addb",
+ "adjustpanics",
+ "adjustpointer",
+ "alignDown",
+ "alignUp",
+ "bucketMask",
+ "bucketShift",
+ "chanbuf",
+ "evacuated",
+ "fastlog2",
+ "float64bits",
+ "funcspdelta",
+ "getm",
+ "getMCache",
+ "isDirectIface",
+ "itabHashFunc",
+ "nextslicecap",
+ "noescape",
+ "pcvalueCacheKey",
+ "rand32",
+ "readUnaligned32",
+ "readUnaligned64",
+ "releasem",
+ "roundupsize",
+ "stackmapdata",
+ "stringStructOf",
+ "subtract1",
+ "subtractb",
+ "tophash",
+ "(*bmap).keys",
+ "(*bmap).overflow",
+ "(*waitq).enqueue",
+ "funcInfo.entry",
+
+ // GC-related ones
+ "cgoInRange",
+ "gclinkptr.ptr",
+ "guintptr.ptr",
+ "writeHeapBitsForAddr",
+ "heapBitsSlice",
+ "markBits.isMarked",
+ "muintptr.ptr",
+ "puintptr.ptr",
+ "spanOf",
+ "spanOfUnchecked",
+ "typePointers.nextFast",
+ "(*gcWork).putFast",
+ "(*gcWork).tryGetFast",
+ "(*guintptr).set",
+ "(*markBits).advance",
+ "(*mspan).allocBitsForIndex",
+ "(*mspan).base",
+ "(*mspan).markBitsForBase",
+ "(*mspan).markBitsForIndex",
+ "(*mspan).writeUserArenaHeapBits",
+ "(*muintptr).set",
+ "(*puintptr).set",
+ "(*wbBuf).get1",
+ "(*wbBuf).get2",
+
+ // Trace-related ones.
+ "traceLocker.ok",
+ "traceEnabled",
+ },
+ "runtime/internal/sys": {},
+ "runtime/internal/math": {
+ "MulUintptr",
+ },
+ "bytes": {
+ "(*Buffer).Bytes",
+ "(*Buffer).Cap",
+ "(*Buffer).Len",
+ "(*Buffer).Grow",
+ "(*Buffer).Next",
+ "(*Buffer).Read",
+ "(*Buffer).ReadByte",
+ "(*Buffer).Reset",
+ "(*Buffer).String",
+ "(*Buffer).UnreadByte",
+ "(*Buffer).tryGrowByReslice",
+ },
+ "internal/abi": {
+ "UseInterfaceSwitchCache",
+ },
+ "compress/flate": {
+ "byLiteral.Len",
+ "byLiteral.Less",
+ "byLiteral.Swap",
+ "(*dictDecoder).tryWriteCopy",
+ },
+ "encoding/base64": {
+ "assemble32",
+ "assemble64",
+ },
+ "unicode/utf8": {
+ "FullRune",
+ "FullRuneInString",
+ "RuneLen",
+ "AppendRune",
+ "ValidRune",
+ },
+ "unicode/utf16": {
+ "Decode",
+ },
+ "reflect": {
+ "Value.Bool",
+ "Value.Bytes",
+ "Value.CanAddr",
+ "Value.CanComplex",
+ "Value.CanFloat",
+ "Value.CanInt",
+ "Value.CanInterface",
+ "Value.CanSet",
+ "Value.CanUint",
+ "Value.Cap",
+ "Value.Complex",
+ "Value.Float",
+ "Value.Int",
+ "Value.Interface",
+ "Value.IsNil",
+ "Value.IsValid",
+ "Value.Kind",
+ "Value.Len",
+ "Value.MapRange",
+ "Value.OverflowComplex",
+ "Value.OverflowFloat",
+ "Value.OverflowInt",
+ "Value.OverflowUint",
+ "Value.String",
+ "Value.Type",
+ "Value.Uint",
+ "Value.UnsafeAddr",
+ "Value.pointer",
+ "add",
+ "align",
+ "flag.mustBe",
+ "flag.mustBeAssignable",
+ "flag.mustBeExported",
+ "flag.kind",
+ "flag.ro",
+ },
+ "regexp": {
+ "(*bitState).push",
+ },
+ "math/big": {
+ "bigEndianWord",
+ // The following functions require the math_big_pure_go build tag.
+ "addVW",
+ "subVW",
+ },
+ "math/rand": {
+ "(*rngSource).Int63",
+ "(*rngSource).Uint64",
+ },
+ "net": {
+ "(*UDPConn).ReadFromUDP",
+ },
+ "sync": {
+ // Both OnceFunc and its returned closure need to be inlinable so
+ // that the returned closure can be inlined into the caller of OnceFunc.
+ "OnceFunc",
+ "OnceFunc.func2", // The returned closure.
+ // TODO(austin): It would be good to check OnceValue and OnceValues,
+ // too, but currently they aren't reported because they have type
+ // parameters and aren't instantiated in sync.
+ },
+ "sync/atomic": {
+ // (*Bool).CompareAndSwap handled below.
+ "(*Bool).Load",
+ "(*Bool).Store",
+ "(*Bool).Swap",
+ "(*Int32).Add",
+ "(*Int32).CompareAndSwap",
+ "(*Int32).Load",
+ "(*Int32).Store",
+ "(*Int32).Swap",
+ "(*Int64).Add",
+ "(*Int64).CompareAndSwap",
+ "(*Int64).Load",
+ "(*Int64).Store",
+ "(*Int64).Swap",
+ "(*Uint32).Add",
+ "(*Uint32).CompareAndSwap",
+ "(*Uint32).Load",
+ "(*Uint32).Store",
+ "(*Uint32).Swap",
+ "(*Uint64).Add",
+ "(*Uint64).CompareAndSwap",
+ "(*Uint64).Load",
+ "(*Uint64).Store",
+ "(*Uint64).Swap",
+ "(*Uintptr).Add",
+ "(*Uintptr).CompareAndSwap",
+ "(*Uintptr).Load",
+ "(*Uintptr).Store",
+ "(*Uintptr).Swap",
+ "(*Pointer[go.shape.int]).CompareAndSwap",
+ "(*Pointer[go.shape.int]).Load",
+ "(*Pointer[go.shape.int]).Store",
+ "(*Pointer[go.shape.int]).Swap",
+ },
+ }
+
+ if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
+ // nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable.
+ // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
+ // On loong64, mips64x and riscv64, TrailingZeros64 is not intrinsified and causes nextFreeFast
+ // too expensive to inline (Issue 22239).
+ want["runtime"] = append(want["runtime"], "nextFreeFast")
+ // Same behavior for heapBits.nextFast.
+ want["runtime"] = append(want["runtime"], "heapBits.nextFast")
+ }
+ if runtime.GOARCH != "386" {
+ // As explained above, TrailingZeros64 and TrailingZeros32 are not Go code on 386.
+ // The same applies to Bswap32.
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "TrailingZeros64")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "TrailingZeros32")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
+ }
+ if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "s390x" {
+ // runtime/internal/atomic.Loaduintptr is only intrinsified on these platforms.
+ want["runtime"] = append(want["runtime"], "traceAcquire")
+ }
+ if bits.UintSize == 64 {
+ // mix is only defined on 64-bit architectures
+ want["runtime"] = append(want["runtime"], "mix")
+ // (*Bool).CompareAndSwap is just over budget on 32-bit systems (386, arm).
+ want["sync/atomic"] = append(want["sync/atomic"], "(*Bool).CompareAndSwap")
+ }
+
+ switch runtime.GOARCH {
+ case "386", "wasm", "arm":
+ default:
+ // TODO(mvdan): As explained in /test/inline_sync.go, some
+ // architectures don't have atomic intrinsics, so these go over
+ // the inlining budget. Move back to the main table once that
+ // problem is solved.
+ want["sync"] = []string{
+ "(*Mutex).Lock",
+ "(*Mutex).Unlock",
+ "(*RWMutex).RLock",
+ "(*RWMutex).RUnlock",
+ "(*Once).Do",
+ }
+ }
+
+ // Functions that must actually be inlined; they must have actual callers.
+ must := map[string]bool{
+ "compress/flate.byLiteral.Len": true,
+ "compress/flate.byLiteral.Less": true,
+ "compress/flate.byLiteral.Swap": true,
+ }
+
+ notInlinedReason := make(map[string]string)
+ pkgs := make([]string, 0, len(want))
+ for pname, fnames := range want {
+ pkgs = append(pkgs, pname)
+ for _, fname := range fnames {
+ fullName := pname + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ t.Errorf("duplicate func: %s", fullName)
+ }
+ notInlinedReason[fullName] = "unknown reason"
+ }
+ }
+
+ args := append([]string{"build", "-gcflags=-m -m", "-tags=math_big_pure_go"}, pkgs...)
+ cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), args...))
+ pr, pw := io.Pipe()
+ cmd.Stdout = pw
+ cmd.Stderr = pw
+ cmdErr := make(chan error, 1)
+ go func() {
+ cmdErr <- cmd.Run()
+ pw.Close()
+ }()
+ scanner := bufio.NewScanner(pr)
+ curPkg := ""
+ canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
+ haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
+ cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "# ") {
+ curPkg = line[2:]
+ continue
+ }
+ if m := haveInlined.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ delete(notInlinedReason, curPkg+"."+fname)
+ continue
+ }
+ if m := canInline.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ fullname := curPkg + "." + fname
+ // If function must be inlined somewhere, being inlinable is not enough
+ if _, ok := must[fullname]; !ok {
+ delete(notInlinedReason, fullname)
+ continue
+ }
+ }
+ if m := cannotInline.FindStringSubmatch(line); m != nil {
+ fname, reason := m[1], m[2]
+ fullName := curPkg + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ // cmd/compile gave us a reason why
+ notInlinedReason[fullName] = reason
+ }
+ continue
+ }
+ }
+ if err := <-cmdErr; err != nil {
+ t.Fatal(err)
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatal(err)
+ }
+ for fullName, reason := range notInlinedReason {
+ t.Errorf("%s was not inlined: %s", fullName, reason)
+ }
+}
+
+func collectInlCands(msgs string) map[string]struct{} {
+ rv := make(map[string]struct{})
+ lines := strings.Split(msgs, "\n")
+ re := regexp.MustCompile(`^\S+\s+can\s+inline\s+(\S+)`)
+ for _, line := range lines {
+ m := re.FindStringSubmatch(line)
+ if m != nil {
+ rv[m[1]] = struct{}{}
+ }
+ }
+ return rv
+}
+
+func TestIssue56044(t *testing.T) {
+ if testing.Short() {
+ t.Skipf("skipping test: too long for short mode")
+ }
+ if !goexperiment.CoverageRedesign {
+ t.Skipf("skipping new coverage tests (experiment not enabled)")
+ }
+
+ testenv.MustHaveGoBuild(t)
+
+ modes := []string{"-covermode=set", "-covermode=atomic"}
+
+ for _, mode := range modes {
+ // Build the Go runtime with "-m", capturing output.
+ args := []string{"build", "-gcflags=runtime=-m", "runtime"}
+ cmd := testenv.Command(t, testenv.GoToolPath(t), args...)
+ b, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("build failed (%v): %s", err, b)
+ }
+ mbase := collectInlCands(string(b))
+
+ // Redo the build with -cover, also with "-m".
+ args = []string{"build", "-gcflags=runtime=-m", mode, "runtime"}
+ cmd = testenv.Command(t, testenv.GoToolPath(t), args...)
+ b, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("build failed (%v): %s", err, b)
+ }
+ mcov := collectInlCands(string(b))
+
+ // Make sure that there aren't any functions that are marked
+ // as inline candidates at base but not with coverage.
+ for k := range mbase {
+ if _, ok := mcov[k]; !ok {
+ t.Errorf("error: did not find %s in coverage -m output", k)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/inst_test.go b/src/cmd/compile/internal/test/inst_test.go
new file mode 100644
index 0000000..069e2ff
--- /dev/null
+++ b/src/cmd/compile/internal/test/inst_test.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "testing"
+)
+
+// TestInst tests that only one instantiation of Sort is created, even though generic
+// Sort is used for multiple pointer types across two packages.
+func TestInst(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ testenv.MustHaveGoRun(t)
+
+ // Build ptrsort.go, which uses package mysort.
+ var output []byte
+ var err error
+ filename := "ptrsort.go"
+ exename := "ptrsort"
+ outname := "ptrsort.out"
+ gotool := testenv.GoToolPath(t)
+ dest := filepath.Join(t.TempDir(), exename)
+ cmd := testenv.Command(t, gotool, "build", "-o", dest, filepath.Join("testdata", filename))
+ if output, err = cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Failed: %v:\nOutput: %s\n", err, output)
+ }
+
+ // Test that there is exactly one shape-based instantiation of Sort in
+ // the executable.
+ cmd = testenv.Command(t, gotool, "tool", "nm", dest)
+ if output, err = cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\n", err, output)
+ }
+ // Look for shape-based instantiation of Sort, but ignore any extra wrapper
+ // ending in "-tramp" (which are created on riscv).
+ re := regexp.MustCompile(`\bSort\[.*shape.*\][^-]`)
+ r := re.FindAllIndex(output, -1)
+ if len(r) != 1 {
+ t.Fatalf("Wanted 1 instantiations of Sort function, got %d\n", len(r))
+ }
+
+ // Actually run the test and make sure output is correct.
+ cmd = testenv.Command(t, gotool, "run", filepath.Join("testdata", filename))
+ if output, err = cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\n", err, output)
+ }
+ out, err := os.ReadFile(filepath.Join("testdata", outname))
+ if err != nil {
+ t.Fatalf("Could not find %s\n", outname)
+ }
+ if string(out) != string(output) {
+ t.Fatalf("Wanted output %v, got %v\n", string(out), string(output))
+ }
+}
diff --git a/src/cmd/compile/internal/test/intrinsics_test.go b/src/cmd/compile/internal/test/intrinsics_test.go
new file mode 100644
index 0000000..b89198c
--- /dev/null
+++ b/src/cmd/compile/internal/test/intrinsics_test.go
@@ -0,0 +1,62 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "math/bits"
+ "testing"
+)
+
+func TestBitLen64(t *testing.T) {
+ for i := 0; i <= 64; i++ {
+ got := bits.Len64(1 << i)
+ want := i + 1
+ if want == 65 {
+ want = 0
+ }
+ if got != want {
+ t.Errorf("Len64(1<<%d) = %d, want %d", i, got, want)
+ }
+ }
+}
+
+func TestBitLen32(t *testing.T) {
+ for i := 0; i <= 32; i++ {
+ got := bits.Len32(1 << i)
+ want := i + 1
+ if want == 33 {
+ want = 0
+ }
+ if got != want {
+ t.Errorf("Len32(1<<%d) = %d, want %d", i, got, want)
+ }
+ }
+}
+
+func TestBitLen16(t *testing.T) {
+ for i := 0; i <= 16; i++ {
+ got := bits.Len16(1 << i)
+ want := i + 1
+ if want == 17 {
+ want = 0
+ }
+ if got != want {
+ t.Errorf("Len16(1<<%d) = %d, want %d", i, got, want)
+ }
+ }
+}
+
+func TestBitLen8(t *testing.T) {
+ for i := 0; i <= 8; i++ {
+ got := bits.Len8(1 << i)
+ want := i + 1
+ if want == 9 {
+ want = 0
+ }
+ if got != want {
+ t.Errorf("Len8(1<<%d) = %d, want %d", i, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/issue50182_test.go b/src/cmd/compile/internal/test/issue50182_test.go
new file mode 100644
index 0000000..cd277fa
--- /dev/null
+++ b/src/cmd/compile/internal/test/issue50182_test.go
@@ -0,0 +1,62 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "fmt"
+ "sort"
+ "testing"
+)
+
+// Test that calling methods on generic types doesn't cause allocations.
+func genericSorted[T sort.Interface](data T) bool {
+ n := data.Len()
+ for i := n - 1; i > 0; i-- {
+ if data.Less(i, i-1) {
+ return false
+ }
+ }
+ return true
+}
+func TestGenericSorted(t *testing.T) {
+ var data = sort.IntSlice{-10, -5, 0, 1, 2, 3, 5, 7, 11, 100, 100, 100, 1000, 10000}
+ f := func() {
+ genericSorted(data)
+ }
+ if n := testing.AllocsPerRun(10, f); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+}
+
+// Test that escape analysis correctly tracks escaping inside of methods
+// called on generic types.
+type fooer interface {
+ foo()
+}
+type P struct {
+ p *int
+ q int
+}
+
+var esc []*int
+
+func (p P) foo() {
+ esc = append(esc, p.p) // foo escapes the pointer from inside of p
+}
+func f[T fooer](t T) {
+ t.foo()
+}
+func TestGenericEscape(t *testing.T) {
+ for i := 0; i < 4; i++ {
+ var x int = 77 + i
+ var p P = P{p: &x}
+ f(p)
+ }
+ for i, p := range esc {
+ if got, want := *p, 77+i; got != want {
+ panic(fmt.Sprintf("entry %d: got %d, want %d", i, got, want))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/issue53888_test.go b/src/cmd/compile/internal/test/issue53888_test.go
new file mode 100644
index 0000000..0d5b13b
--- /dev/null
+++ b/src/cmd/compile/internal/test/issue53888_test.go
@@ -0,0 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package test
+
+import (
+ "internal/testenv"
+ "testing"
+)
+
+func TestAppendOfMake(t *testing.T) {
+ testenv.SkipIfOptimizationOff(t)
+ for n := 32; n < 33; n++ { // avoid stack allocation of make()
+ b := make([]byte, n)
+ f := func() {
+ b = append(b[:0], make([]byte, n)...)
+ }
+ if n := testing.AllocsPerRun(10, f); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+ type S []byte
+
+ s := make(S, n)
+ g := func() {
+ s = append(s[:0], make(S, n)...)
+ }
+ if n := testing.AllocsPerRun(10, g); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+ h := func() {
+ s = append(s[:0], make([]byte, n)...)
+ }
+ if n := testing.AllocsPerRun(10, h); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+ i := func() {
+ b = append(b[:0], make(S, n)...)
+ }
+ if n := testing.AllocsPerRun(10, i); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/issue57434_test.go b/src/cmd/compile/internal/test/issue57434_test.go
new file mode 100644
index 0000000..6a34b54
--- /dev/null
+++ b/src/cmd/compile/internal/test/issue57434_test.go
@@ -0,0 +1,38 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var output int
+
+type Object struct {
+ Val int
+}
+
+func (o *Object) Initialize() *Object {
+ o.Val = 5
+ return o
+}
+
+func (o *Object) Update() *Object {
+ o.Val = o.Val + 1
+ return o
+}
+
+func TestAutotmpLoopDepth(t *testing.T) {
+ f := func() {
+ for i := 0; i < 10; i++ {
+ var obj Object
+ obj.Initialize().Update()
+ output = obj.Val
+ }
+ }
+ if n := testing.AllocsPerRun(10, f); n > 0 {
+ t.Error("obj moved to heap")
+ }
+}
diff --git a/src/cmd/compile/internal/test/lang_test.go b/src/cmd/compile/internal/test/lang_test.go
new file mode 100644
index 0000000..34ed378
--- /dev/null
+++ b/src/cmd/compile/internal/test/lang_test.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+const aliasSrc = `
+package x
+
+type T = int
+`
+
+func TestInvalidLang(t *testing.T) {
+ t.Parallel()
+
+ testenv.MustHaveGoBuild(t)
+
+ dir := t.TempDir()
+
+ src := filepath.Join(dir, "alias.go")
+ if err := os.WriteFile(src, []byte(aliasSrc), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ outfile := filepath.Join(dir, "alias.o")
+
+ if testLang(t, "go9.99", src, outfile) == nil {
+ t.Error("compilation with -lang=go9.99 succeeded unexpectedly")
+ }
+
+ // This test will have to be adjusted if we ever reach 1.99 or 2.0.
+ if testLang(t, "go1.99", src, outfile) == nil {
+ t.Error("compilation with -lang=go1.99 succeeded unexpectedly")
+ }
+
+ if testLang(t, "go1.8", src, outfile) == nil {
+ t.Error("compilation with -lang=go1.8 succeeded unexpectedly")
+ }
+
+ if err := testLang(t, "go1.9", src, outfile); err != nil {
+ t.Errorf("compilation with -lang=go1.9 failed unexpectedly: %v", err)
+ }
+}
+
+func testLang(t *testing.T, lang, src, outfile string) error {
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=p", "-lang", lang, "-o", outfile, src}
+ t.Log(run)
+ out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput()
+ t.Logf("%s", out)
+ return err
+}
diff --git a/src/cmd/compile/internal/test/logic_test.go b/src/cmd/compile/internal/test/logic_test.go
new file mode 100644
index 0000000..0e46b5f
--- /dev/null
+++ b/src/cmd/compile/internal/test/logic_test.go
@@ -0,0 +1,293 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+// Tests to make sure logic simplification rules are correct.
+
+func TestLogic64(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int64{-1 << 63, 1<<63 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int64) int64 { return 0 }
+ id := func(x int64) int64 { return x }
+ or := func(x, y int64) int64 { return x | y }
+ and := func(x, y int64) int64 { return x & y }
+ y := func(x, y int64) int64 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int64) int64
+ golden func(int64) int64
+ }{
+ {"x|x", func(x int64) int64 { return x | x }, id},
+ {"x|0", func(x int64) int64 { return x | 0 }, id},
+ {"x|-1", func(x int64) int64 { return x | -1 }, func(x int64) int64 { return -1 }},
+ {"x&x", func(x int64) int64 { return x & x }, id},
+ {"x&0", func(x int64) int64 { return x & 0 }, zero},
+ {"x&-1", func(x int64) int64 { return x & -1 }, id},
+ {"x^x", func(x int64) int64 { return x ^ x }, zero},
+ {"x^0", func(x int64) int64 { return x ^ 0 }, id},
+ {"x^-1", func(x int64) int64 { return x ^ -1 }, func(x int64) int64 { return ^x }},
+ {"x+0", func(x int64) int64 { return x + 0 }, id},
+ {"x-x", func(x int64) int64 { return x - x }, zero},
+ {"x*0", func(x int64) int64 { return x * 0 }, zero},
+ {"^^x", func(x int64) int64 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int64, int64) int64
+ golden func(int64, int64) int64
+ }{
+ {"x|(x|y)", func(x, y int64) int64 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int64) int64 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int64) int64 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int64) int64 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int64) int64 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int64) int64 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int64) int64 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int64) int64 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int64) int64 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int64) int64 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int64) int64 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int64) int64 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int64) int64 { return -(y - x) }, func(x, y int64) int64 { return x - y }},
+ {"(x+y)-x", func(x, y int64) int64 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int64) int64 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic32(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int32{-1 << 31, 1<<31 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int32) int32 { return 0 }
+ id := func(x int32) int32 { return x }
+ or := func(x, y int32) int32 { return x | y }
+ and := func(x, y int32) int32 { return x & y }
+ y := func(x, y int32) int32 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int32) int32
+ golden func(int32) int32
+ }{
+ {"x|x", func(x int32) int32 { return x | x }, id},
+ {"x|0", func(x int32) int32 { return x | 0 }, id},
+ {"x|-1", func(x int32) int32 { return x | -1 }, func(x int32) int32 { return -1 }},
+ {"x&x", func(x int32) int32 { return x & x }, id},
+ {"x&0", func(x int32) int32 { return x & 0 }, zero},
+ {"x&-1", func(x int32) int32 { return x & -1 }, id},
+ {"x^x", func(x int32) int32 { return x ^ x }, zero},
+ {"x^0", func(x int32) int32 { return x ^ 0 }, id},
+ {"x^-1", func(x int32) int32 { return x ^ -1 }, func(x int32) int32 { return ^x }},
+ {"x+0", func(x int32) int32 { return x + 0 }, id},
+ {"x-x", func(x int32) int32 { return x - x }, zero},
+ {"x*0", func(x int32) int32 { return x * 0 }, zero},
+ {"^^x", func(x int32) int32 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int32, int32) int32
+ golden func(int32, int32) int32
+ }{
+ {"x|(x|y)", func(x, y int32) int32 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int32) int32 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int32) int32 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int32) int32 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int32) int32 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int32) int32 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int32) int32 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int32) int32 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int32) int32 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int32) int32 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int32) int32 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int32) int32 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int32) int32 { return -(y - x) }, func(x, y int32) int32 { return x - y }},
+ {"(x+y)-x", func(x, y int32) int32 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int32) int32 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic16(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int16{-1 << 15, 1<<15 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int16) int16 { return 0 }
+ id := func(x int16) int16 { return x }
+ or := func(x, y int16) int16 { return x | y }
+ and := func(x, y int16) int16 { return x & y }
+ y := func(x, y int16) int16 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int16) int16
+ golden func(int16) int16
+ }{
+ {"x|x", func(x int16) int16 { return x | x }, id},
+ {"x|0", func(x int16) int16 { return x | 0 }, id},
+ {"x|-1", func(x int16) int16 { return x | -1 }, func(x int16) int16 { return -1 }},
+ {"x&x", func(x int16) int16 { return x & x }, id},
+ {"x&0", func(x int16) int16 { return x & 0 }, zero},
+ {"x&-1", func(x int16) int16 { return x & -1 }, id},
+ {"x^x", func(x int16) int16 { return x ^ x }, zero},
+ {"x^0", func(x int16) int16 { return x ^ 0 }, id},
+ {"x^-1", func(x int16) int16 { return x ^ -1 }, func(x int16) int16 { return ^x }},
+ {"x+0", func(x int16) int16 { return x + 0 }, id},
+ {"x-x", func(x int16) int16 { return x - x }, zero},
+ {"x*0", func(x int16) int16 { return x * 0 }, zero},
+ {"^^x", func(x int16) int16 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int16, int16) int16
+ golden func(int16, int16) int16
+ }{
+ {"x|(x|y)", func(x, y int16) int16 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int16) int16 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int16) int16 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int16) int16 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int16) int16 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int16) int16 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int16) int16 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int16) int16 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int16) int16 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int16) int16 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int16) int16 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int16) int16 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int16) int16 { return -(y - x) }, func(x, y int16) int16 { return x - y }},
+ {"(x+y)-x", func(x, y int16) int16 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int16) int16 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic8(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int8{-1 << 7, 1<<7 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int8) int8 { return 0 }
+ id := func(x int8) int8 { return x }
+ or := func(x, y int8) int8 { return x | y }
+ and := func(x, y int8) int8 { return x & y }
+ y := func(x, y int8) int8 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int8) int8
+ golden func(int8) int8
+ }{
+ {"x|x", func(x int8) int8 { return x | x }, id},
+ {"x|0", func(x int8) int8 { return x | 0 }, id},
+ {"x|-1", func(x int8) int8 { return x | -1 }, func(x int8) int8 { return -1 }},
+ {"x&x", func(x int8) int8 { return x & x }, id},
+ {"x&0", func(x int8) int8 { return x & 0 }, zero},
+ {"x&-1", func(x int8) int8 { return x & -1 }, id},
+ {"x^x", func(x int8) int8 { return x ^ x }, zero},
+ {"x^0", func(x int8) int8 { return x ^ 0 }, id},
+ {"x^-1", func(x int8) int8 { return x ^ -1 }, func(x int8) int8 { return ^x }},
+ {"x+0", func(x int8) int8 { return x + 0 }, id},
+ {"x-x", func(x int8) int8 { return x - x }, zero},
+ {"x*0", func(x int8) int8 { return x * 0 }, zero},
+ {"^^x", func(x int8) int8 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int8, int8) int8
+ golden func(int8, int8) int8
+ }{
+ {"x|(x|y)", func(x, y int8) int8 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int8) int8 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int8) int8 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int8) int8 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int8) int8 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int8) int8 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int8) int8 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int8) int8 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int8) int8 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int8) int8 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int8) int8 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int8) int8 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int8) int8 { return -(y - x) }, func(x, y int8) int8 { return x - y }},
+ {"(x+y)-x", func(x, y int8) int8 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int8) int8 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/math_test.go b/src/cmd/compile/internal/test/math_test.go
new file mode 100644
index 0000000..1febe9d
--- /dev/null
+++ b/src/cmd/compile/internal/test/math_test.go
@@ -0,0 +1,171 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var Output int
+
+func BenchmarkDiv64UnsignedSmall(b *testing.B) {
+ q := uint64(1)
+ for i := 1; i <= b.N; i++ {
+ q = (q + uint64(i)) / uint64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64Small(b *testing.B) {
+ q := int64(1)
+ for i := 1; i <= b.N; i++ {
+ q = (q + int64(i)) / int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64SmallNegDivisor(b *testing.B) {
+ q := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ q = (int64(i) - q) / -int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64SmallNegDividend(b *testing.B) {
+ q := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ q = -(int64(i) - q) / int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64SmallNegBoth(b *testing.B) {
+ q := int64(1)
+ for i := 1; i <= b.N; i++ {
+ q = -(int64(i) + q) / -int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64Unsigned(b *testing.B) {
+ q := uint64(1)
+ for i := 1; i <= b.N; i++ {
+ q = (uint64(0x7fffffffffffffff) - uint64(i) - (q & 1)) / uint64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64(b *testing.B) {
+ q := int64(1)
+ for i := 1; i <= b.N; i++ {
+ q = (int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64NegDivisor(b *testing.B) {
+ q := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ q = (int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / -int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64NegDividend(b *testing.B) {
+ q := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ q = -(int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkDiv64NegBoth(b *testing.B) {
+ q := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ q = -(int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / -int64(i)
+ }
+ Output = int(q)
+}
+
+func BenchmarkMod64UnsignedSmall(b *testing.B) {
+ r := uint64(1)
+ for i := 1; i <= b.N; i++ {
+ r = (uint64(i) + r) % uint64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64Small(b *testing.B) {
+ r := int64(1)
+ for i := 1; i <= b.N; i++ {
+ r = (int64(i) + r) % int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64SmallNegDivisor(b *testing.B) {
+ r := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ r = (int64(i) - r) % -int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64SmallNegDividend(b *testing.B) {
+ r := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ r = -(int64(i) - r) % int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64SmallNegBoth(b *testing.B) {
+ r := int64(1)
+ for i := 1; i <= b.N; i++ {
+ r = -(int64(i) + r) % -int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64Unsigned(b *testing.B) {
+ r := uint64(1)
+ for i := 1; i <= b.N; i++ {
+ r = (uint64(0x7fffffffffffffff) - uint64(i) - (r & 1)) % uint64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64(b *testing.B) {
+ r := int64(1)
+ for i := 1; i <= b.N; i++ {
+ r = (int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64NegDivisor(b *testing.B) {
+ r := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ r = (int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % -int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64NegDividend(b *testing.B) {
+ r := int64(-1)
+ for i := 1; i <= b.N; i++ {
+ r = -(int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % int64(i)
+ }
+ Output = int(r)
+}
+
+func BenchmarkMod64NegBoth(b *testing.B) {
+ r := int64(1)
+ for i := 1; i <= b.N; i++ {
+ r = -(int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % -int64(i)
+ }
+ Output = int(r)
+}
diff --git a/src/cmd/compile/internal/test/memcombine_test.go b/src/cmd/compile/internal/test/memcombine_test.go
new file mode 100644
index 0000000..3fc4a00
--- /dev/null
+++ b/src/cmd/compile/internal/test/memcombine_test.go
@@ -0,0 +1,199 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "encoding/binary"
+ "testing"
+)
+
+var gv = [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}
+
+//go:noinline
+func readGlobalUnaligned() uint64 {
+ return binary.LittleEndian.Uint64(gv[1:])
+}
+
+func TestUnalignedGlobal(t *testing.T) {
+ // Note: this is a test not so much of the result of the read, but of
+ // the correct compilation of that read. On s390x unaligned global
+ // accesses fail to compile.
+ if got, want := readGlobalUnaligned(), uint64(0x0807060504030201); got != want {
+ t.Errorf("read global %x, want %x", got, want)
+ }
+}
+
+func TestSpillOfExtendedEndianLoads(t *testing.T) {
+ b := []byte{0xaa, 0xbb, 0xcc, 0xdd}
+
+ var testCases = []struct {
+ fn func([]byte) uint64
+ want uint64
+ }{
+ {readUint16le, 0xbbaa},
+ {readUint16be, 0xaabb},
+ {readUint32le, 0xddccbbaa},
+ {readUint32be, 0xaabbccdd},
+ }
+ for _, test := range testCases {
+ if got := test.fn(b); got != test.want {
+ t.Errorf("got %x, want %x", got, test.want)
+ }
+ }
+}
+
+func readUint16le(b []byte) uint64 {
+ y := uint64(binary.LittleEndian.Uint16(b))
+ nop() // force spill
+ return y
+}
+
+func readUint16be(b []byte) uint64 {
+ y := uint64(binary.BigEndian.Uint16(b))
+ nop() // force spill
+ return y
+}
+
+func readUint32le(b []byte) uint64 {
+ y := uint64(binary.LittleEndian.Uint32(b))
+ nop() // force spill
+ return y
+}
+
+func readUint32be(b []byte) uint64 {
+ y := uint64(binary.BigEndian.Uint32(b))
+ nop() // force spill
+ return y
+}
+
+//go:noinline
+func nop() {
+}
+
+type T32 struct {
+ a, b uint32
+}
+
+//go:noinline
+func (t *T32) bigEndianLoad() uint64 {
+ return uint64(t.a)<<32 | uint64(t.b)
+}
+
+//go:noinline
+func (t *T32) littleEndianLoad() uint64 {
+ return uint64(t.a) | (uint64(t.b) << 32)
+}
+
+//go:noinline
+func (t *T32) bigEndianStore(x uint64) {
+ t.a = uint32(x >> 32)
+ t.b = uint32(x)
+}
+
+//go:noinline
+func (t *T32) littleEndianStore(x uint64) {
+ t.a = uint32(x)
+ t.b = uint32(x >> 32)
+}
+
+type T16 struct {
+ a, b uint16
+}
+
+//go:noinline
+func (t *T16) bigEndianLoad() uint32 {
+ return uint32(t.a)<<16 | uint32(t.b)
+}
+
+//go:noinline
+func (t *T16) littleEndianLoad() uint32 {
+ return uint32(t.a) | (uint32(t.b) << 16)
+}
+
+//go:noinline
+func (t *T16) bigEndianStore(x uint32) {
+ t.a = uint16(x >> 16)
+ t.b = uint16(x)
+}
+
+//go:noinline
+func (t *T16) littleEndianStore(x uint32) {
+ t.a = uint16(x)
+ t.b = uint16(x >> 16)
+}
+
+type T8 struct {
+ a, b uint8
+}
+
+//go:noinline
+func (t *T8) bigEndianLoad() uint16 {
+ return uint16(t.a)<<8 | uint16(t.b)
+}
+
+//go:noinline
+func (t *T8) littleEndianLoad() uint16 {
+ return uint16(t.a) | (uint16(t.b) << 8)
+}
+
+//go:noinline
+func (t *T8) bigEndianStore(x uint16) {
+ t.a = uint8(x >> 8)
+ t.b = uint8(x)
+}
+
+//go:noinline
+func (t *T8) littleEndianStore(x uint16) {
+ t.a = uint8(x)
+ t.b = uint8(x >> 8)
+}
+
+func TestIssue64468(t *testing.T) {
+ t32 := T32{1, 2}
+ if got, want := t32.bigEndianLoad(), uint64(1<<32+2); got != want {
+ t.Errorf("T32.bigEndianLoad got %x want %x\n", got, want)
+ }
+ if got, want := t32.littleEndianLoad(), uint64(1+2<<32); got != want {
+ t.Errorf("T32.littleEndianLoad got %x want %x\n", got, want)
+ }
+ t16 := T16{1, 2}
+ if got, want := t16.bigEndianLoad(), uint32(1<<16+2); got != want {
+ t.Errorf("T16.bigEndianLoad got %x want %x\n", got, want)
+ }
+ if got, want := t16.littleEndianLoad(), uint32(1+2<<16); got != want {
+ t.Errorf("T16.littleEndianLoad got %x want %x\n", got, want)
+ }
+ t8 := T8{1, 2}
+ if got, want := t8.bigEndianLoad(), uint16(1<<8+2); got != want {
+ t.Errorf("T8.bigEndianLoad got %x want %x\n", got, want)
+ }
+ if got, want := t8.littleEndianLoad(), uint16(1+2<<8); got != want {
+ t.Errorf("T8.littleEndianLoad got %x want %x\n", got, want)
+ }
+ t32.bigEndianStore(1<<32 + 2)
+ if got, want := t32, (T32{1, 2}); got != want {
+ t.Errorf("T32.bigEndianStore got %x want %x\n", got, want)
+ }
+ t32.littleEndianStore(1<<32 + 2)
+ if got, want := t32, (T32{2, 1}); got != want {
+ t.Errorf("T32.littleEndianStore got %x want %x\n", got, want)
+ }
+ t16.bigEndianStore(1<<16 + 2)
+ if got, want := t16, (T16{1, 2}); got != want {
+ t.Errorf("T16.bigEndianStore got %x want %x\n", got, want)
+ }
+ t16.littleEndianStore(1<<16 + 2)
+ if got, want := t16, (T16{2, 1}); got != want {
+ t.Errorf("T16.littleEndianStore got %x want %x\n", got, want)
+ }
+ t8.bigEndianStore(1<<8 + 2)
+ if got, want := t8, (T8{1, 2}); got != want {
+ t.Errorf("T8.bigEndianStore got %x want %x\n", got, want)
+ }
+ t8.littleEndianStore(1<<8 + 2)
+ if got, want := t8, (T8{2, 1}); got != want {
+ t.Errorf("T8.littleEndianStore got %x want %x\n", got, want)
+ }
+}
diff --git a/src/cmd/compile/internal/test/mulconst_test.go b/src/cmd/compile/internal/test/mulconst_test.go
new file mode 100644
index 0000000..314cab3
--- /dev/null
+++ b/src/cmd/compile/internal/test/mulconst_test.go
@@ -0,0 +1,242 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+// Benchmark multiplication of an integer by various constants.
+//
+// The comment above each sub-benchmark provides an example of how the
+// target multiplication operation might be implemented using shift
+// (multiplication by a power of 2), addition and subtraction
+// operations. It is platform-dependent whether these transformations
+// are actually applied.
+
+var (
+ mulSinkI32 int32
+ mulSinkI64 int64
+ mulSinkU32 uint32
+ mulSinkU64 uint64
+)
+
+func BenchmarkMulconstI32(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkI32 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkI32 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkI32 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkI32 = x
+ })
+ // -120x = 8x - 120x
+ b.Run("-120", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= -120
+ }
+ mulSinkI32 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkI32 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkI32 = x
+ })
+}
+
+func BenchmarkMulconstI64(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkI64 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkI64 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkI64 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkI64 = x
+ })
+ // -120x = 8x - 120x
+ b.Run("-120", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= -120
+ }
+ mulSinkI64 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkI64 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkI64 = x
+ })
+}
+
+func BenchmarkMulconstU32(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkU32 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkU32 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkU32 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkU32 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkU32 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkU32 = x
+ })
+}
+
+func BenchmarkMulconstU64(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkU64 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkU64 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkU64 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkU64 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkU64 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkU64 = x
+ })
+}
diff --git a/src/cmd/compile/internal/test/pgo_devirtualize_test.go b/src/cmd/compile/internal/test/pgo_devirtualize_test.go
new file mode 100644
index 0000000..f451243
--- /dev/null
+++ b/src/cmd/compile/internal/test/pgo_devirtualize_test.go
@@ -0,0 +1,261 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bufio"
+ "fmt"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "testing"
+)
+
+type devirtualization struct {
+ pos string
+ callee string
+}
+
+// testPGODevirtualize tests that specific PGO devirtualize rewrites are performed.
+func testPGODevirtualize(t *testing.T, dir string, want []devirtualization) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ const pkg = "example.com/pgo/devirtualize"
+
+ // Add a go.mod so we have a consistent symbol names in this temp dir.
+ goMod := fmt.Sprintf(`module %s
+go 1.21
+`, pkg)
+ if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil {
+ t.Fatalf("error writing go.mod: %v", err)
+ }
+
+ // Run the test without PGO to ensure that the test assertions are
+ // correct even in the non-optimized version.
+ cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "."))
+ cmd.Dir = dir
+ b, err := cmd.CombinedOutput()
+ t.Logf("Test without PGO:\n%s", b)
+ if err != nil {
+ t.Fatalf("Test failed without PGO: %v", err)
+ }
+
+ // Build the test with the profile.
+ pprof := filepath.Join(dir, "devirt.pprof")
+ gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=3", pprof)
+ out := filepath.Join(dir, "test.exe")
+ cmd = testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-o", out, gcflag, "."))
+ cmd.Dir = dir
+
+ pr, pw, err := os.Pipe()
+ if err != nil {
+ t.Fatalf("error creating pipe: %v", err)
+ }
+ defer pr.Close()
+ cmd.Stdout = pw
+ cmd.Stderr = pw
+
+ err = cmd.Start()
+ pw.Close()
+ if err != nil {
+ t.Fatalf("error starting go test: %v", err)
+ }
+
+ got := make(map[devirtualization]struct{})
+
+ devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing \w+ call .* to (.*)`)
+
+ scanner := bufio.NewScanner(pr)
+ for scanner.Scan() {
+ line := scanner.Text()
+ t.Logf("child: %s", line)
+
+ m := devirtualizedLine.FindStringSubmatch(line)
+ if m == nil {
+ continue
+ }
+
+ d := devirtualization{
+ pos: m[1],
+ callee: m[2],
+ }
+ got[d] = struct{}{}
+ }
+ if err := cmd.Wait(); err != nil {
+ t.Fatalf("error running go test: %v", err)
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatalf("error reading go test output: %v", err)
+ }
+
+ if len(got) != len(want) {
+ t.Errorf("mismatched devirtualization count; got %v want %v", got, want)
+ }
+ for _, w := range want {
+ if _, ok := got[w]; ok {
+ continue
+ }
+ t.Errorf("devirtualization %v missing; got %v", w, got)
+ }
+
+ // Run test with PGO to ensure the assertions are still true.
+ cmd = testenv.CleanCmdEnv(testenv.Command(t, out))
+ cmd.Dir = dir
+ b, err = cmd.CombinedOutput()
+ t.Logf("Test with PGO:\n%s", b)
+ if err != nil {
+ t.Fatalf("Test failed without PGO: %v", err)
+ }
+}
+
+// TestPGODevirtualize tests that specific functions are devirtualized when PGO
+// is applied to the exact source that was profiled.
+func TestPGODevirtualize(t *testing.T) {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+ if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil {
+ t.Fatalf("error creating dir: %v", err)
+ }
+ for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ want := []devirtualization{
+ // ExerciseIface
+ {
+ pos: "./devirt.go:101:20",
+ callee: "mult.Mult.Multiply",
+ },
+ {
+ pos: "./devirt.go:101:39",
+ callee: "Add.Add",
+ },
+ // ExerciseFuncConcrete
+ {
+ pos: "./devirt.go:173:36",
+ callee: "AddFn",
+ },
+ {
+ pos: "./devirt.go:173:15",
+ callee: "mult.MultFn",
+ },
+ // ExerciseFuncField
+ {
+ pos: "./devirt.go:207:35",
+ callee: "AddFn",
+ },
+ {
+ pos: "./devirt.go:207:19",
+ callee: "mult.MultFn",
+ },
+ // ExerciseFuncClosure
+ // TODO(prattmic): Closure callees not implemented.
+ //{
+ // pos: "./devirt.go:249:27",
+ // callee: "AddClosure.func1",
+ //},
+ //{
+ // pos: "./devirt.go:249:15",
+ // callee: "mult.MultClosure.func1",
+ //},
+ }
+
+ testPGODevirtualize(t, dir, want)
+}
+
+// Regression test for https://go.dev/issue/65615. If a target function changes
+// from non-generic to generic we can't devirtualize it (don't know the type
+// parameters), but the compiler should not crash.
+func TestLookupFuncGeneric(t *testing.T) {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+ if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil {
+ t.Fatalf("error creating dir: %v", err)
+ }
+ for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ // Change MultFn from a concrete function to a parameterized function.
+ if err := convertMultToGeneric(filepath.Join(dir, "mult.pkg", "mult.go")); err != nil {
+ t.Fatalf("error editing mult.go: %v", err)
+ }
+
+ // Same as TestPGODevirtualize except for MultFn, which we cannot
+ // devirtualize to because it has become generic.
+ //
+ // Note that the important part of this test is that the build is
+ // successful, not the specific devirtualizations.
+ want := []devirtualization{
+ // ExerciseIface
+ {
+ pos: "./devirt.go:101:20",
+ callee: "mult.Mult.Multiply",
+ },
+ {
+ pos: "./devirt.go:101:39",
+ callee: "Add.Add",
+ },
+ // ExerciseFuncConcrete
+ {
+ pos: "./devirt.go:173:36",
+ callee: "AddFn",
+ },
+ // ExerciseFuncField
+ {
+ pos: "./devirt.go:207:35",
+ callee: "AddFn",
+ },
+ // ExerciseFuncClosure
+ // TODO(prattmic): Closure callees not implemented.
+ //{
+ // pos: "./devirt.go:249:27",
+ // callee: "AddClosure.func1",
+ //},
+ //{
+ // pos: "./devirt.go:249:15",
+ // callee: "mult.MultClosure.func1",
+ //},
+ }
+
+ testPGODevirtualize(t, dir, want)
+}
+
+var multFnRe = regexp.MustCompile(`func MultFn\(a, b int64\) int64`)
+
+func convertMultToGeneric(path string) error {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("error opening: %w", err)
+ }
+
+ if !multFnRe.Match(content) {
+ return fmt.Errorf("MultFn not found; update regexp?")
+ }
+
+ // Users of MultFn shouldn't need adjustment, type inference should
+ // work OK.
+ content = multFnRe.ReplaceAll(content, []byte(`func MultFn[T int32|int64](a, b T) T`))
+
+ return os.WriteFile(path, content, 0644)
+}
diff --git a/src/cmd/compile/internal/test/pgo_inl_test.go b/src/cmd/compile/internal/test/pgo_inl_test.go
new file mode 100644
index 0000000..da6c4a5
--- /dev/null
+++ b/src/cmd/compile/internal/test/pgo_inl_test.go
@@ -0,0 +1,344 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "internal/profile"
+ "internal/testenv"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+func buildPGOInliningTest(t *testing.T, dir string, gcflag string) []byte {
+ const pkg = "example.com/pgo/inline"
+
+ // Add a go.mod so we have a consistent symbol names in this temp dir.
+ goMod := fmt.Sprintf(`module %s
+go 1.19
+`, pkg)
+ if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil {
+ t.Fatalf("error writing go.mod: %v", err)
+ }
+
+ exe := filepath.Join(dir, "test.exe")
+ args := []string{"test", "-c", "-o", exe, "-gcflags=" + gcflag}
+ cmd := testenv.Command(t, testenv.GoToolPath(t), args...)
+ cmd.Dir = dir
+ cmd = testenv.CleanCmdEnv(cmd)
+ t.Log(cmd)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("build failed: %v, output:\n%s", err, out)
+ }
+ return out
+}
+
+// testPGOIntendedInlining tests that specific functions are inlined.
+func testPGOIntendedInlining(t *testing.T, dir string) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ const pkg = "example.com/pgo/inline"
+
+ want := []string{
+ "(*BS).NS",
+ }
+
+ // The functions which are not expected to be inlined are as follows.
+ wantNot := []string{
+ // The calling edge main->A is hot and the cost of A is large
+ // than inlineHotCalleeMaxBudget.
+ "A",
+ // The calling edge BenchmarkA" -> benchmarkB is cold and the
+ // cost of A is large than inlineMaxBudget.
+ "benchmarkB",
+ }
+
+ must := map[string]bool{
+ "(*BS).NS": true,
+ }
+
+ notInlinedReason := make(map[string]string)
+ for _, fname := range want {
+ fullName := pkg + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ t.Errorf("duplicate func: %s", fullName)
+ }
+ notInlinedReason[fullName] = "unknown reason"
+ }
+
+ // If the compiler emit "cannot inline for function A", the entry A
+ // in expectedNotInlinedList will be removed.
+ expectedNotInlinedList := make(map[string]struct{})
+ for _, fname := range wantNot {
+ fullName := pkg + "." + fname
+ expectedNotInlinedList[fullName] = struct{}{}
+ }
+
+ // Build the test with the profile. Use a smaller threshold to test.
+ // TODO: maybe adjust the test to work with default threshold.
+ pprof := filepath.Join(dir, "inline_hot.pprof")
+ gcflag := fmt.Sprintf("-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof)
+ out := buildPGOInliningTest(t, dir, gcflag)
+
+ scanner := bufio.NewScanner(bytes.NewReader(out))
+ curPkg := ""
+ canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
+ haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
+ cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
+ for scanner.Scan() {
+ line := scanner.Text()
+ t.Logf("child: %s", line)
+ if strings.HasPrefix(line, "# ") {
+ curPkg = line[2:]
+ splits := strings.Split(curPkg, " ")
+ curPkg = splits[0]
+ continue
+ }
+ if m := haveInlined.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ delete(notInlinedReason, curPkg+"."+fname)
+ continue
+ }
+ if m := canInline.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ fullname := curPkg + "." + fname
+ // If function must be inlined somewhere, being inlinable is not enough
+ if _, ok := must[fullname]; !ok {
+ delete(notInlinedReason, fullname)
+ continue
+ }
+ }
+ if m := cannotInline.FindStringSubmatch(line); m != nil {
+ fname, reason := m[1], m[2]
+ fullName := curPkg + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ // cmd/compile gave us a reason why
+ notInlinedReason[fullName] = reason
+ }
+ delete(expectedNotInlinedList, fullName)
+ continue
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatalf("error reading output: %v", err)
+ }
+ for fullName, reason := range notInlinedReason {
+ t.Errorf("%s was not inlined: %s", fullName, reason)
+ }
+
+ // If the list expectedNotInlinedList is not empty, it indicates
+ // the functions in the expectedNotInlinedList are marked with caninline.
+ for fullName, _ := range expectedNotInlinedList {
+ t.Errorf("%s was expected not inlined", fullName)
+ }
+}
+
+// TestPGOIntendedInlining tests that specific functions are inlined when PGO
+// is applied to the exact source that was profiled.
+func TestPGOIntendedInlining(t *testing.T) {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata/pgo/inline")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+
+ for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ testPGOIntendedInlining(t, dir)
+}
+
+// TestPGOIntendedInlining tests that specific functions are inlined when PGO
+// is applied to the modified source.
+func TestPGOIntendedInliningShiftedLines(t *testing.T) {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata/pgo/inline")
+
+ // Copy the module to a scratch location so we can modify the source.
+ dir := t.TempDir()
+
+ // Copy most of the files unmodified.
+ for _, file := range []string{"inline_hot_test.go", "inline_hot.pprof"} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s : %v", file, err)
+ }
+ }
+
+ // Add some comments to the top of inline_hot.go. This adjusts the line
+ // numbers of all of the functions without changing the semantics.
+ src, err := os.Open(filepath.Join(srcDir, "inline_hot.go"))
+ if err != nil {
+ t.Fatalf("error opening src inline_hot.go: %v", err)
+ }
+ defer src.Close()
+
+ dst, err := os.Create(filepath.Join(dir, "inline_hot.go"))
+ if err != nil {
+ t.Fatalf("error creating dst inline_hot.go: %v", err)
+ }
+ defer dst.Close()
+
+ if _, err := io.WriteString(dst, `// Autogenerated
+// Lines
+`); err != nil {
+ t.Fatalf("error writing comments to dst: %v", err)
+ }
+
+ if _, err := io.Copy(dst, src); err != nil {
+ t.Fatalf("error copying inline_hot.go: %v", err)
+ }
+
+ dst.Close()
+
+ testPGOIntendedInlining(t, dir)
+}
+
+// TestPGOSingleIndex tests that the sample index can not be 1 and compilation
+// will not fail. All it should care about is that the sample type is either
+// CPU nanoseconds or samples count, whichever it finds first.
+func TestPGOSingleIndex(t *testing.T) {
+ for _, tc := range []struct {
+ originalIndex int
+ }{{
+ // The `testdata/pgo/inline/inline_hot.pprof` file is a standard CPU
+ // profile as the runtime would generate. The 0 index contains the
+ // value-type samples and value-unit count. The 1 index contains the
+ // value-type cpu and value-unit nanoseconds. These tests ensure that
+ // the compiler can work with profiles that only have a single index,
+ // but are either samples count or CPU nanoseconds.
+ originalIndex: 0,
+ }, {
+ originalIndex: 1,
+ }} {
+ t.Run(fmt.Sprintf("originalIndex=%d", tc.originalIndex), func(t *testing.T) {
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata/pgo/inline")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+
+ originalPprofFile, err := os.Open(filepath.Join(srcDir, "inline_hot.pprof"))
+ if err != nil {
+ t.Fatalf("error opening inline_hot.pprof: %v", err)
+ }
+ defer originalPprofFile.Close()
+
+ p, err := profile.Parse(originalPprofFile)
+ if err != nil {
+ t.Fatalf("error parsing inline_hot.pprof: %v", err)
+ }
+
+ // Move the samples count value-type to the 0 index.
+ p.SampleType = []*profile.ValueType{p.SampleType[tc.originalIndex]}
+
+ // Ensure we only have a single set of sample values.
+ for _, s := range p.Sample {
+ s.Value = []int64{s.Value[tc.originalIndex]}
+ }
+
+ modifiedPprofFile, err := os.Create(filepath.Join(dir, "inline_hot.pprof"))
+ if err != nil {
+ t.Fatalf("error creating inline_hot.pprof: %v", err)
+ }
+ defer modifiedPprofFile.Close()
+
+ if err := p.Write(modifiedPprofFile); err != nil {
+ t.Fatalf("error writing inline_hot.pprof: %v", err)
+ }
+
+ for _, file := range []string{"inline_hot.go", "inline_hot_test.go"} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ testPGOIntendedInlining(t, dir)
+ })
+ }
+}
+
+func copyFile(dst, src string) error {
+ s, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer s.Close()
+
+ d, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer d.Close()
+
+ _, err = io.Copy(d, s)
+ return err
+}
+
+// TestPGOHash tests that PGO optimization decisions can be selected by pgohash.
+func TestPGOHash(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ const pkg = "example.com/pgo/inline"
+
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error getting wd: %v", err)
+ }
+ srcDir := filepath.Join(wd, "testdata/pgo/inline")
+
+ // Copy the module to a scratch location so we can add a go.mod.
+ dir := t.TempDir()
+
+ for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} {
+ if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil {
+ t.Fatalf("error copying %s: %v", file, err)
+ }
+ }
+
+ pprof := filepath.Join(dir, "inline_hot.pprof")
+ // build with -trimpath so the source location (thus the hash)
+ // does not depend on the temporary directory path.
+ gcflag0 := fmt.Sprintf("-pgoprofile=%s -trimpath %s=>%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90,pgodebug=1", pprof, dir, pkg)
+
+ // Check that a hash match allows PGO inlining.
+ const srcPos = "example.com/pgo/inline/inline_hot.go:81:19"
+ const hashMatch = "pgohash triggered " + srcPos + " (inline)"
+ pgoDebugRE := regexp.MustCompile(`hot-budget check allows inlining for call .* at ` + strings.ReplaceAll(srcPos, ".", "\\."))
+ hash := "v1" // 1 matches srcPos, v for verbose (print source location)
+ gcflag := gcflag0 + ",pgohash=" + hash
+ out := buildPGOInliningTest(t, dir, gcflag)
+ if !bytes.Contains(out, []byte(hashMatch)) || !pgoDebugRE.Match(out) {
+ t.Errorf("output does not contain expected source line, out:\n%s", out)
+ }
+
+ // Check that a hash mismatch turns off PGO inlining.
+ hash = "v0" // 0 should not match srcPos
+ gcflag = gcflag0 + ",pgohash=" + hash
+ out = buildPGOInliningTest(t, dir, gcflag)
+ if bytes.Contains(out, []byte(hashMatch)) || pgoDebugRE.Match(out) {
+ t.Errorf("output contains unexpected source line, out:\n%s", out)
+ }
+}
diff --git a/src/cmd/compile/internal/test/race.go b/src/cmd/compile/internal/test/race.go
new file mode 100644
index 0000000..b721538
--- /dev/null
+++ b/src/cmd/compile/internal/test/race.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !compiler_bootstrap
+
+package test
+
+// The racecompile builder only builds packages, but does not build
+// or run tests. This is a non-test file to hold cases that (used
+// to) trigger compiler data races, so they will be exercised on
+// the racecompile builder.
+//
+// This package is not imported so functions here are not included
+// in the actual compiler.
+
+// Issue 55357: data race when building multiple instantiations of
+// generic closures with _ parameters.
+func Issue55357() {
+ type U struct {
+ A int
+ B string
+ C string
+ }
+ var q T55357[U]
+ q.Count()
+ q.List()
+
+ type M struct {
+ A int64
+ B uint32
+ C uint32
+ }
+ var q2 T55357[M]
+ q2.Count()
+ q2.List()
+}
+
+type T55357[T any] struct{}
+
+//go:noinline
+func (q *T55357[T]) do(w, v bool, fn func(bk []byte, v T) error) error {
+ return nil
+}
+
+func (q *T55357[T]) Count() (n int, rerr error) {
+ err := q.do(false, false, func(kb []byte, _ T) error {
+ n++
+ return nil
+ })
+ return n, err
+}
+
+func (q *T55357[T]) List() (list []T, rerr error) {
+ var l []T
+ err := q.do(false, true, func(_ []byte, v T) error {
+ l = append(l, v)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return l, nil
+}
diff --git a/src/cmd/compile/internal/test/reproduciblebuilds_test.go b/src/cmd/compile/internal/test/reproduciblebuilds_test.go
new file mode 100644
index 0000000..466e0c3
--- /dev/null
+++ b/src/cmd/compile/internal/test/reproduciblebuilds_test.go
@@ -0,0 +1,106 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestReproducibleBuilds(t *testing.T) {
+ tests := []string{
+ "issue20272.go",
+ "issue27013.go",
+ "issue30202.go",
+ }
+
+ testenv.MustHaveGoBuild(t)
+ iters := 10
+ if testing.Short() {
+ iters = 4
+ }
+ t.Parallel()
+ for _, test := range tests {
+ test := test
+ t.Run(test, func(t *testing.T) {
+ t.Parallel()
+ var want []byte
+ tmp, err := os.CreateTemp("", "")
+ if err != nil {
+ t.Fatalf("temp file creation failed: %v", err)
+ }
+ defer os.Remove(tmp.Name())
+ defer tmp.Close()
+ for i := 0; i < iters; i++ {
+ // Note: use -c 2 to expose any nondeterminism which is the result
+ // of the runtime scheduler.
+ out, err := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=p", "-c", "2", "-o", tmp.Name(), filepath.Join("testdata", "reproducible", test)).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to compile: %v\n%s", err, out)
+ }
+ obj, err := os.ReadFile(tmp.Name())
+ if err != nil {
+ t.Fatalf("failed to read object file: %v", err)
+ }
+ if i == 0 {
+ want = obj
+ } else {
+ if !bytes.Equal(want, obj) {
+ t.Fatalf("builds produced different output after %d iters (%d bytes vs %d bytes)", i, len(want), len(obj))
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestIssue38068(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Compile a small package with and without the concurrent
+ // backend, then check to make sure that the resulting archives
+ // are identical. Note: this uses "go tool compile" instead of
+ // "go build" since the latter will generate different build IDs
+ // if it sees different command line flags.
+ scenarios := []struct {
+ tag string
+ args string
+ libpath string
+ }{
+ {tag: "serial", args: "-c=1"},
+ {tag: "concurrent", args: "-c=2"}}
+
+ tmpdir := t.TempDir()
+
+ src := filepath.Join("testdata", "reproducible", "issue38068.go")
+ for i := range scenarios {
+ s := &scenarios[i]
+ s.libpath = filepath.Join(tmpdir, s.tag+".a")
+ // Note: use of "-p" required in order for DWARF to be generated.
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=issue38068", "-buildid=", s.args, "-o", s.libpath, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
+ }
+ }
+
+ readBytes := func(fn string) []byte {
+ payload, err := os.ReadFile(fn)
+ if err != nil {
+ t.Fatalf("failed to read executable '%s': %v", fn, err)
+ }
+ return payload
+ }
+
+ b1 := readBytes(scenarios[0].libpath)
+ b2 := readBytes(scenarios[1].libpath)
+ if !bytes.Equal(b1, b2) {
+ t.Fatalf("concurrent and serial builds produced different output")
+ }
+}
diff --git a/src/cmd/compile/internal/test/shift_test.go b/src/cmd/compile/internal/test/shift_test.go
new file mode 100644
index 0000000..dd893a1
--- /dev/null
+++ b/src/cmd/compile/internal/test/shift_test.go
@@ -0,0 +1,1152 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Tests shifts of zero.
+
+//go:noinline
+func ofz64l64(n uint64) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l32(n uint32) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l16(n uint16) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l8(n uint8) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64r64(n uint64) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r32(n uint32) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r16(n uint16) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r8(n uint8) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur64(n uint64) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur32(n uint32) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur16(n uint16) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur8(n uint8) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz32l64(n uint64) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l32(n uint32) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l16(n uint16) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l8(n uint8) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32r64(n uint64) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r32(n uint32) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r16(n uint16) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r8(n uint8) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur64(n uint64) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur32(n uint32) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur16(n uint16) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur8(n uint8) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz16l64(n uint64) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l32(n uint32) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l16(n uint16) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l8(n uint8) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16r64(n uint64) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r32(n uint32) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r16(n uint16) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r8(n uint8) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur64(n uint64) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur32(n uint32) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur16(n uint16) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur8(n uint8) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz8l64(n uint64) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l32(n uint32) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l16(n uint16) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l8(n uint8) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8r64(n uint64) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r32(n uint32) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r16(n uint16) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r8(n uint8) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur64(n uint64) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur32(n uint32) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur16(n uint16) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur8(n uint8) uint8 {
+ var x uint8
+ return x >> n
+}
+
+func TestShiftOfZero(t *testing.T) {
+ if got := ofz64l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz32l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz16l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz8l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+}
+
+//go:noinline
+func byz64l(n int64) int64 {
+ return n << 0
+}
+
+//go:noinline
+func byz64r(n int64) int64 {
+ return n >> 0
+}
+
+//go:noinline
+func byz64ur(n uint64) uint64 {
+ return n >> 0
+}
+
+//go:noinline
+func byz32l(n int32) int32 {
+ return n << 0
+}
+
+//go:noinline
+func byz32r(n int32) int32 {
+ return n >> 0
+}
+
+//go:noinline
+func byz32ur(n uint32) uint32 {
+ return n >> 0
+}
+
+//go:noinline
+func byz16l(n int16) int16 {
+ return n << 0
+}
+
+//go:noinline
+func byz16r(n int16) int16 {
+ return n >> 0
+}
+
+//go:noinline
+func byz16ur(n uint16) uint16 {
+ return n >> 0
+}
+
+//go:noinline
+func byz8l(n int8) int8 {
+ return n << 0
+}
+
+//go:noinline
+func byz8r(n int8) int8 {
+ return n >> 0
+}
+
+//go:noinline
+func byz8ur(n uint8) uint8 {
+ return n >> 0
+}
+
+func TestShiftByZero(t *testing.T) {
+ {
+ var n int64 = 0x5555555555555555
+ if got := byz64l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz64r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint64 = 0xaaaaaaaaaaaaaaaa
+ if got := byz64ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int32 = 0x55555555
+ if got := byz32l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz32r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint32 = 0xaaaaaaaa
+ if got := byz32ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int16 = 0x5555
+ if got := byz16l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz16r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint16 = 0xaaaa
+ if got := byz16ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int8 = 0x55
+ if got := byz8l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz8r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint8 = 0x55
+ if got := byz8ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+}
+
+//go:noinline
+func two64l(x int64) int64 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two64r(x int64) int64 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two64ur(x uint64) uint64 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two32l(x int32) int32 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two32r(x int32) int32 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two32ur(x uint32) uint32 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two16l(x int16) int16 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two16r(x int16) int16 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two16ur(x uint16) uint16 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two8l(x int8) int8 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two8r(x int8) int8 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two8ur(x uint8) uint8 {
+ return x >> 1 >> 1
+}
+
+func TestShiftCombine(t *testing.T) {
+ if got, want := two64l(4), int64(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two64r(64), int64(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two64ur(64), uint64(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two32l(4), int32(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two32r(64), int32(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two32ur(64), uint32(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two16l(4), int16(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two16r(64), int16(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two16ur(64), uint16(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two8l(4), int8(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two8r(64), int8(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two8ur(64), uint8(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+
+}
+
+//go:noinline
+func three64l(x int64) int64 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three64ul(x uint64) uint64 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three64r(x int64) int64 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three64ur(x uint64) uint64 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three32l(x int32) int32 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three32ul(x uint32) uint32 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three32r(x int32) int32 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three32ur(x uint32) uint32 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three16l(x int16) int16 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three16ul(x uint16) uint16 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three16r(x int16) int16 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three16ur(x uint16) uint16 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three8l(x int8) int8 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three8ul(x uint8) uint8 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three8r(x int8) int8 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three8ur(x uint8) uint8 {
+ return x >> 3 << 1 >> 2
+}
+
+func TestShiftCombine3(t *testing.T) {
+ if got, want := three64l(4), int64(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three64ul(4), uint64(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three64r(64), int64(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three64ur(64), uint64(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three32l(4), int32(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three32ul(4), uint32(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three32r(64), int32(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three32ur(64), uint32(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three16l(4), int16(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three16ul(4), uint16(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three16r(64), int16(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three16ur(64), uint16(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three8l(4), int8(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three8ul(4), uint8(64); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := three8r(64), int8(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := three8ur(64), uint8(4); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+}
+
+var (
+ one64 int64 = 1
+ one64u uint64 = 1
+ one32 int32 = 1
+ one32u uint32 = 1
+ one16 int16 = 1
+ one16u uint16 = 1
+ one8 int8 = 1
+ one8u uint8 = 1
+)
+
+func TestShiftLargeCombine(t *testing.T) {
+ var N uint64 = 0x8000000000000000
+ if one64<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+}
+
+func TestShiftLargeCombine3(t *testing.T) {
+ var N uint64 = 0x8000000000000001
+ if one64<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+}
+
+func TestShiftGeneric(t *testing.T) {
+ for _, test := range [...]struct {
+ valueWidth int
+ signed bool
+ shiftWidth int
+ left bool
+ f interface{}
+ }{
+ {64, true, 64, true, func(n int64, s uint64) int64 { return n << s }},
+ {64, true, 64, false, func(n int64, s uint64) int64 { return n >> s }},
+ {64, false, 64, false, func(n uint64, s uint64) uint64 { return n >> s }},
+ {64, true, 32, true, func(n int64, s uint32) int64 { return n << s }},
+ {64, true, 32, false, func(n int64, s uint32) int64 { return n >> s }},
+ {64, false, 32, false, func(n uint64, s uint32) uint64 { return n >> s }},
+ {64, true, 16, true, func(n int64, s uint16) int64 { return n << s }},
+ {64, true, 16, false, func(n int64, s uint16) int64 { return n >> s }},
+ {64, false, 16, false, func(n uint64, s uint16) uint64 { return n >> s }},
+ {64, true, 8, true, func(n int64, s uint8) int64 { return n << s }},
+ {64, true, 8, false, func(n int64, s uint8) int64 { return n >> s }},
+ {64, false, 8, false, func(n uint64, s uint8) uint64 { return n >> s }},
+
+ {32, true, 64, true, func(n int32, s uint64) int32 { return n << s }},
+ {32, true, 64, false, func(n int32, s uint64) int32 { return n >> s }},
+ {32, false, 64, false, func(n uint32, s uint64) uint32 { return n >> s }},
+ {32, true, 32, true, func(n int32, s uint32) int32 { return n << s }},
+ {32, true, 32, false, func(n int32, s uint32) int32 { return n >> s }},
+ {32, false, 32, false, func(n uint32, s uint32) uint32 { return n >> s }},
+ {32, true, 16, true, func(n int32, s uint16) int32 { return n << s }},
+ {32, true, 16, false, func(n int32, s uint16) int32 { return n >> s }},
+ {32, false, 16, false, func(n uint32, s uint16) uint32 { return n >> s }},
+ {32, true, 8, true, func(n int32, s uint8) int32 { return n << s }},
+ {32, true, 8, false, func(n int32, s uint8) int32 { return n >> s }},
+ {32, false, 8, false, func(n uint32, s uint8) uint32 { return n >> s }},
+
+ {16, true, 64, true, func(n int16, s uint64) int16 { return n << s }},
+ {16, true, 64, false, func(n int16, s uint64) int16 { return n >> s }},
+ {16, false, 64, false, func(n uint16, s uint64) uint16 { return n >> s }},
+ {16, true, 32, true, func(n int16, s uint32) int16 { return n << s }},
+ {16, true, 32, false, func(n int16, s uint32) int16 { return n >> s }},
+ {16, false, 32, false, func(n uint16, s uint32) uint16 { return n >> s }},
+ {16, true, 16, true, func(n int16, s uint16) int16 { return n << s }},
+ {16, true, 16, false, func(n int16, s uint16) int16 { return n >> s }},
+ {16, false, 16, false, func(n uint16, s uint16) uint16 { return n >> s }},
+ {16, true, 8, true, func(n int16, s uint8) int16 { return n << s }},
+ {16, true, 8, false, func(n int16, s uint8) int16 { return n >> s }},
+ {16, false, 8, false, func(n uint16, s uint8) uint16 { return n >> s }},
+
+ {8, true, 64, true, func(n int8, s uint64) int8 { return n << s }},
+ {8, true, 64, false, func(n int8, s uint64) int8 { return n >> s }},
+ {8, false, 64, false, func(n uint8, s uint64) uint8 { return n >> s }},
+ {8, true, 32, true, func(n int8, s uint32) int8 { return n << s }},
+ {8, true, 32, false, func(n int8, s uint32) int8 { return n >> s }},
+ {8, false, 32, false, func(n uint8, s uint32) uint8 { return n >> s }},
+ {8, true, 16, true, func(n int8, s uint16) int8 { return n << s }},
+ {8, true, 16, false, func(n int8, s uint16) int8 { return n >> s }},
+ {8, false, 16, false, func(n uint8, s uint16) uint8 { return n >> s }},
+ {8, true, 8, true, func(n int8, s uint8) int8 { return n << s }},
+ {8, true, 8, false, func(n int8, s uint8) int8 { return n >> s }},
+ {8, false, 8, false, func(n uint8, s uint8) uint8 { return n >> s }},
+ } {
+ fv := reflect.ValueOf(test.f)
+ var args [2]reflect.Value
+ for i := 0; i < test.valueWidth; i++ {
+ // Build value to be shifted.
+ var n int64 = 1
+ for j := 0; j < i; j++ {
+ n <<= 1
+ }
+ args[0] = reflect.ValueOf(n).Convert(fv.Type().In(0))
+ for s := 0; s <= test.shiftWidth; s++ {
+ args[1] = reflect.ValueOf(s).Convert(fv.Type().In(1))
+
+ // Compute desired result. We're testing variable shifts
+ // assuming constant shifts are correct.
+ r := n
+ var op string
+ switch {
+ case test.left:
+ op = "<<"
+ for j := 0; j < s; j++ {
+ r <<= 1
+ }
+ switch test.valueWidth {
+ case 32:
+ r = int64(int32(r))
+ case 16:
+ r = int64(int16(r))
+ case 8:
+ r = int64(int8(r))
+ }
+ case test.signed:
+ op = ">>"
+ switch test.valueWidth {
+ case 32:
+ r = int64(int32(r))
+ case 16:
+ r = int64(int16(r))
+ case 8:
+ r = int64(int8(r))
+ }
+ for j := 0; j < s; j++ {
+ r >>= 1
+ }
+ default:
+ op = ">>>"
+ for j := 0; j < s; j++ {
+ r = int64(uint64(r) >> 1)
+ }
+ }
+
+ // Call function.
+ res := fv.Call(args[:])[0].Convert(reflect.ValueOf(r).Type())
+
+ if res.Int() != r {
+ t.Errorf("%s%dx%d(%x,%x)=%x, want %x", op, test.valueWidth, test.shiftWidth, n, s, res.Int(), r)
+ }
+ }
+ }
+ }
+}
+
+var shiftSink64 int64
+
+func BenchmarkShiftArithmeticRight(b *testing.B) {
+ x := shiftSink64
+ for i := 0; i < b.N; i++ {
+ x = x >> (i & 63)
+ }
+ shiftSink64 = x
+}
+
+//go:noinline
+func incorrectRotate1(x, c uint64) uint64 {
+ // This should not compile to a rotate instruction.
+ return x<<c | x>>(64-c)
+}
+
+//go:noinline
+func incorrectRotate2(x uint64) uint64 {
+ var c uint64 = 66
+ // This should not compile to a rotate instruction.
+ return x<<c | x>>(64-c)
+}
+
+func TestIncorrectRotate(t *testing.T) {
+ if got := incorrectRotate1(1, 66); got != 0 {
+ t.Errorf("got %x want 0", got)
+ }
+ if got := incorrectRotate2(1); got != 0 {
+ t.Errorf("got %x want 0", got)
+ }
+}
+
+//go:noinline
+func variableShiftOverflow64x8(x int64, y, z uint8) (a, b, c int64) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int64(uint64(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow32x8(x int32, y, z uint8) (a, b, c int32) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int32(uint32(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow16x8(x int16, y, z uint8) (a, b, c int16) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int16(uint16(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow8x8(x int8, y, z uint8) (a, b, c int8) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int8(uint8(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow64x16(x int64, y, z uint16) (a, b, c int64) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int64(uint64(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow32x16(x int32, y, z uint16) (a, b, c int32) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int32(uint32(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow16x16(x int16, y, z uint16) (a, b, c int16) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int16(uint16(x) >> (y + z))
+}
+
+//go:noinline
+func variableShiftOverflow8x16(x int8, y, z uint16) (a, b, c int8) {
+ // Verify junk bits are ignored when doing a variable shift.
+ return x >> (y + z), x << (y + z), int8(uint8(x) >> (y + z))
+}
+
+//go:noinline
+func makeU8(x uint64) uint8 {
+ // Ensure the upper portions of the register are clear before testing large shift values
+ // using non-native types (e.g uint8 on PPC64).
+ return uint8(x)
+}
+
+//go:noinline
+func makeU16(x uint64) uint16 {
+ // Ensure the upper portions of the register are clear before testing large shift values
+ // using non-native types (e.g uint8 on PPC64).
+ return uint16(x)
+}
+
+func TestShiftOverflow(t *testing.T) {
+ if v, w, z := variableShiftOverflow64x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fffffffffffffe0 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffffffffffe0", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow32x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fffffe0 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffe0", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow16x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fe0 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fe0", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow8x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x60 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x60", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow64x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fffffffffffffe0 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffffffffffe0", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow32x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fffffe0 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffe0,", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow16x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fe0 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fe0", v, w, z)
+ }
+ if v, w, z := variableShiftOverflow8x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x60 {
+ t.Errorf("got %d %d 0x%x, expected -32 -128 0x60", v, w, z)
+ }
+}
diff --git a/src/cmd/compile/internal/test/ssa_test.go b/src/cmd/compile/internal/test/ssa_test.go
new file mode 100644
index 0000000..7f2faa1
--- /dev/null
+++ b/src/cmd/compile/internal/test/ssa_test.go
@@ -0,0 +1,179 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// runGenTest runs a test-generator, then runs the generated test.
+// Generated test can either fail in compilation or execution.
+// The environment variable parameter(s) is passed to the run
+// of the generated test.
+func runGenTest(t *testing.T, filename, tmpname string, ev ...string) {
+ testenv.MustHaveGoRun(t)
+ gotool := testenv.GoToolPath(t)
+ var stdout, stderr bytes.Buffer
+ cmd := testenv.Command(t, gotool, "run", filepath.Join("testdata", filename))
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
+ }
+ // Write stdout into a temporary file
+ rungo := filepath.Join(t.TempDir(), "run.go")
+ ok := os.WriteFile(rungo, stdout.Bytes(), 0600)
+ if ok != nil {
+ t.Fatalf("Failed to create temporary file " + rungo)
+ }
+
+ stdout.Reset()
+ stderr.Reset()
+ cmd = testenv.Command(t, gotool, "run", "-gcflags=-d=ssa/check/on", rungo)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ cmd.Env = append(cmd.Env, ev...)
+ err := cmd.Run()
+ if err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
+ }
+ if s := stderr.String(); s != "" {
+ t.Errorf("Stderr = %s\nWant empty", s)
+ }
+ if s := stdout.String(); s != "" {
+ t.Errorf("Stdout = %s\nWant empty", s)
+ }
+}
+
+func TestGenFlowGraph(t *testing.T) {
+ if testing.Short() {
+ t.Skip("not run in short mode.")
+ }
+ runGenTest(t, "flowgraph_generator1.go", "ssa_fg_tmp1")
+}
+
+// TestCode runs all the tests in the testdata directory as subtests.
+// These tests are special because we want to run them with different
+// compiler flags set (and thus they can't just be _test.go files in
+// this directory).
+func TestCode(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ gotool := testenv.GoToolPath(t)
+
+ // Make a temporary directory to work in.
+ tmpdir := t.TempDir()
+
+ // Find all the test functions (and the files containing them).
+ var srcs []string // files containing Test functions
+ type test struct {
+ name string // TestFoo
+ usesFloat bool // might use float operations
+ }
+ var tests []test
+ files, err := os.ReadDir("testdata")
+ if err != nil {
+ t.Fatalf("can't read testdata directory: %v", err)
+ }
+ for _, f := range files {
+ if !strings.HasSuffix(f.Name(), "_test.go") {
+ continue
+ }
+ text, err := os.ReadFile(filepath.Join("testdata", f.Name()))
+ if err != nil {
+ t.Fatalf("can't read testdata/%s: %v", f.Name(), err)
+ }
+ fset := token.NewFileSet()
+ code, err := parser.ParseFile(fset, f.Name(), text, 0)
+ if err != nil {
+ t.Fatalf("can't parse testdata/%s: %v", f.Name(), err)
+ }
+ srcs = append(srcs, filepath.Join("testdata", f.Name()))
+ foundTest := false
+ for _, d := range code.Decls {
+ fd, ok := d.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !strings.HasPrefix(fd.Name.Name, "Test") {
+ continue
+ }
+ if fd.Recv != nil {
+ continue
+ }
+ if fd.Type.Results != nil {
+ continue
+ }
+ if len(fd.Type.Params.List) != 1 {
+ continue
+ }
+ p := fd.Type.Params.List[0]
+ if len(p.Names) != 1 {
+ continue
+ }
+ s, ok := p.Type.(*ast.StarExpr)
+ if !ok {
+ continue
+ }
+ sel, ok := s.X.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+ base, ok := sel.X.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if base.Name != "testing" {
+ continue
+ }
+ if sel.Sel.Name != "T" {
+ continue
+ }
+ // Found a testing function.
+ tests = append(tests, test{name: fd.Name.Name, usesFloat: bytes.Contains(text, []byte("float"))})
+ foundTest = true
+ }
+ if !foundTest {
+ t.Fatalf("test file testdata/%s has no tests in it", f.Name())
+ }
+ }
+
+ flags := []string{""}
+ if runtime.GOARCH == "arm" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "386" {
+ flags = append(flags, ",softfloat")
+ }
+ for _, flag := range flags {
+ args := []string{"test", "-c", "-gcflags=-d=ssa/check/on" + flag, "-o", filepath.Join(tmpdir, "code.test")}
+ args = append(args, srcs...)
+ out, err := testenv.Command(t, gotool, args...).CombinedOutput()
+ if err != nil || len(out) != 0 {
+ t.Fatalf("Build failed: %v\n%s\n", err, out)
+ }
+
+ // Now we have a test binary. Run it with all the tests as subtests of this one.
+ for _, test := range tests {
+ test := test
+ if flag == ",softfloat" && !test.usesFloat {
+ // No point in running the soft float version if the test doesn't use floats.
+ continue
+ }
+ t.Run(fmt.Sprintf("%s%s", test.name[4:], flag), func(t *testing.T) {
+ out, err := testenv.Command(t, filepath.Join(tmpdir, "code.test"), "-test.run=^"+test.name+"$").CombinedOutput()
+ if err != nil || string(out) != "PASS\n" {
+ t.Errorf("Failed:\n%s\n", out)
+ }
+ })
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/switch_test.go b/src/cmd/compile/internal/test/switch_test.go
new file mode 100644
index 0000000..1d12361
--- /dev/null
+++ b/src/cmd/compile/internal/test/switch_test.go
@@ -0,0 +1,296 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "math/bits"
+ "testing"
+)
+
+func BenchmarkSwitch8Predictable(b *testing.B) {
+ benchmarkSwitch8(b, true)
+}
+func BenchmarkSwitch8Unpredictable(b *testing.B) {
+ benchmarkSwitch8(b, false)
+}
+func benchmarkSwitch8(b *testing.B, predictable bool) {
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch rng.value() & 7 {
+ case 0:
+ n += 1
+ case 1:
+ n += 2
+ case 2:
+ n += 3
+ case 3:
+ n += 4
+ case 4:
+ n += 5
+ case 5:
+ n += 6
+ case 6:
+ n += 7
+ case 7:
+ n += 8
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitch32Predictable(b *testing.B) {
+ benchmarkSwitch32(b, true)
+}
+func BenchmarkSwitch32Unpredictable(b *testing.B) {
+ benchmarkSwitch32(b, false)
+}
+func benchmarkSwitch32(b *testing.B, predictable bool) {
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch rng.value() & 31 {
+ case 0, 1, 2:
+ n += 1
+ case 4, 5, 6:
+ n += 2
+ case 8, 9, 10:
+ n += 3
+ case 12, 13, 14:
+ n += 4
+ case 16, 17, 18:
+ n += 5
+ case 20, 21, 22:
+ n += 6
+ case 24, 25, 26:
+ n += 7
+ case 28, 29, 30:
+ n += 8
+ default:
+ n += 9
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitchStringPredictable(b *testing.B) {
+ benchmarkSwitchString(b, true)
+}
+func BenchmarkSwitchStringUnpredictable(b *testing.B) {
+ benchmarkSwitchString(b, false)
+}
+func benchmarkSwitchString(b *testing.B, predictable bool) {
+ a := []string{
+ "foo",
+ "foo1",
+ "foo22",
+ "foo333",
+ "foo4444",
+ "foo55555",
+ "foo666666",
+ "foo7777777",
+ }
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch a[rng.value()&7] {
+ case "foo":
+ n += 1
+ case "foo1":
+ n += 2
+ case "foo22":
+ n += 3
+ case "foo333":
+ n += 4
+ case "foo4444":
+ n += 5
+ case "foo55555":
+ n += 6
+ case "foo666666":
+ n += 7
+ case "foo7777777":
+ n += 8
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitchTypePredictable(b *testing.B) {
+ benchmarkSwitchType(b, true)
+}
+func BenchmarkSwitchTypeUnpredictable(b *testing.B) {
+ benchmarkSwitchType(b, false)
+}
+func benchmarkSwitchType(b *testing.B, predictable bool) {
+ a := []any{
+ int8(1),
+ int16(2),
+ int32(3),
+ int64(4),
+ uint8(5),
+ uint16(6),
+ uint32(7),
+ uint64(8),
+ }
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch a[rng.value()&7].(type) {
+ case int8:
+ n += 1
+ case int16:
+ n += 2
+ case int32:
+ n += 3
+ case int64:
+ n += 4
+ case uint8:
+ n += 5
+ case uint16:
+ n += 6
+ case uint32:
+ n += 7
+ case uint64:
+ n += 8
+ }
+ }
+ sink = n
+}
+
+func BenchmarkSwitchInterfaceTypePredictable(b *testing.B) {
+ benchmarkSwitchInterfaceType(b, true)
+}
+func BenchmarkSwitchInterfaceTypeUnpredictable(b *testing.B) {
+ benchmarkSwitchInterfaceType(b, false)
+}
+
+type SI0 interface {
+ si0()
+}
+type ST0 struct {
+}
+
+func (ST0) si0() {
+}
+
+type SI1 interface {
+ si1()
+}
+type ST1 struct {
+}
+
+func (ST1) si1() {
+}
+
+type SI2 interface {
+ si2()
+}
+type ST2 struct {
+}
+
+func (ST2) si2() {
+}
+
+type SI3 interface {
+ si3()
+}
+type ST3 struct {
+}
+
+func (ST3) si3() {
+}
+
+type SI4 interface {
+ si4()
+}
+type ST4 struct {
+}
+
+func (ST4) si4() {
+}
+
+type SI5 interface {
+ si5()
+}
+type ST5 struct {
+}
+
+func (ST5) si5() {
+}
+
+type SI6 interface {
+ si6()
+}
+type ST6 struct {
+}
+
+func (ST6) si6() {
+}
+
+type SI7 interface {
+ si7()
+}
+type ST7 struct {
+}
+
+func (ST7) si7() {
+}
+
+func benchmarkSwitchInterfaceType(b *testing.B, predictable bool) {
+ a := []any{
+ ST0{},
+ ST1{},
+ ST2{},
+ ST3{},
+ ST4{},
+ ST5{},
+ ST6{},
+ ST7{},
+ }
+ n := 0
+ rng := newRNG()
+ for i := 0; i < b.N; i++ {
+ rng = rng.next(predictable)
+ switch a[rng.value()&7].(type) {
+ case SI0:
+ n += 1
+ case SI1:
+ n += 2
+ case SI2:
+ n += 3
+ case SI3:
+ n += 4
+ case SI4:
+ n += 5
+ case SI5:
+ n += 6
+ case SI6:
+ n += 7
+ case SI7:
+ n += 8
+ }
+ }
+ sink = n
+}
+
+// A simple random number generator used to make switches conditionally predictable.
+type rng uint64
+
+func newRNG() rng {
+ return 1
+}
+func (r rng) next(predictable bool) rng {
+ if predictable {
+ return r + 1
+ }
+ return rng(bits.RotateLeft64(uint64(r), 13) * 0x3c374d)
+}
+func (r rng) value() uint64 {
+ return uint64(r)
+}
diff --git a/src/cmd/compile/internal/test/test.go b/src/cmd/compile/internal/test/test.go
new file mode 100644
index 0000000..195c65a
--- /dev/null
+++ b/src/cmd/compile/internal/test/test.go
@@ -0,0 +1,5 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
diff --git a/src/cmd/compile/internal/test/testdata/addressed_test.go b/src/cmd/compile/internal/test/testdata/addressed_test.go
new file mode 100644
index 0000000..4cc9ac4
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/addressed_test.go
@@ -0,0 +1,214 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+var output string
+
+func mypanic(t *testing.T, s string) {
+ t.Fatalf(s + "\n" + output)
+
+}
+
+func assertEqual(t *testing.T, x, y int) {
+ if x != y {
+ mypanic(t, fmt.Sprintf("assertEqual failed got %d, want %d", x, y))
+ }
+}
+
+func TestAddressed(t *testing.T) {
+ x := f1_ssa(2, 3)
+ output += fmt.Sprintln("*x is", *x)
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*x is", *x)
+ assertEqual(t, *x, 9)
+
+ w := f3a_ssa(6)
+ output += fmt.Sprintln("*w is", *w)
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*w is", *w)
+ assertEqual(t, *w, 6)
+
+ y := f3b_ssa(12)
+ output += fmt.Sprintln("*y.(*int) is", *y.(*int))
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*y.(*int) is", *y.(*int))
+ assertEqual(t, *y.(*int), 12)
+
+ z := f3c_ssa(8)
+ output += fmt.Sprintln("*z.(*int) is", *z.(*int))
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*z.(*int) is", *z.(*int))
+ assertEqual(t, *z.(*int), 8)
+
+ args(t)
+ test_autos(t)
+}
+
+//go:noinline
+func f1_ssa(x, y int) *int {
+ x = x*y + y
+ return &x
+}
+
+//go:noinline
+func f3a_ssa(x int) *int {
+ return &x
+}
+
+//go:noinline
+func f3b_ssa(x int) interface{} { // ./foo.go:15: internal error: f3b_ssa ~r1 (type interface {}) recorded as live on entry
+ return &x
+}
+
+//go:noinline
+func f3c_ssa(y int) interface{} {
+ x := y
+ return &x
+}
+
+type V struct {
+ p *V
+ w, x int64
+}
+
+func args(t *testing.T) {
+ v := V{p: nil, w: 1, x: 1}
+ a := V{p: &v, w: 2, x: 2}
+ b := V{p: &v, w: 0, x: 0}
+ i := v.args_ssa(a, b)
+ output += fmt.Sprintln("i=", i)
+ assertEqual(t, int(i), 2)
+}
+
+//go:noinline
+func (v V) args_ssa(a, b V) int64 {
+ if v.w == 0 {
+ return v.x
+ }
+ if v.w == 1 {
+ return a.x
+ }
+ if v.w == 2 {
+ return b.x
+ }
+ b.p.p = &a // v.p in caller = &a
+
+ return -1
+}
+
+func test_autos(t *testing.T) {
+ test(t, 11)
+ test(t, 12)
+ test(t, 13)
+ test(t, 21)
+ test(t, 22)
+ test(t, 23)
+ test(t, 31)
+ test(t, 32)
+}
+
+func test(t *testing.T, which int64) {
+ output += fmt.Sprintln("test", which)
+ v1 := V{w: 30, x: 3, p: nil}
+ v2, v3 := v1.autos_ssa(which, 10, 1, 20, 2)
+ if which != v2.val() {
+ output += fmt.Sprintln("Expected which=", which, "got v2.val()=", v2.val())
+ mypanic(t, "Failure of expected V value")
+ }
+ if v2.p.val() != v3.val() {
+ output += fmt.Sprintln("Expected v2.p.val()=", v2.p.val(), "got v3.val()=", v3.val())
+ mypanic(t, "Failure of expected V.p value")
+ }
+ if which != v3.p.p.p.p.p.p.p.val() {
+ output += fmt.Sprintln("Expected which=", which, "got v3.p.p.p.p.p.p.p.val()=", v3.p.p.p.p.p.p.p.val())
+ mypanic(t, "Failure of expected V.p value")
+ }
+}
+
+func (v V) val() int64 {
+ return v.w + v.x
+}
+
+// autos_ssa uses contents of v and parameters w1, w2, x1, x2
+// to initialize a bunch of locals, all of which have their
+// address taken to force heap allocation, and then based on
+// the value of which a pair of those locals are copied in
+// various ways to the two results y, and z, which are also
+// addressed. Which is expected to be one of 11-13, 21-23, 31, 32,
+// and y.val() should be equal to which and y.p.val() should
+// be equal to z.val(). Also, x(.p)**8 == x; that is, the
+// autos are all linked into a ring.
+//
+//go:noinline
+func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
+ fill_ssa(v.w, v.x, &v, v.p) // gratuitous no-op to force addressing
+ var a, b, c, d, e, f, g, h V
+ fill_ssa(w1, x1, &a, &b)
+ fill_ssa(w1, x2, &b, &c)
+ fill_ssa(w1, v.x, &c, &d)
+ fill_ssa(w2, x1, &d, &e)
+ fill_ssa(w2, x2, &e, &f)
+ fill_ssa(w2, v.x, &f, &g)
+ fill_ssa(v.w, x1, &g, &h)
+ fill_ssa(v.w, x2, &h, &a)
+ switch which {
+ case 11:
+ y = a
+ z.getsI(&b)
+ case 12:
+ y.gets(&b)
+ z = c
+ case 13:
+ y.gets(&c)
+ z = d
+ case 21:
+ y.getsI(&d)
+ z.gets(&e)
+ case 22:
+ y = e
+ z = f
+ case 23:
+ y.gets(&f)
+ z.getsI(&g)
+ case 31:
+ y = g
+ z.gets(&h)
+ case 32:
+ y.getsI(&h)
+ z = a
+ default:
+
+ panic("")
+ }
+ return
+}
+
+// gets is an address-mentioning way of implementing
+// structure assignment.
+//
+//go:noinline
+func (to *V) gets(from *V) {
+ *to = *from
+}
+
+// gets is an address-and-interface-mentioning way of
+// implementing structure assignment.
+//
+//go:noinline
+func (to *V) getsI(from interface{}) {
+ *to = *from.(*V)
+}
+
+// fill_ssa initializes r with V{w:w, x:x, p:p}
+//
+//go:noinline
+func fill_ssa(w, x int64, r, p *V) {
+ *r = V{w: w, x: x, p: p}
+}
diff --git a/src/cmd/compile/internal/test/testdata/append_test.go b/src/cmd/compile/internal/test/testdata/append_test.go
new file mode 100644
index 0000000..6663ce7
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/append_test.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// append_ssa.go tests append operations.
+package main
+
+import "testing"
+
+//go:noinline
+func appendOne_ssa(a []int, x int) []int {
+ return append(a, x)
+}
+
+//go:noinline
+func appendThree_ssa(a []int, x, y, z int) []int {
+ return append(a, x, y, z)
+}
+
+func eqBytes(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func expect(t *testing.T, got, want []int) {
+ if eqBytes(got, want) {
+ return
+ }
+ t.Errorf("expected %v, got %v\n", want, got)
+}
+
+func testAppend(t *testing.T) {
+ var store [7]int
+ a := store[:0]
+
+ a = appendOne_ssa(a, 1)
+ expect(t, a, []int{1})
+ a = appendThree_ssa(a, 2, 3, 4)
+ expect(t, a, []int{1, 2, 3, 4})
+ a = appendThree_ssa(a, 5, 6, 7)
+ expect(t, a, []int{1, 2, 3, 4, 5, 6, 7})
+ if &a[0] != &store[0] {
+ t.Errorf("unnecessary grow")
+ }
+ a = appendOne_ssa(a, 8)
+ expect(t, a, []int{1, 2, 3, 4, 5, 6, 7, 8})
+ if &a[0] == &store[0] {
+ t.Errorf("didn't grow")
+ }
+}
+
+func TestAppend(t *testing.T) {
+ testAppend(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/arithBoundary_test.go b/src/cmd/compile/internal/test/testdata/arithBoundary_test.go
new file mode 100644
index 0000000..777b7cd
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/arithBoundary_test.go
@@ -0,0 +1,694 @@
+// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type utd64 struct {
+ a, b uint64
+ add, sub, mul, div, mod uint64
+}
+type itd64 struct {
+ a, b int64
+ add, sub, mul, div, mod int64
+}
+type utd32 struct {
+ a, b uint32
+ add, sub, mul, div, mod uint32
+}
+type itd32 struct {
+ a, b int32
+ add, sub, mul, div, mod int32
+}
+type utd16 struct {
+ a, b uint16
+ add, sub, mul, div, mod uint16
+}
+type itd16 struct {
+ a, b int16
+ add, sub, mul, div, mod int16
+}
+type utd8 struct {
+ a, b uint8
+ add, sub, mul, div, mod uint8
+}
+type itd8 struct {
+ a, b int8
+ add, sub, mul, div, mod int8
+}
+
+//go:noinline
+func add_uint64_ssa(a, b uint64) uint64 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint64_ssa(a, b uint64) uint64 {
+ return a - b
+}
+
+//go:noinline
+func div_uint64_ssa(a, b uint64) uint64 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint64_ssa(a, b uint64) uint64 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint64_ssa(a, b uint64) uint64 {
+ return a * b
+}
+
+//go:noinline
+func add_int64_ssa(a, b int64) int64 {
+ return a + b
+}
+
+//go:noinline
+func sub_int64_ssa(a, b int64) int64 {
+ return a - b
+}
+
+//go:noinline
+func div_int64_ssa(a, b int64) int64 {
+ return a / b
+}
+
+//go:noinline
+func mod_int64_ssa(a, b int64) int64 {
+ return a % b
+}
+
+//go:noinline
+func mul_int64_ssa(a, b int64) int64 {
+ return a * b
+}
+
+//go:noinline
+func add_uint32_ssa(a, b uint32) uint32 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint32_ssa(a, b uint32) uint32 {
+ return a - b
+}
+
+//go:noinline
+func div_uint32_ssa(a, b uint32) uint32 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint32_ssa(a, b uint32) uint32 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint32_ssa(a, b uint32) uint32 {
+ return a * b
+}
+
+//go:noinline
+func add_int32_ssa(a, b int32) int32 {
+ return a + b
+}
+
+//go:noinline
+func sub_int32_ssa(a, b int32) int32 {
+ return a - b
+}
+
+//go:noinline
+func div_int32_ssa(a, b int32) int32 {
+ return a / b
+}
+
+//go:noinline
+func mod_int32_ssa(a, b int32) int32 {
+ return a % b
+}
+
+//go:noinline
+func mul_int32_ssa(a, b int32) int32 {
+ return a * b
+}
+
+//go:noinline
+func add_uint16_ssa(a, b uint16) uint16 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint16_ssa(a, b uint16) uint16 {
+ return a - b
+}
+
+//go:noinline
+func div_uint16_ssa(a, b uint16) uint16 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint16_ssa(a, b uint16) uint16 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint16_ssa(a, b uint16) uint16 {
+ return a * b
+}
+
+//go:noinline
+func add_int16_ssa(a, b int16) int16 {
+ return a + b
+}
+
+//go:noinline
+func sub_int16_ssa(a, b int16) int16 {
+ return a - b
+}
+
+//go:noinline
+func div_int16_ssa(a, b int16) int16 {
+ return a / b
+}
+
+//go:noinline
+func mod_int16_ssa(a, b int16) int16 {
+ return a % b
+}
+
+//go:noinline
+func mul_int16_ssa(a, b int16) int16 {
+ return a * b
+}
+
+//go:noinline
+func add_uint8_ssa(a, b uint8) uint8 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint8_ssa(a, b uint8) uint8 {
+ return a - b
+}
+
+//go:noinline
+func div_uint8_ssa(a, b uint8) uint8 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint8_ssa(a, b uint8) uint8 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint8_ssa(a, b uint8) uint8 {
+ return a * b
+}
+
+//go:noinline
+func add_int8_ssa(a, b int8) int8 {
+ return a + b
+}
+
+//go:noinline
+func sub_int8_ssa(a, b int8) int8 {
+ return a - b
+}
+
+//go:noinline
+func div_int8_ssa(a, b int8) int8 {
+ return a / b
+}
+
+//go:noinline
+func mod_int8_ssa(a, b int8) int8 {
+ return a % b
+}
+
+//go:noinline
+func mul_int8_ssa(a, b int8) int8 {
+ return a * b
+}
+
+var uint64_data []utd64 = []utd64{utd64{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd64{a: 0, b: 1, add: 1, sub: 18446744073709551615, mul: 0, div: 0, mod: 0},
+ utd64{a: 0, b: 4294967296, add: 4294967296, sub: 18446744069414584320, mul: 0, div: 0, mod: 0},
+ utd64{a: 0, b: 18446744073709551615, add: 18446744073709551615, sub: 1, mul: 0, div: 0, mod: 0},
+ utd64{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd64{a: 1, b: 4294967296, add: 4294967297, sub: 18446744069414584321, mul: 4294967296, div: 0, mod: 1},
+ utd64{a: 1, b: 18446744073709551615, add: 0, sub: 2, mul: 18446744073709551615, div: 0, mod: 1},
+ utd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0},
+ utd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ utd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ utd64{a: 4294967296, b: 18446744073709551615, add: 4294967295, sub: 4294967297, mul: 18446744069414584320, div: 0, mod: 4294967296},
+ utd64{a: 18446744073709551615, b: 0, add: 18446744073709551615, sub: 18446744073709551615, mul: 0},
+ utd64{a: 18446744073709551615, b: 1, add: 0, sub: 18446744073709551614, mul: 18446744073709551615, div: 18446744073709551615, mod: 0},
+ utd64{a: 18446744073709551615, b: 4294967296, add: 4294967295, sub: 18446744069414584319, mul: 18446744069414584320, div: 4294967295, mod: 4294967295},
+ utd64{a: 18446744073709551615, b: 18446744073709551615, add: 18446744073709551614, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int64_data []itd64 = []itd64{itd64{a: -9223372036854775808, b: -9223372036854775808, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: -9223372036854775808, b: -9223372036854775807, add: 1, sub: -1, mul: -9223372036854775808, div: 1, mod: -1},
+ itd64{a: -9223372036854775808, b: -4294967296, add: 9223372032559808512, sub: -9223372032559808512, mul: 0, div: 2147483648, mod: 0},
+ itd64{a: -9223372036854775808, b: -1, add: 9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0},
+ itd64{a: -9223372036854775808, b: 0, add: -9223372036854775808, sub: -9223372036854775808, mul: 0},
+ itd64{a: -9223372036854775808, b: 1, add: -9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0},
+ itd64{a: -9223372036854775808, b: 4294967296, add: -9223372032559808512, sub: 9223372032559808512, mul: 0, div: -2147483648, mod: 0},
+ itd64{a: -9223372036854775808, b: 9223372036854775806, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd64{a: -9223372036854775808, b: 9223372036854775807, add: -1, sub: 1, mul: -9223372036854775808, div: -1, mod: -1},
+ itd64{a: -9223372036854775807, b: -9223372036854775808, add: 1, sub: 1, mul: -9223372036854775808, div: 0, mod: -9223372036854775807},
+ itd64{a: -9223372036854775807, b: -9223372036854775807, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: -9223372036854775807, b: -4294967296, add: 9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 2147483647, mod: -4294967295},
+ itd64{a: -9223372036854775807, b: -1, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0},
+ itd64{a: -9223372036854775807, b: 0, add: -9223372036854775807, sub: -9223372036854775807, mul: 0},
+ itd64{a: -9223372036854775807, b: 1, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0},
+ itd64{a: -9223372036854775807, b: 4294967296, add: -9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: -2147483647, mod: -4294967295},
+ itd64{a: -9223372036854775807, b: 9223372036854775806, add: -1, sub: 3, mul: 9223372036854775806, div: -1, mod: -1},
+ itd64{a: -9223372036854775807, b: 9223372036854775807, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd64{a: -4294967296, b: -9223372036854775808, add: 9223372032559808512, sub: 9223372032559808512, mul: 0, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: -9223372036854775807, add: 9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: -4294967296, add: -8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: -4294967296, b: -1, add: -4294967297, sub: -4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ itd64{a: -4294967296, b: 0, add: -4294967296, sub: -4294967296, mul: 0},
+ itd64{a: -4294967296, b: 1, add: -4294967295, sub: -4294967297, mul: -4294967296, div: -4294967296, mod: 0},
+ itd64{a: -4294967296, b: 4294967296, add: 0, sub: -8589934592, mul: 0, div: -1, mod: 0},
+ itd64{a: -4294967296, b: 9223372036854775806, add: 9223372032559808510, sub: 9223372032559808514, mul: 8589934592, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: 9223372036854775807, add: 9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: 0, mod: -4294967296},
+ itd64{a: -1, b: -9223372036854775808, add: 9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: 0, mod: -1},
+ itd64{a: -1, b: -9223372036854775807, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 0, mod: -1},
+ itd64{a: -1, b: -4294967296, add: -4294967297, sub: 4294967295, mul: 4294967296, div: 0, mod: -1},
+ itd64{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd64{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd64{a: -1, b: 4294967296, add: 4294967295, sub: -4294967297, mul: -4294967296, div: 0, mod: -1},
+ itd64{a: -1, b: 9223372036854775806, add: 9223372036854775805, sub: -9223372036854775807, mul: -9223372036854775806, div: 0, mod: -1},
+ itd64{a: -1, b: 9223372036854775807, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: -1},
+ itd64{a: 0, b: -9223372036854775808, add: -9223372036854775808, sub: -9223372036854775808, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -9223372036854775807, add: -9223372036854775807, sub: 9223372036854775807, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -4294967296, add: -4294967296, sub: 4294967296, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd64{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 4294967296, add: 4294967296, sub: -4294967296, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 9223372036854775806, add: 9223372036854775806, sub: -9223372036854775806, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 9223372036854775807, add: 9223372036854775807, sub: -9223372036854775807, mul: 0, div: 0, mod: 0},
+ itd64{a: 1, b: -9223372036854775808, add: -9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: 0, mod: 1},
+ itd64{a: 1, b: -9223372036854775807, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: 1},
+ itd64{a: 1, b: -4294967296, add: -4294967295, sub: 4294967297, mul: -4294967296, div: 0, mod: 1},
+ itd64{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd64{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: 1, b: 4294967296, add: 4294967297, sub: -4294967295, mul: 4294967296, div: 0, mod: 1},
+ itd64{a: 1, b: 9223372036854775806, add: 9223372036854775807, sub: -9223372036854775805, mul: 9223372036854775806, div: 0, mod: 1},
+ itd64{a: 1, b: 9223372036854775807, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 0, mod: 1},
+ itd64{a: 4294967296, b: -9223372036854775808, add: -9223372032559808512, sub: -9223372032559808512, mul: 0, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: -9223372036854775807, add: -9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: -4294967296, add: 0, sub: 8589934592, mul: 0, div: -1, mod: 0},
+ itd64{a: 4294967296, b: -1, add: 4294967295, sub: 4294967297, mul: -4294967296, div: -4294967296, mod: 0},
+ itd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0},
+ itd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ itd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: 4294967296, b: 9223372036854775806, add: -9223372032559808514, sub: -9223372032559808510, mul: -8589934592, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: 9223372036854775807, add: -9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 0, mod: 4294967296},
+ itd64{a: 9223372036854775806, b: -9223372036854775808, add: -2, sub: -2, mul: 0, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775806, b: -9223372036854775807, add: -1, sub: -3, mul: 9223372036854775806, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775806, b: -4294967296, add: 9223372032559808510, sub: -9223372032559808514, mul: 8589934592, div: -2147483647, mod: 4294967294},
+ itd64{a: 9223372036854775806, b: -1, add: 9223372036854775805, sub: 9223372036854775807, mul: -9223372036854775806, div: -9223372036854775806, mod: 0},
+ itd64{a: 9223372036854775806, b: 0, add: 9223372036854775806, sub: 9223372036854775806, mul: 0},
+ itd64{a: 9223372036854775806, b: 1, add: 9223372036854775807, sub: 9223372036854775805, mul: 9223372036854775806, div: 9223372036854775806, mod: 0},
+ itd64{a: 9223372036854775806, b: 4294967296, add: -9223372032559808514, sub: 9223372032559808510, mul: -8589934592, div: 2147483647, mod: 4294967294},
+ itd64{a: 9223372036854775806, b: 9223372036854775806, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd64{a: 9223372036854775806, b: 9223372036854775807, add: -3, sub: -1, mul: -9223372036854775806, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775807, b: -9223372036854775808, add: -1, sub: -1, mul: -9223372036854775808, div: 0, mod: 9223372036854775807},
+ itd64{a: 9223372036854775807, b: -9223372036854775807, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd64{a: 9223372036854775807, b: -4294967296, add: 9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: -2147483647, mod: 4294967295},
+ itd64{a: 9223372036854775807, b: -1, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0},
+ itd64{a: 9223372036854775807, b: 0, add: 9223372036854775807, sub: 9223372036854775807, mul: 0},
+ itd64{a: 9223372036854775807, b: 1, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0},
+ itd64{a: 9223372036854775807, b: 4294967296, add: -9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 2147483647, mod: 4294967295},
+ itd64{a: 9223372036854775807, b: 9223372036854775806, add: -3, sub: 1, mul: -9223372036854775806, div: 1, mod: 1},
+ itd64{a: 9223372036854775807, b: 9223372036854775807, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint32_data []utd32 = []utd32{utd32{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd32{a: 0, b: 1, add: 1, sub: 4294967295, mul: 0, div: 0, mod: 0},
+ utd32{a: 0, b: 4294967295, add: 4294967295, sub: 1, mul: 0, div: 0, mod: 0},
+ utd32{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd32{a: 1, b: 4294967295, add: 0, sub: 2, mul: 4294967295, div: 0, mod: 1},
+ utd32{a: 4294967295, b: 0, add: 4294967295, sub: 4294967295, mul: 0},
+ utd32{a: 4294967295, b: 1, add: 0, sub: 4294967294, mul: 4294967295, div: 4294967295, mod: 0},
+ utd32{a: 4294967295, b: 4294967295, add: 4294967294, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int32_data []itd32 = []itd32{itd32{a: -2147483648, b: -2147483648, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd32{a: -2147483648, b: -2147483647, add: 1, sub: -1, mul: -2147483648, div: 1, mod: -1},
+ itd32{a: -2147483648, b: -1, add: 2147483647, sub: -2147483647, mul: -2147483648, div: -2147483648, mod: 0},
+ itd32{a: -2147483648, b: 0, add: -2147483648, sub: -2147483648, mul: 0},
+ itd32{a: -2147483648, b: 1, add: -2147483647, sub: 2147483647, mul: -2147483648, div: -2147483648, mod: 0},
+ itd32{a: -2147483648, b: 2147483647, add: -1, sub: 1, mul: -2147483648, div: -1, mod: -1},
+ itd32{a: -2147483647, b: -2147483648, add: 1, sub: 1, mul: -2147483648, div: 0, mod: -2147483647},
+ itd32{a: -2147483647, b: -2147483647, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: -2147483647, b: -1, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 2147483647, mod: 0},
+ itd32{a: -2147483647, b: 0, add: -2147483647, sub: -2147483647, mul: 0},
+ itd32{a: -2147483647, b: 1, add: -2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0},
+ itd32{a: -2147483647, b: 2147483647, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd32{a: -1, b: -2147483648, add: 2147483647, sub: 2147483647, mul: -2147483648, div: 0, mod: -1},
+ itd32{a: -1, b: -2147483647, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 0, mod: -1},
+ itd32{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd32{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd32{a: -1, b: 2147483647, add: 2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: -1},
+ itd32{a: 0, b: -2147483648, add: -2147483648, sub: -2147483648, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: -2147483647, add: -2147483647, sub: 2147483647, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd32{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: 2147483647, add: 2147483647, sub: -2147483647, mul: 0, div: 0, mod: 0},
+ itd32{a: 1, b: -2147483648, add: -2147483647, sub: -2147483647, mul: -2147483648, div: 0, mod: 1},
+ itd32{a: 1, b: -2147483647, add: -2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: 1},
+ itd32{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd32{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: 1, b: 2147483647, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 0, mod: 1},
+ itd32{a: 2147483647, b: -2147483648, add: -1, sub: -1, mul: -2147483648, div: 0, mod: 2147483647},
+ itd32{a: 2147483647, b: -2147483647, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd32{a: 2147483647, b: -1, add: 2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0},
+ itd32{a: 2147483647, b: 0, add: 2147483647, sub: 2147483647, mul: 0},
+ itd32{a: 2147483647, b: 1, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 2147483647, mod: 0},
+ itd32{a: 2147483647, b: 2147483647, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint16_data []utd16 = []utd16{utd16{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd16{a: 0, b: 1, add: 1, sub: 65535, mul: 0, div: 0, mod: 0},
+ utd16{a: 0, b: 65535, add: 65535, sub: 1, mul: 0, div: 0, mod: 0},
+ utd16{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd16{a: 1, b: 65535, add: 0, sub: 2, mul: 65535, div: 0, mod: 1},
+ utd16{a: 65535, b: 0, add: 65535, sub: 65535, mul: 0},
+ utd16{a: 65535, b: 1, add: 0, sub: 65534, mul: 65535, div: 65535, mod: 0},
+ utd16{a: 65535, b: 65535, add: 65534, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int16_data []itd16 = []itd16{itd16{a: -32768, b: -32768, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd16{a: -32768, b: -32767, add: 1, sub: -1, mul: -32768, div: 1, mod: -1},
+ itd16{a: -32768, b: -1, add: 32767, sub: -32767, mul: -32768, div: -32768, mod: 0},
+ itd16{a: -32768, b: 0, add: -32768, sub: -32768, mul: 0},
+ itd16{a: -32768, b: 1, add: -32767, sub: 32767, mul: -32768, div: -32768, mod: 0},
+ itd16{a: -32768, b: 32766, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd16{a: -32768, b: 32767, add: -1, sub: 1, mul: -32768, div: -1, mod: -1},
+ itd16{a: -32767, b: -32768, add: 1, sub: 1, mul: -32768, div: 0, mod: -32767},
+ itd16{a: -32767, b: -32767, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: -32767, b: -1, add: -32768, sub: -32766, mul: 32767, div: 32767, mod: 0},
+ itd16{a: -32767, b: 0, add: -32767, sub: -32767, mul: 0},
+ itd16{a: -32767, b: 1, add: -32766, sub: -32768, mul: -32767, div: -32767, mod: 0},
+ itd16{a: -32767, b: 32766, add: -1, sub: 3, mul: 32766, div: -1, mod: -1},
+ itd16{a: -32767, b: 32767, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd16{a: -1, b: -32768, add: 32767, sub: 32767, mul: -32768, div: 0, mod: -1},
+ itd16{a: -1, b: -32767, add: -32768, sub: 32766, mul: 32767, div: 0, mod: -1},
+ itd16{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd16{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd16{a: -1, b: 32766, add: 32765, sub: -32767, mul: -32766, div: 0, mod: -1},
+ itd16{a: -1, b: 32767, add: 32766, sub: -32768, mul: -32767, div: 0, mod: -1},
+ itd16{a: 0, b: -32768, add: -32768, sub: -32768, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: -32767, add: -32767, sub: 32767, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd16{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 32766, add: 32766, sub: -32766, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 32767, add: 32767, sub: -32767, mul: 0, div: 0, mod: 0},
+ itd16{a: 1, b: -32768, add: -32767, sub: -32767, mul: -32768, div: 0, mod: 1},
+ itd16{a: 1, b: -32767, add: -32766, sub: -32768, mul: -32767, div: 0, mod: 1},
+ itd16{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd16{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: 1, b: 32766, add: 32767, sub: -32765, mul: 32766, div: 0, mod: 1},
+ itd16{a: 1, b: 32767, add: -32768, sub: -32766, mul: 32767, div: 0, mod: 1},
+ itd16{a: 32766, b: -32768, add: -2, sub: -2, mul: 0, div: 0, mod: 32766},
+ itd16{a: 32766, b: -32767, add: -1, sub: -3, mul: 32766, div: 0, mod: 32766},
+ itd16{a: 32766, b: -1, add: 32765, sub: 32767, mul: -32766, div: -32766, mod: 0},
+ itd16{a: 32766, b: 0, add: 32766, sub: 32766, mul: 0},
+ itd16{a: 32766, b: 1, add: 32767, sub: 32765, mul: 32766, div: 32766, mod: 0},
+ itd16{a: 32766, b: 32766, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd16{a: 32766, b: 32767, add: -3, sub: -1, mul: -32766, div: 0, mod: 32766},
+ itd16{a: 32767, b: -32768, add: -1, sub: -1, mul: -32768, div: 0, mod: 32767},
+ itd16{a: 32767, b: -32767, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd16{a: 32767, b: -1, add: 32766, sub: -32768, mul: -32767, div: -32767, mod: 0},
+ itd16{a: 32767, b: 0, add: 32767, sub: 32767, mul: 0},
+ itd16{a: 32767, b: 1, add: -32768, sub: 32766, mul: 32767, div: 32767, mod: 0},
+ itd16{a: 32767, b: 32766, add: -3, sub: 1, mul: -32766, div: 1, mod: 1},
+ itd16{a: 32767, b: 32767, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint8_data []utd8 = []utd8{utd8{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd8{a: 0, b: 1, add: 1, sub: 255, mul: 0, div: 0, mod: 0},
+ utd8{a: 0, b: 255, add: 255, sub: 1, mul: 0, div: 0, mod: 0},
+ utd8{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd8{a: 1, b: 255, add: 0, sub: 2, mul: 255, div: 0, mod: 1},
+ utd8{a: 255, b: 0, add: 255, sub: 255, mul: 0},
+ utd8{a: 255, b: 1, add: 0, sub: 254, mul: 255, div: 255, mod: 0},
+ utd8{a: 255, b: 255, add: 254, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int8_data []itd8 = []itd8{itd8{a: -128, b: -128, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd8{a: -128, b: -127, add: 1, sub: -1, mul: -128, div: 1, mod: -1},
+ itd8{a: -128, b: -1, add: 127, sub: -127, mul: -128, div: -128, mod: 0},
+ itd8{a: -128, b: 0, add: -128, sub: -128, mul: 0},
+ itd8{a: -128, b: 1, add: -127, sub: 127, mul: -128, div: -128, mod: 0},
+ itd8{a: -128, b: 126, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd8{a: -128, b: 127, add: -1, sub: 1, mul: -128, div: -1, mod: -1},
+ itd8{a: -127, b: -128, add: 1, sub: 1, mul: -128, div: 0, mod: -127},
+ itd8{a: -127, b: -127, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: -127, b: -1, add: -128, sub: -126, mul: 127, div: 127, mod: 0},
+ itd8{a: -127, b: 0, add: -127, sub: -127, mul: 0},
+ itd8{a: -127, b: 1, add: -126, sub: -128, mul: -127, div: -127, mod: 0},
+ itd8{a: -127, b: 126, add: -1, sub: 3, mul: 126, div: -1, mod: -1},
+ itd8{a: -127, b: 127, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd8{a: -1, b: -128, add: 127, sub: 127, mul: -128, div: 0, mod: -1},
+ itd8{a: -1, b: -127, add: -128, sub: 126, mul: 127, div: 0, mod: -1},
+ itd8{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd8{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd8{a: -1, b: 126, add: 125, sub: -127, mul: -126, div: 0, mod: -1},
+ itd8{a: -1, b: 127, add: 126, sub: -128, mul: -127, div: 0, mod: -1},
+ itd8{a: 0, b: -128, add: -128, sub: -128, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: -127, add: -127, sub: 127, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd8{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 126, add: 126, sub: -126, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 127, add: 127, sub: -127, mul: 0, div: 0, mod: 0},
+ itd8{a: 1, b: -128, add: -127, sub: -127, mul: -128, div: 0, mod: 1},
+ itd8{a: 1, b: -127, add: -126, sub: -128, mul: -127, div: 0, mod: 1},
+ itd8{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd8{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: 1, b: 126, add: 127, sub: -125, mul: 126, div: 0, mod: 1},
+ itd8{a: 1, b: 127, add: -128, sub: -126, mul: 127, div: 0, mod: 1},
+ itd8{a: 126, b: -128, add: -2, sub: -2, mul: 0, div: 0, mod: 126},
+ itd8{a: 126, b: -127, add: -1, sub: -3, mul: 126, div: 0, mod: 126},
+ itd8{a: 126, b: -1, add: 125, sub: 127, mul: -126, div: -126, mod: 0},
+ itd8{a: 126, b: 0, add: 126, sub: 126, mul: 0},
+ itd8{a: 126, b: 1, add: 127, sub: 125, mul: 126, div: 126, mod: 0},
+ itd8{a: 126, b: 126, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd8{a: 126, b: 127, add: -3, sub: -1, mul: -126, div: 0, mod: 126},
+ itd8{a: 127, b: -128, add: -1, sub: -1, mul: -128, div: 0, mod: 127},
+ itd8{a: 127, b: -127, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd8{a: 127, b: -1, add: 126, sub: -128, mul: -127, div: -127, mod: 0},
+ itd8{a: 127, b: 0, add: 127, sub: 127, mul: 0},
+ itd8{a: 127, b: 1, add: -128, sub: 126, mul: 127, div: 127, mod: 0},
+ itd8{a: 127, b: 126, add: -3, sub: 1, mul: -126, div: 1, mod: 1},
+ itd8{a: 127, b: 127, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+
+//TestArithmeticBoundary tests boundary results for arithmetic operations.
+func TestArithmeticBoundary(t *testing.T) {
+
+ for _, v := range uint64_data {
+ if got := add_uint64_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint64_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint64_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint64_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint64_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int64_data {
+ if got := add_int64_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int64_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int64_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int64_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int64_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint32_data {
+ if got := add_uint32_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint32_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint32_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint32_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint32_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int32_data {
+ if got := add_int32_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int32_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int32_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int32_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int32_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint16_data {
+ if got := add_uint16_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint16_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint16_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint16_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint16_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int16_data {
+ if got := add_int16_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int16_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int16_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int16_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int16_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint8_data {
+ if got := add_uint8_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint8_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint8_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint8_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint8_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int8_data {
+ if got := add_int8_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int8_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int8_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int8_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int8_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/arithConst_test.go b/src/cmd/compile/internal/test/testdata/arithConst_test.go
new file mode 100644
index 0000000..9f5ac61
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/arithConst_test.go
@@ -0,0 +1,9570 @@
+// Code generated by gen/arithConstGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+//go:noinline
+func add_uint64_0(a uint64) uint64 { return a + 0 }
+
+//go:noinline
+func add_0_uint64(a uint64) uint64 { return 0 + a }
+
+//go:noinline
+func add_uint64_1(a uint64) uint64 { return a + 1 }
+
+//go:noinline
+func add_1_uint64(a uint64) uint64 { return 1 + a }
+
+//go:noinline
+func add_uint64_4294967296(a uint64) uint64 { return a + 4294967296 }
+
+//go:noinline
+func add_4294967296_uint64(a uint64) uint64 { return 4294967296 + a }
+
+//go:noinline
+func add_uint64_9223372036854775808(a uint64) uint64 { return a + 9223372036854775808 }
+
+//go:noinline
+func add_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 + a }
+
+//go:noinline
+func add_uint64_18446744073709551615(a uint64) uint64 { return a + 18446744073709551615 }
+
+//go:noinline
+func add_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 + a }
+
+//go:noinline
+func sub_uint64_0(a uint64) uint64 { return a - 0 }
+
+//go:noinline
+func sub_0_uint64(a uint64) uint64 { return 0 - a }
+
+//go:noinline
+func sub_uint64_1(a uint64) uint64 { return a - 1 }
+
+//go:noinline
+func sub_1_uint64(a uint64) uint64 { return 1 - a }
+
+//go:noinline
+func sub_uint64_4294967296(a uint64) uint64 { return a - 4294967296 }
+
+//go:noinline
+func sub_4294967296_uint64(a uint64) uint64 { return 4294967296 - a }
+
+//go:noinline
+func sub_uint64_9223372036854775808(a uint64) uint64 { return a - 9223372036854775808 }
+
+//go:noinline
+func sub_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 - a }
+
+//go:noinline
+func sub_uint64_18446744073709551615(a uint64) uint64 { return a - 18446744073709551615 }
+
+//go:noinline
+func sub_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 - a }
+
+//go:noinline
+func div_0_uint64(a uint64) uint64 { return 0 / a }
+
+//go:noinline
+func div_uint64_1(a uint64) uint64 { return a / 1 }
+
+//go:noinline
+func div_1_uint64(a uint64) uint64 { return 1 / a }
+
+//go:noinline
+func div_uint64_4294967296(a uint64) uint64 { return a / 4294967296 }
+
+//go:noinline
+func div_4294967296_uint64(a uint64) uint64 { return 4294967296 / a }
+
+//go:noinline
+func div_uint64_9223372036854775808(a uint64) uint64 { return a / 9223372036854775808 }
+
+//go:noinline
+func div_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 / a }
+
+//go:noinline
+func div_uint64_18446744073709551615(a uint64) uint64 { return a / 18446744073709551615 }
+
+//go:noinline
+func div_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 / a }
+
+//go:noinline
+func mul_uint64_0(a uint64) uint64 { return a * 0 }
+
+//go:noinline
+func mul_0_uint64(a uint64) uint64 { return 0 * a }
+
+//go:noinline
+func mul_uint64_1(a uint64) uint64 { return a * 1 }
+
+//go:noinline
+func mul_1_uint64(a uint64) uint64 { return 1 * a }
+
+//go:noinline
+func mul_uint64_4294967296(a uint64) uint64 { return a * 4294967296 }
+
+//go:noinline
+func mul_4294967296_uint64(a uint64) uint64 { return 4294967296 * a }
+
+//go:noinline
+func mul_uint64_9223372036854775808(a uint64) uint64 { return a * 9223372036854775808 }
+
+//go:noinline
+func mul_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 * a }
+
+//go:noinline
+func mul_uint64_18446744073709551615(a uint64) uint64 { return a * 18446744073709551615 }
+
+//go:noinline
+func mul_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 * a }
+
+//go:noinline
+func lsh_uint64_0(a uint64) uint64 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint64(a uint64) uint64 { return 0 << a }
+
+//go:noinline
+func lsh_uint64_1(a uint64) uint64 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint64(a uint64) uint64 { return 1 << a }
+
+//go:noinline
+func lsh_uint64_4294967296(a uint64) uint64 { return a << uint64(4294967296) }
+
+//go:noinline
+func lsh_4294967296_uint64(a uint64) uint64 { return 4294967296 << a }
+
+//go:noinline
+func lsh_uint64_9223372036854775808(a uint64) uint64 { return a << uint64(9223372036854775808) }
+
+//go:noinline
+func lsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 << a }
+
+//go:noinline
+func lsh_uint64_18446744073709551615(a uint64) uint64 { return a << uint64(18446744073709551615) }
+
+//go:noinline
+func lsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 << a }
+
+//go:noinline
+func rsh_uint64_0(a uint64) uint64 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint64(a uint64) uint64 { return 0 >> a }
+
+//go:noinline
+func rsh_uint64_1(a uint64) uint64 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint64(a uint64) uint64 { return 1 >> a }
+
+//go:noinline
+func rsh_uint64_4294967296(a uint64) uint64 { return a >> uint64(4294967296) }
+
+//go:noinline
+func rsh_4294967296_uint64(a uint64) uint64 { return 4294967296 >> a }
+
+//go:noinline
+func rsh_uint64_9223372036854775808(a uint64) uint64 { return a >> uint64(9223372036854775808) }
+
+//go:noinline
+func rsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 >> a }
+
+//go:noinline
+func rsh_uint64_18446744073709551615(a uint64) uint64 { return a >> uint64(18446744073709551615) }
+
+//go:noinline
+func rsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 >> a }
+
+//go:noinline
+func mod_0_uint64(a uint64) uint64 { return 0 % a }
+
+//go:noinline
+func mod_uint64_1(a uint64) uint64 { return a % 1 }
+
+//go:noinline
+func mod_1_uint64(a uint64) uint64 { return 1 % a }
+
+//go:noinline
+func mod_uint64_4294967296(a uint64) uint64 { return a % 4294967296 }
+
+//go:noinline
+func mod_4294967296_uint64(a uint64) uint64 { return 4294967296 % a }
+
+//go:noinline
+func mod_uint64_9223372036854775808(a uint64) uint64 { return a % 9223372036854775808 }
+
+//go:noinline
+func mod_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 % a }
+
+//go:noinline
+func mod_uint64_18446744073709551615(a uint64) uint64 { return a % 18446744073709551615 }
+
+//go:noinline
+func mod_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 % a }
+
+//go:noinline
+func and_uint64_0(a uint64) uint64 { return a & 0 }
+
+//go:noinline
+func and_0_uint64(a uint64) uint64 { return 0 & a }
+
+//go:noinline
+func and_uint64_1(a uint64) uint64 { return a & 1 }
+
+//go:noinline
+func and_1_uint64(a uint64) uint64 { return 1 & a }
+
+//go:noinline
+func and_uint64_4294967296(a uint64) uint64 { return a & 4294967296 }
+
+//go:noinline
+func and_4294967296_uint64(a uint64) uint64 { return 4294967296 & a }
+
+//go:noinline
+func and_uint64_9223372036854775808(a uint64) uint64 { return a & 9223372036854775808 }
+
+//go:noinline
+func and_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 & a }
+
+//go:noinline
+func and_uint64_18446744073709551615(a uint64) uint64 { return a & 18446744073709551615 }
+
+//go:noinline
+func and_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 & a }
+
+//go:noinline
+func or_uint64_0(a uint64) uint64 { return a | 0 }
+
+//go:noinline
+func or_0_uint64(a uint64) uint64 { return 0 | a }
+
+//go:noinline
+func or_uint64_1(a uint64) uint64 { return a | 1 }
+
+//go:noinline
+func or_1_uint64(a uint64) uint64 { return 1 | a }
+
+//go:noinline
+func or_uint64_4294967296(a uint64) uint64 { return a | 4294967296 }
+
+//go:noinline
+func or_4294967296_uint64(a uint64) uint64 { return 4294967296 | a }
+
+//go:noinline
+func or_uint64_9223372036854775808(a uint64) uint64 { return a | 9223372036854775808 }
+
+//go:noinline
+func or_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 | a }
+
+//go:noinline
+func or_uint64_18446744073709551615(a uint64) uint64 { return a | 18446744073709551615 }
+
+//go:noinline
+func or_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 | a }
+
+//go:noinline
+func xor_uint64_0(a uint64) uint64 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint64(a uint64) uint64 { return 0 ^ a }
+
+//go:noinline
+func xor_uint64_1(a uint64) uint64 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint64(a uint64) uint64 { return 1 ^ a }
+
+//go:noinline
+func xor_uint64_4294967296(a uint64) uint64 { return a ^ 4294967296 }
+
+//go:noinline
+func xor_4294967296_uint64(a uint64) uint64 { return 4294967296 ^ a }
+
+//go:noinline
+func xor_uint64_9223372036854775808(a uint64) uint64 { return a ^ 9223372036854775808 }
+
+//go:noinline
+func xor_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 ^ a }
+
+//go:noinline
+func xor_uint64_18446744073709551615(a uint64) uint64 { return a ^ 18446744073709551615 }
+
+//go:noinline
+func xor_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 ^ a }
+
+//go:noinline
+func mul_uint64_3(a uint64) uint64 { return a * 3 }
+
+//go:noinline
+func mul_3_uint64(a uint64) uint64 { return 3 * a }
+
+//go:noinline
+func mul_uint64_5(a uint64) uint64 { return a * 5 }
+
+//go:noinline
+func mul_5_uint64(a uint64) uint64 { return 5 * a }
+
+//go:noinline
+func mul_uint64_7(a uint64) uint64 { return a * 7 }
+
+//go:noinline
+func mul_7_uint64(a uint64) uint64 { return 7 * a }
+
+//go:noinline
+func mul_uint64_9(a uint64) uint64 { return a * 9 }
+
+//go:noinline
+func mul_9_uint64(a uint64) uint64 { return 9 * a }
+
+//go:noinline
+func mul_uint64_10(a uint64) uint64 { return a * 10 }
+
+//go:noinline
+func mul_10_uint64(a uint64) uint64 { return 10 * a }
+
+//go:noinline
+func mul_uint64_11(a uint64) uint64 { return a * 11 }
+
+//go:noinline
+func mul_11_uint64(a uint64) uint64 { return 11 * a }
+
+//go:noinline
+func mul_uint64_13(a uint64) uint64 { return a * 13 }
+
+//go:noinline
+func mul_13_uint64(a uint64) uint64 { return 13 * a }
+
+//go:noinline
+func mul_uint64_19(a uint64) uint64 { return a * 19 }
+
+//go:noinline
+func mul_19_uint64(a uint64) uint64 { return 19 * a }
+
+//go:noinline
+func mul_uint64_21(a uint64) uint64 { return a * 21 }
+
+//go:noinline
+func mul_21_uint64(a uint64) uint64 { return 21 * a }
+
+//go:noinline
+func mul_uint64_25(a uint64) uint64 { return a * 25 }
+
+//go:noinline
+func mul_25_uint64(a uint64) uint64 { return 25 * a }
+
+//go:noinline
+func mul_uint64_27(a uint64) uint64 { return a * 27 }
+
+//go:noinline
+func mul_27_uint64(a uint64) uint64 { return 27 * a }
+
+//go:noinline
+func mul_uint64_37(a uint64) uint64 { return a * 37 }
+
+//go:noinline
+func mul_37_uint64(a uint64) uint64 { return 37 * a }
+
+//go:noinline
+func mul_uint64_41(a uint64) uint64 { return a * 41 }
+
+//go:noinline
+func mul_41_uint64(a uint64) uint64 { return 41 * a }
+
+//go:noinline
+func mul_uint64_45(a uint64) uint64 { return a * 45 }
+
+//go:noinline
+func mul_45_uint64(a uint64) uint64 { return 45 * a }
+
+//go:noinline
+func mul_uint64_73(a uint64) uint64 { return a * 73 }
+
+//go:noinline
+func mul_73_uint64(a uint64) uint64 { return 73 * a }
+
+//go:noinline
+func mul_uint64_81(a uint64) uint64 { return a * 81 }
+
+//go:noinline
+func mul_81_uint64(a uint64) uint64 { return 81 * a }
+
+//go:noinline
+func add_int64_Neg9223372036854775808(a int64) int64 { return a + -9223372036854775808 }
+
+//go:noinline
+func add_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 + a }
+
+//go:noinline
+func add_int64_Neg9223372036854775807(a int64) int64 { return a + -9223372036854775807 }
+
+//go:noinline
+func add_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 + a }
+
+//go:noinline
+func add_int64_Neg4294967296(a int64) int64 { return a + -4294967296 }
+
+//go:noinline
+func add_Neg4294967296_int64(a int64) int64 { return -4294967296 + a }
+
+//go:noinline
+func add_int64_Neg1(a int64) int64 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int64(a int64) int64 { return -1 + a }
+
+//go:noinline
+func add_int64_0(a int64) int64 { return a + 0 }
+
+//go:noinline
+func add_0_int64(a int64) int64 { return 0 + a }
+
+//go:noinline
+func add_int64_1(a int64) int64 { return a + 1 }
+
+//go:noinline
+func add_1_int64(a int64) int64 { return 1 + a }
+
+//go:noinline
+func add_int64_4294967296(a int64) int64 { return a + 4294967296 }
+
+//go:noinline
+func add_4294967296_int64(a int64) int64 { return 4294967296 + a }
+
+//go:noinline
+func add_int64_9223372036854775806(a int64) int64 { return a + 9223372036854775806 }
+
+//go:noinline
+func add_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 + a }
+
+//go:noinline
+func add_int64_9223372036854775807(a int64) int64 { return a + 9223372036854775807 }
+
+//go:noinline
+func add_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 + a }
+
+//go:noinline
+func sub_int64_Neg9223372036854775808(a int64) int64 { return a - -9223372036854775808 }
+
+//go:noinline
+func sub_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 - a }
+
+//go:noinline
+func sub_int64_Neg9223372036854775807(a int64) int64 { return a - -9223372036854775807 }
+
+//go:noinline
+func sub_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 - a }
+
+//go:noinline
+func sub_int64_Neg4294967296(a int64) int64 { return a - -4294967296 }
+
+//go:noinline
+func sub_Neg4294967296_int64(a int64) int64 { return -4294967296 - a }
+
+//go:noinline
+func sub_int64_Neg1(a int64) int64 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int64(a int64) int64 { return -1 - a }
+
+//go:noinline
+func sub_int64_0(a int64) int64 { return a - 0 }
+
+//go:noinline
+func sub_0_int64(a int64) int64 { return 0 - a }
+
+//go:noinline
+func sub_int64_1(a int64) int64 { return a - 1 }
+
+//go:noinline
+func sub_1_int64(a int64) int64 { return 1 - a }
+
+//go:noinline
+func sub_int64_4294967296(a int64) int64 { return a - 4294967296 }
+
+//go:noinline
+func sub_4294967296_int64(a int64) int64 { return 4294967296 - a }
+
+//go:noinline
+func sub_int64_9223372036854775806(a int64) int64 { return a - 9223372036854775806 }
+
+//go:noinline
+func sub_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 - a }
+
+//go:noinline
+func sub_int64_9223372036854775807(a int64) int64 { return a - 9223372036854775807 }
+
+//go:noinline
+func sub_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 - a }
+
+//go:noinline
+func div_int64_Neg9223372036854775808(a int64) int64 { return a / -9223372036854775808 }
+
+//go:noinline
+func div_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 / a }
+
+//go:noinline
+func div_int64_Neg9223372036854775807(a int64) int64 { return a / -9223372036854775807 }
+
+//go:noinline
+func div_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 / a }
+
+//go:noinline
+func div_int64_Neg4294967296(a int64) int64 { return a / -4294967296 }
+
+//go:noinline
+func div_Neg4294967296_int64(a int64) int64 { return -4294967296 / a }
+
+//go:noinline
+func div_int64_Neg1(a int64) int64 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int64(a int64) int64 { return -1 / a }
+
+//go:noinline
+func div_0_int64(a int64) int64 { return 0 / a }
+
+//go:noinline
+func div_int64_1(a int64) int64 { return a / 1 }
+
+//go:noinline
+func div_1_int64(a int64) int64 { return 1 / a }
+
+//go:noinline
+func div_int64_4294967296(a int64) int64 { return a / 4294967296 }
+
+//go:noinline
+func div_4294967296_int64(a int64) int64 { return 4294967296 / a }
+
+//go:noinline
+func div_int64_9223372036854775806(a int64) int64 { return a / 9223372036854775806 }
+
+//go:noinline
+func div_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 / a }
+
+//go:noinline
+func div_int64_9223372036854775807(a int64) int64 { return a / 9223372036854775807 }
+
+//go:noinline
+func div_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 / a }
+
+//go:noinline
+func mul_int64_Neg9223372036854775808(a int64) int64 { return a * -9223372036854775808 }
+
+//go:noinline
+func mul_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 * a }
+
+//go:noinline
+func mul_int64_Neg9223372036854775807(a int64) int64 { return a * -9223372036854775807 }
+
+//go:noinline
+func mul_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 * a }
+
+//go:noinline
+func mul_int64_Neg4294967296(a int64) int64 { return a * -4294967296 }
+
+//go:noinline
+func mul_Neg4294967296_int64(a int64) int64 { return -4294967296 * a }
+
+//go:noinline
+func mul_int64_Neg1(a int64) int64 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int64(a int64) int64 { return -1 * a }
+
+//go:noinline
+func mul_int64_0(a int64) int64 { return a * 0 }
+
+//go:noinline
+func mul_0_int64(a int64) int64 { return 0 * a }
+
+//go:noinline
+func mul_int64_1(a int64) int64 { return a * 1 }
+
+//go:noinline
+func mul_1_int64(a int64) int64 { return 1 * a }
+
+//go:noinline
+func mul_int64_4294967296(a int64) int64 { return a * 4294967296 }
+
+//go:noinline
+func mul_4294967296_int64(a int64) int64 { return 4294967296 * a }
+
+//go:noinline
+func mul_int64_9223372036854775806(a int64) int64 { return a * 9223372036854775806 }
+
+//go:noinline
+func mul_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 * a }
+
+//go:noinline
+func mul_int64_9223372036854775807(a int64) int64 { return a * 9223372036854775807 }
+
+//go:noinline
+func mul_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 * a }
+
+//go:noinline
+func mod_int64_Neg9223372036854775808(a int64) int64 { return a % -9223372036854775808 }
+
+//go:noinline
+func mod_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 % a }
+
+//go:noinline
+func mod_int64_Neg9223372036854775807(a int64) int64 { return a % -9223372036854775807 }
+
+//go:noinline
+func mod_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 % a }
+
+//go:noinline
+func mod_int64_Neg4294967296(a int64) int64 { return a % -4294967296 }
+
+//go:noinline
+func mod_Neg4294967296_int64(a int64) int64 { return -4294967296 % a }
+
+//go:noinline
+func mod_int64_Neg1(a int64) int64 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int64(a int64) int64 { return -1 % a }
+
+//go:noinline
+func mod_0_int64(a int64) int64 { return 0 % a }
+
+//go:noinline
+func mod_int64_1(a int64) int64 { return a % 1 }
+
+//go:noinline
+func mod_1_int64(a int64) int64 { return 1 % a }
+
+//go:noinline
+func mod_int64_4294967296(a int64) int64 { return a % 4294967296 }
+
+//go:noinline
+func mod_4294967296_int64(a int64) int64 { return 4294967296 % a }
+
+//go:noinline
+func mod_int64_9223372036854775806(a int64) int64 { return a % 9223372036854775806 }
+
+//go:noinline
+func mod_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 % a }
+
+//go:noinline
+func mod_int64_9223372036854775807(a int64) int64 { return a % 9223372036854775807 }
+
+//go:noinline
+func mod_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 % a }
+
+//go:noinline
+func and_int64_Neg9223372036854775808(a int64) int64 { return a & -9223372036854775808 }
+
+//go:noinline
+func and_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 & a }
+
+//go:noinline
+func and_int64_Neg9223372036854775807(a int64) int64 { return a & -9223372036854775807 }
+
+//go:noinline
+func and_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 & a }
+
+//go:noinline
+func and_int64_Neg4294967296(a int64) int64 { return a & -4294967296 }
+
+//go:noinline
+func and_Neg4294967296_int64(a int64) int64 { return -4294967296 & a }
+
+//go:noinline
+func and_int64_Neg1(a int64) int64 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int64(a int64) int64 { return -1 & a }
+
+//go:noinline
+func and_int64_0(a int64) int64 { return a & 0 }
+
+//go:noinline
+func and_0_int64(a int64) int64 { return 0 & a }
+
+//go:noinline
+func and_int64_1(a int64) int64 { return a & 1 }
+
+//go:noinline
+func and_1_int64(a int64) int64 { return 1 & a }
+
+//go:noinline
+func and_int64_4294967296(a int64) int64 { return a & 4294967296 }
+
+//go:noinline
+func and_4294967296_int64(a int64) int64 { return 4294967296 & a }
+
+//go:noinline
+func and_int64_9223372036854775806(a int64) int64 { return a & 9223372036854775806 }
+
+//go:noinline
+func and_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 & a }
+
+//go:noinline
+func and_int64_9223372036854775807(a int64) int64 { return a & 9223372036854775807 }
+
+//go:noinline
+func and_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 & a }
+
+//go:noinline
+func or_int64_Neg9223372036854775808(a int64) int64 { return a | -9223372036854775808 }
+
+//go:noinline
+func or_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 | a }
+
+//go:noinline
+func or_int64_Neg9223372036854775807(a int64) int64 { return a | -9223372036854775807 }
+
+//go:noinline
+func or_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 | a }
+
+//go:noinline
+func or_int64_Neg4294967296(a int64) int64 { return a | -4294967296 }
+
+//go:noinline
+func or_Neg4294967296_int64(a int64) int64 { return -4294967296 | a }
+
+//go:noinline
+func or_int64_Neg1(a int64) int64 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int64(a int64) int64 { return -1 | a }
+
+//go:noinline
+func or_int64_0(a int64) int64 { return a | 0 }
+
+//go:noinline
+func or_0_int64(a int64) int64 { return 0 | a }
+
+//go:noinline
+func or_int64_1(a int64) int64 { return a | 1 }
+
+//go:noinline
+func or_1_int64(a int64) int64 { return 1 | a }
+
+//go:noinline
+func or_int64_4294967296(a int64) int64 { return a | 4294967296 }
+
+//go:noinline
+func or_4294967296_int64(a int64) int64 { return 4294967296 | a }
+
+//go:noinline
+func or_int64_9223372036854775806(a int64) int64 { return a | 9223372036854775806 }
+
+//go:noinline
+func or_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 | a }
+
+//go:noinline
+func or_int64_9223372036854775807(a int64) int64 { return a | 9223372036854775807 }
+
+//go:noinline
+func or_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 | a }
+
+//go:noinline
+func xor_int64_Neg9223372036854775808(a int64) int64 { return a ^ -9223372036854775808 }
+
+//go:noinline
+func xor_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 ^ a }
+
+//go:noinline
+func xor_int64_Neg9223372036854775807(a int64) int64 { return a ^ -9223372036854775807 }
+
+//go:noinline
+func xor_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 ^ a }
+
+//go:noinline
+func xor_int64_Neg4294967296(a int64) int64 { return a ^ -4294967296 }
+
+//go:noinline
+func xor_Neg4294967296_int64(a int64) int64 { return -4294967296 ^ a }
+
+//go:noinline
+func xor_int64_Neg1(a int64) int64 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int64(a int64) int64 { return -1 ^ a }
+
+//go:noinline
+func xor_int64_0(a int64) int64 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int64(a int64) int64 { return 0 ^ a }
+
+//go:noinline
+func xor_int64_1(a int64) int64 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int64(a int64) int64 { return 1 ^ a }
+
+//go:noinline
+func xor_int64_4294967296(a int64) int64 { return a ^ 4294967296 }
+
+//go:noinline
+func xor_4294967296_int64(a int64) int64 { return 4294967296 ^ a }
+
+//go:noinline
+func xor_int64_9223372036854775806(a int64) int64 { return a ^ 9223372036854775806 }
+
+//go:noinline
+func xor_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 ^ a }
+
+//go:noinline
+func xor_int64_9223372036854775807(a int64) int64 { return a ^ 9223372036854775807 }
+
+//go:noinline
+func xor_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 ^ a }
+
+//go:noinline
+func mul_int64_Neg9(a int64) int64 { return a * -9 }
+
+//go:noinline
+func mul_Neg9_int64(a int64) int64 { return -9 * a }
+
+//go:noinline
+func mul_int64_Neg5(a int64) int64 { return a * -5 }
+
+//go:noinline
+func mul_Neg5_int64(a int64) int64 { return -5 * a }
+
+//go:noinline
+func mul_int64_Neg3(a int64) int64 { return a * -3 }
+
+//go:noinline
+func mul_Neg3_int64(a int64) int64 { return -3 * a }
+
+//go:noinline
+func mul_int64_3(a int64) int64 { return a * 3 }
+
+//go:noinline
+func mul_3_int64(a int64) int64 { return 3 * a }
+
+//go:noinline
+func mul_int64_5(a int64) int64 { return a * 5 }
+
+//go:noinline
+func mul_5_int64(a int64) int64 { return 5 * a }
+
+//go:noinline
+func mul_int64_7(a int64) int64 { return a * 7 }
+
+//go:noinline
+func mul_7_int64(a int64) int64 { return 7 * a }
+
+//go:noinline
+func mul_int64_9(a int64) int64 { return a * 9 }
+
+//go:noinline
+func mul_9_int64(a int64) int64 { return 9 * a }
+
+//go:noinline
+func mul_int64_10(a int64) int64 { return a * 10 }
+
+//go:noinline
+func mul_10_int64(a int64) int64 { return 10 * a }
+
+//go:noinline
+func mul_int64_11(a int64) int64 { return a * 11 }
+
+//go:noinline
+func mul_11_int64(a int64) int64 { return 11 * a }
+
+//go:noinline
+func mul_int64_13(a int64) int64 { return a * 13 }
+
+//go:noinline
+func mul_13_int64(a int64) int64 { return 13 * a }
+
+//go:noinline
+func mul_int64_19(a int64) int64 { return a * 19 }
+
+//go:noinline
+func mul_19_int64(a int64) int64 { return 19 * a }
+
+//go:noinline
+func mul_int64_21(a int64) int64 { return a * 21 }
+
+//go:noinline
+func mul_21_int64(a int64) int64 { return 21 * a }
+
+//go:noinline
+func mul_int64_25(a int64) int64 { return a * 25 }
+
+//go:noinline
+func mul_25_int64(a int64) int64 { return 25 * a }
+
+//go:noinline
+func mul_int64_27(a int64) int64 { return a * 27 }
+
+//go:noinline
+func mul_27_int64(a int64) int64 { return 27 * a }
+
+//go:noinline
+func mul_int64_37(a int64) int64 { return a * 37 }
+
+//go:noinline
+func mul_37_int64(a int64) int64 { return 37 * a }
+
+//go:noinline
+func mul_int64_41(a int64) int64 { return a * 41 }
+
+//go:noinline
+func mul_41_int64(a int64) int64 { return 41 * a }
+
+//go:noinline
+func mul_int64_45(a int64) int64 { return a * 45 }
+
+//go:noinline
+func mul_45_int64(a int64) int64 { return 45 * a }
+
+//go:noinline
+func mul_int64_73(a int64) int64 { return a * 73 }
+
+//go:noinline
+func mul_73_int64(a int64) int64 { return 73 * a }
+
+//go:noinline
+func mul_int64_81(a int64) int64 { return a * 81 }
+
+//go:noinline
+func mul_81_int64(a int64) int64 { return 81 * a }
+
+//go:noinline
+func add_uint32_0(a uint32) uint32 { return a + 0 }
+
+//go:noinline
+func add_0_uint32(a uint32) uint32 { return 0 + a }
+
+//go:noinline
+func add_uint32_1(a uint32) uint32 { return a + 1 }
+
+//go:noinline
+func add_1_uint32(a uint32) uint32 { return 1 + a }
+
+//go:noinline
+func add_uint32_4294967295(a uint32) uint32 { return a + 4294967295 }
+
+//go:noinline
+func add_4294967295_uint32(a uint32) uint32 { return 4294967295 + a }
+
+//go:noinline
+func sub_uint32_0(a uint32) uint32 { return a - 0 }
+
+//go:noinline
+func sub_0_uint32(a uint32) uint32 { return 0 - a }
+
+//go:noinline
+func sub_uint32_1(a uint32) uint32 { return a - 1 }
+
+//go:noinline
+func sub_1_uint32(a uint32) uint32 { return 1 - a }
+
+//go:noinline
+func sub_uint32_4294967295(a uint32) uint32 { return a - 4294967295 }
+
+//go:noinline
+func sub_4294967295_uint32(a uint32) uint32 { return 4294967295 - a }
+
+//go:noinline
+func div_0_uint32(a uint32) uint32 { return 0 / a }
+
+//go:noinline
+func div_uint32_1(a uint32) uint32 { return a / 1 }
+
+//go:noinline
+func div_1_uint32(a uint32) uint32 { return 1 / a }
+
+//go:noinline
+func div_uint32_4294967295(a uint32) uint32 { return a / 4294967295 }
+
+//go:noinline
+func div_4294967295_uint32(a uint32) uint32 { return 4294967295 / a }
+
+//go:noinline
+func mul_uint32_0(a uint32) uint32 { return a * 0 }
+
+//go:noinline
+func mul_0_uint32(a uint32) uint32 { return 0 * a }
+
+//go:noinline
+func mul_uint32_1(a uint32) uint32 { return a * 1 }
+
+//go:noinline
+func mul_1_uint32(a uint32) uint32 { return 1 * a }
+
+//go:noinline
+func mul_uint32_4294967295(a uint32) uint32 { return a * 4294967295 }
+
+//go:noinline
+func mul_4294967295_uint32(a uint32) uint32 { return 4294967295 * a }
+
+//go:noinline
+func lsh_uint32_0(a uint32) uint32 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint32(a uint32) uint32 { return 0 << a }
+
+//go:noinline
+func lsh_uint32_1(a uint32) uint32 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint32(a uint32) uint32 { return 1 << a }
+
+//go:noinline
+func lsh_uint32_4294967295(a uint32) uint32 { return a << 4294967295 }
+
+//go:noinline
+func lsh_4294967295_uint32(a uint32) uint32 { return 4294967295 << a }
+
+//go:noinline
+func rsh_uint32_0(a uint32) uint32 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint32(a uint32) uint32 { return 0 >> a }
+
+//go:noinline
+func rsh_uint32_1(a uint32) uint32 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint32(a uint32) uint32 { return 1 >> a }
+
+//go:noinline
+func rsh_uint32_4294967295(a uint32) uint32 { return a >> 4294967295 }
+
+//go:noinline
+func rsh_4294967295_uint32(a uint32) uint32 { return 4294967295 >> a }
+
+//go:noinline
+func mod_0_uint32(a uint32) uint32 { return 0 % a }
+
+//go:noinline
+func mod_uint32_1(a uint32) uint32 { return a % 1 }
+
+//go:noinline
+func mod_1_uint32(a uint32) uint32 { return 1 % a }
+
+//go:noinline
+func mod_uint32_4294967295(a uint32) uint32 { return a % 4294967295 }
+
+//go:noinline
+func mod_4294967295_uint32(a uint32) uint32 { return 4294967295 % a }
+
+//go:noinline
+func and_uint32_0(a uint32) uint32 { return a & 0 }
+
+//go:noinline
+func and_0_uint32(a uint32) uint32 { return 0 & a }
+
+//go:noinline
+func and_uint32_1(a uint32) uint32 { return a & 1 }
+
+//go:noinline
+func and_1_uint32(a uint32) uint32 { return 1 & a }
+
+//go:noinline
+func and_uint32_4294967295(a uint32) uint32 { return a & 4294967295 }
+
+//go:noinline
+func and_4294967295_uint32(a uint32) uint32 { return 4294967295 & a }
+
+//go:noinline
+func or_uint32_0(a uint32) uint32 { return a | 0 }
+
+//go:noinline
+func or_0_uint32(a uint32) uint32 { return 0 | a }
+
+//go:noinline
+func or_uint32_1(a uint32) uint32 { return a | 1 }
+
+//go:noinline
+func or_1_uint32(a uint32) uint32 { return 1 | a }
+
+//go:noinline
+func or_uint32_4294967295(a uint32) uint32 { return a | 4294967295 }
+
+//go:noinline
+func or_4294967295_uint32(a uint32) uint32 { return 4294967295 | a }
+
+//go:noinline
+func xor_uint32_0(a uint32) uint32 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint32(a uint32) uint32 { return 0 ^ a }
+
+//go:noinline
+func xor_uint32_1(a uint32) uint32 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint32(a uint32) uint32 { return 1 ^ a }
+
+//go:noinline
+func xor_uint32_4294967295(a uint32) uint32 { return a ^ 4294967295 }
+
+//go:noinline
+func xor_4294967295_uint32(a uint32) uint32 { return 4294967295 ^ a }
+
+//go:noinline
+func mul_uint32_3(a uint32) uint32 { return a * 3 }
+
+//go:noinline
+func mul_3_uint32(a uint32) uint32 { return 3 * a }
+
+//go:noinline
+func mul_uint32_5(a uint32) uint32 { return a * 5 }
+
+//go:noinline
+func mul_5_uint32(a uint32) uint32 { return 5 * a }
+
+//go:noinline
+func mul_uint32_7(a uint32) uint32 { return a * 7 }
+
+//go:noinline
+func mul_7_uint32(a uint32) uint32 { return 7 * a }
+
+//go:noinline
+func mul_uint32_9(a uint32) uint32 { return a * 9 }
+
+//go:noinline
+func mul_9_uint32(a uint32) uint32 { return 9 * a }
+
+//go:noinline
+func mul_uint32_10(a uint32) uint32 { return a * 10 }
+
+//go:noinline
+func mul_10_uint32(a uint32) uint32 { return 10 * a }
+
+//go:noinline
+func mul_uint32_11(a uint32) uint32 { return a * 11 }
+
+//go:noinline
+func mul_11_uint32(a uint32) uint32 { return 11 * a }
+
+//go:noinline
+func mul_uint32_13(a uint32) uint32 { return a * 13 }
+
+//go:noinline
+func mul_13_uint32(a uint32) uint32 { return 13 * a }
+
+//go:noinline
+func mul_uint32_19(a uint32) uint32 { return a * 19 }
+
+//go:noinline
+func mul_19_uint32(a uint32) uint32 { return 19 * a }
+
+//go:noinline
+func mul_uint32_21(a uint32) uint32 { return a * 21 }
+
+//go:noinline
+func mul_21_uint32(a uint32) uint32 { return 21 * a }
+
+//go:noinline
+func mul_uint32_25(a uint32) uint32 { return a * 25 }
+
+//go:noinline
+func mul_25_uint32(a uint32) uint32 { return 25 * a }
+
+//go:noinline
+func mul_uint32_27(a uint32) uint32 { return a * 27 }
+
+//go:noinline
+func mul_27_uint32(a uint32) uint32 { return 27 * a }
+
+//go:noinline
+func mul_uint32_37(a uint32) uint32 { return a * 37 }
+
+//go:noinline
+func mul_37_uint32(a uint32) uint32 { return 37 * a }
+
+//go:noinline
+func mul_uint32_41(a uint32) uint32 { return a * 41 }
+
+//go:noinline
+func mul_41_uint32(a uint32) uint32 { return 41 * a }
+
+//go:noinline
+func mul_uint32_45(a uint32) uint32 { return a * 45 }
+
+//go:noinline
+func mul_45_uint32(a uint32) uint32 { return 45 * a }
+
+//go:noinline
+func mul_uint32_73(a uint32) uint32 { return a * 73 }
+
+//go:noinline
+func mul_73_uint32(a uint32) uint32 { return 73 * a }
+
+//go:noinline
+func mul_uint32_81(a uint32) uint32 { return a * 81 }
+
+//go:noinline
+func mul_81_uint32(a uint32) uint32 { return 81 * a }
+
+//go:noinline
+func add_int32_Neg2147483648(a int32) int32 { return a + -2147483648 }
+
+//go:noinline
+func add_Neg2147483648_int32(a int32) int32 { return -2147483648 + a }
+
+//go:noinline
+func add_int32_Neg2147483647(a int32) int32 { return a + -2147483647 }
+
+//go:noinline
+func add_Neg2147483647_int32(a int32) int32 { return -2147483647 + a }
+
+//go:noinline
+func add_int32_Neg1(a int32) int32 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int32(a int32) int32 { return -1 + a }
+
+//go:noinline
+func add_int32_0(a int32) int32 { return a + 0 }
+
+//go:noinline
+func add_0_int32(a int32) int32 { return 0 + a }
+
+//go:noinline
+func add_int32_1(a int32) int32 { return a + 1 }
+
+//go:noinline
+func add_1_int32(a int32) int32 { return 1 + a }
+
+//go:noinline
+func add_int32_2147483647(a int32) int32 { return a + 2147483647 }
+
+//go:noinline
+func add_2147483647_int32(a int32) int32 { return 2147483647 + a }
+
+//go:noinline
+func sub_int32_Neg2147483648(a int32) int32 { return a - -2147483648 }
+
+//go:noinline
+func sub_Neg2147483648_int32(a int32) int32 { return -2147483648 - a }
+
+//go:noinline
+func sub_int32_Neg2147483647(a int32) int32 { return a - -2147483647 }
+
+//go:noinline
+func sub_Neg2147483647_int32(a int32) int32 { return -2147483647 - a }
+
+//go:noinline
+func sub_int32_Neg1(a int32) int32 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int32(a int32) int32 { return -1 - a }
+
+//go:noinline
+func sub_int32_0(a int32) int32 { return a - 0 }
+
+//go:noinline
+func sub_0_int32(a int32) int32 { return 0 - a }
+
+//go:noinline
+func sub_int32_1(a int32) int32 { return a - 1 }
+
+//go:noinline
+func sub_1_int32(a int32) int32 { return 1 - a }
+
+//go:noinline
+func sub_int32_2147483647(a int32) int32 { return a - 2147483647 }
+
+//go:noinline
+func sub_2147483647_int32(a int32) int32 { return 2147483647 - a }
+
+//go:noinline
+func div_int32_Neg2147483648(a int32) int32 { return a / -2147483648 }
+
+//go:noinline
+func div_Neg2147483648_int32(a int32) int32 { return -2147483648 / a }
+
+//go:noinline
+func div_int32_Neg2147483647(a int32) int32 { return a / -2147483647 }
+
+//go:noinline
+func div_Neg2147483647_int32(a int32) int32 { return -2147483647 / a }
+
+//go:noinline
+func div_int32_Neg1(a int32) int32 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int32(a int32) int32 { return -1 / a }
+
+//go:noinline
+func div_0_int32(a int32) int32 { return 0 / a }
+
+//go:noinline
+func div_int32_1(a int32) int32 { return a / 1 }
+
+//go:noinline
+func div_1_int32(a int32) int32 { return 1 / a }
+
+//go:noinline
+func div_int32_2147483647(a int32) int32 { return a / 2147483647 }
+
+//go:noinline
+func div_2147483647_int32(a int32) int32 { return 2147483647 / a }
+
+//go:noinline
+func mul_int32_Neg2147483648(a int32) int32 { return a * -2147483648 }
+
+//go:noinline
+func mul_Neg2147483648_int32(a int32) int32 { return -2147483648 * a }
+
+//go:noinline
+func mul_int32_Neg2147483647(a int32) int32 { return a * -2147483647 }
+
+//go:noinline
+func mul_Neg2147483647_int32(a int32) int32 { return -2147483647 * a }
+
+//go:noinline
+func mul_int32_Neg1(a int32) int32 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int32(a int32) int32 { return -1 * a }
+
+//go:noinline
+func mul_int32_0(a int32) int32 { return a * 0 }
+
+//go:noinline
+func mul_0_int32(a int32) int32 { return 0 * a }
+
+//go:noinline
+func mul_int32_1(a int32) int32 { return a * 1 }
+
+//go:noinline
+func mul_1_int32(a int32) int32 { return 1 * a }
+
+//go:noinline
+func mul_int32_2147483647(a int32) int32 { return a * 2147483647 }
+
+//go:noinline
+func mul_2147483647_int32(a int32) int32 { return 2147483647 * a }
+
+//go:noinline
+func mod_int32_Neg2147483648(a int32) int32 { return a % -2147483648 }
+
+//go:noinline
+func mod_Neg2147483648_int32(a int32) int32 { return -2147483648 % a }
+
+//go:noinline
+func mod_int32_Neg2147483647(a int32) int32 { return a % -2147483647 }
+
+//go:noinline
+func mod_Neg2147483647_int32(a int32) int32 { return -2147483647 % a }
+
+//go:noinline
+func mod_int32_Neg1(a int32) int32 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int32(a int32) int32 { return -1 % a }
+
+//go:noinline
+func mod_0_int32(a int32) int32 { return 0 % a }
+
+//go:noinline
+func mod_int32_1(a int32) int32 { return a % 1 }
+
+//go:noinline
+func mod_1_int32(a int32) int32 { return 1 % a }
+
+//go:noinline
+func mod_int32_2147483647(a int32) int32 { return a % 2147483647 }
+
+//go:noinline
+func mod_2147483647_int32(a int32) int32 { return 2147483647 % a }
+
+//go:noinline
+func and_int32_Neg2147483648(a int32) int32 { return a & -2147483648 }
+
+//go:noinline
+func and_Neg2147483648_int32(a int32) int32 { return -2147483648 & a }
+
+//go:noinline
+func and_int32_Neg2147483647(a int32) int32 { return a & -2147483647 }
+
+//go:noinline
+func and_Neg2147483647_int32(a int32) int32 { return -2147483647 & a }
+
+//go:noinline
+func and_int32_Neg1(a int32) int32 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int32(a int32) int32 { return -1 & a }
+
+//go:noinline
+func and_int32_0(a int32) int32 { return a & 0 }
+
+//go:noinline
+func and_0_int32(a int32) int32 { return 0 & a }
+
+//go:noinline
+func and_int32_1(a int32) int32 { return a & 1 }
+
+//go:noinline
+func and_1_int32(a int32) int32 { return 1 & a }
+
+//go:noinline
+func and_int32_2147483647(a int32) int32 { return a & 2147483647 }
+
+//go:noinline
+func and_2147483647_int32(a int32) int32 { return 2147483647 & a }
+
+//go:noinline
+func or_int32_Neg2147483648(a int32) int32 { return a | -2147483648 }
+
+//go:noinline
+func or_Neg2147483648_int32(a int32) int32 { return -2147483648 | a }
+
+//go:noinline
+func or_int32_Neg2147483647(a int32) int32 { return a | -2147483647 }
+
+//go:noinline
+func or_Neg2147483647_int32(a int32) int32 { return -2147483647 | a }
+
+//go:noinline
+func or_int32_Neg1(a int32) int32 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int32(a int32) int32 { return -1 | a }
+
+//go:noinline
+func or_int32_0(a int32) int32 { return a | 0 }
+
+//go:noinline
+func or_0_int32(a int32) int32 { return 0 | a }
+
+//go:noinline
+func or_int32_1(a int32) int32 { return a | 1 }
+
+//go:noinline
+func or_1_int32(a int32) int32 { return 1 | a }
+
+//go:noinline
+func or_int32_2147483647(a int32) int32 { return a | 2147483647 }
+
+//go:noinline
+func or_2147483647_int32(a int32) int32 { return 2147483647 | a }
+
+//go:noinline
+func xor_int32_Neg2147483648(a int32) int32 { return a ^ -2147483648 }
+
+//go:noinline
+func xor_Neg2147483648_int32(a int32) int32 { return -2147483648 ^ a }
+
+//go:noinline
+func xor_int32_Neg2147483647(a int32) int32 { return a ^ -2147483647 }
+
+//go:noinline
+func xor_Neg2147483647_int32(a int32) int32 { return -2147483647 ^ a }
+
+//go:noinline
+func xor_int32_Neg1(a int32) int32 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int32(a int32) int32 { return -1 ^ a }
+
+//go:noinline
+func xor_int32_0(a int32) int32 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int32(a int32) int32 { return 0 ^ a }
+
+//go:noinline
+func xor_int32_1(a int32) int32 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int32(a int32) int32 { return 1 ^ a }
+
+//go:noinline
+func xor_int32_2147483647(a int32) int32 { return a ^ 2147483647 }
+
+//go:noinline
+func xor_2147483647_int32(a int32) int32 { return 2147483647 ^ a }
+
+//go:noinline
+func mul_int32_Neg9(a int32) int32 { return a * -9 }
+
+//go:noinline
+func mul_Neg9_int32(a int32) int32 { return -9 * a }
+
+//go:noinline
+func mul_int32_Neg5(a int32) int32 { return a * -5 }
+
+//go:noinline
+func mul_Neg5_int32(a int32) int32 { return -5 * a }
+
+//go:noinline
+func mul_int32_Neg3(a int32) int32 { return a * -3 }
+
+//go:noinline
+func mul_Neg3_int32(a int32) int32 { return -3 * a }
+
+//go:noinline
+func mul_int32_3(a int32) int32 { return a * 3 }
+
+//go:noinline
+func mul_3_int32(a int32) int32 { return 3 * a }
+
+//go:noinline
+func mul_int32_5(a int32) int32 { return a * 5 }
+
+//go:noinline
+func mul_5_int32(a int32) int32 { return 5 * a }
+
+//go:noinline
+func mul_int32_7(a int32) int32 { return a * 7 }
+
+//go:noinline
+func mul_7_int32(a int32) int32 { return 7 * a }
+
+//go:noinline
+func mul_int32_9(a int32) int32 { return a * 9 }
+
+//go:noinline
+func mul_9_int32(a int32) int32 { return 9 * a }
+
+//go:noinline
+func mul_int32_10(a int32) int32 { return a * 10 }
+
+//go:noinline
+func mul_10_int32(a int32) int32 { return 10 * a }
+
+//go:noinline
+func mul_int32_11(a int32) int32 { return a * 11 }
+
+//go:noinline
+func mul_11_int32(a int32) int32 { return 11 * a }
+
+//go:noinline
+func mul_int32_13(a int32) int32 { return a * 13 }
+
+//go:noinline
+func mul_13_int32(a int32) int32 { return 13 * a }
+
+//go:noinline
+func mul_int32_19(a int32) int32 { return a * 19 }
+
+//go:noinline
+func mul_19_int32(a int32) int32 { return 19 * a }
+
+//go:noinline
+func mul_int32_21(a int32) int32 { return a * 21 }
+
+//go:noinline
+func mul_21_int32(a int32) int32 { return 21 * a }
+
+//go:noinline
+func mul_int32_25(a int32) int32 { return a * 25 }
+
+//go:noinline
+func mul_25_int32(a int32) int32 { return 25 * a }
+
+//go:noinline
+func mul_int32_27(a int32) int32 { return a * 27 }
+
+//go:noinline
+func mul_27_int32(a int32) int32 { return 27 * a }
+
+//go:noinline
+func mul_int32_37(a int32) int32 { return a * 37 }
+
+//go:noinline
+func mul_37_int32(a int32) int32 { return 37 * a }
+
+//go:noinline
+func mul_int32_41(a int32) int32 { return a * 41 }
+
+//go:noinline
+func mul_41_int32(a int32) int32 { return 41 * a }
+
+//go:noinline
+func mul_int32_45(a int32) int32 { return a * 45 }
+
+//go:noinline
+func mul_45_int32(a int32) int32 { return 45 * a }
+
+//go:noinline
+func mul_int32_73(a int32) int32 { return a * 73 }
+
+//go:noinline
+func mul_73_int32(a int32) int32 { return 73 * a }
+
+//go:noinline
+func mul_int32_81(a int32) int32 { return a * 81 }
+
+//go:noinline
+func mul_81_int32(a int32) int32 { return 81 * a }
+
+//go:noinline
+func add_uint16_0(a uint16) uint16 { return a + 0 }
+
+//go:noinline
+func add_0_uint16(a uint16) uint16 { return 0 + a }
+
+//go:noinline
+func add_uint16_1(a uint16) uint16 { return a + 1 }
+
+//go:noinline
+func add_1_uint16(a uint16) uint16 { return 1 + a }
+
+//go:noinline
+func add_uint16_65535(a uint16) uint16 { return a + 65535 }
+
+//go:noinline
+func add_65535_uint16(a uint16) uint16 { return 65535 + a }
+
+//go:noinline
+func sub_uint16_0(a uint16) uint16 { return a - 0 }
+
+//go:noinline
+func sub_0_uint16(a uint16) uint16 { return 0 - a }
+
+//go:noinline
+func sub_uint16_1(a uint16) uint16 { return a - 1 }
+
+//go:noinline
+func sub_1_uint16(a uint16) uint16 { return 1 - a }
+
+//go:noinline
+func sub_uint16_65535(a uint16) uint16 { return a - 65535 }
+
+//go:noinline
+func sub_65535_uint16(a uint16) uint16 { return 65535 - a }
+
+//go:noinline
+func div_0_uint16(a uint16) uint16 { return 0 / a }
+
+//go:noinline
+func div_uint16_1(a uint16) uint16 { return a / 1 }
+
+//go:noinline
+func div_1_uint16(a uint16) uint16 { return 1 / a }
+
+//go:noinline
+func div_uint16_65535(a uint16) uint16 { return a / 65535 }
+
+//go:noinline
+func div_65535_uint16(a uint16) uint16 { return 65535 / a }
+
+//go:noinline
+func mul_uint16_0(a uint16) uint16 { return a * 0 }
+
+//go:noinline
+func mul_0_uint16(a uint16) uint16 { return 0 * a }
+
+//go:noinline
+func mul_uint16_1(a uint16) uint16 { return a * 1 }
+
+//go:noinline
+func mul_1_uint16(a uint16) uint16 { return 1 * a }
+
+//go:noinline
+func mul_uint16_65535(a uint16) uint16 { return a * 65535 }
+
+//go:noinline
+func mul_65535_uint16(a uint16) uint16 { return 65535 * a }
+
+//go:noinline
+func lsh_uint16_0(a uint16) uint16 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint16(a uint16) uint16 { return 0 << a }
+
+//go:noinline
+func lsh_uint16_1(a uint16) uint16 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint16(a uint16) uint16 { return 1 << a }
+
+//go:noinline
+func lsh_uint16_65535(a uint16) uint16 { return a << 65535 }
+
+//go:noinline
+func lsh_65535_uint16(a uint16) uint16 { return 65535 << a }
+
+//go:noinline
+func rsh_uint16_0(a uint16) uint16 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint16(a uint16) uint16 { return 0 >> a }
+
+//go:noinline
+func rsh_uint16_1(a uint16) uint16 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint16(a uint16) uint16 { return 1 >> a }
+
+//go:noinline
+func rsh_uint16_65535(a uint16) uint16 { return a >> 65535 }
+
+//go:noinline
+func rsh_65535_uint16(a uint16) uint16 { return 65535 >> a }
+
+//go:noinline
+func mod_0_uint16(a uint16) uint16 { return 0 % a }
+
+//go:noinline
+func mod_uint16_1(a uint16) uint16 { return a % 1 }
+
+//go:noinline
+func mod_1_uint16(a uint16) uint16 { return 1 % a }
+
+//go:noinline
+func mod_uint16_65535(a uint16) uint16 { return a % 65535 }
+
+//go:noinline
+func mod_65535_uint16(a uint16) uint16 { return 65535 % a }
+
+//go:noinline
+func and_uint16_0(a uint16) uint16 { return a & 0 }
+
+//go:noinline
+func and_0_uint16(a uint16) uint16 { return 0 & a }
+
+//go:noinline
+func and_uint16_1(a uint16) uint16 { return a & 1 }
+
+//go:noinline
+func and_1_uint16(a uint16) uint16 { return 1 & a }
+
+//go:noinline
+func and_uint16_65535(a uint16) uint16 { return a & 65535 }
+
+//go:noinline
+func and_65535_uint16(a uint16) uint16 { return 65535 & a }
+
+//go:noinline
+func or_uint16_0(a uint16) uint16 { return a | 0 }
+
+//go:noinline
+func or_0_uint16(a uint16) uint16 { return 0 | a }
+
+//go:noinline
+func or_uint16_1(a uint16) uint16 { return a | 1 }
+
+//go:noinline
+func or_1_uint16(a uint16) uint16 { return 1 | a }
+
+//go:noinline
+func or_uint16_65535(a uint16) uint16 { return a | 65535 }
+
+//go:noinline
+func or_65535_uint16(a uint16) uint16 { return 65535 | a }
+
+//go:noinline
+func xor_uint16_0(a uint16) uint16 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint16(a uint16) uint16 { return 0 ^ a }
+
+//go:noinline
+func xor_uint16_1(a uint16) uint16 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint16(a uint16) uint16 { return 1 ^ a }
+
+//go:noinline
+func xor_uint16_65535(a uint16) uint16 { return a ^ 65535 }
+
+//go:noinline
+func xor_65535_uint16(a uint16) uint16 { return 65535 ^ a }
+
+//go:noinline
+func add_int16_Neg32768(a int16) int16 { return a + -32768 }
+
+//go:noinline
+func add_Neg32768_int16(a int16) int16 { return -32768 + a }
+
+//go:noinline
+func add_int16_Neg32767(a int16) int16 { return a + -32767 }
+
+//go:noinline
+func add_Neg32767_int16(a int16) int16 { return -32767 + a }
+
+//go:noinline
+func add_int16_Neg1(a int16) int16 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int16(a int16) int16 { return -1 + a }
+
+//go:noinline
+func add_int16_0(a int16) int16 { return a + 0 }
+
+//go:noinline
+func add_0_int16(a int16) int16 { return 0 + a }
+
+//go:noinline
+func add_int16_1(a int16) int16 { return a + 1 }
+
+//go:noinline
+func add_1_int16(a int16) int16 { return 1 + a }
+
+//go:noinline
+func add_int16_32766(a int16) int16 { return a + 32766 }
+
+//go:noinline
+func add_32766_int16(a int16) int16 { return 32766 + a }
+
+//go:noinline
+func add_int16_32767(a int16) int16 { return a + 32767 }
+
+//go:noinline
+func add_32767_int16(a int16) int16 { return 32767 + a }
+
+//go:noinline
+func sub_int16_Neg32768(a int16) int16 { return a - -32768 }
+
+//go:noinline
+func sub_Neg32768_int16(a int16) int16 { return -32768 - a }
+
+//go:noinline
+func sub_int16_Neg32767(a int16) int16 { return a - -32767 }
+
+//go:noinline
+func sub_Neg32767_int16(a int16) int16 { return -32767 - a }
+
+//go:noinline
+func sub_int16_Neg1(a int16) int16 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int16(a int16) int16 { return -1 - a }
+
+//go:noinline
+func sub_int16_0(a int16) int16 { return a - 0 }
+
+//go:noinline
+func sub_0_int16(a int16) int16 { return 0 - a }
+
+//go:noinline
+func sub_int16_1(a int16) int16 { return a - 1 }
+
+//go:noinline
+func sub_1_int16(a int16) int16 { return 1 - a }
+
+//go:noinline
+func sub_int16_32766(a int16) int16 { return a - 32766 }
+
+//go:noinline
+func sub_32766_int16(a int16) int16 { return 32766 - a }
+
+//go:noinline
+func sub_int16_32767(a int16) int16 { return a - 32767 }
+
+//go:noinline
+func sub_32767_int16(a int16) int16 { return 32767 - a }
+
+//go:noinline
+func div_int16_Neg32768(a int16) int16 { return a / -32768 }
+
+//go:noinline
+func div_Neg32768_int16(a int16) int16 { return -32768 / a }
+
+//go:noinline
+func div_int16_Neg32767(a int16) int16 { return a / -32767 }
+
+//go:noinline
+func div_Neg32767_int16(a int16) int16 { return -32767 / a }
+
+//go:noinline
+func div_int16_Neg1(a int16) int16 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int16(a int16) int16 { return -1 / a }
+
+//go:noinline
+func div_0_int16(a int16) int16 { return 0 / a }
+
+//go:noinline
+func div_int16_1(a int16) int16 { return a / 1 }
+
+//go:noinline
+func div_1_int16(a int16) int16 { return 1 / a }
+
+//go:noinline
+func div_int16_32766(a int16) int16 { return a / 32766 }
+
+//go:noinline
+func div_32766_int16(a int16) int16 { return 32766 / a }
+
+//go:noinline
+func div_int16_32767(a int16) int16 { return a / 32767 }
+
+//go:noinline
+func div_32767_int16(a int16) int16 { return 32767 / a }
+
+//go:noinline
+func mul_int16_Neg32768(a int16) int16 { return a * -32768 }
+
+//go:noinline
+func mul_Neg32768_int16(a int16) int16 { return -32768 * a }
+
+//go:noinline
+func mul_int16_Neg32767(a int16) int16 { return a * -32767 }
+
+//go:noinline
+func mul_Neg32767_int16(a int16) int16 { return -32767 * a }
+
+//go:noinline
+func mul_int16_Neg1(a int16) int16 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int16(a int16) int16 { return -1 * a }
+
+//go:noinline
+func mul_int16_0(a int16) int16 { return a * 0 }
+
+//go:noinline
+func mul_0_int16(a int16) int16 { return 0 * a }
+
+//go:noinline
+func mul_int16_1(a int16) int16 { return a * 1 }
+
+//go:noinline
+func mul_1_int16(a int16) int16 { return 1 * a }
+
+//go:noinline
+func mul_int16_32766(a int16) int16 { return a * 32766 }
+
+//go:noinline
+func mul_32766_int16(a int16) int16 { return 32766 * a }
+
+//go:noinline
+func mul_int16_32767(a int16) int16 { return a * 32767 }
+
+//go:noinline
+func mul_32767_int16(a int16) int16 { return 32767 * a }
+
+//go:noinline
+func mod_int16_Neg32768(a int16) int16 { return a % -32768 }
+
+//go:noinline
+func mod_Neg32768_int16(a int16) int16 { return -32768 % a }
+
+//go:noinline
+func mod_int16_Neg32767(a int16) int16 { return a % -32767 }
+
+//go:noinline
+func mod_Neg32767_int16(a int16) int16 { return -32767 % a }
+
+//go:noinline
+func mod_int16_Neg1(a int16) int16 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int16(a int16) int16 { return -1 % a }
+
+//go:noinline
+func mod_0_int16(a int16) int16 { return 0 % a }
+
+//go:noinline
+func mod_int16_1(a int16) int16 { return a % 1 }
+
+//go:noinline
+func mod_1_int16(a int16) int16 { return 1 % a }
+
+//go:noinline
+func mod_int16_32766(a int16) int16 { return a % 32766 }
+
+//go:noinline
+func mod_32766_int16(a int16) int16 { return 32766 % a }
+
+//go:noinline
+func mod_int16_32767(a int16) int16 { return a % 32767 }
+
+//go:noinline
+func mod_32767_int16(a int16) int16 { return 32767 % a }
+
+//go:noinline
+func and_int16_Neg32768(a int16) int16 { return a & -32768 }
+
+//go:noinline
+func and_Neg32768_int16(a int16) int16 { return -32768 & a }
+
+//go:noinline
+func and_int16_Neg32767(a int16) int16 { return a & -32767 }
+
+//go:noinline
+func and_Neg32767_int16(a int16) int16 { return -32767 & a }
+
+//go:noinline
+func and_int16_Neg1(a int16) int16 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int16(a int16) int16 { return -1 & a }
+
+//go:noinline
+func and_int16_0(a int16) int16 { return a & 0 }
+
+//go:noinline
+func and_0_int16(a int16) int16 { return 0 & a }
+
+//go:noinline
+func and_int16_1(a int16) int16 { return a & 1 }
+
+//go:noinline
+func and_1_int16(a int16) int16 { return 1 & a }
+
+//go:noinline
+func and_int16_32766(a int16) int16 { return a & 32766 }
+
+//go:noinline
+func and_32766_int16(a int16) int16 { return 32766 & a }
+
+//go:noinline
+func and_int16_32767(a int16) int16 { return a & 32767 }
+
+//go:noinline
+func and_32767_int16(a int16) int16 { return 32767 & a }
+
+//go:noinline
+func or_int16_Neg32768(a int16) int16 { return a | -32768 }
+
+//go:noinline
+func or_Neg32768_int16(a int16) int16 { return -32768 | a }
+
+//go:noinline
+func or_int16_Neg32767(a int16) int16 { return a | -32767 }
+
+//go:noinline
+func or_Neg32767_int16(a int16) int16 { return -32767 | a }
+
+//go:noinline
+func or_int16_Neg1(a int16) int16 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int16(a int16) int16 { return -1 | a }
+
+//go:noinline
+func or_int16_0(a int16) int16 { return a | 0 }
+
+//go:noinline
+func or_0_int16(a int16) int16 { return 0 | a }
+
+//go:noinline
+func or_int16_1(a int16) int16 { return a | 1 }
+
+//go:noinline
+func or_1_int16(a int16) int16 { return 1 | a }
+
+//go:noinline
+func or_int16_32766(a int16) int16 { return a | 32766 }
+
+//go:noinline
+func or_32766_int16(a int16) int16 { return 32766 | a }
+
+//go:noinline
+func or_int16_32767(a int16) int16 { return a | 32767 }
+
+//go:noinline
+func or_32767_int16(a int16) int16 { return 32767 | a }
+
+//go:noinline
+func xor_int16_Neg32768(a int16) int16 { return a ^ -32768 }
+
+//go:noinline
+func xor_Neg32768_int16(a int16) int16 { return -32768 ^ a }
+
+//go:noinline
+func xor_int16_Neg32767(a int16) int16 { return a ^ -32767 }
+
+//go:noinline
+func xor_Neg32767_int16(a int16) int16 { return -32767 ^ a }
+
+//go:noinline
+func xor_int16_Neg1(a int16) int16 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int16(a int16) int16 { return -1 ^ a }
+
+//go:noinline
+func xor_int16_0(a int16) int16 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int16(a int16) int16 { return 0 ^ a }
+
+//go:noinline
+func xor_int16_1(a int16) int16 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int16(a int16) int16 { return 1 ^ a }
+
+//go:noinline
+func xor_int16_32766(a int16) int16 { return a ^ 32766 }
+
+//go:noinline
+func xor_32766_int16(a int16) int16 { return 32766 ^ a }
+
+//go:noinline
+func xor_int16_32767(a int16) int16 { return a ^ 32767 }
+
+//go:noinline
+func xor_32767_int16(a int16) int16 { return 32767 ^ a }
+
+//go:noinline
+func add_uint8_0(a uint8) uint8 { return a + 0 }
+
+//go:noinline
+func add_0_uint8(a uint8) uint8 { return 0 + a }
+
+//go:noinline
+func add_uint8_1(a uint8) uint8 { return a + 1 }
+
+//go:noinline
+func add_1_uint8(a uint8) uint8 { return 1 + a }
+
+//go:noinline
+func add_uint8_255(a uint8) uint8 { return a + 255 }
+
+//go:noinline
+func add_255_uint8(a uint8) uint8 { return 255 + a }
+
+//go:noinline
+func sub_uint8_0(a uint8) uint8 { return a - 0 }
+
+//go:noinline
+func sub_0_uint8(a uint8) uint8 { return 0 - a }
+
+//go:noinline
+func sub_uint8_1(a uint8) uint8 { return a - 1 }
+
+//go:noinline
+func sub_1_uint8(a uint8) uint8 { return 1 - a }
+
+//go:noinline
+func sub_uint8_255(a uint8) uint8 { return a - 255 }
+
+//go:noinline
+func sub_255_uint8(a uint8) uint8 { return 255 - a }
+
+//go:noinline
+func div_0_uint8(a uint8) uint8 { return 0 / a }
+
+//go:noinline
+func div_uint8_1(a uint8) uint8 { return a / 1 }
+
+//go:noinline
+func div_1_uint8(a uint8) uint8 { return 1 / a }
+
+//go:noinline
+func div_uint8_255(a uint8) uint8 { return a / 255 }
+
+//go:noinline
+func div_255_uint8(a uint8) uint8 { return 255 / a }
+
+//go:noinline
+func mul_uint8_0(a uint8) uint8 { return a * 0 }
+
+//go:noinline
+func mul_0_uint8(a uint8) uint8 { return 0 * a }
+
+//go:noinline
+func mul_uint8_1(a uint8) uint8 { return a * 1 }
+
+//go:noinline
+func mul_1_uint8(a uint8) uint8 { return 1 * a }
+
+//go:noinline
+func mul_uint8_255(a uint8) uint8 { return a * 255 }
+
+//go:noinline
+func mul_255_uint8(a uint8) uint8 { return 255 * a }
+
+//go:noinline
+func lsh_uint8_0(a uint8) uint8 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint8(a uint8) uint8 { return 0 << a }
+
+//go:noinline
+func lsh_uint8_1(a uint8) uint8 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint8(a uint8) uint8 { return 1 << a }
+
+//go:noinline
+func lsh_uint8_255(a uint8) uint8 { return a << 255 }
+
+//go:noinline
+func lsh_255_uint8(a uint8) uint8 { return 255 << a }
+
+//go:noinline
+func rsh_uint8_0(a uint8) uint8 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint8(a uint8) uint8 { return 0 >> a }
+
+//go:noinline
+func rsh_uint8_1(a uint8) uint8 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint8(a uint8) uint8 { return 1 >> a }
+
+//go:noinline
+func rsh_uint8_255(a uint8) uint8 { return a >> 255 }
+
+//go:noinline
+func rsh_255_uint8(a uint8) uint8 { return 255 >> a }
+
+//go:noinline
+func mod_0_uint8(a uint8) uint8 { return 0 % a }
+
+//go:noinline
+func mod_uint8_1(a uint8) uint8 { return a % 1 }
+
+//go:noinline
+func mod_1_uint8(a uint8) uint8 { return 1 % a }
+
+//go:noinline
+func mod_uint8_255(a uint8) uint8 { return a % 255 }
+
+//go:noinline
+func mod_255_uint8(a uint8) uint8 { return 255 % a }
+
+//go:noinline
+func and_uint8_0(a uint8) uint8 { return a & 0 }
+
+//go:noinline
+func and_0_uint8(a uint8) uint8 { return 0 & a }
+
+//go:noinline
+func and_uint8_1(a uint8) uint8 { return a & 1 }
+
+//go:noinline
+func and_1_uint8(a uint8) uint8 { return 1 & a }
+
+//go:noinline
+func and_uint8_255(a uint8) uint8 { return a & 255 }
+
+//go:noinline
+func and_255_uint8(a uint8) uint8 { return 255 & a }
+
+//go:noinline
+func or_uint8_0(a uint8) uint8 { return a | 0 }
+
+//go:noinline
+func or_0_uint8(a uint8) uint8 { return 0 | a }
+
+//go:noinline
+func or_uint8_1(a uint8) uint8 { return a | 1 }
+
+//go:noinline
+func or_1_uint8(a uint8) uint8 { return 1 | a }
+
+//go:noinline
+func or_uint8_255(a uint8) uint8 { return a | 255 }
+
+//go:noinline
+func or_255_uint8(a uint8) uint8 { return 255 | a }
+
+//go:noinline
+func xor_uint8_0(a uint8) uint8 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint8(a uint8) uint8 { return 0 ^ a }
+
+//go:noinline
+func xor_uint8_1(a uint8) uint8 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint8(a uint8) uint8 { return 1 ^ a }
+
+//go:noinline
+func xor_uint8_255(a uint8) uint8 { return a ^ 255 }
+
+//go:noinline
+func xor_255_uint8(a uint8) uint8 { return 255 ^ a }
+
+//go:noinline
+func add_int8_Neg128(a int8) int8 { return a + -128 }
+
+//go:noinline
+func add_Neg128_int8(a int8) int8 { return -128 + a }
+
+//go:noinline
+func add_int8_Neg127(a int8) int8 { return a + -127 }
+
+//go:noinline
+func add_Neg127_int8(a int8) int8 { return -127 + a }
+
+//go:noinline
+func add_int8_Neg1(a int8) int8 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int8(a int8) int8 { return -1 + a }
+
+//go:noinline
+func add_int8_0(a int8) int8 { return a + 0 }
+
+//go:noinline
+func add_0_int8(a int8) int8 { return 0 + a }
+
+//go:noinline
+func add_int8_1(a int8) int8 { return a + 1 }
+
+//go:noinline
+func add_1_int8(a int8) int8 { return 1 + a }
+
+//go:noinline
+func add_int8_126(a int8) int8 { return a + 126 }
+
+//go:noinline
+func add_126_int8(a int8) int8 { return 126 + a }
+
+//go:noinline
+func add_int8_127(a int8) int8 { return a + 127 }
+
+//go:noinline
+func add_127_int8(a int8) int8 { return 127 + a }
+
+//go:noinline
+func sub_int8_Neg128(a int8) int8 { return a - -128 }
+
+//go:noinline
+func sub_Neg128_int8(a int8) int8 { return -128 - a }
+
+//go:noinline
+func sub_int8_Neg127(a int8) int8 { return a - -127 }
+
+//go:noinline
+func sub_Neg127_int8(a int8) int8 { return -127 - a }
+
+//go:noinline
+func sub_int8_Neg1(a int8) int8 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int8(a int8) int8 { return -1 - a }
+
+//go:noinline
+func sub_int8_0(a int8) int8 { return a - 0 }
+
+//go:noinline
+func sub_0_int8(a int8) int8 { return 0 - a }
+
+//go:noinline
+func sub_int8_1(a int8) int8 { return a - 1 }
+
+//go:noinline
+func sub_1_int8(a int8) int8 { return 1 - a }
+
+//go:noinline
+func sub_int8_126(a int8) int8 { return a - 126 }
+
+//go:noinline
+func sub_126_int8(a int8) int8 { return 126 - a }
+
+//go:noinline
+func sub_int8_127(a int8) int8 { return a - 127 }
+
+//go:noinline
+func sub_127_int8(a int8) int8 { return 127 - a }
+
+//go:noinline
+func div_int8_Neg128(a int8) int8 { return a / -128 }
+
+//go:noinline
+func div_Neg128_int8(a int8) int8 { return -128 / a }
+
+//go:noinline
+func div_int8_Neg127(a int8) int8 { return a / -127 }
+
+//go:noinline
+func div_Neg127_int8(a int8) int8 { return -127 / a }
+
+//go:noinline
+func div_int8_Neg1(a int8) int8 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int8(a int8) int8 { return -1 / a }
+
+//go:noinline
+func div_0_int8(a int8) int8 { return 0 / a }
+
+//go:noinline
+func div_int8_1(a int8) int8 { return a / 1 }
+
+//go:noinline
+func div_1_int8(a int8) int8 { return 1 / a }
+
+//go:noinline
+func div_int8_126(a int8) int8 { return a / 126 }
+
+//go:noinline
+func div_126_int8(a int8) int8 { return 126 / a }
+
+//go:noinline
+func div_int8_127(a int8) int8 { return a / 127 }
+
+//go:noinline
+func div_127_int8(a int8) int8 { return 127 / a }
+
+//go:noinline
+func mul_int8_Neg128(a int8) int8 { return a * -128 }
+
+//go:noinline
+func mul_Neg128_int8(a int8) int8 { return -128 * a }
+
+//go:noinline
+func mul_int8_Neg127(a int8) int8 { return a * -127 }
+
+//go:noinline
+func mul_Neg127_int8(a int8) int8 { return -127 * a }
+
+//go:noinline
+func mul_int8_Neg1(a int8) int8 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int8(a int8) int8 { return -1 * a }
+
+//go:noinline
+func mul_int8_0(a int8) int8 { return a * 0 }
+
+//go:noinline
+func mul_0_int8(a int8) int8 { return 0 * a }
+
+//go:noinline
+func mul_int8_1(a int8) int8 { return a * 1 }
+
+//go:noinline
+func mul_1_int8(a int8) int8 { return 1 * a }
+
+//go:noinline
+func mul_int8_126(a int8) int8 { return a * 126 }
+
+//go:noinline
+func mul_126_int8(a int8) int8 { return 126 * a }
+
+//go:noinline
+func mul_int8_127(a int8) int8 { return a * 127 }
+
+//go:noinline
+func mul_127_int8(a int8) int8 { return 127 * a }
+
+//go:noinline
+func mod_int8_Neg128(a int8) int8 { return a % -128 }
+
+//go:noinline
+func mod_Neg128_int8(a int8) int8 { return -128 % a }
+
+//go:noinline
+func mod_int8_Neg127(a int8) int8 { return a % -127 }
+
+//go:noinline
+func mod_Neg127_int8(a int8) int8 { return -127 % a }
+
+//go:noinline
+func mod_int8_Neg1(a int8) int8 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int8(a int8) int8 { return -1 % a }
+
+//go:noinline
+func mod_0_int8(a int8) int8 { return 0 % a }
+
+//go:noinline
+func mod_int8_1(a int8) int8 { return a % 1 }
+
+//go:noinline
+func mod_1_int8(a int8) int8 { return 1 % a }
+
+//go:noinline
+func mod_int8_126(a int8) int8 { return a % 126 }
+
+//go:noinline
+func mod_126_int8(a int8) int8 { return 126 % a }
+
+//go:noinline
+func mod_int8_127(a int8) int8 { return a % 127 }
+
+//go:noinline
+func mod_127_int8(a int8) int8 { return 127 % a }
+
+//go:noinline
+func and_int8_Neg128(a int8) int8 { return a & -128 }
+
+//go:noinline
+func and_Neg128_int8(a int8) int8 { return -128 & a }
+
+//go:noinline
+func and_int8_Neg127(a int8) int8 { return a & -127 }
+
+//go:noinline
+func and_Neg127_int8(a int8) int8 { return -127 & a }
+
+//go:noinline
+func and_int8_Neg1(a int8) int8 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int8(a int8) int8 { return -1 & a }
+
+//go:noinline
+func and_int8_0(a int8) int8 { return a & 0 }
+
+//go:noinline
+func and_0_int8(a int8) int8 { return 0 & a }
+
+//go:noinline
+func and_int8_1(a int8) int8 { return a & 1 }
+
+//go:noinline
+func and_1_int8(a int8) int8 { return 1 & a }
+
+//go:noinline
+func and_int8_126(a int8) int8 { return a & 126 }
+
+//go:noinline
+func and_126_int8(a int8) int8 { return 126 & a }
+
+//go:noinline
+func and_int8_127(a int8) int8 { return a & 127 }
+
+//go:noinline
+func and_127_int8(a int8) int8 { return 127 & a }
+
+//go:noinline
+func or_int8_Neg128(a int8) int8 { return a | -128 }
+
+//go:noinline
+func or_Neg128_int8(a int8) int8 { return -128 | a }
+
+//go:noinline
+func or_int8_Neg127(a int8) int8 { return a | -127 }
+
+//go:noinline
+func or_Neg127_int8(a int8) int8 { return -127 | a }
+
+//go:noinline
+func or_int8_Neg1(a int8) int8 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int8(a int8) int8 { return -1 | a }
+
+//go:noinline
+func or_int8_0(a int8) int8 { return a | 0 }
+
+//go:noinline
+func or_0_int8(a int8) int8 { return 0 | a }
+
+//go:noinline
+func or_int8_1(a int8) int8 { return a | 1 }
+
+//go:noinline
+func or_1_int8(a int8) int8 { return 1 | a }
+
+//go:noinline
+func or_int8_126(a int8) int8 { return a | 126 }
+
+//go:noinline
+func or_126_int8(a int8) int8 { return 126 | a }
+
+//go:noinline
+func or_int8_127(a int8) int8 { return a | 127 }
+
+//go:noinline
+func or_127_int8(a int8) int8 { return 127 | a }
+
+//go:noinline
+func xor_int8_Neg128(a int8) int8 { return a ^ -128 }
+
+//go:noinline
+func xor_Neg128_int8(a int8) int8 { return -128 ^ a }
+
+//go:noinline
+func xor_int8_Neg127(a int8) int8 { return a ^ -127 }
+
+//go:noinline
+func xor_Neg127_int8(a int8) int8 { return -127 ^ a }
+
+//go:noinline
+func xor_int8_Neg1(a int8) int8 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int8(a int8) int8 { return -1 ^ a }
+
+//go:noinline
+func xor_int8_0(a int8) int8 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int8(a int8) int8 { return 0 ^ a }
+
+//go:noinline
+func xor_int8_1(a int8) int8 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int8(a int8) int8 { return 1 ^ a }
+
+//go:noinline
+func xor_int8_126(a int8) int8 { return a ^ 126 }
+
+//go:noinline
+func xor_126_int8(a int8) int8 { return 126 ^ a }
+
+//go:noinline
+func xor_int8_127(a int8) int8 { return a ^ 127 }
+
+//go:noinline
+func xor_127_int8(a int8) int8 { return 127 ^ a }
+
+type test_uint64 struct {
+ fn func(uint64) uint64
+ fnname string
+ in uint64
+ want uint64
+}
+
+var tests_uint64 = []test_uint64{
+
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 0, want: 0},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 0, want: 0},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 1, want: 1},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 1, want: 1},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 0, want: 1},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 0, want: 1},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 1, want: 2},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 1, want: 2},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 18446744073709551615, want: 0},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 4294967296, want: 8589934592},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 4294967296, want: 8589934592},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 1, want: 0},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 4294967296, want: 4294967295},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 0, want: 0},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 0, want: 0},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 1, want: 1},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 0, want: 1},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 0, want: 18446744073709551615},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 1, want: 0},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 1, want: 0},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 4294967296, want: 18446744069414584321},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 4294967296, want: 4294967295},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 18446744073709551615, want: 2},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 0, want: 18446744069414584320},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 1, want: 4294967295},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 1, want: 18446744069414584321},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 9223372036854775808, want: 9223372032559808512},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 18446744073709551615, want: 4294967297},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 1, want: 9223372036854775807},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 4294967296, want: 9223372032559808512},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775809},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 0, want: 1},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 1, want: 2},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 4294967296, want: 4294967297},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 1, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 0, want: 0},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 1, want: 1},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 1, want: 1},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 4294967296, want: 4294967296},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 1, want: 4294967296},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 4294967296, want: 1},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 4294967296, want: 1},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 9223372036854775808, want: 2147483648},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 1, want: 9223372036854775808},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 4294967296, want: 2147483648},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 18446744073709551615, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 0, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 1, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 1, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 4294967296, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 0, want: 0},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 1, want: 1},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 1, want: 1},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 1, want: 4294967296},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 1, want: 4294967296},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584320},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584320},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 1, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 1, want: 9223372036854775808},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 1, want: 18446744073709551615},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 18446744073709551615, want: 1},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 0, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 0, want: 0},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 1, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 1, want: 1},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 0, want: 1},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 0, want: 0},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 1, want: 2},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 1, want: 2},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 4294967296, want: 8589934592},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 1, want: 8589934592},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 0, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 0, want: 0},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 1, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 1, want: 1},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 0, want: 1},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 0, want: 0},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 1, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 1, want: 0},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 4294967296, want: 2147483648},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 9223372036854775808, want: 4611686018427387904},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 1, want: 2147483648},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 1, want: 4611686018427387904},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 1, want: 9223372036854775807},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 0, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 1, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 4294967296, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 4294967296, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 1, want: 1},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 9223372036854775808, want: 4294967296},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 1, want: 1},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 1, want: 1},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 0, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 1, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 4294967296, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 0, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 1, want: 1},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 1, want: 1},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 4294967296, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 18446744073709551615, want: 1},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 1, want: 1},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 1, want: 1},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 0, want: 0},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 0, want: 0},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 1, want: 1},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 1, want: 1},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 0, want: 1},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 0, want: 1},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 1, want: 1},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 1, want: 1},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 1, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 4294967296, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 4294967296, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 9223372036854775808, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 9223372036854775808, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 0, want: 0},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 0, want: 0},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 1, want: 1},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 1, want: 1},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 0, want: 1},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 0, want: 1},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 1, want: 0},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 1, want: 0},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 1, want: 18446744073709551614},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 18446744073709551615, want: 0}}
+
+type test_uint64mul struct {
+ fn func(uint64) uint64
+ fnname string
+ in uint64
+ want uint64
+}
+
+var tests_uint64mul = []test_uint64{
+
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 3, want: 9},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 3, want: 9},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 5, want: 15},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 5, want: 15},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 7, want: 21},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 7, want: 21},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 9, want: 27},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 9, want: 27},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 10, want: 30},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 10, want: 30},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 11, want: 33},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 11, want: 33},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 13, want: 39},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 13, want: 39},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 19, want: 57},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 19, want: 57},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 21, want: 63},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 21, want: 63},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 25, want: 75},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 25, want: 75},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 27, want: 81},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 27, want: 81},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 37, want: 111},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 37, want: 111},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 41, want: 123},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 41, want: 123},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 45, want: 135},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 45, want: 135},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 73, want: 219},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 73, want: 219},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 81, want: 243},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 81, want: 243},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 3, want: 15},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 3, want: 15},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 5, want: 25},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 5, want: 25},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 7, want: 35},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 7, want: 35},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 9, want: 45},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 9, want: 45},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 10, want: 50},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 10, want: 50},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 11, want: 55},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 11, want: 55},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 13, want: 65},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 13, want: 65},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 19, want: 95},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 19, want: 95},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 21, want: 105},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 21, want: 105},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 25, want: 125},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 25, want: 125},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 27, want: 135},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 27, want: 135},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 37, want: 185},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 37, want: 185},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 41, want: 205},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 41, want: 205},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 45, want: 225},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 45, want: 225},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 73, want: 365},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 73, want: 365},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 81, want: 405},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 81, want: 405},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 3, want: 21},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 3, want: 21},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 5, want: 35},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 5, want: 35},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 7, want: 49},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 7, want: 49},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 9, want: 63},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 9, want: 63},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 10, want: 70},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 10, want: 70},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 11, want: 77},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 11, want: 77},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 13, want: 91},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 13, want: 91},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 19, want: 133},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 19, want: 133},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 21, want: 147},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 21, want: 147},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 25, want: 175},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 25, want: 175},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 27, want: 189},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 27, want: 189},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 37, want: 259},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 37, want: 259},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 41, want: 287},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 41, want: 287},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 45, want: 315},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 45, want: 315},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 73, want: 511},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 73, want: 511},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 81, want: 567},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 81, want: 567},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 3, want: 27},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 3, want: 27},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 5, want: 45},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 5, want: 45},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 7, want: 63},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 7, want: 63},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 9, want: 81},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 9, want: 81},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 10, want: 90},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 10, want: 90},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 11, want: 99},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 11, want: 99},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 13, want: 117},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 13, want: 117},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 19, want: 171},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 19, want: 171},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 21, want: 189},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 21, want: 189},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 25, want: 225},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 25, want: 225},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 27, want: 243},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 27, want: 243},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 37, want: 333},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 37, want: 333},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 41, want: 369},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 41, want: 369},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 45, want: 405},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 45, want: 405},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 73, want: 657},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 73, want: 657},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 81, want: 729},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 81, want: 729},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 3, want: 30},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 3, want: 30},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 5, want: 50},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 5, want: 50},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 7, want: 70},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 7, want: 70},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 9, want: 90},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 9, want: 90},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 10, want: 100},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 10, want: 100},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 11, want: 110},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 11, want: 110},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 13, want: 130},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 13, want: 130},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 19, want: 190},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 19, want: 190},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 21, want: 210},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 21, want: 210},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 25, want: 250},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 25, want: 250},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 27, want: 270},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 27, want: 270},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 37, want: 370},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 37, want: 370},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 41, want: 410},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 41, want: 410},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 45, want: 450},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 45, want: 450},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 73, want: 730},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 73, want: 730},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 81, want: 810},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 81, want: 810},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 3, want: 33},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 3, want: 33},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 5, want: 55},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 5, want: 55},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 7, want: 77},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 7, want: 77},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 9, want: 99},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 9, want: 99},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 10, want: 110},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 10, want: 110},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 11, want: 121},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 11, want: 121},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 13, want: 143},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 13, want: 143},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 19, want: 209},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 19, want: 209},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 21, want: 231},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 21, want: 231},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 25, want: 275},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 25, want: 275},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 27, want: 297},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 27, want: 297},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 37, want: 407},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 37, want: 407},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 41, want: 451},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 41, want: 451},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 45, want: 495},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 45, want: 495},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 73, want: 803},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 73, want: 803},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 81, want: 891},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 81, want: 891},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 3, want: 39},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 3, want: 39},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 5, want: 65},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 5, want: 65},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 7, want: 91},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 7, want: 91},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 9, want: 117},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 9, want: 117},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 10, want: 130},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 10, want: 130},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 11, want: 143},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 11, want: 143},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 13, want: 169},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 13, want: 169},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 19, want: 247},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 19, want: 247},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 21, want: 273},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 21, want: 273},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 25, want: 325},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 25, want: 325},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 27, want: 351},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 27, want: 351},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 37, want: 481},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 37, want: 481},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 41, want: 533},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 41, want: 533},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 45, want: 585},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 45, want: 585},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 73, want: 949},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 73, want: 949},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 81, want: 1053},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 81, want: 1053},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 3, want: 57},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 3, want: 57},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 5, want: 95},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 5, want: 95},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 7, want: 133},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 7, want: 133},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 9, want: 171},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 9, want: 171},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 10, want: 190},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 10, want: 190},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 11, want: 209},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 11, want: 209},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 13, want: 247},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 13, want: 247},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 19, want: 361},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 19, want: 361},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 21, want: 399},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 21, want: 399},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 25, want: 475},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 25, want: 475},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 27, want: 513},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 27, want: 513},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 37, want: 703},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 37, want: 703},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 41, want: 779},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 41, want: 779},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 45, want: 855},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 45, want: 855},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 73, want: 1387},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 73, want: 1387},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 81, want: 1539},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 81, want: 1539},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 3, want: 63},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 3, want: 63},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 5, want: 105},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 5, want: 105},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 7, want: 147},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 7, want: 147},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 9, want: 189},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 9, want: 189},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 10, want: 210},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 10, want: 210},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 11, want: 231},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 11, want: 231},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 13, want: 273},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 13, want: 273},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 19, want: 399},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 19, want: 399},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 21, want: 441},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 21, want: 441},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 25, want: 525},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 25, want: 525},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 27, want: 567},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 27, want: 567},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 37, want: 777},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 37, want: 777},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 41, want: 861},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 41, want: 861},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 45, want: 945},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 45, want: 945},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 73, want: 1533},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 73, want: 1533},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 81, want: 1701},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 81, want: 1701},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 3, want: 75},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 3, want: 75},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 5, want: 125},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 5, want: 125},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 7, want: 175},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 7, want: 175},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 9, want: 225},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 9, want: 225},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 10, want: 250},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 10, want: 250},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 11, want: 275},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 11, want: 275},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 13, want: 325},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 13, want: 325},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 19, want: 475},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 19, want: 475},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 21, want: 525},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 21, want: 525},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 25, want: 625},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 25, want: 625},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 27, want: 675},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 27, want: 675},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 37, want: 925},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 37, want: 925},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 41, want: 1025},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 41, want: 1025},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 45, want: 1125},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 45, want: 1125},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 73, want: 1825},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 73, want: 1825},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 81, want: 2025},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 81, want: 2025},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 3, want: 81},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 3, want: 81},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 5, want: 135},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 5, want: 135},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 7, want: 189},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 7, want: 189},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 9, want: 243},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 9, want: 243},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 10, want: 270},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 10, want: 270},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 11, want: 297},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 11, want: 297},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 13, want: 351},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 13, want: 351},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 19, want: 513},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 19, want: 513},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 21, want: 567},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 21, want: 567},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 25, want: 675},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 25, want: 675},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 27, want: 729},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 27, want: 729},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 37, want: 999},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 37, want: 999},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 41, want: 1107},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 41, want: 1107},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 45, want: 1215},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 45, want: 1215},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 73, want: 1971},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 73, want: 1971},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 81, want: 2187},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 81, want: 2187},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 3, want: 111},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 3, want: 111},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 5, want: 185},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 5, want: 185},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 7, want: 259},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 7, want: 259},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 9, want: 333},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 9, want: 333},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 10, want: 370},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 10, want: 370},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 11, want: 407},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 11, want: 407},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 13, want: 481},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 13, want: 481},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 19, want: 703},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 19, want: 703},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 21, want: 777},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 21, want: 777},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 25, want: 925},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 25, want: 925},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 27, want: 999},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 27, want: 999},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 37, want: 1369},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 37, want: 1369},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 41, want: 1517},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 41, want: 1517},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 45, want: 1665},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 45, want: 1665},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 73, want: 2701},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 73, want: 2701},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 81, want: 2997},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 81, want: 2997},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 3, want: 123},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 3, want: 123},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 5, want: 205},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 5, want: 205},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 7, want: 287},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 7, want: 287},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 9, want: 369},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 9, want: 369},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 10, want: 410},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 10, want: 410},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 11, want: 451},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 11, want: 451},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 13, want: 533},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 13, want: 533},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 19, want: 779},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 19, want: 779},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 21, want: 861},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 21, want: 861},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 25, want: 1025},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 25, want: 1025},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 27, want: 1107},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 27, want: 1107},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 37, want: 1517},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 37, want: 1517},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 41, want: 1681},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 41, want: 1681},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 45, want: 1845},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 45, want: 1845},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 73, want: 2993},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 73, want: 2993},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 81, want: 3321},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 81, want: 3321},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 3, want: 135},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 3, want: 135},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 5, want: 225},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 5, want: 225},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 7, want: 315},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 7, want: 315},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 9, want: 405},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 9, want: 405},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 10, want: 450},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 10, want: 450},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 11, want: 495},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 11, want: 495},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 13, want: 585},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 13, want: 585},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 19, want: 855},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 19, want: 855},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 21, want: 945},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 21, want: 945},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 25, want: 1125},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 25, want: 1125},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 27, want: 1215},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 27, want: 1215},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 37, want: 1665},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 37, want: 1665},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 41, want: 1845},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 41, want: 1845},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 45, want: 2025},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 45, want: 2025},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 73, want: 3285},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 73, want: 3285},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 81, want: 3645},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 81, want: 3645},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 3, want: 219},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 3, want: 219},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 5, want: 365},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 5, want: 365},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 7, want: 511},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 7, want: 511},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 9, want: 657},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 9, want: 657},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 10, want: 730},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 10, want: 730},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 11, want: 803},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 11, want: 803},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 13, want: 949},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 13, want: 949},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 19, want: 1387},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 19, want: 1387},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 21, want: 1533},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 21, want: 1533},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 25, want: 1825},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 25, want: 1825},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 27, want: 1971},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 27, want: 1971},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 37, want: 2701},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 37, want: 2701},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 41, want: 2993},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 41, want: 2993},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 45, want: 3285},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 45, want: 3285},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 73, want: 5329},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 73, want: 5329},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 81, want: 5913},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 81, want: 5913},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 3, want: 243},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 3, want: 243},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 5, want: 405},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 5, want: 405},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 7, want: 567},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 7, want: 567},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 9, want: 729},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 9, want: 729},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 10, want: 810},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 10, want: 810},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 11, want: 891},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 11, want: 891},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 13, want: 1053},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 13, want: 1053},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 19, want: 1539},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 19, want: 1539},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 21, want: 1701},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 21, want: 1701},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 25, want: 2025},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 25, want: 2025},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 27, want: 2187},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 27, want: 2187},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 37, want: 2997},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 37, want: 2997},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 41, want: 3321},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 41, want: 3321},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 45, want: 3645},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 45, want: 3645},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 73, want: 5913},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 73, want: 5913},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 81, want: 6561},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 81, want: 6561}}
+
+type test_int64 struct {
+ fn func(int64) int64
+ fnname string
+ in int64
+ want int64
+}
+
+var tests_int64 = []test_int64{
+
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775807, want: 2},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775807, want: 2},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 1, want: -9223372036854775806},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 1, want: -9223372036854775806},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -4294967296, want: -8589934592},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -1, want: -4294967297},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -1, want: -4294967297},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -4294967296, want: -4294967297},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -4294967296, want: -4294967297},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -1, want: -2},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -1, want: -2},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 1, want: 0},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 1, want: 0},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 4294967296, want: 4294967295},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 4294967296, want: 4294967295},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -1, want: -1},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -1, want: -1},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 0, want: 0},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 0, want: 0},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 1, want: 1},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 1, want: 1},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -1, want: 0},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -1, want: 0},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 0, want: 1},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 0, want: 1},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 1, want: 2},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 1, want: 2},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -1, want: 4294967295},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -1, want: 4294967295},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 4294967296, want: 8589934592},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 4294967296, want: 8589934592},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -4294967296, want: 9223372032559808510},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -4294967296, want: 9223372032559808510},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -1, want: 9223372036854775805},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -1, want: 9223372036854775805},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 4294967296, want: -9223372032559808514},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 4294967296, want: -9223372032559808514},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775806, want: -4},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775806, want: -4},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775807, want: -3},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775807, want: -3},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775806, want: -3},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775806, want: -3},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775807, want: -2},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -4294967296, want: -9223372032559808512},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 4294967296, want: 9223372032559808512},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775806, want: 2},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -4294967296, want: -9223372032559808511},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 4294967296, want: 9223372032559808513},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775806, want: 3},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775806, want: -3},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775807, want: 2},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -1, want: -4294967295},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -1, want: 4294967295},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 0, want: 4294967296},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 1, want: -4294967297},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 1, want: 4294967297},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 4294967296, want: 8589934592},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808514},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -4294967296, want: -4294967295},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 0, want: 1},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 1, want: -2},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 1, want: 2},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 4294967296, want: -4294967297},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 4294967296, want: 4294967297},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -1, want: 1},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -1, want: -1},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 0, want: 0},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 0, want: 0},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 1, want: -1},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 1, want: 1},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -4294967296, want: 4294967297},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -4294967296, want: -4294967297},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -1, want: 2},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -1, want: -2},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 0, want: 1},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 0, want: -1},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 1, want: 0},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 1, want: 0},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 4294967296, want: -4294967295},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 4294967296, want: 4294967295},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775806, want: -9223372036854775805},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -4294967296, want: 8589934592},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -1, want: 4294967297},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -1, want: -4294967297},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 0, want: -4294967296},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 1, want: 4294967295},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 1, want: -4294967295},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775806, want: -9223372032559808510},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775808, want: 2},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775807, want: -3},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775807, want: 3},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -4294967296, want: 9223372032559808514},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -1, want: -9223372036854775807},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 0, want: -9223372036854775806},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 1, want: 9223372036854775805},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 1, want: -9223372036854775805},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 4294967296, want: -9223372032559808510},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775807, want: -2},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775807, want: 2},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 1, want: -9223372036854775806},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -4294967296, want: 2147483648},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 1, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 4294967296, want: -2147483648},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -4294967296, want: 2147483647},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 1, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 4294967296, want: -2147483647},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775808, want: 2147483648},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775807, want: 2147483647},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -4294967296, want: 1},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -4294967296, want: 1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 1, want: -4294967296},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 1, want: 0},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 4294967296, want: -1},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 4294967296, want: -1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775806, want: -2147483647},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775807, want: -2147483647},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -4294967296, want: 4294967296},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -1, want: 1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -1, want: 1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 4294967296, want: -4294967296},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -1, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 1, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -4294967296, want: -4294967296},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -1, want: -1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -1, want: -1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 0, want: 0},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 1, want: 1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 1, want: 1},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 4294967296, want: 4294967296},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775808, want: -2147483648},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775807, want: -2147483647},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -4294967296, want: -1},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -4294967296, want: -1},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -1, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 1, want: 4294967296},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 1, want: 0},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 4294967296, want: 1},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 4294967296, want: 1},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775806, want: 2147483647},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775807, want: 2147483647},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775808, want: -1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -4294967296, want: -2147483647},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -4294967296, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -1, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 1, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 4294967296, want: 2147483647},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 4294967296, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -4294967296, want: -2147483647},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -4294967296, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -1, want: 0},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 1, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 4294967296, want: 2147483647},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -4294967296, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 1, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -1, want: 9223372036854775807},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -1, want: 4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 1, want: -4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 1, want: -4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775806, want: 8589934592},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775806, want: 8589934592},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -1, want: 1},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -1, want: 1},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775807, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -4294967296, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -1, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -1, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 0, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 1, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 1, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 4294967296, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775807, want: 0},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -1, want: -1},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -1, want: -1},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 0, want: 0},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 1, want: 1},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 1, want: 1},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -1, want: -4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 1, want: 4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 1, want: 4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775806, want: -8589934592},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775806, want: -8589934592},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -4294967296, want: 8589934592},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -4294967296, want: 8589934592},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -1, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 1, want: 9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 4294967296, want: -8589934592},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775806, want: 4},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775806, want: 4},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -1, want: -9223372036854775807},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 1, want: 9223372036854775807},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 1, want: 1},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 1, want: 1},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 4294967296, want: -4294967295},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 1, want: 1},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775806, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775806, want: 4294967294},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775807, want: 4294967295},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -4294967296, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -4294967296, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 1, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 4294967296, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 4294967296, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -1, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 1, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -4294967296, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -4294967296, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 0, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 1, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 4294967296, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 4294967296, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775808, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -1, want: -1},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 1, want: 1},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775806, want: 4294967294},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775807, want: 4294967295},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775808, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -4294967296, want: 4294967294},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -1, want: -1},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 1, want: 1},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 4294967296, want: 4294967294},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -1, want: -1},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 1, want: 1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 4294967296, want: 4294967295},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 1, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -1, want: -9223372036854775807},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 1, want: 1},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -1, want: -4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 1, want: 0},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808512},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808512},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808512},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808512},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -1, want: -1},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -1, want: -1},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 1, want: 1},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -4294967296, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -1, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -1, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 0, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 1, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 4294967296, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -4294967296, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -4294967296, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -1, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -1, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 0, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 1, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 4294967296, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -4294967296, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -1, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 1, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -1, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 1, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -1, want: 9223372036854775807},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 1, want: 1},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -1, want: -1},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -1, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -1, want: -1},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 4294967296, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -4294967296, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -4294967296, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -1, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 4294967296, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 4294967296, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -1, want: -1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 0, want: 0},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 0, want: 0},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 1, want: 1},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 1, want: 1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -1, want: -1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 0, want: 1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 0, want: 1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 1, want: 1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 1, want: 1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -1, want: -1},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -4294967296, want: -2},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -4294967296, want: -2},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -1, want: -1},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 4294967296, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 4294967296, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -4294967296, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -4294967296, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -1, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 4294967296, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 4294967296, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775807, want: -2},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -1, want: 4294967295},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -1, want: 4294967295},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 4294967296, want: -8589934592},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -4294967296, want: 4294967295},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 1, want: -2},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 1, want: -2},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 4294967296, want: -4294967297},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 4294967296, want: -4294967297},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -1, want: -1},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -1, want: -1},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 0, want: 0},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 0, want: 0},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 1, want: 1},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 1, want: 1},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -1, want: -2},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -1, want: -2},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 0, want: 1},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 0, want: 1},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 1, want: 0},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 1, want: 0},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -4294967296, want: -8589934592},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -1, want: -4294967297},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -1, want: -4294967297},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -1, want: -9223372036854775807},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775807, want: -2},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775807, want: -2},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 1, want: 9223372036854775806},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775806, want: 1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775807, want: 0}}
+
+type test_int64mul struct {
+ fn func(int64) int64
+ fnname string
+ in int64
+ want int64
+}
+
+var tests_int64mul = []test_int64{
+
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -9, want: 81},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -9, want: 81},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -5, want: 45},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -5, want: 45},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -3, want: 27},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -3, want: 27},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 3, want: -27},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 3, want: -27},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 5, want: -45},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 5, want: -45},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 7, want: -63},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 7, want: -63},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 9, want: -81},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 9, want: -81},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 10, want: -90},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 10, want: -90},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 11, want: -99},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 11, want: -99},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 13, want: -117},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 13, want: -117},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 19, want: -171},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 19, want: -171},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 21, want: -189},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 21, want: -189},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 25, want: -225},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 25, want: -225},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 27, want: -243},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 27, want: -243},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 37, want: -333},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 37, want: -333},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 41, want: -369},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 41, want: -369},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 45, want: -405},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 45, want: -405},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 73, want: -657},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 73, want: -657},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 81, want: -729},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 81, want: -729},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -9, want: 45},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -9, want: 45},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -5, want: 25},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -5, want: 25},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -3, want: 15},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -3, want: 15},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 3, want: -15},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 3, want: -15},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 5, want: -25},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 5, want: -25},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 7, want: -35},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 7, want: -35},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 9, want: -45},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 9, want: -45},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 10, want: -50},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 10, want: -50},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 11, want: -55},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 11, want: -55},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 13, want: -65},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 13, want: -65},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 19, want: -95},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 19, want: -95},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 21, want: -105},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 21, want: -105},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 25, want: -125},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 25, want: -125},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 27, want: -135},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 27, want: -135},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 37, want: -185},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 37, want: -185},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 41, want: -205},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 41, want: -205},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 45, want: -225},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 45, want: -225},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 73, want: -365},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 73, want: -365},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 81, want: -405},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 81, want: -405},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -9, want: 27},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -9, want: 27},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -5, want: 15},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -5, want: 15},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -3, want: 9},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -3, want: 9},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 3, want: -9},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 3, want: -9},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 5, want: -15},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 5, want: -15},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 7, want: -21},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 7, want: -21},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 9, want: -27},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 9, want: -27},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 10, want: -30},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 10, want: -30},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 11, want: -33},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 11, want: -33},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 13, want: -39},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 13, want: -39},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 19, want: -57},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 19, want: -57},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 21, want: -63},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 21, want: -63},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 25, want: -75},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 25, want: -75},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 27, want: -81},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 27, want: -81},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 37, want: -111},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 37, want: -111},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 41, want: -123},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 41, want: -123},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 45, want: -135},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 45, want: -135},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 73, want: -219},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 73, want: -219},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 81, want: -243},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 81, want: -243},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -9, want: -27},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -9, want: -27},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -5, want: -15},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -5, want: -15},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -3, want: -9},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -3, want: -9},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 3, want: 9},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 3, want: 9},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 5, want: 15},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 5, want: 15},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 7, want: 21},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 7, want: 21},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 9, want: 27},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 9, want: 27},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 10, want: 30},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 10, want: 30},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 11, want: 33},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 11, want: 33},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 13, want: 39},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 13, want: 39},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 19, want: 57},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 19, want: 57},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 21, want: 63},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 21, want: 63},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 25, want: 75},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 25, want: 75},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 27, want: 81},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 27, want: 81},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 37, want: 111},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 37, want: 111},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 41, want: 123},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 41, want: 123},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 45, want: 135},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 45, want: 135},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 73, want: 219},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 73, want: 219},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 81, want: 243},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 81, want: 243},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -9, want: -45},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -9, want: -45},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -5, want: -25},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -5, want: -25},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -3, want: -15},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -3, want: -15},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 3, want: 15},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 3, want: 15},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 5, want: 25},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 5, want: 25},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 7, want: 35},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 7, want: 35},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 9, want: 45},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 9, want: 45},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 10, want: 50},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 10, want: 50},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 11, want: 55},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 11, want: 55},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 13, want: 65},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 13, want: 65},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 19, want: 95},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 19, want: 95},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 21, want: 105},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 21, want: 105},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 25, want: 125},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 25, want: 125},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 27, want: 135},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 27, want: 135},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 37, want: 185},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 37, want: 185},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 41, want: 205},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 41, want: 205},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 45, want: 225},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 45, want: 225},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 73, want: 365},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 73, want: 365},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 81, want: 405},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 81, want: 405},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -9, want: -63},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -9, want: -63},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -5, want: -35},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -5, want: -35},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -3, want: -21},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -3, want: -21},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 3, want: 21},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 3, want: 21},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 5, want: 35},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 5, want: 35},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 7, want: 49},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 7, want: 49},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 9, want: 63},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 9, want: 63},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 10, want: 70},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 10, want: 70},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 11, want: 77},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 11, want: 77},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 13, want: 91},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 13, want: 91},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 19, want: 133},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 19, want: 133},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 21, want: 147},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 21, want: 147},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 25, want: 175},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 25, want: 175},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 27, want: 189},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 27, want: 189},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 37, want: 259},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 37, want: 259},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 41, want: 287},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 41, want: 287},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 45, want: 315},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 45, want: 315},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 73, want: 511},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 73, want: 511},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 81, want: 567},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 81, want: 567},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -9, want: -81},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -9, want: -81},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -5, want: -45},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -5, want: -45},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -3, want: -27},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -3, want: -27},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 3, want: 27},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 3, want: 27},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 5, want: 45},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 5, want: 45},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 7, want: 63},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 7, want: 63},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 9, want: 81},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 9, want: 81},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 10, want: 90},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 10, want: 90},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 11, want: 99},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 11, want: 99},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 13, want: 117},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 13, want: 117},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 19, want: 171},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 19, want: 171},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 21, want: 189},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 21, want: 189},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 25, want: 225},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 25, want: 225},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 27, want: 243},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 27, want: 243},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 37, want: 333},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 37, want: 333},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 41, want: 369},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 41, want: 369},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 45, want: 405},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 45, want: 405},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 73, want: 657},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 73, want: 657},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 81, want: 729},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 81, want: 729},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -9, want: -90},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -9, want: -90},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -5, want: -50},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -5, want: -50},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -3, want: -30},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -3, want: -30},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 3, want: 30},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 3, want: 30},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 5, want: 50},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 5, want: 50},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 7, want: 70},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 7, want: 70},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 9, want: 90},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 9, want: 90},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 10, want: 100},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 10, want: 100},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 11, want: 110},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 11, want: 110},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 13, want: 130},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 13, want: 130},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 19, want: 190},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 19, want: 190},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 21, want: 210},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 21, want: 210},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 25, want: 250},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 25, want: 250},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 27, want: 270},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 27, want: 270},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 37, want: 370},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 37, want: 370},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 41, want: 410},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 41, want: 410},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 45, want: 450},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 45, want: 450},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 73, want: 730},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 73, want: 730},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 81, want: 810},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 81, want: 810},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -9, want: -99},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -9, want: -99},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -5, want: -55},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -5, want: -55},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -3, want: -33},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -3, want: -33},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 3, want: 33},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 3, want: 33},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 5, want: 55},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 5, want: 55},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 7, want: 77},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 7, want: 77},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 9, want: 99},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 9, want: 99},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 10, want: 110},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 10, want: 110},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 11, want: 121},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 11, want: 121},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 13, want: 143},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 13, want: 143},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 19, want: 209},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 19, want: 209},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 21, want: 231},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 21, want: 231},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 25, want: 275},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 25, want: 275},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 27, want: 297},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 27, want: 297},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 37, want: 407},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 37, want: 407},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 41, want: 451},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 41, want: 451},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 45, want: 495},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 45, want: 495},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 73, want: 803},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 73, want: 803},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 81, want: 891},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 81, want: 891},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -9, want: -117},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -9, want: -117},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -5, want: -65},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -5, want: -65},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -3, want: -39},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -3, want: -39},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 3, want: 39},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 3, want: 39},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 5, want: 65},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 5, want: 65},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 7, want: 91},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 7, want: 91},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 9, want: 117},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 9, want: 117},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 10, want: 130},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 10, want: 130},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 11, want: 143},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 11, want: 143},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 13, want: 169},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 13, want: 169},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 19, want: 247},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 19, want: 247},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 21, want: 273},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 21, want: 273},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 25, want: 325},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 25, want: 325},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 27, want: 351},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 27, want: 351},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 37, want: 481},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 37, want: 481},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 41, want: 533},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 41, want: 533},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 45, want: 585},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 45, want: 585},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 73, want: 949},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 73, want: 949},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 81, want: 1053},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 81, want: 1053},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -9, want: -171},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -9, want: -171},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -5, want: -95},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -5, want: -95},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -3, want: -57},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -3, want: -57},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 3, want: 57},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 3, want: 57},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 5, want: 95},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 5, want: 95},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 7, want: 133},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 7, want: 133},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 9, want: 171},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 9, want: 171},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 10, want: 190},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 10, want: 190},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 11, want: 209},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 11, want: 209},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 13, want: 247},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 13, want: 247},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 19, want: 361},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 19, want: 361},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 21, want: 399},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 21, want: 399},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 25, want: 475},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 25, want: 475},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 27, want: 513},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 27, want: 513},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 37, want: 703},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 37, want: 703},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 41, want: 779},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 41, want: 779},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 45, want: 855},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 45, want: 855},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 73, want: 1387},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 73, want: 1387},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 81, want: 1539},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 81, want: 1539},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -9, want: -189},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -9, want: -189},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -5, want: -105},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -5, want: -105},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -3, want: -63},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -3, want: -63},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 3, want: 63},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 3, want: 63},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 5, want: 105},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 5, want: 105},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 7, want: 147},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 7, want: 147},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 9, want: 189},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 9, want: 189},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 10, want: 210},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 10, want: 210},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 11, want: 231},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 11, want: 231},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 13, want: 273},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 13, want: 273},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 19, want: 399},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 19, want: 399},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 21, want: 441},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 21, want: 441},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 25, want: 525},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 25, want: 525},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 27, want: 567},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 27, want: 567},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 37, want: 777},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 37, want: 777},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 41, want: 861},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 41, want: 861},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 45, want: 945},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 45, want: 945},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 73, want: 1533},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 73, want: 1533},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 81, want: 1701},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 81, want: 1701},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -9, want: -225},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -9, want: -225},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -5, want: -125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -5, want: -125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -3, want: -75},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -3, want: -75},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 3, want: 75},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 3, want: 75},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 5, want: 125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 5, want: 125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 7, want: 175},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 7, want: 175},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 9, want: 225},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 9, want: 225},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 10, want: 250},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 10, want: 250},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 11, want: 275},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 11, want: 275},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 13, want: 325},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 13, want: 325},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 19, want: 475},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 19, want: 475},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 21, want: 525},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 21, want: 525},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 25, want: 625},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 25, want: 625},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 27, want: 675},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 27, want: 675},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 37, want: 925},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 37, want: 925},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 41, want: 1025},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 41, want: 1025},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 45, want: 1125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 45, want: 1125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 73, want: 1825},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 73, want: 1825},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 81, want: 2025},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 81, want: 2025},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -9, want: -243},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -9, want: -243},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -5, want: -135},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -5, want: -135},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -3, want: -81},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -3, want: -81},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 3, want: 81},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 3, want: 81},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 5, want: 135},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 5, want: 135},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 7, want: 189},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 7, want: 189},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 9, want: 243},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 9, want: 243},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 10, want: 270},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 10, want: 270},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 11, want: 297},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 11, want: 297},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 13, want: 351},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 13, want: 351},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 19, want: 513},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 19, want: 513},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 21, want: 567},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 21, want: 567},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 25, want: 675},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 25, want: 675},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 27, want: 729},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 27, want: 729},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 37, want: 999},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 37, want: 999},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 41, want: 1107},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 41, want: 1107},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 45, want: 1215},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 45, want: 1215},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 73, want: 1971},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 73, want: 1971},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 81, want: 2187},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 81, want: 2187},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -9, want: -333},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -9, want: -333},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -5, want: -185},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -5, want: -185},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -3, want: -111},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -3, want: -111},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 3, want: 111},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 3, want: 111},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 5, want: 185},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 5, want: 185},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 7, want: 259},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 7, want: 259},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 9, want: 333},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 9, want: 333},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 10, want: 370},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 10, want: 370},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 11, want: 407},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 11, want: 407},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 13, want: 481},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 13, want: 481},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 19, want: 703},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 19, want: 703},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 21, want: 777},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 21, want: 777},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 25, want: 925},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 25, want: 925},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 27, want: 999},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 27, want: 999},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 37, want: 1369},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 37, want: 1369},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 41, want: 1517},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 41, want: 1517},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 45, want: 1665},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 45, want: 1665},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 73, want: 2701},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 73, want: 2701},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 81, want: 2997},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 81, want: 2997},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -9, want: -369},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -9, want: -369},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -5, want: -205},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -5, want: -205},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -3, want: -123},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -3, want: -123},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 3, want: 123},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 3, want: 123},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 5, want: 205},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 5, want: 205},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 7, want: 287},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 7, want: 287},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 9, want: 369},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 9, want: 369},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 10, want: 410},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 10, want: 410},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 11, want: 451},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 11, want: 451},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 13, want: 533},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 13, want: 533},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 19, want: 779},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 19, want: 779},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 21, want: 861},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 21, want: 861},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 25, want: 1025},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 25, want: 1025},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 27, want: 1107},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 27, want: 1107},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 37, want: 1517},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 37, want: 1517},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 41, want: 1681},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 41, want: 1681},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 45, want: 1845},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 45, want: 1845},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 73, want: 2993},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 73, want: 2993},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 81, want: 3321},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 81, want: 3321},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -9, want: -405},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -9, want: -405},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -5, want: -225},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -5, want: -225},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -3, want: -135},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -3, want: -135},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 3, want: 135},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 3, want: 135},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 5, want: 225},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 5, want: 225},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 7, want: 315},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 7, want: 315},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 9, want: 405},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 9, want: 405},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 10, want: 450},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 10, want: 450},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 11, want: 495},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 11, want: 495},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 13, want: 585},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 13, want: 585},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 19, want: 855},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 19, want: 855},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 21, want: 945},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 21, want: 945},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 25, want: 1125},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 25, want: 1125},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 27, want: 1215},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 27, want: 1215},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 37, want: 1665},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 37, want: 1665},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 41, want: 1845},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 41, want: 1845},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 45, want: 2025},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 45, want: 2025},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 73, want: 3285},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 73, want: 3285},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 81, want: 3645},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 81, want: 3645},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -9, want: -657},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -9, want: -657},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -5, want: -365},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -5, want: -365},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -3, want: -219},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -3, want: -219},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 3, want: 219},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 3, want: 219},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 5, want: 365},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 5, want: 365},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 7, want: 511},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 7, want: 511},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 9, want: 657},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 9, want: 657},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 10, want: 730},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 10, want: 730},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 11, want: 803},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 11, want: 803},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 13, want: 949},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 13, want: 949},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 19, want: 1387},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 19, want: 1387},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 21, want: 1533},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 21, want: 1533},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 25, want: 1825},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 25, want: 1825},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 27, want: 1971},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 27, want: 1971},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 37, want: 2701},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 37, want: 2701},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 41, want: 2993},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 41, want: 2993},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 45, want: 3285},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 45, want: 3285},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 73, want: 5329},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 73, want: 5329},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 81, want: 5913},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 81, want: 5913},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -9, want: -729},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -9, want: -729},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -5, want: -405},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -5, want: -405},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -3, want: -243},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -3, want: -243},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 3, want: 243},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 3, want: 243},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 5, want: 405},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 5, want: 405},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 7, want: 567},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 7, want: 567},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 9, want: 729},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 9, want: 729},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 10, want: 810},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 10, want: 810},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 11, want: 891},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 11, want: 891},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 13, want: 1053},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 13, want: 1053},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 19, want: 1539},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 19, want: 1539},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 21, want: 1701},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 21, want: 1701},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 25, want: 2025},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 25, want: 2025},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 27, want: 2187},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 27, want: 2187},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 37, want: 2997},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 37, want: 2997},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 41, want: 3321},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 41, want: 3321},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 45, want: 3645},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 45, want: 3645},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 73, want: 5913},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 73, want: 5913},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 81, want: 6561},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 81, want: 6561}}
+
+type test_uint32 struct {
+ fn func(uint32) uint32
+ fnname string
+ in uint32
+ want uint32
+}
+
+var tests_uint32 = []test_uint32{
+
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 0, want: 0},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 0, want: 0},
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 1, want: 1},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 1, want: 1},
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 0, want: 1},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 0, want: 1},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 1, want: 2},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 1, want: 2},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 4294967295, want: 0},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 1, want: 0},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 4294967295, want: 4294967294},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 4294967295, want: 4294967294},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 0, want: 0},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 0, want: 0},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 1, want: 1},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 0, want: 1},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 0, want: 4294967295},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 1, want: 0},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 1, want: 0},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 4294967295, want: 2},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 0, want: 1},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 1, want: 2},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 1, want: 0},
+ test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 0, want: 0},
+ test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 1, want: 1},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 1, want: 1},
+ test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 4294967295, want: 1},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 0, want: 0},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 1, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 1, want: 0},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 4294967295, want: 0},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 0, want: 0},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 1, want: 1},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 1, want: 1},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 1, want: 4294967295},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 4294967295, want: 1},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 0, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 0, want: 0},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 1, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 1, want: 1},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 0, want: 1},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 0, want: 0},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 1, want: 2},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 1, want: 2},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 0, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 0, want: 0},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 1, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 1, want: 1},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 0, want: 1},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 0, want: 0},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 1, want: 0},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 1, want: 0},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 4294967295, want: 2147483647},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 1, want: 2147483647},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 0, want: 0},
+ test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 1, want: 0},
+ test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 1, want: 1},
+ test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 0, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 1, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 1, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 4294967295, want: 0},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 0, want: 0},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 1, want: 1},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 1, want: 1},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 4294967295, want: 1},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 1, want: 1},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 1, want: 1},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 0, want: 0},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 0, want: 0},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 1, want: 1},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 1, want: 1},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 0, want: 1},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 0, want: 1},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 1, want: 1},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 1, want: 1},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 1, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 0, want: 0},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 0, want: 0},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 1, want: 1},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 1, want: 1},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 0, want: 1},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 0, want: 1},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 1, want: 0},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 1, want: 0},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 4294967295, want: 4294967294},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 1, want: 4294967294},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 4294967295, want: 0}}
+
+type test_uint32mul struct {
+ fn func(uint32) uint32
+ fnname string
+ in uint32
+ want uint32
+}
+
+var tests_uint32mul = []test_uint32{
+
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 3, want: 9},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 3, want: 9},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 5, want: 15},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 5, want: 15},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 7, want: 21},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 7, want: 21},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 9, want: 27},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 9, want: 27},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 10, want: 30},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 10, want: 30},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 11, want: 33},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 11, want: 33},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 13, want: 39},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 13, want: 39},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 19, want: 57},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 19, want: 57},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 21, want: 63},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 21, want: 63},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 25, want: 75},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 25, want: 75},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 27, want: 81},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 27, want: 81},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 37, want: 111},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 37, want: 111},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 41, want: 123},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 41, want: 123},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 45, want: 135},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 45, want: 135},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 73, want: 219},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 73, want: 219},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 81, want: 243},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 81, want: 243},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 3, want: 15},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 3, want: 15},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 5, want: 25},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 5, want: 25},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 7, want: 35},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 7, want: 35},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 9, want: 45},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 9, want: 45},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 10, want: 50},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 10, want: 50},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 11, want: 55},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 11, want: 55},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 13, want: 65},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 13, want: 65},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 19, want: 95},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 19, want: 95},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 21, want: 105},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 21, want: 105},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 25, want: 125},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 25, want: 125},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 27, want: 135},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 27, want: 135},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 37, want: 185},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 37, want: 185},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 41, want: 205},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 41, want: 205},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 45, want: 225},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 45, want: 225},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 73, want: 365},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 73, want: 365},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 81, want: 405},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 81, want: 405},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 3, want: 21},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 3, want: 21},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 5, want: 35},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 5, want: 35},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 7, want: 49},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 7, want: 49},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 9, want: 63},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 9, want: 63},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 10, want: 70},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 10, want: 70},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 11, want: 77},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 11, want: 77},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 13, want: 91},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 13, want: 91},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 19, want: 133},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 19, want: 133},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 21, want: 147},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 21, want: 147},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 25, want: 175},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 25, want: 175},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 27, want: 189},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 27, want: 189},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 37, want: 259},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 37, want: 259},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 41, want: 287},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 41, want: 287},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 45, want: 315},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 45, want: 315},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 73, want: 511},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 73, want: 511},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 81, want: 567},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 81, want: 567},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 3, want: 27},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 3, want: 27},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 5, want: 45},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 5, want: 45},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 7, want: 63},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 7, want: 63},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 9, want: 81},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 9, want: 81},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 10, want: 90},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 10, want: 90},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 11, want: 99},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 11, want: 99},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 13, want: 117},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 13, want: 117},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 19, want: 171},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 19, want: 171},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 21, want: 189},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 21, want: 189},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 25, want: 225},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 25, want: 225},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 27, want: 243},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 27, want: 243},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 37, want: 333},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 37, want: 333},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 41, want: 369},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 41, want: 369},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 45, want: 405},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 45, want: 405},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 73, want: 657},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 73, want: 657},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 81, want: 729},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 81, want: 729},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 3, want: 30},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 3, want: 30},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 5, want: 50},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 5, want: 50},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 7, want: 70},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 7, want: 70},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 9, want: 90},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 9, want: 90},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 10, want: 100},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 10, want: 100},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 11, want: 110},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 11, want: 110},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 13, want: 130},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 13, want: 130},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 19, want: 190},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 19, want: 190},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 21, want: 210},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 21, want: 210},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 25, want: 250},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 25, want: 250},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 27, want: 270},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 27, want: 270},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 37, want: 370},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 37, want: 370},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 41, want: 410},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 41, want: 410},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 45, want: 450},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 45, want: 450},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 73, want: 730},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 73, want: 730},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 81, want: 810},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 81, want: 810},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 3, want: 33},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 3, want: 33},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 5, want: 55},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 5, want: 55},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 7, want: 77},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 7, want: 77},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 9, want: 99},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 9, want: 99},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 10, want: 110},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 10, want: 110},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 11, want: 121},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 11, want: 121},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 13, want: 143},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 13, want: 143},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 19, want: 209},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 19, want: 209},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 21, want: 231},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 21, want: 231},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 25, want: 275},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 25, want: 275},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 27, want: 297},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 27, want: 297},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 37, want: 407},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 37, want: 407},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 41, want: 451},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 41, want: 451},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 45, want: 495},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 45, want: 495},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 73, want: 803},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 73, want: 803},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 81, want: 891},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 81, want: 891},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 3, want: 39},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 3, want: 39},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 5, want: 65},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 5, want: 65},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 7, want: 91},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 7, want: 91},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 9, want: 117},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 9, want: 117},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 10, want: 130},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 10, want: 130},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 11, want: 143},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 11, want: 143},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 13, want: 169},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 13, want: 169},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 19, want: 247},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 19, want: 247},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 21, want: 273},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 21, want: 273},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 25, want: 325},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 25, want: 325},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 27, want: 351},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 27, want: 351},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 37, want: 481},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 37, want: 481},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 41, want: 533},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 41, want: 533},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 45, want: 585},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 45, want: 585},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 73, want: 949},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 73, want: 949},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 81, want: 1053},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 81, want: 1053},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 3, want: 57},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 3, want: 57},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 5, want: 95},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 5, want: 95},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 7, want: 133},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 7, want: 133},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 9, want: 171},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 9, want: 171},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 10, want: 190},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 10, want: 190},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 11, want: 209},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 11, want: 209},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 13, want: 247},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 13, want: 247},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 19, want: 361},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 19, want: 361},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 21, want: 399},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 21, want: 399},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 25, want: 475},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 25, want: 475},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 27, want: 513},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 27, want: 513},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 37, want: 703},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 37, want: 703},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 41, want: 779},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 41, want: 779},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 45, want: 855},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 45, want: 855},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 73, want: 1387},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 73, want: 1387},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 81, want: 1539},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 81, want: 1539},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 3, want: 63},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 3, want: 63},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 5, want: 105},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 5, want: 105},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 7, want: 147},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 7, want: 147},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 9, want: 189},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 9, want: 189},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 10, want: 210},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 10, want: 210},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 11, want: 231},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 11, want: 231},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 13, want: 273},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 13, want: 273},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 19, want: 399},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 19, want: 399},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 21, want: 441},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 21, want: 441},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 25, want: 525},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 25, want: 525},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 27, want: 567},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 27, want: 567},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 37, want: 777},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 37, want: 777},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 41, want: 861},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 41, want: 861},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 45, want: 945},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 45, want: 945},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 73, want: 1533},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 73, want: 1533},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 81, want: 1701},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 81, want: 1701},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 3, want: 75},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 3, want: 75},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 5, want: 125},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 5, want: 125},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 7, want: 175},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 7, want: 175},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 9, want: 225},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 9, want: 225},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 10, want: 250},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 10, want: 250},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 11, want: 275},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 11, want: 275},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 13, want: 325},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 13, want: 325},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 19, want: 475},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 19, want: 475},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 21, want: 525},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 21, want: 525},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 25, want: 625},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 25, want: 625},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 27, want: 675},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 27, want: 675},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 37, want: 925},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 37, want: 925},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 41, want: 1025},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 41, want: 1025},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 45, want: 1125},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 45, want: 1125},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 73, want: 1825},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 73, want: 1825},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 81, want: 2025},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 81, want: 2025},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 3, want: 81},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 3, want: 81},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 5, want: 135},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 5, want: 135},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 7, want: 189},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 7, want: 189},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 9, want: 243},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 9, want: 243},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 10, want: 270},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 10, want: 270},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 11, want: 297},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 11, want: 297},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 13, want: 351},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 13, want: 351},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 19, want: 513},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 19, want: 513},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 21, want: 567},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 21, want: 567},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 25, want: 675},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 25, want: 675},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 27, want: 729},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 27, want: 729},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 37, want: 999},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 37, want: 999},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 41, want: 1107},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 41, want: 1107},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 45, want: 1215},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 45, want: 1215},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 73, want: 1971},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 73, want: 1971},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 81, want: 2187},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 81, want: 2187},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 3, want: 111},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 3, want: 111},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 5, want: 185},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 5, want: 185},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 7, want: 259},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 7, want: 259},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 9, want: 333},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 9, want: 333},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 10, want: 370},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 10, want: 370},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 11, want: 407},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 11, want: 407},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 13, want: 481},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 13, want: 481},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 19, want: 703},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 19, want: 703},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 21, want: 777},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 21, want: 777},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 25, want: 925},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 25, want: 925},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 27, want: 999},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 27, want: 999},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 37, want: 1369},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 37, want: 1369},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 41, want: 1517},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 41, want: 1517},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 45, want: 1665},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 45, want: 1665},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 73, want: 2701},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 73, want: 2701},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 81, want: 2997},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 81, want: 2997},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 3, want: 123},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 3, want: 123},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 5, want: 205},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 5, want: 205},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 7, want: 287},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 7, want: 287},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 9, want: 369},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 9, want: 369},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 10, want: 410},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 10, want: 410},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 11, want: 451},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 11, want: 451},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 13, want: 533},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 13, want: 533},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 19, want: 779},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 19, want: 779},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 21, want: 861},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 21, want: 861},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 25, want: 1025},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 25, want: 1025},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 27, want: 1107},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 27, want: 1107},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 37, want: 1517},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 37, want: 1517},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 41, want: 1681},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 41, want: 1681},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 45, want: 1845},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 45, want: 1845},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 73, want: 2993},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 73, want: 2993},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 81, want: 3321},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 81, want: 3321},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 3, want: 135},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 3, want: 135},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 5, want: 225},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 5, want: 225},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 7, want: 315},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 7, want: 315},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 9, want: 405},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 9, want: 405},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 10, want: 450},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 10, want: 450},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 11, want: 495},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 11, want: 495},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 13, want: 585},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 13, want: 585},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 19, want: 855},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 19, want: 855},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 21, want: 945},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 21, want: 945},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 25, want: 1125},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 25, want: 1125},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 27, want: 1215},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 27, want: 1215},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 37, want: 1665},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 37, want: 1665},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 41, want: 1845},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 41, want: 1845},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 45, want: 2025},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 45, want: 2025},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 73, want: 3285},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 73, want: 3285},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 81, want: 3645},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 81, want: 3645},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 3, want: 219},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 3, want: 219},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 5, want: 365},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 5, want: 365},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 7, want: 511},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 7, want: 511},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 9, want: 657},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 9, want: 657},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 10, want: 730},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 10, want: 730},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 11, want: 803},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 11, want: 803},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 13, want: 949},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 13, want: 949},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 19, want: 1387},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 19, want: 1387},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 21, want: 1533},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 21, want: 1533},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 25, want: 1825},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 25, want: 1825},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 27, want: 1971},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 27, want: 1971},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 37, want: 2701},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 37, want: 2701},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 41, want: 2993},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 41, want: 2993},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 45, want: 3285},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 45, want: 3285},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 73, want: 5329},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 73, want: 5329},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 81, want: 5913},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 81, want: 5913},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 3, want: 243},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 3, want: 243},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 5, want: 405},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 5, want: 405},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 7, want: 567},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 7, want: 567},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 9, want: 729},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 9, want: 729},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 10, want: 810},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 10, want: 810},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 11, want: 891},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 11, want: 891},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 13, want: 1053},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 13, want: 1053},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 19, want: 1539},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 19, want: 1539},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 21, want: 1701},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 21, want: 1701},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 25, want: 2025},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 25, want: 2025},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 27, want: 2187},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 27, want: 2187},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 37, want: 2997},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 37, want: 2997},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 41, want: 3321},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 41, want: 3321},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 45, want: 3645},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 45, want: 3645},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 73, want: 5913},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 73, want: 5913},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 81, want: 6561},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 81, want: 6561}}
+
+type test_int32 struct {
+ fn func(int32) int32
+ fnname string
+ in int32
+ want int32
+}
+
+var tests_int32 = []test_int32{
+
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -1, want: 2147483647},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483647, want: 2},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483647, want: 2},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -1, want: -2147483648},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 1, want: -2147483646},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 1, want: -2147483646},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 2147483647, want: 0},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483648, want: 2147483647},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483647, want: -2147483648},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -1, want: -2},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -1, want: -2},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 1, want: 0},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 1, want: 0},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 2147483647, want: 2147483646},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 2147483647, want: 2147483646},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -1, want: -1},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -1, want: -1},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 0, want: 0},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 0, want: 0},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 1, want: 1},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 1, want: 1},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483647, want: -2147483646},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483647, want: -2147483646},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -1, want: 0},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -1, want: 0},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 0, want: 1},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 0, want: 1},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 1, want: 2},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 1, want: 2},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 2147483647, want: -2147483648},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483647, want: 0},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -1, want: 2147483646},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -1, want: 2147483646},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 1, want: -2147483648},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 2147483647, want: -2},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 2147483647, want: -2},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483647, want: -1},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -1, want: -2147483647},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 1, want: 2147483647},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 2147483647, want: 1},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483648, want: -1},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -1, want: -2147483646},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -1, want: 2147483646},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 0, want: 2147483647},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 1, want: -2147483648},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 2147483647, want: 2},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 2147483647, want: -2},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483648, want: -2147483647},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483647, want: 2147483646},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483647, want: -2147483646},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 0, want: 1},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 1, want: -2},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 1, want: 2},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 2147483647, want: -2147483648},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483647, want: 2147483647},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -1, want: 1},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -1, want: -1},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 0, want: 0},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 0, want: 0},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 1, want: -1},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 1, want: 1},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 2147483647, want: -2147483647},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483648, want: 2147483647},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483647, want: -2147483648},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -1, want: 2},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -1, want: -2},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 0, want: 1},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 0, want: -1},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 1, want: 0},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 1, want: 0},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 2147483647, want: -2147483646},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 2147483647, want: 2147483646},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483648, want: 1},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483647, want: -2},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483647, want: 2},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -1, want: -2147483648},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 0, want: -2147483647},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 1, want: 2147483646},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 1, want: -2147483646},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 2147483647, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483648, want: 1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483648, want: 1},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483647, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -1, want: 0},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 1, want: -2147483648},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 1, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 2147483647, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483647, want: 1},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -1, want: 0},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 1, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483647, want: 2147483647},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -1, want: 1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -1, want: 1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 2147483647, want: -2147483647},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -1, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 1, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483648, want: -2147483648},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -1, want: -1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -1, want: -1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 0, want: 0},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 1, want: 1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 1, want: 1},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -1, want: 0},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 1, want: 0},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 2147483647, want: 1},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483647, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -1, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 1, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 1, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 2147483647, want: -2147483648},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483647, want: 1},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -1, want: 2147483647},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 1, want: -2147483647},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483647, want: 2147483647},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483647, want: 2147483647},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -1, want: 1},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -1, want: 1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 2147483647, want: -2147483647},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 2147483647, want: -2147483647},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483648, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483647, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -1, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -1, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 0, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 1, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 1, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 2147483647, want: 0},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -1, want: -1},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -1, want: -1},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 0, want: 0},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 1, want: 1},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 1, want: 1},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -1, want: -2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 1, want: 2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 2147483647, want: 1},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483647, want: -2147483647},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -1, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 1, want: 1},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 2147483647, want: 2147483647},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483648, want: -1},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -1, want: -1},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 1, want: 1},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 2147483647, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483648, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483648, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483647, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 1, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 2147483647, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -1, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 1, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483648, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483648, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483647, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483647, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 0, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 1, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 2147483647, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 2147483647, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483647, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -1, want: -1},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 1, want: 1},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 2147483647, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483647, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -1, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 1, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 1, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 2147483647, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 2147483647, want: 0},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -1, want: -2147483647},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 1, want: 1},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 2147483647, want: 1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -1, want: -1},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -1, want: -1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 1, want: 1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483648, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483647, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -1, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -1, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 0, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 1, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 1, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 2147483647, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483648, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483647, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483647, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -1, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -1, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 0, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 1, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 2147483647, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 2147483647, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483648, want: 0},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483647, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -1, want: 2147483647},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 1, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -1, want: -1},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -1, want: -1},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 1, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483648, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483648, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483647, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483647, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -1, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 2147483647, want: -1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -1, want: -1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 0, want: 0},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 0, want: 0},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 1, want: 1},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 1, want: 1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -1, want: -1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 0, want: 1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 0, want: 1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 1, want: 1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 1, want: 1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -1, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 1, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -1, want: 2147483647},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -1, want: 2147483646},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -1, want: 2147483646},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 1, want: -2147483648},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 2147483647, want: -2},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 2147483647, want: -2},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483648, want: 2147483647},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483647, want: 2147483646},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483647, want: 2147483646},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 1, want: -2},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 1, want: -2},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 2147483647, want: -2147483648},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -1, want: -1},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -1, want: -1},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 0, want: 0},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 0, want: 0},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 1, want: 1},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 1, want: 1},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483647, want: -2147483648},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -1, want: -2},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -1, want: -2},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 0, want: 1},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 0, want: 1},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 1, want: 0},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 1, want: 0},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 2147483647, want: 2147483646},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 2147483647, want: 2147483646},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483647, want: -2},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483647, want: -2},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -1, want: -2147483648},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 1, want: 2147483646},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 1, want: 2147483646},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 2147483647, want: 0}}
+
+type test_int32mul struct {
+ fn func(int32) int32
+ fnname string
+ in int32
+ want int32
+}
+
+var tests_int32mul = []test_int32{
+
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -9, want: 81},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -9, want: 81},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -5, want: 45},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -5, want: 45},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -3, want: 27},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -3, want: 27},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 3, want: -27},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 3, want: -27},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 5, want: -45},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 5, want: -45},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 7, want: -63},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 7, want: -63},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 9, want: -81},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 9, want: -81},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 10, want: -90},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 10, want: -90},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 11, want: -99},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 11, want: -99},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 13, want: -117},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 13, want: -117},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 19, want: -171},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 19, want: -171},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 21, want: -189},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 21, want: -189},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 25, want: -225},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 25, want: -225},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 27, want: -243},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 27, want: -243},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 37, want: -333},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 37, want: -333},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 41, want: -369},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 41, want: -369},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 45, want: -405},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 45, want: -405},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 73, want: -657},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 73, want: -657},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 81, want: -729},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 81, want: -729},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -9, want: 45},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -9, want: 45},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -5, want: 25},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -5, want: 25},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -3, want: 15},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -3, want: 15},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 3, want: -15},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 3, want: -15},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 5, want: -25},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 5, want: -25},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 7, want: -35},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 7, want: -35},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 9, want: -45},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 9, want: -45},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 10, want: -50},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 10, want: -50},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 11, want: -55},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 11, want: -55},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 13, want: -65},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 13, want: -65},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 19, want: -95},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 19, want: -95},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 21, want: -105},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 21, want: -105},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 25, want: -125},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 25, want: -125},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 27, want: -135},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 27, want: -135},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 37, want: -185},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 37, want: -185},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 41, want: -205},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 41, want: -205},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 45, want: -225},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 45, want: -225},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 73, want: -365},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 73, want: -365},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 81, want: -405},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 81, want: -405},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -9, want: 27},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -9, want: 27},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -5, want: 15},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -5, want: 15},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -3, want: 9},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -3, want: 9},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 3, want: -9},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 3, want: -9},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 5, want: -15},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 5, want: -15},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 7, want: -21},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 7, want: -21},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 9, want: -27},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 9, want: -27},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 10, want: -30},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 10, want: -30},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 11, want: -33},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 11, want: -33},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 13, want: -39},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 13, want: -39},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 19, want: -57},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 19, want: -57},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 21, want: -63},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 21, want: -63},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 25, want: -75},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 25, want: -75},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 27, want: -81},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 27, want: -81},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 37, want: -111},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 37, want: -111},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 41, want: -123},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 41, want: -123},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 45, want: -135},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 45, want: -135},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 73, want: -219},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 73, want: -219},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 81, want: -243},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 81, want: -243},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -9, want: -27},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -9, want: -27},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -5, want: -15},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -5, want: -15},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -3, want: -9},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -3, want: -9},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 3, want: 9},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 3, want: 9},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 5, want: 15},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 5, want: 15},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 7, want: 21},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 7, want: 21},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 9, want: 27},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 9, want: 27},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 10, want: 30},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 10, want: 30},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 11, want: 33},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 11, want: 33},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 13, want: 39},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 13, want: 39},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 19, want: 57},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 19, want: 57},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 21, want: 63},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 21, want: 63},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 25, want: 75},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 25, want: 75},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 27, want: 81},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 27, want: 81},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 37, want: 111},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 37, want: 111},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 41, want: 123},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 41, want: 123},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 45, want: 135},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 45, want: 135},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 73, want: 219},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 73, want: 219},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 81, want: 243},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 81, want: 243},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -9, want: -45},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -9, want: -45},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -5, want: -25},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -5, want: -25},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -3, want: -15},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -3, want: -15},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 3, want: 15},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 3, want: 15},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 5, want: 25},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 5, want: 25},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 7, want: 35},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 7, want: 35},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 9, want: 45},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 9, want: 45},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 10, want: 50},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 10, want: 50},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 11, want: 55},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 11, want: 55},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 13, want: 65},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 13, want: 65},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 19, want: 95},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 19, want: 95},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 21, want: 105},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 21, want: 105},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 25, want: 125},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 25, want: 125},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 27, want: 135},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 27, want: 135},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 37, want: 185},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 37, want: 185},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 41, want: 205},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 41, want: 205},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 45, want: 225},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 45, want: 225},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 73, want: 365},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 73, want: 365},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 81, want: 405},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 81, want: 405},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -9, want: -63},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -9, want: -63},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -5, want: -35},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -5, want: -35},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -3, want: -21},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -3, want: -21},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 3, want: 21},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 3, want: 21},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 5, want: 35},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 5, want: 35},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 7, want: 49},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 7, want: 49},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 9, want: 63},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 9, want: 63},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 10, want: 70},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 10, want: 70},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 11, want: 77},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 11, want: 77},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 13, want: 91},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 13, want: 91},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 19, want: 133},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 19, want: 133},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 21, want: 147},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 21, want: 147},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 25, want: 175},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 25, want: 175},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 27, want: 189},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 27, want: 189},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 37, want: 259},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 37, want: 259},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 41, want: 287},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 41, want: 287},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 45, want: 315},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 45, want: 315},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 73, want: 511},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 73, want: 511},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 81, want: 567},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 81, want: 567},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -9, want: -81},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -9, want: -81},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -5, want: -45},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -5, want: -45},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -3, want: -27},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -3, want: -27},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 3, want: 27},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 3, want: 27},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 5, want: 45},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 5, want: 45},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 7, want: 63},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 7, want: 63},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 9, want: 81},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 9, want: 81},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 10, want: 90},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 10, want: 90},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 11, want: 99},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 11, want: 99},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 13, want: 117},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 13, want: 117},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 19, want: 171},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 19, want: 171},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 21, want: 189},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 21, want: 189},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 25, want: 225},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 25, want: 225},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 27, want: 243},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 27, want: 243},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 37, want: 333},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 37, want: 333},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 41, want: 369},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 41, want: 369},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 45, want: 405},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 45, want: 405},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 73, want: 657},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 73, want: 657},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 81, want: 729},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 81, want: 729},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -9, want: -90},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -9, want: -90},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -5, want: -50},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -5, want: -50},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -3, want: -30},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -3, want: -30},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 3, want: 30},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 3, want: 30},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 5, want: 50},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 5, want: 50},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 7, want: 70},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 7, want: 70},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 9, want: 90},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 9, want: 90},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 10, want: 100},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 10, want: 100},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 11, want: 110},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 11, want: 110},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 13, want: 130},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 13, want: 130},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 19, want: 190},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 19, want: 190},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 21, want: 210},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 21, want: 210},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 25, want: 250},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 25, want: 250},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 27, want: 270},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 27, want: 270},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 37, want: 370},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 37, want: 370},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 41, want: 410},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 41, want: 410},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 45, want: 450},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 45, want: 450},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 73, want: 730},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 73, want: 730},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 81, want: 810},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 81, want: 810},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -9, want: -99},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -9, want: -99},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -5, want: -55},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -5, want: -55},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -3, want: -33},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -3, want: -33},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 3, want: 33},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 3, want: 33},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 5, want: 55},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 5, want: 55},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 7, want: 77},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 7, want: 77},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 9, want: 99},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 9, want: 99},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 10, want: 110},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 10, want: 110},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 11, want: 121},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 11, want: 121},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 13, want: 143},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 13, want: 143},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 19, want: 209},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 19, want: 209},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 21, want: 231},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 21, want: 231},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 25, want: 275},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 25, want: 275},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 27, want: 297},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 27, want: 297},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 37, want: 407},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 37, want: 407},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 41, want: 451},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 41, want: 451},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 45, want: 495},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 45, want: 495},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 73, want: 803},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 73, want: 803},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 81, want: 891},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 81, want: 891},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -9, want: -117},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -9, want: -117},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -5, want: -65},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -5, want: -65},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -3, want: -39},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -3, want: -39},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 3, want: 39},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 3, want: 39},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 5, want: 65},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 5, want: 65},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 7, want: 91},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 7, want: 91},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 9, want: 117},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 9, want: 117},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 10, want: 130},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 10, want: 130},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 11, want: 143},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 11, want: 143},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 13, want: 169},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 13, want: 169},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 19, want: 247},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 19, want: 247},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 21, want: 273},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 21, want: 273},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 25, want: 325},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 25, want: 325},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 27, want: 351},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 27, want: 351},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 37, want: 481},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 37, want: 481},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 41, want: 533},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 41, want: 533},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 45, want: 585},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 45, want: 585},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 73, want: 949},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 73, want: 949},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 81, want: 1053},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 81, want: 1053},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -9, want: -171},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -9, want: -171},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -5, want: -95},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -5, want: -95},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -3, want: -57},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -3, want: -57},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 3, want: 57},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 3, want: 57},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 5, want: 95},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 5, want: 95},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 7, want: 133},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 7, want: 133},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 9, want: 171},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 9, want: 171},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 10, want: 190},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 10, want: 190},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 11, want: 209},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 11, want: 209},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 13, want: 247},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 13, want: 247},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 19, want: 361},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 19, want: 361},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 21, want: 399},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 21, want: 399},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 25, want: 475},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 25, want: 475},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 27, want: 513},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 27, want: 513},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 37, want: 703},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 37, want: 703},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 41, want: 779},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 41, want: 779},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 45, want: 855},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 45, want: 855},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 73, want: 1387},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 73, want: 1387},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 81, want: 1539},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 81, want: 1539},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -9, want: -189},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -9, want: -189},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -5, want: -105},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -5, want: -105},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -3, want: -63},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -3, want: -63},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 3, want: 63},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 3, want: 63},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 5, want: 105},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 5, want: 105},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 7, want: 147},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 7, want: 147},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 9, want: 189},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 9, want: 189},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 10, want: 210},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 10, want: 210},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 11, want: 231},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 11, want: 231},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 13, want: 273},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 13, want: 273},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 19, want: 399},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 19, want: 399},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 21, want: 441},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 21, want: 441},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 25, want: 525},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 25, want: 525},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 27, want: 567},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 27, want: 567},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 37, want: 777},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 37, want: 777},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 41, want: 861},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 41, want: 861},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 45, want: 945},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 45, want: 945},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 73, want: 1533},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 73, want: 1533},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 81, want: 1701},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 81, want: 1701},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -9, want: -225},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -9, want: -225},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -5, want: -125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -5, want: -125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -3, want: -75},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -3, want: -75},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 3, want: 75},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 3, want: 75},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 5, want: 125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 5, want: 125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 7, want: 175},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 7, want: 175},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 9, want: 225},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 9, want: 225},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 10, want: 250},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 10, want: 250},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 11, want: 275},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 11, want: 275},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 13, want: 325},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 13, want: 325},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 19, want: 475},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 19, want: 475},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 21, want: 525},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 21, want: 525},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 25, want: 625},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 25, want: 625},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 27, want: 675},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 27, want: 675},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 37, want: 925},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 37, want: 925},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 41, want: 1025},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 41, want: 1025},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 45, want: 1125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 45, want: 1125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 73, want: 1825},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 73, want: 1825},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 81, want: 2025},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 81, want: 2025},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -9, want: -243},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -9, want: -243},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -5, want: -135},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -5, want: -135},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -3, want: -81},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -3, want: -81},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 3, want: 81},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 3, want: 81},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 5, want: 135},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 5, want: 135},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 7, want: 189},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 7, want: 189},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 9, want: 243},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 9, want: 243},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 10, want: 270},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 10, want: 270},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 11, want: 297},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 11, want: 297},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 13, want: 351},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 13, want: 351},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 19, want: 513},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 19, want: 513},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 21, want: 567},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 21, want: 567},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 25, want: 675},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 25, want: 675},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 27, want: 729},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 27, want: 729},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 37, want: 999},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 37, want: 999},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 41, want: 1107},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 41, want: 1107},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 45, want: 1215},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 45, want: 1215},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 73, want: 1971},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 73, want: 1971},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 81, want: 2187},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 81, want: 2187},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -9, want: -333},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -9, want: -333},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -5, want: -185},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -5, want: -185},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -3, want: -111},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -3, want: -111},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 3, want: 111},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 3, want: 111},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 5, want: 185},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 5, want: 185},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 7, want: 259},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 7, want: 259},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 9, want: 333},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 9, want: 333},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 10, want: 370},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 10, want: 370},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 11, want: 407},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 11, want: 407},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 13, want: 481},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 13, want: 481},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 19, want: 703},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 19, want: 703},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 21, want: 777},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 21, want: 777},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 25, want: 925},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 25, want: 925},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 27, want: 999},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 27, want: 999},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 37, want: 1369},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 37, want: 1369},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 41, want: 1517},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 41, want: 1517},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 45, want: 1665},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 45, want: 1665},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 73, want: 2701},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 73, want: 2701},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 81, want: 2997},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 81, want: 2997},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -9, want: -369},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -9, want: -369},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -5, want: -205},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -5, want: -205},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -3, want: -123},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -3, want: -123},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 3, want: 123},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 3, want: 123},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 5, want: 205},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 5, want: 205},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 7, want: 287},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 7, want: 287},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 9, want: 369},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 9, want: 369},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 10, want: 410},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 10, want: 410},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 11, want: 451},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 11, want: 451},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 13, want: 533},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 13, want: 533},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 19, want: 779},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 19, want: 779},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 21, want: 861},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 21, want: 861},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 25, want: 1025},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 25, want: 1025},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 27, want: 1107},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 27, want: 1107},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 37, want: 1517},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 37, want: 1517},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 41, want: 1681},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 41, want: 1681},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 45, want: 1845},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 45, want: 1845},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 73, want: 2993},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 73, want: 2993},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 81, want: 3321},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 81, want: 3321},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -9, want: -405},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -9, want: -405},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -5, want: -225},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -5, want: -225},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -3, want: -135},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -3, want: -135},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 3, want: 135},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 3, want: 135},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 5, want: 225},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 5, want: 225},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 7, want: 315},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 7, want: 315},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 9, want: 405},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 9, want: 405},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 10, want: 450},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 10, want: 450},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 11, want: 495},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 11, want: 495},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 13, want: 585},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 13, want: 585},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 19, want: 855},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 19, want: 855},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 21, want: 945},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 21, want: 945},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 25, want: 1125},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 25, want: 1125},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 27, want: 1215},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 27, want: 1215},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 37, want: 1665},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 37, want: 1665},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 41, want: 1845},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 41, want: 1845},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 45, want: 2025},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 45, want: 2025},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 73, want: 3285},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 73, want: 3285},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 81, want: 3645},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 81, want: 3645},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -9, want: -657},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -9, want: -657},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -5, want: -365},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -5, want: -365},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -3, want: -219},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -3, want: -219},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 3, want: 219},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 3, want: 219},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 5, want: 365},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 5, want: 365},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 7, want: 511},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 7, want: 511},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 9, want: 657},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 9, want: 657},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 10, want: 730},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 10, want: 730},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 11, want: 803},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 11, want: 803},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 13, want: 949},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 13, want: 949},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 19, want: 1387},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 19, want: 1387},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 21, want: 1533},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 21, want: 1533},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 25, want: 1825},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 25, want: 1825},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 27, want: 1971},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 27, want: 1971},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 37, want: 2701},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 37, want: 2701},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 41, want: 2993},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 41, want: 2993},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 45, want: 3285},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 45, want: 3285},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 73, want: 5329},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 73, want: 5329},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 81, want: 5913},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 81, want: 5913},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -9, want: -729},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -9, want: -729},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -5, want: -405},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -5, want: -405},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -3, want: -243},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -3, want: -243},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 3, want: 243},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 3, want: 243},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 5, want: 405},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 5, want: 405},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 7, want: 567},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 7, want: 567},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 9, want: 729},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 9, want: 729},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 10, want: 810},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 10, want: 810},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 11, want: 891},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 11, want: 891},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 13, want: 1053},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 13, want: 1053},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 19, want: 1539},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 19, want: 1539},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 21, want: 1701},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 21, want: 1701},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 25, want: 2025},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 25, want: 2025},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 27, want: 2187},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 27, want: 2187},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 37, want: 2997},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 37, want: 2997},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 41, want: 3321},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 41, want: 3321},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 45, want: 3645},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 45, want: 3645},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 73, want: 5913},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 73, want: 5913},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 81, want: 6561},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 81, want: 6561}}
+
+type test_uint16 struct {
+ fn func(uint16) uint16
+ fnname string
+ in uint16
+ want uint16
+}
+
+var tests_uint16 = []test_uint16{
+
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 0, want: 0},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 0, want: 0},
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 1, want: 1},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 1, want: 1},
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 0, want: 1},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 0, want: 1},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 1, want: 2},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 1, want: 2},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 65535, want: 0},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 1, want: 0},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 65535, want: 65534},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 65535, want: 65534},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 0, want: 0},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 0, want: 0},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 1, want: 65535},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 1, want: 1},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 65535, want: 1},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 0, want: 1},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 0, want: 65535},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 1, want: 0},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 1, want: 0},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 65535, want: 2},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 0, want: 1},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 1, want: 2},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 1, want: 0},
+ test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 0, want: 0},
+ test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 1, want: 1},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 1, want: 1},
+ test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 65535, want: 1},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 65535, want: 1},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 0, want: 0},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 1, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 1, want: 0},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 65535, want: 0},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 0, want: 0},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 1, want: 1},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 1, want: 1},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 65535, want: 65535},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 1, want: 65535},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 65535, want: 1},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 65535, want: 1},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 0, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 0, want: 0},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 1, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 1, want: 1},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 0, want: 1},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 0, want: 0},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 1, want: 2},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 1, want: 2},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 0, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 0, want: 0},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 1, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 1, want: 1},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 0, want: 1},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 0, want: 0},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 1, want: 0},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 1, want: 0},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 65535, want: 32767},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 1, want: 32767},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 0, want: 0},
+ test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 1, want: 0},
+ test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 65535, want: 1},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 1, want: 1},
+ test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 0, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 1, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 1, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 65535, want: 0},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 0, want: 0},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 1, want: 1},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 1, want: 1},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 65535, want: 1},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 65535, want: 1},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 1, want: 1},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 1, want: 1},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 65535, want: 65535},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 65535, want: 65535},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 0, want: 0},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 0, want: 0},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 1, want: 1},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 1, want: 1},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 0, want: 1},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 0, want: 1},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 1, want: 1},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 1, want: 1},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 1, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 65535, want: 65535},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 0, want: 0},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 0, want: 0},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 1, want: 1},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 1, want: 1},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 0, want: 1},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 0, want: 1},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 1, want: 0},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 1, want: 0},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 65535, want: 65534},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 1, want: 65534},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 65535, want: 0}}
+
+type test_int16 struct {
+ fn func(int16) int16
+ fnname string
+ in int16
+ want int16
+}
+
+var tests_int16 = []test_int16{
+
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -1, want: 32767},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32767, want: 2},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32767, want: 2},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -1, want: -32768},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -1, want: -32768},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 1, want: -32766},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 1, want: -32766},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32767, want: 0},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32767, want: 0},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32768, want: 32767},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32767, want: -32768},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32767, want: -32768},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -1, want: -2},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -1, want: -2},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 1, want: 0},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 1, want: 0},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32766, want: 32765},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32766, want: 32765},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32767, want: 32766},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32767, want: 32766},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32768, want: -32768},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32768, want: -32768},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32767, want: -32767},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32767, want: -32767},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -1, want: -1},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -1, want: -1},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 0, want: 0},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 0, want: 0},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 1, want: 1},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 1, want: 1},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32766, want: 32766},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32766, want: 32766},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32767, want: 32767},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32767, want: 32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32768, want: -32767},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32768, want: -32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32767, want: -32766},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32767, want: -32766},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -1, want: 0},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -1, want: 0},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 0, want: 1},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 0, want: 1},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 1, want: 2},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 1, want: 2},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32766, want: 32767},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32766, want: 32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32767, want: -32768},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32767, want: -32768},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32768, want: -2},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32768, want: -2},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32767, want: -1},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32767, want: -1},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -1, want: 32765},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -1, want: 32765},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 0, want: 32766},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 0, want: 32766},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 1, want: 32767},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 1, want: 32767},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32766, want: -4},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32766, want: -4},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32767, want: -3},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32767, want: -3},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32768, want: -1},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32768, want: -1},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32767, want: 0},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32767, want: 0},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -1, want: 32766},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -1, want: 32766},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 0, want: 32767},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 0, want: 32767},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 1, want: -32768},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 1, want: -32768},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32766, want: -3},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32766, want: -3},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32767, want: -2},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32767, want: -2},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32767, want: -1},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -1, want: -32767},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 1, want: 32767},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32766, want: 2},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32767, want: 1},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32768, want: -1},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -1, want: -32766},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -1, want: 32766},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 0, want: 32767},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 1, want: -32768},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 1, want: -32768},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32766, want: 3},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32766, want: -3},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32767, want: 2},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32767, want: -2},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32768, want: -32767},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32767, want: 32766},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32767, want: -32766},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 0, want: 1},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 1, want: -2},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 1, want: 2},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32766, want: -32767},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32766, want: 32767},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32767, want: -32768},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32767, want: -32768},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32768, want: -32768},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32768, want: -32768},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32767, want: 32767},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32767, want: -32767},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -1, want: 1},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -1, want: -1},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 0, want: 0},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 0, want: 0},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 1, want: -1},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 1, want: 1},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32766, want: -32766},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32766, want: 32766},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32767, want: -32767},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32767, want: 32767},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32768, want: -32767},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32768, want: 32767},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32767, want: -32768},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32767, want: -32768},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -1, want: 2},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -1, want: -2},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 0, want: 1},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 0, want: -1},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 1, want: 0},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 1, want: 0},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32766, want: -32765},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32766, want: 32765},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32767, want: -32766},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32767, want: 32766},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32768, want: -2},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32768, want: 2},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32767, want: -3},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32767, want: 3},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -1, want: 32767},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -1, want: -32767},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 0, want: 32766},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 0, want: -32766},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 1, want: 32765},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 1, want: -32765},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32766, want: 0},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32766, want: 0},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32767, want: -1},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32767, want: 1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32768, want: -1},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32768, want: 1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32767, want: -2},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32767, want: 2},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -1, want: -32768},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -1, want: -32768},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 0, want: 32767},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 0, want: -32767},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 1, want: 32766},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 1, want: -32766},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32766, want: 1},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32766, want: -1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32767, want: 0},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32767, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32768, want: 1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32768, want: 1},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32767, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -1, want: 0},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 1, want: -32768},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 1, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32766, want: -1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32767, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32767, want: 1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32767, want: 1},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -1, want: 32767},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -1, want: 0},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 1, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32766, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32767, want: 32767},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -1, want: 1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -1, want: 1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32766, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32766, want: -32766},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32767, want: -32767},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32768, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32767, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -1, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 1, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32766, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32767, want: 0},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32768, want: -32768},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32767, want: -32767},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -1, want: -1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -1, want: -1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 0, want: 0},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 1, want: 1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 1, want: 1},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32766, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32766, want: 32766},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32767, want: 32767},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32768, want: -1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32767, want: -1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -1, want: -32766},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -1, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 0, want: 0},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 1, want: 32766},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 1, want: 0},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32766, want: 1},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32766, want: 1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32767, want: 1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32768, want: -1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32767, want: -1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32767, want: -1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -1, want: -32767},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -1, want: 0},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 0, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 1, want: 32767},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 1, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32766, want: 1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32766, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32767, want: 1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32767, want: 1},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32767, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32767, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -1, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 1, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 1, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32766, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32767, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32767, want: -32768},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32768, want: -32768},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32767, want: 1},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32767, want: 1},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -1, want: 32767},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -1, want: 32767},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 1, want: -32767},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32766, want: 32766},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32766, want: 32766},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32767, want: 32767},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32767, want: 32767},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -1, want: 1},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -1, want: 1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32766, want: -32766},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32766, want: -32766},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32767, want: -32767},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32767, want: -32767},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32768, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32767, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32767, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -1, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -1, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 0, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 1, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 1, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32766, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32766, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32767, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32767, want: 0},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32768, want: -32768},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32767, want: -32767},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32767, want: -32767},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -1, want: -1},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -1, want: -1},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 0, want: 0},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 1, want: 1},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 1, want: 1},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32766, want: 32766},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32766, want: 32766},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32767, want: 32767},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32767, want: 32767},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32768, want: 0},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32767, want: 32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32767, want: 32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -1, want: -32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -1, want: -32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 0, want: 0},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 1, want: 32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 1, want: 32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32766, want: 4},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32766, want: 4},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32767, want: -32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32767, want: -32766},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32768, want: -32768},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32767, want: -1},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32767, want: -1},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -1, want: -32767},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -1, want: -32767},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 0, want: 0},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 1, want: 32767},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 1, want: 32767},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32766, want: -32766},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32766, want: -32766},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32767, want: 1},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32767, want: 1},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32767, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32767, want: -32767},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -1, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 1, want: 1},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32766, want: 32766},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32767, want: 32767},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32768, want: -32767},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32768, want: -1},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -1, want: -1},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 1, want: 1},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32766, want: 32766},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32767, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32767, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32768, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32768, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32767, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32767, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 1, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32766, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32766, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32767, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32767, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32768, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32767, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -1, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 1, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32766, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32767, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32768, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32768, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32767, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32767, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 0, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 1, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32766, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32766, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32767, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32767, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32768, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32768, want: -2},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32767, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32767, want: -1},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -1, want: -1},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 0, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 1, want: 1},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32766, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32766, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32767, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32767, want: 1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32768, want: 32767},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32768, want: -1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32767, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32767, want: 0},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -1, want: -1},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 0, want: 0},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 1, want: 1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32766, want: 1},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32766, want: 32766},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32767, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32767, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32768, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32767, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32767, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -1, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 1, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32767, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32767, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32768, want: -32768},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32767, want: -32767},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32767, want: -32767},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -1, want: -32767},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -1, want: -32767},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 1, want: 1},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32766, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32767, want: 1},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32767, want: 1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32767, want: -32767},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32767, want: -32767},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -1, want: -1},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -1, want: -1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 1, want: 1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32766, want: 32766},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32767, want: 32767},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32767, want: 32767},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32768, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32767, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32767, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -1, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -1, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 0, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 1, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32766, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32767, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32767, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32768, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32767, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32767, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -1, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -1, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 0, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 1, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32766, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32767, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32767, want: 1},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32768, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32767, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32767, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -1, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -1, want: 32766},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 0, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 1, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32766, want: 32766},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32767, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32767, want: 32766},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32768, want: 0},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32767, want: 1},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32767, want: 1},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -1, want: 32767},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -1, want: 32767},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 0, want: 0},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 1, want: 1},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32766, want: 32766},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32767, want: 32767},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32767, want: 32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32768, want: -32768},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32768, want: -32768},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32767, want: -32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -1, want: -1},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32768, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32768, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32767, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -1, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 1, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32768, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32768, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32767, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -1, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32766, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32766, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32767, want: -1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32768, want: -32768},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32768, want: -32768},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32767, want: -32767},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -1, want: -1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 0, want: 0},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 0, want: 0},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 1, want: 1},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 1, want: 1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32766, want: 32766},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32766, want: 32766},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32767, want: 32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32768, want: -32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32768, want: -32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32767, want: -32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -1, want: -1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 0, want: 1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 0, want: 1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 1, want: 1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 1, want: 1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32766, want: 32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32766, want: 32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32767, want: 32767},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32768, want: -2},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32768, want: -2},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32767, want: -1},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -1, want: -1},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 0, want: 32766},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 0, want: 32766},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 1, want: 32767},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 1, want: 32767},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32766, want: 32766},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32766, want: 32766},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32767, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32768, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32768, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32767, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -1, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 0, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 0, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 1, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 1, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32766, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32766, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32767, want: 32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -1, want: 32767},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -1, want: 32766},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -1, want: 32766},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 1, want: -32768},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 1, want: -32768},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32767, want: -2},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32767, want: -2},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32768, want: 32767},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32767, want: 32766},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32767, want: 32766},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 1, want: -2},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 1, want: -2},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32766, want: -32767},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32766, want: -32767},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32767, want: -32768},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32767, want: -32768},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32768, want: -32768},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32768, want: -32768},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32767, want: -32767},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32767, want: -32767},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -1, want: -1},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -1, want: -1},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 0, want: 0},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 0, want: 0},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 1, want: 1},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 1, want: 1},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32766, want: 32766},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32766, want: 32766},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32767, want: 32767},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32767, want: 32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32768, want: -32767},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32768, want: -32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32767, want: -32768},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32767, want: -32768},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -1, want: -2},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -1, want: -2},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 0, want: 1},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 0, want: 1},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 1, want: 0},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 1, want: 0},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32766, want: 32767},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32766, want: 32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32767, want: 32766},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32767, want: 32766},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32768, want: -2},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32768, want: -2},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32767, want: -1},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32767, want: -1},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -1, want: -32767},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -1, want: -32767},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 0, want: 32766},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 0, want: 32766},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 1, want: 32767},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 1, want: 32767},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32766, want: 0},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32766, want: 0},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32767, want: 1},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32767, want: 1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32768, want: -1},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32768, want: -1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32767, want: -2},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32767, want: -2},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -1, want: -32768},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -1, want: -32768},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 0, want: 32767},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 0, want: 32767},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 1, want: 32766},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 1, want: 32766},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32766, want: 1},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32766, want: 1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32767, want: 0},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32767, want: 0}}
+
+type test_uint8 struct {
+ fn func(uint8) uint8
+ fnname string
+ in uint8
+ want uint8
+}
+
+var tests_uint8 = []test_uint8{
+
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 0, want: 0},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 0, want: 0},
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 1, want: 1},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 1, want: 1},
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 255, want: 255},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 255, want: 255},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 0, want: 1},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 0, want: 1},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 1, want: 2},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 1, want: 2},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 255, want: 0},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 255, want: 0},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 0, want: 255},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 0, want: 255},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 1, want: 0},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 1, want: 0},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 255, want: 254},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 255, want: 254},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 0, want: 0},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 0, want: 0},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 1, want: 255},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 1, want: 1},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 255, want: 1},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 255, want: 255},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 0, want: 1},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 0, want: 255},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 1, want: 0},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 1, want: 0},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 255, want: 2},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 255, want: 254},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 0, want: 255},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 0, want: 1},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 1, want: 254},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 1, want: 2},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 255, want: 0},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 255, want: 0},
+ test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 1, want: 0},
+ test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 255, want: 0},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 0, want: 0},
+ test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 1, want: 1},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 1, want: 1},
+ test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 255, want: 0},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 255, want: 255},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 0, want: 0},
+ test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 1, want: 255},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 1, want: 0},
+ test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 255, want: 1},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 255, want: 1},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 0, want: 0},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 1, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 1, want: 0},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 255, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 255, want: 0},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 0, want: 0},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 1, want: 1},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 1, want: 1},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 255, want: 255},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 255, want: 255},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 0, want: 0},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 1, want: 255},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 1, want: 255},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 255, want: 1},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 255, want: 1},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 0, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 0, want: 0},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 1, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 1, want: 1},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 255, want: 255},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 0, want: 1},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 0, want: 0},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 1, want: 2},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 1, want: 2},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 255, want: 254},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 0, want: 255},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 0, want: 0},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 1, want: 254},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 1, want: 0},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 255, want: 0},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 0, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 0, want: 0},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 1, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 1, want: 1},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 255, want: 255},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 0, want: 1},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 0, want: 0},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 1, want: 0},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 1, want: 0},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 255, want: 127},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 0, want: 255},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 0, want: 0},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 1, want: 127},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 1, want: 0},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 255, want: 0},
+ test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 0, want: 0},
+ test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 1, want: 0},
+ test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 255, want: 1},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 0, want: 0},
+ test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 1, want: 1},
+ test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 255, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 0, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 1, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 1, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 255, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 255, want: 0},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 0, want: 0},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 1, want: 1},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 1, want: 1},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 255, want: 1},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 255, want: 1},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 0, want: 0},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 1, want: 1},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 1, want: 1},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 255, want: 255},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 255, want: 255},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 0, want: 0},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 0, want: 0},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 1, want: 1},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 1, want: 1},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 255, want: 255},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 0, want: 1},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 0, want: 1},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 1, want: 1},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 1, want: 1},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 255, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 0, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 0, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 1, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 1, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 255, want: 255},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 0, want: 0},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 0, want: 0},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 1, want: 1},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 1, want: 1},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 255, want: 255},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 255, want: 255},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 0, want: 1},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 0, want: 1},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 1, want: 0},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 1, want: 0},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 255, want: 254},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 255, want: 254},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 0, want: 255},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 0, want: 255},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 1, want: 254},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 1, want: 254},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 255, want: 0},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 255, want: 0}}
+
+type test_int8 struct {
+ fn func(int8) int8
+ fnname string
+ in int8
+ want int8
+}
+
+var tests_int8 = []test_int8{
+
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -1, want: 127},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -127, want: 2},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -127, want: 2},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -1, want: -128},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -1, want: -128},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 1, want: -126},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 1, want: -126},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 127, want: 0},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 127, want: 0},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -128, want: 127},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -127, want: -128},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -127, want: -128},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -1, want: -2},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -1, want: -2},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 1, want: 0},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 1, want: 0},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 126, want: 125},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 126, want: 125},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 127, want: 126},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 127, want: 126},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -128, want: -128},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -128, want: -128},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -127, want: -127},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -127, want: -127},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -1, want: -1},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -1, want: -1},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 0, want: 0},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 0, want: 0},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 1, want: 1},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 1, want: 1},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 126, want: 126},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 126, want: 126},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 127, want: 127},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 127, want: 127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -128, want: -127},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -128, want: -127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -127, want: -126},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -127, want: -126},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -1, want: 0},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -1, want: 0},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 0, want: 1},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 0, want: 1},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 1, want: 2},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 1, want: 2},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 126, want: 127},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 126, want: 127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 127, want: -128},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 127, want: -128},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -128, want: -2},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -128, want: -2},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -127, want: -1},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -127, want: -1},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -1, want: 125},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -1, want: 125},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 0, want: 126},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 0, want: 126},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 1, want: 127},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 1, want: 127},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 126, want: -4},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 126, want: -4},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 127, want: -3},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 127, want: -3},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -128, want: -1},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -128, want: -1},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -127, want: 0},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -127, want: 0},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -1, want: 126},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -1, want: 126},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 0, want: 127},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 0, want: 127},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 1, want: -128},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 1, want: -128},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 126, want: -3},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 126, want: -3},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 127, want: -2},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 127, want: -2},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -127, want: -1},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -1, want: -127},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 1, want: 127},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 126, want: 2},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 127, want: 1},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -128, want: -1},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -1, want: -126},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -1, want: 126},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 0, want: 127},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 1, want: -128},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 1, want: -128},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 126, want: 3},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 126, want: -3},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 127, want: 2},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 127, want: -2},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -128, want: -127},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -127, want: 126},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -127, want: -126},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 0, want: 1},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 1, want: -2},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 1, want: 2},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 126, want: -127},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 126, want: 127},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 127, want: -128},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 127, want: -128},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -128, want: -128},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -128, want: -128},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -127, want: 127},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -127, want: -127},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -1, want: 1},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -1, want: -1},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 0, want: 0},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 0, want: 0},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 1, want: -1},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 1, want: 1},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 126, want: -126},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 126, want: 126},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 127, want: -127},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 127, want: 127},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -128, want: -127},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -128, want: 127},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -127, want: -128},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -127, want: -128},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -1, want: 2},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -1, want: -2},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 0, want: 1},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 0, want: -1},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 1, want: 0},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 1, want: 0},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 126, want: -125},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 126, want: 125},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 127, want: -126},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 127, want: 126},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -128, want: -2},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -128, want: 2},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -127, want: -3},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -127, want: 3},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -1, want: 127},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -1, want: -127},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 0, want: 126},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 0, want: -126},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 1, want: 125},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 1, want: -125},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 126, want: 0},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 126, want: 0},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 127, want: -1},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 127, want: 1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -128, want: -1},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -128, want: 1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -127, want: -2},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -127, want: 2},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -1, want: -128},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -1, want: -128},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 0, want: 127},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 0, want: -127},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 1, want: 126},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 1, want: -126},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 126, want: 1},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 126, want: -1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 127, want: 0},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 127, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -128, want: 1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -128, want: 1},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -127, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -1, want: 0},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 1, want: -128},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 1, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 126, want: -1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 127, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -127, want: 1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -127, want: 1},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -1, want: 127},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -1, want: 0},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 1, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 126, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -127, want: 127},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -1, want: 1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -1, want: 1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 126, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 126, want: -126},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 127, want: -127},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -128, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -127, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -1, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 1, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 126, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 127, want: 0},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -128, want: -128},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -127, want: -127},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -1, want: -1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -1, want: -1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 0, want: 0},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 1, want: 1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 1, want: 1},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 126, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 126, want: 126},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 127, want: 127},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -128, want: -1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -127, want: -1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -1, want: -126},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -1, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 0, want: 0},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 1, want: 126},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 1, want: 0},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 126, want: 1},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 126, want: 1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 127, want: 1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -128, want: -1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -127, want: -1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -127, want: -1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -1, want: -127},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -1, want: 0},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 0, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 1, want: 127},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 1, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 126, want: 1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 126, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 127, want: 1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 127, want: 1},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -127, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -127, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -1, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 1, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 1, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 126, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 127, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 127, want: -128},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -128, want: -128},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -127, want: 1},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -127, want: 1},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -1, want: 127},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -1, want: 127},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 1, want: -127},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 126, want: 126},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 126, want: 126},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -127, want: 127},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -127, want: 127},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -1, want: 1},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -1, want: 1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 126, want: -126},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 126, want: -126},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 127, want: -127},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 127, want: -127},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -128, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -127, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -127, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -1, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -1, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 0, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 1, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 1, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 126, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 126, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 127, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 127, want: 0},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -128, want: -128},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -127, want: -127},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -127, want: -127},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -1, want: -1},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -1, want: -1},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 0, want: 0},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 1, want: 1},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 1, want: 1},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 126, want: 126},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 126, want: 126},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 127, want: 127},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 127, want: 127},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -128, want: 0},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -127, want: 126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -127, want: 126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -1, want: -126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -1, want: -126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 0, want: 0},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 1, want: 126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 1, want: 126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 126, want: 4},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 126, want: 4},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 127, want: -126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 127, want: -126},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -128, want: -128},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -127, want: -1},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -127, want: -1},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -1, want: -127},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -1, want: -127},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 0, want: 0},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 1, want: 127},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 1, want: 127},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 126, want: -126},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 126, want: -126},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 127, want: 1},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 127, want: 1},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -127, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -127, want: -127},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -1, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 1, want: 1},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 126, want: 126},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 127, want: 127},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -128, want: -127},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -128, want: -1},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -1, want: -1},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 1, want: 1},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 126, want: 126},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 127, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 127, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -128, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -128, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -127, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -127, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 1, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 126, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 126, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 127, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 127, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -128, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -127, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -1, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 1, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 126, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 127, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -128, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -128, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -127, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -127, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 0, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 1, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 126, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 126, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 127, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 127, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -128, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -128, want: -2},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -127, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -127, want: -1},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -1, want: -1},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 0, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 1, want: 1},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 126, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 126, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 127, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 127, want: 1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -128, want: 127},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -128, want: -1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -127, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -127, want: 0},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -1, want: -1},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 0, want: 0},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 1, want: 1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 126, want: 1},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 126, want: 126},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 127, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 127, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -128, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -127, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -127, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -1, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 1, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 127, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 127, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -128, want: -128},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -127, want: -127},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -127, want: -127},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -1, want: -127},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -1, want: -127},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 1, want: 1},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 126, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 127, want: 1},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 127, want: 1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -127, want: -127},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -127, want: -127},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -1, want: -1},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -1, want: -1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 1, want: 1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 126, want: 126},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 127, want: 127},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 127, want: 127},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -128, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -127, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -127, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -1, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -1, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 0, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 1, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 126, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 127, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 127, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -128, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -127, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -127, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -1, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -1, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 0, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 1, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 126, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 127, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 127, want: 1},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -128, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -127, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -127, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -1, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -1, want: 126},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 0, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 1, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 126, want: 126},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 127, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 127, want: 126},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -128, want: 0},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -127, want: 1},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -127, want: 1},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -1, want: 127},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -1, want: 127},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 0, want: 0},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 1, want: 1},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 126, want: 126},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 127, want: 127},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 127, want: 127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -128, want: -128},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -128, want: -128},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -127, want: -127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -1, want: -1},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -128, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -128, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -127, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -1, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 1, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -128, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -128, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -127, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -1, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 126, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 126, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 127, want: -1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -128, want: -128},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -128, want: -128},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -127, want: -127},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -1, want: -1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 0, want: 0},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 0, want: 0},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 1, want: 1},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 1, want: 1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 126, want: 126},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 126, want: 126},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 127, want: 127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -128, want: -127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -128, want: -127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -127, want: -127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -1, want: -1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 0, want: 1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 0, want: 1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 1, want: 1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 1, want: 1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 126, want: 127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 126, want: 127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 127, want: 127},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -128, want: -2},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -128, want: -2},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -127, want: -1},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -1, want: -1},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 0, want: 126},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 0, want: 126},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 1, want: 127},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 1, want: 127},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 126, want: 126},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 126, want: 126},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 127, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -128, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -128, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -127, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -1, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 0, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 0, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 1, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 1, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 126, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 126, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 127, want: 127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -1, want: 127},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -1, want: 126},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -1, want: 126},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 1, want: -128},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 1, want: -128},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 127, want: -2},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 127, want: -2},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -128, want: 127},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -127, want: 126},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -127, want: 126},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 1, want: -2},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 1, want: -2},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 126, want: -127},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 126, want: -127},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 127, want: -128},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 127, want: -128},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -128, want: -128},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -128, want: -128},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -127, want: -127},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -127, want: -127},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -1, want: -1},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -1, want: -1},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 0, want: 0},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 0, want: 0},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 1, want: 1},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 1, want: 1},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 126, want: 126},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 126, want: 126},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 127, want: 127},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 127, want: 127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -128, want: -127},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -128, want: -127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -127, want: -128},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -127, want: -128},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -1, want: -2},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -1, want: -2},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 0, want: 1},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 0, want: 1},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 1, want: 0},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 1, want: 0},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 126, want: 127},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 126, want: 127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 127, want: 126},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 127, want: 126},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -128, want: -2},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -128, want: -2},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -127, want: -1},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -127, want: -1},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -1, want: -127},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -1, want: -127},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 0, want: 126},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 0, want: 126},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 1, want: 127},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 1, want: 127},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 126, want: 0},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 126, want: 0},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 127, want: 1},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 127, want: 1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -128, want: -1},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -128, want: -1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -127, want: -2},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -127, want: -2},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -1, want: -128},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -1, want: -128},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 0, want: 127},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 0, want: 127},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 1, want: 126},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 1, want: 126},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 126, want: 1},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 126, want: 1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 127, want: 0},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 127, want: 0}}
+
+// TestArithmeticConst tests results for arithmetic operations against constants.
+func TestArithmeticConst(t *testing.T) {
+ for _, test := range tests_uint64 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint64mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int64 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int64mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint32 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint32mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int32 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int32mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint16 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int16 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint8 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int8 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+
+}
diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go
new file mode 100644
index 0000000..cd7b5bc
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/arith_test.go
@@ -0,0 +1,1564 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests arithmetic expressions
+
+package main
+
+import (
+ "math"
+ "runtime"
+ "testing"
+)
+
+const (
+ y = 0x0fffFFFF
+)
+
+var (
+ g8 int8
+ g16 int16
+ g32 int32
+ g64 int64
+)
+
+//go:noinline
+func lshNop1(x uint64) uint64 {
+ // two outer shifts should be removed
+ return (((x << 5) >> 2) << 2)
+}
+
+//go:noinline
+func lshNop2(x uint64) uint64 {
+ return (((x << 5) >> 2) << 3)
+}
+
+//go:noinline
+func lshNop3(x uint64) uint64 {
+ return (((x << 5) >> 2) << 6)
+}
+
+//go:noinline
+func lshNotNop(x uint64) uint64 {
+ // outer shift can't be removed
+ return (((x << 5) >> 2) << 1)
+}
+
+//go:noinline
+func rshNop1(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 2)
+}
+
+//go:noinline
+func rshNop2(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 3)
+}
+
+//go:noinline
+func rshNop3(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 6)
+}
+
+//go:noinline
+func rshNotNop(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 1)
+}
+
+func testShiftRemoval(t *testing.T) {
+ allSet := ^uint64(0)
+ if want, got := uint64(0x7ffffffffffffff), rshNop1(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop1 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x3ffffffffffffff), rshNop2(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop2 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x7fffffffffffff), rshNop3(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop3 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffe), rshNotNop(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNotNop failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffe0), lshNop1(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop1 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffc0), lshNop2(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop2 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xfffffffffffffe00), lshNop3(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop3 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x7ffffffffffffff0), lshNotNop(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNotNop failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func parseLE64(b []byte) uint64 {
+ // skip the first two bytes, and parse the remaining 8 as a uint64
+ return uint64(b[2]) | uint64(b[3])<<8 | uint64(b[4])<<16 | uint64(b[5])<<24 |
+ uint64(b[6])<<32 | uint64(b[7])<<40 | uint64(b[8])<<48 | uint64(b[9])<<56
+}
+
+//go:noinline
+func parseLE32(b []byte) uint32 {
+ return uint32(b[2]) | uint32(b[3])<<8 | uint32(b[4])<<16 | uint32(b[5])<<24
+}
+
+//go:noinline
+func parseLE16(b []byte) uint16 {
+ return uint16(b[2]) | uint16(b[3])<<8
+}
+
+// testLoadCombine tests for issue #14694 where load combining didn't respect the pointer offset.
+func testLoadCombine(t *testing.T) {
+ testData := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}
+ if want, got := uint64(0x0908070605040302), parseLE64(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0x05040302), parseLE32(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint16(0x0302), parseLE16(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+}
+
+var loadSymData = [...]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+
+func testLoadSymCombine(t *testing.T) {
+ w2 := uint16(0x0201)
+ g2 := uint16(loadSymData[0]) | uint16(loadSymData[1])<<8
+ if g2 != w2 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w2, g2)
+ }
+ w4 := uint32(0x04030201)
+ g4 := uint32(loadSymData[0]) | uint32(loadSymData[1])<<8 |
+ uint32(loadSymData[2])<<16 | uint32(loadSymData[3])<<24
+ if g4 != w4 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w4, g4)
+ }
+ w8 := uint64(0x0807060504030201)
+ g8 := uint64(loadSymData[0]) | uint64(loadSymData[1])<<8 |
+ uint64(loadSymData[2])<<16 | uint64(loadSymData[3])<<24 |
+ uint64(loadSymData[4])<<32 | uint64(loadSymData[5])<<40 |
+ uint64(loadSymData[6])<<48 | uint64(loadSymData[7])<<56
+ if g8 != w8 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w8, g8)
+ }
+}
+
+//go:noinline
+func invalidAdd_ssa(x uint32) uint32 {
+ return x + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y
+}
+
+//go:noinline
+func invalidSub_ssa(x uint32) uint32 {
+ return x - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y
+}
+
+//go:noinline
+func invalidMul_ssa(x uint32) uint32 {
+ return x * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y
+}
+
+// testLargeConst tests a situation where larger than 32 bit consts were passed to ADDL
+// causing an invalid instruction error.
+func testLargeConst(t *testing.T) {
+ if want, got := uint32(268435440), invalidAdd_ssa(1); want != got {
+ t.Errorf("testLargeConst add failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(4026531858), invalidSub_ssa(1); want != got {
+ t.Errorf("testLargeConst sub failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(268435455), invalidMul_ssa(1); want != got {
+ t.Errorf("testLargeConst mul failed, wanted %d got %d", want, got)
+ }
+}
+
+// testArithRshConst ensures that "const >> const" right shifts correctly perform
+// sign extension on the lhs constant
+func testArithRshConst(t *testing.T) {
+ wantu := uint64(0x4000000000000000)
+ if got := arithRshuConst_ssa(); got != wantu {
+ t.Errorf("arithRshuConst failed, wanted %d got %d", wantu, got)
+ }
+
+ wants := int64(-0x4000000000000000)
+ if got := arithRshConst_ssa(); got != wants {
+ t.Errorf("arithRshConst failed, wanted %d got %d", wants, got)
+ }
+}
+
+//go:noinline
+func arithRshuConst_ssa() uint64 {
+ y := uint64(0x8000000000000001)
+ z := uint64(1)
+ return uint64(y >> z)
+}
+
+//go:noinline
+func arithRshConst_ssa() int64 {
+ y := int64(-0x8000000000000000)
+ z := uint64(1)
+ return int64(y >> z)
+}
+
+//go:noinline
+func arithConstShift_ssa(x int64) int64 {
+ return x >> 100
+}
+
+// testArithConstShift tests that right shift by large constants preserve
+// the sign of the input.
+func testArithConstShift(t *testing.T) {
+ want := int64(-1)
+ if got := arithConstShift_ssa(-1); want != got {
+ t.Errorf("arithConstShift_ssa(-1) failed, wanted %d got %d", want, got)
+ }
+ want = 0
+ if got := arithConstShift_ssa(1); want != got {
+ t.Errorf("arithConstShift_ssa(1) failed, wanted %d got %d", want, got)
+ }
+}
+
+// overflowConstShift_ssa verifies that constant folding for shift
+// doesn't wrap (i.e. x << MAX_INT << 1 doesn't get folded to x << 0).
+//
+//go:noinline
+func overflowConstShift64_ssa(x int64) int64 {
+ return x << uint64(0xffffffffffffffff) << uint64(1)
+}
+
+//go:noinline
+func overflowConstShift32_ssa(x int64) int32 {
+ return int32(x) << uint32(0xffffffff) << uint32(1)
+}
+
+//go:noinline
+func overflowConstShift16_ssa(x int64) int16 {
+ return int16(x) << uint16(0xffff) << uint16(1)
+}
+
+//go:noinline
+func overflowConstShift8_ssa(x int64) int8 {
+ return int8(x) << uint8(0xff) << uint8(1)
+}
+
+func testOverflowConstShift(t *testing.T) {
+ want := int64(0)
+ for x := int64(-127); x < int64(127); x++ {
+ got := overflowConstShift64_ssa(x)
+ if want != got {
+ t.Errorf("overflowShift64 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift32_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift32 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift16_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift16 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift8_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift8 failed, wanted %d got %d", want, got)
+ }
+ }
+}
+
+//go:noinline
+func rsh64x64ConstOverflow8(x int8) int64 {
+ return int64(x) >> 9
+}
+
+//go:noinline
+func rsh64x64ConstOverflow16(x int16) int64 {
+ return int64(x) >> 17
+}
+
+//go:noinline
+func rsh64x64ConstOverflow32(x int32) int64 {
+ return int64(x) >> 33
+}
+
+func testArithRightShiftConstOverflow(t *testing.T) {
+ allSet := int64(-1)
+ if got, want := rsh64x64ConstOverflow8(0x7f), int64(0); got != want {
+ t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow16(0x7fff), int64(0); got != want {
+ t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow32(0x7ffffff), int64(0); got != want {
+ t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow8(int8(-1)), allSet; got != want {
+ t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow16(int16(-1)), allSet; got != want {
+ t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64x64ConstOverflow32(int32(-1)), allSet; got != want {
+ t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want)
+ }
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow8(x uint8) uint64 {
+ return uint64(x) >> 9
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow16(x uint16) uint64 {
+ return uint64(x) >> 17
+}
+
+//go:noinline
+func rsh64Ux64ConstOverflow32(x uint32) uint64 {
+ return uint64(x) >> 33
+}
+
+func testRightShiftConstOverflow(t *testing.T) {
+ if got, want := rsh64Ux64ConstOverflow8(0xff), uint64(0); got != want {
+ t.Errorf("rsh64Ux64ConstOverflow8 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64Ux64ConstOverflow16(0xffff), uint64(0); got != want {
+ t.Errorf("rsh64Ux64ConstOverflow16 failed: got %v, want %v", got, want)
+ }
+ if got, want := rsh64Ux64ConstOverflow32(0xffffffff), uint64(0); got != want {
+ t.Errorf("rsh64Ux64ConstOverflow32 failed: got %v, want %v", got, want)
+ }
+}
+
+// test64BitConstMult tests that rewrite rules don't fold 64 bit constants
+// into multiply instructions.
+func test64BitConstMult(t *testing.T) {
+ want := int64(103079215109)
+ if got := test64BitConstMult_ssa(1, 2); want != got {
+ t.Errorf("test64BitConstMult failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func test64BitConstMult_ssa(a, b int64) int64 {
+ return 34359738369*a + b*34359738370
+}
+
+// test64BitConstAdd tests that rewrite rules don't fold 64 bit constants
+// into add instructions.
+func test64BitConstAdd(t *testing.T) {
+ want := int64(3567671782835376650)
+ if got := test64BitConstAdd_ssa(1, 2); want != got {
+ t.Errorf("test64BitConstAdd failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func test64BitConstAdd_ssa(a, b int64) int64 {
+ return a + 575815584948629622 + b + 2991856197886747025
+}
+
+// testRegallocCVSpill tests that regalloc spills a value whose last use is the
+// current value.
+func testRegallocCVSpill(t *testing.T) {
+ want := int8(-9)
+ if got := testRegallocCVSpill_ssa(1, 2, 3, 4); want != got {
+ t.Errorf("testRegallocCVSpill failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testRegallocCVSpill_ssa(a, b, c, d int8) int8 {
+ return a + -32 + b + 63*c*-87*d
+}
+
+func testBitwiseLogic(t *testing.T) {
+ a, b := uint32(57623283), uint32(1314713839)
+ if want, got := uint32(38551779), testBitwiseAnd_ssa(a, b); want != got {
+ t.Errorf("testBitwiseAnd failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(1333785343), testBitwiseOr_ssa(a, b); want != got {
+ t.Errorf("testBitwiseOr failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(1295233564), testBitwiseXor_ssa(a, b); want != got {
+ t.Errorf("testBitwiseXor failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(832), testBitwiseLsh_ssa(13, 4, 2); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseLsh_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseLsh_ssa(-13, 25, 15); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(-13), testBitwiseRsh_ssa(-832, 4, 2); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseRsh_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(-1), testBitwiseRsh_ssa(-13, 25, 15); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0x3ffffff), testBitwiseRshU_ssa(0xffffffff, 4, 2); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0), testBitwiseRshU_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0), testBitwiseRshU_ssa(0x8aaaaaaa, 25, 15); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testBitwiseAnd_ssa(a, b uint32) uint32 {
+ return a & b
+}
+
+//go:noinline
+func testBitwiseOr_ssa(a, b uint32) uint32 {
+ return a | b
+}
+
+//go:noinline
+func testBitwiseXor_ssa(a, b uint32) uint32 {
+ return a ^ b
+}
+
+//go:noinline
+func testBitwiseLsh_ssa(a int32, b, c uint32) int32 {
+ return a << b << c
+}
+
+//go:noinline
+func testBitwiseRsh_ssa(a int32, b, c uint32) int32 {
+ return a >> b >> c
+}
+
+//go:noinline
+func testBitwiseRshU_ssa(a uint32, b, c uint32) uint32 {
+ return a >> b >> c
+}
+
+//go:noinline
+func testShiftCX_ssa() int {
+ v1 := uint8(3)
+ v4 := (v1 * v1) ^ v1 | v1 - v1 - v1&v1 ^ uint8(3+2) + v1*1>>0 - v1 | 1 | v1<<(2*3|0-0*0^1)
+ v5 := v4>>(3-0-uint(3)) | v1 | v1 + v1 ^ v4<<(0+1|3&1)<<(uint64(1)<<0*2*0<<0) ^ v1
+ v6 := v5 ^ (v1+v1)*v1 | v1 | v1*v1>>(v1&v1)>>(uint(1)<<0*uint(3)>>1)*v1<<2*v1<<v1 - v1>>2 | (v4 - v1) ^ v1 + v1 ^ v1>>1 | v1 + v1 - v1 ^ v1
+ v7 := v6 & v5 << 0
+ v1++
+ v11 := 2&1 ^ 0 + 3 | int(0^0)<<1>>(1*0*3) ^ 0*0 ^ 3&0*3&3 ^ 3*3 ^ 1 ^ int(2)<<(2*3) + 2 | 2 | 2 ^ 2 + 1 | 3 | 0 ^ int(1)>>1 ^ 2 // int
+ v7--
+ return int(uint64(2*1)<<(3-2)<<uint(3>>v7)-2)&v11 | v11 - int(2)<<0>>(2-1)*(v11*0&v11<<1<<(uint8(2)+v4))
+}
+
+func testShiftCX(t *testing.T) {
+ want := 141
+ if got := testShiftCX_ssa(); want != got {
+ t.Errorf("testShiftCX failed, wanted %d got %d", want, got)
+ }
+}
+
+// testSubqToNegq ensures that the SUBQ -> NEGQ translation works correctly.
+func testSubqToNegq(t *testing.T) {
+ want := int64(-318294940372190156)
+ if got := testSubqToNegq_ssa(1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2); want != got {
+ t.Errorf("testSubqToNegq failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testSubqToNegq_ssa(a, b, c, d, e, f, g, h, i, j, k int64) int64 {
+ return a + 8207351403619448057 - b - 1779494519303207690 + c*8810076340510052032*d - 4465874067674546219 - e*4361839741470334295 - f + 8688847565426072650*g*8065564729145417479
+}
+
+func testOcom(t *testing.T) {
+ want1, want2 := int32(0x55555555), int32(-0x55555556)
+ if got1, got2 := testOcom_ssa(0x55555555, 0x55555555); want1 != got1 || want2 != got2 {
+ t.Errorf("testOcom failed, wanted %d and %d got %d and %d", want1, want2, got1, got2)
+ }
+}
+
+//go:noinline
+func testOcom_ssa(a, b int32) (int32, int32) {
+ return ^^^^a, ^^^^^b
+}
+
+func lrot1_ssa(w uint8, x uint16, y uint32, z uint64) (a uint8, b uint16, c uint32, d uint64) {
+ a = (w << 5) | (w >> 3)
+ b = (x << 13) | (x >> 3)
+ c = (y << 29) | (y >> 3)
+ d = (z << 61) | (z >> 3)
+ return
+}
+
+//go:noinline
+func lrot2_ssa(w, n uint32) uint32 {
+ // Want to be sure that a "rotate by 32" which
+ // is really 0 | (w >> 0) == w
+ // is correctly compiled.
+ return (w << n) | (w >> (32 - n))
+}
+
+//go:noinline
+func lrot3_ssa(w uint32) uint32 {
+ // Want to be sure that a "rotate by 32" which
+ // is really 0 | (w >> 0) == w
+ // is correctly compiled.
+ return (w << 32) | (w >> (32 - 32))
+}
+
+func testLrot(t *testing.T) {
+ wantA, wantB, wantC, wantD := uint8(0xe1), uint16(0xe001),
+ uint32(0xe0000001), uint64(0xe000000000000001)
+ a, b, c, d := lrot1_ssa(0xf, 0xf, 0xf, 0xf)
+ if a != wantA || b != wantB || c != wantC || d != wantD {
+ t.Errorf("lrot1_ssa(0xf, 0xf, 0xf, 0xf)=%d %d %d %d, got %d %d %d %d", wantA, wantB, wantC, wantD, a, b, c, d)
+ }
+ x := lrot2_ssa(0xb0000001, 32)
+ wantX := uint32(0xb0000001)
+ if x != wantX {
+ t.Errorf("lrot2_ssa(0xb0000001, 32)=%d, got %d", wantX, x)
+ }
+ x = lrot3_ssa(0xb0000001)
+ if x != wantX {
+ t.Errorf("lrot3_ssa(0xb0000001)=%d, got %d", wantX, x)
+ }
+
+}
+
+//go:noinline
+func sub1_ssa() uint64 {
+ v1 := uint64(3) // uint64
+ return v1*v1 - (v1&v1)&v1
+}
+
+//go:noinline
+func sub2_ssa() uint8 {
+ v1 := uint8(0)
+ v3 := v1 + v1 + v1 ^ v1 | 3 + v1 ^ v1 | v1 ^ v1
+ v1-- // dev.ssa doesn't see this one
+ return v1 ^ v1*v1 - v3
+}
+
+func testSubConst(t *testing.T) {
+ x1 := sub1_ssa()
+ want1 := uint64(6)
+ if x1 != want1 {
+ t.Errorf("sub1_ssa()=%d, got %d", want1, x1)
+ }
+ x2 := sub2_ssa()
+ want2 := uint8(251)
+ if x2 != want2 {
+ t.Errorf("sub2_ssa()=%d, got %d", want2, x2)
+ }
+}
+
+//go:noinline
+func orPhi_ssa(a bool, x int) int {
+ v := 0
+ if a {
+ v = -1
+ } else {
+ v = -1
+ }
+ return x | v
+}
+
+func testOrPhi(t *testing.T) {
+ if want, got := -1, orPhi_ssa(true, 4); got != want {
+ t.Errorf("orPhi_ssa(true, 4)=%d, want %d", got, want)
+ }
+ if want, got := -1, orPhi_ssa(false, 0); got != want {
+ t.Errorf("orPhi_ssa(false, 0)=%d, want %d", got, want)
+ }
+}
+
+//go:noinline
+func addshiftLL_ssa(a, b uint32) uint32 {
+ return a + b<<3
+}
+
+//go:noinline
+func subshiftLL_ssa(a, b uint32) uint32 {
+ return a - b<<3
+}
+
+//go:noinline
+func rsbshiftLL_ssa(a, b uint32) uint32 {
+ return a<<3 - b
+}
+
+//go:noinline
+func andshiftLL_ssa(a, b uint32) uint32 {
+ return a & (b << 3)
+}
+
+//go:noinline
+func orshiftLL_ssa(a, b uint32) uint32 {
+ return a | b<<3
+}
+
+//go:noinline
+func xorshiftLL_ssa(a, b uint32) uint32 {
+ return a ^ b<<3
+}
+
+//go:noinline
+func bicshiftLL_ssa(a, b uint32) uint32 {
+ return a &^ (b << 3)
+}
+
+//go:noinline
+func notshiftLL_ssa(a uint32) uint32 {
+ return ^(a << 3)
+}
+
+//go:noinline
+func addshiftRL_ssa(a, b uint32) uint32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRL_ssa(a, b uint32) uint32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRL_ssa(a, b uint32) uint32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRL_ssa(a, b uint32) uint32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRL_ssa(a, b uint32) uint32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRL_ssa(a, b uint32) uint32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRL_ssa(a, b uint32) uint32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRL_ssa(a uint32) uint32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftRA_ssa(a, b int32) int32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRA_ssa(a, b int32) int32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRA_ssa(a, b int32) int32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRA_ssa(a, b int32) int32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRA_ssa(a, b int32) int32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRA_ssa(a, b int32) int32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRA_ssa(a, b int32) int32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRA_ssa(a int32) int32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b<<s
+}
+
+//go:noinline
+func subshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b<<s
+}
+
+//go:noinline
+func rsbshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a<<s - b
+}
+
+//go:noinline
+func andshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b << s)
+}
+
+//go:noinline
+func orshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b<<s
+}
+
+//go:noinline
+func xorshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b<<s
+}
+
+//go:noinline
+func bicshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b << s)
+}
+
+//go:noinline
+func notshiftLLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a << s)
+}
+
+//go:noinline
+func addshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a >> s)
+}
+
+//go:noinline
+func addshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRAreg_ssa(a int32, s uint8) int32 {
+ return ^(a >> s)
+}
+
+// test ARM shifted ops
+func testShiftedOps(t *testing.T) {
+ a, b := uint32(10), uint32(42)
+ if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want {
+ t.Errorf("addshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want {
+ t.Errorf("subshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want {
+ t.Errorf("rsbshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want {
+ t.Errorf("andshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want {
+ t.Errorf("orshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want {
+ t.Errorf("xorshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want {
+ t.Errorf("bicshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := ^(a << 3), notshiftLL_ssa(a); got != want {
+ t.Errorf("notshiftLL_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want {
+ t.Errorf("addshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want {
+ t.Errorf("subshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want {
+ t.Errorf("rsbshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want {
+ t.Errorf("andshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want {
+ t.Errorf("orshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want {
+ t.Errorf("xorshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want {
+ t.Errorf("bicshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want {
+ t.Errorf("notshiftRL_ssa(10) = %d want %d", got, want)
+ }
+ c, d := int32(10), int32(-42)
+ if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want {
+ t.Errorf("addshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want {
+ t.Errorf("subshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want {
+ t.Errorf("rsbshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want {
+ t.Errorf("andshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want {
+ t.Errorf("orshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want {
+ t.Errorf("xorshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c&^(d>>3), bicshiftRA_ssa(c, d); got != want {
+ t.Errorf("bicshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want {
+ t.Errorf("notshiftRA_ssa(-42) = %d want %d", got, want)
+ }
+ s := uint8(3)
+ if want, got := a+b<<s, addshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("addshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a-b<<s, subshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("subshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a<<s-b, rsbshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("rsbshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&(b<<s), andshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("andshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a|b<<s, orshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("orshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a^b<<s, xorshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("xorshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&^(b<<s), bicshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("bicshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(a << s), notshiftLLreg_ssa(a, s); got != want {
+ t.Errorf("notshiftLLreg_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := a+b>>s, addshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("addshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("subshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("rsbshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("andshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("orshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("xorshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("bicshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want {
+ t.Errorf("notshiftRLreg_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("addshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("subshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("rsbshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("andshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("orshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("xorshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("bicshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want {
+ t.Errorf("notshiftRAreg_ssa(-42, 3) = %d want %d", got, want)
+ }
+}
+
+// TestArithmetic tests that both backends have the same result for arithmetic expressions.
+func TestArithmetic(t *testing.T) {
+ test64BitConstMult(t)
+ test64BitConstAdd(t)
+ testRegallocCVSpill(t)
+ testSubqToNegq(t)
+ testBitwiseLogic(t)
+ testOcom(t)
+ testLrot(t)
+ testShiftCX(t)
+ testSubConst(t)
+ testOverflowConstShift(t)
+ testArithRightShiftConstOverflow(t)
+ testRightShiftConstOverflow(t)
+ testArithConstShift(t)
+ testArithRshConst(t)
+ testLargeConst(t)
+ testLoadCombine(t)
+ testLoadSymCombine(t)
+ testShiftRemoval(t)
+ testShiftedOps(t)
+ testDivFixUp(t)
+ testDivisibleSignedPow2(t)
+ testDivisibility(t)
+}
+
+// testDivFixUp ensures that signed division fix-ups are being generated.
+func testDivFixUp(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error("testDivFixUp failed")
+ if e, ok := r.(runtime.Error); ok {
+ t.Logf("%v\n", e.Error())
+ }
+ }
+ }()
+ var w int8 = -128
+ var x int16 = -32768
+ var y int32 = -2147483648
+ var z int64 = -9223372036854775808
+
+ for i := -5; i < 0; i++ {
+ g8 = w / int8(i)
+ g16 = x / int16(i)
+ g32 = y / int32(i)
+ g64 = z / int64(i)
+ g8 = w % int8(i)
+ g16 = x % int16(i)
+ g32 = y % int32(i)
+ g64 = z % int64(i)
+ }
+}
+
+//go:noinline
+func divisible_int8_2to1(x int8) bool {
+ return x%(1<<1) == 0
+}
+
+//go:noinline
+func divisible_int8_2to2(x int8) bool {
+ return x%(1<<2) == 0
+}
+
+//go:noinline
+func divisible_int8_2to3(x int8) bool {
+ return x%(1<<3) == 0
+}
+
+//go:noinline
+func divisible_int8_2to4(x int8) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int8_2to5(x int8) bool {
+ return x%(1<<5) == 0
+}
+
+//go:noinline
+func divisible_int8_2to6(x int8) bool {
+ return x%(1<<6) == 0
+}
+
+//go:noinline
+func divisible_int16_2to1(x int16) bool {
+ return x%(1<<1) == 0
+}
+
+//go:noinline
+func divisible_int16_2to2(x int16) bool {
+ return x%(1<<2) == 0
+}
+
+//go:noinline
+func divisible_int16_2to3(x int16) bool {
+ return x%(1<<3) == 0
+}
+
+//go:noinline
+func divisible_int16_2to4(x int16) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int16_2to5(x int16) bool {
+ return x%(1<<5) == 0
+}
+
+//go:noinline
+func divisible_int16_2to6(x int16) bool {
+ return x%(1<<6) == 0
+}
+
+//go:noinline
+func divisible_int16_2to7(x int16) bool {
+ return x%(1<<7) == 0
+}
+
+//go:noinline
+func divisible_int16_2to8(x int16) bool {
+ return x%(1<<8) == 0
+}
+
+//go:noinline
+func divisible_int16_2to9(x int16) bool {
+ return x%(1<<9) == 0
+}
+
+//go:noinline
+func divisible_int16_2to10(x int16) bool {
+ return x%(1<<10) == 0
+}
+
+//go:noinline
+func divisible_int16_2to11(x int16) bool {
+ return x%(1<<11) == 0
+}
+
+//go:noinline
+func divisible_int16_2to12(x int16) bool {
+ return x%(1<<12) == 0
+}
+
+//go:noinline
+func divisible_int16_2to13(x int16) bool {
+ return x%(1<<13) == 0
+}
+
+//go:noinline
+func divisible_int16_2to14(x int16) bool {
+ return x%(1<<14) == 0
+}
+
+//go:noinline
+func divisible_int32_2to4(x int32) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int32_2to15(x int32) bool {
+ return x%(1<<15) == 0
+}
+
+//go:noinline
+func divisible_int32_2to26(x int32) bool {
+ return x%(1<<26) == 0
+}
+
+//go:noinline
+func divisible_int64_2to4(x int64) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int64_2to15(x int64) bool {
+ return x%(1<<15) == 0
+}
+
+//go:noinline
+func divisible_int64_2to26(x int64) bool {
+ return x%(1<<26) == 0
+}
+
+//go:noinline
+func divisible_int64_2to34(x int64) bool {
+ return x%(1<<34) == 0
+}
+
+//go:noinline
+func divisible_int64_2to48(x int64) bool {
+ return x%(1<<48) == 0
+}
+
+//go:noinline
+func divisible_int64_2to57(x int64) bool {
+ return x%(1<<57) == 0
+}
+
+// testDivisibleSignedPow2 confirms that x%(1<<k)==0 is rewritten correctly
+func testDivisibleSignedPow2(t *testing.T) {
+ var i int64
+ var pow2 = []int64{
+ 1,
+ 1 << 1,
+ 1 << 2,
+ 1 << 3,
+ 1 << 4,
+ 1 << 5,
+ 1 << 6,
+ 1 << 7,
+ 1 << 8,
+ 1 << 9,
+ 1 << 10,
+ 1 << 11,
+ 1 << 12,
+ 1 << 13,
+ 1 << 14,
+ }
+ // exhaustive test for int8
+ for i = math.MinInt8; i <= math.MaxInt8; i++ {
+ if want, got := int8(i)%int8(pow2[1]) == 0, divisible_int8_2to1(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to1(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[2]) == 0, divisible_int8_2to2(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to2(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[3]) == 0, divisible_int8_2to3(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to3(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[4]) == 0, divisible_int8_2to4(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to4(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[5]) == 0, divisible_int8_2to5(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to5(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[6]) == 0, divisible_int8_2to6(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to6(%d) = %v want %v", i, got, want)
+ }
+ }
+ // exhaustive test for int16
+ for i = math.MinInt16; i <= math.MaxInt16; i++ {
+ if want, got := int16(i)%int16(pow2[1]) == 0, divisible_int16_2to1(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to1(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[2]) == 0, divisible_int16_2to2(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to2(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[3]) == 0, divisible_int16_2to3(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to3(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[4]) == 0, divisible_int16_2to4(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to4(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[5]) == 0, divisible_int16_2to5(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to5(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[6]) == 0, divisible_int16_2to6(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to6(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[7]) == 0, divisible_int16_2to7(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to7(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[8]) == 0, divisible_int16_2to8(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[9]) == 0, divisible_int16_2to9(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to9(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[10]) == 0, divisible_int16_2to10(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to10(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[11]) == 0, divisible_int16_2to11(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to11(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[12]) == 0, divisible_int16_2to12(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to12(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[13]) == 0, divisible_int16_2to13(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to13(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[14]) == 0, divisible_int16_2to14(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to14(%d) = %v want %v", i, got, want)
+ }
+ }
+ // spot check for int32 and int64
+ var (
+ two4 int64 = 1 << 4
+ two15 int64 = 1 << 15
+ two26 int64 = 1 << 26
+ two34 int64 = 1 << 34
+ two48 int64 = 1 << 48
+ two57 int64 = 1 << 57
+ )
+ var xs = []int64{two4, two4 + 3, -3 * two4, -3*two4 + 1,
+ two15, two15 + 3, -3 * two15, -3*two15 + 1,
+ two26, two26 + 37, -5 * two26, -5*two26 + 2,
+ two34, two34 + 356, -7 * two34, -7*two34 + 13,
+ two48, two48 + 3000, -12 * two48, -12*two48 + 1111,
+ two57, two57 + 397654, -15 * two57, -15*two57 + 11234,
+ }
+ for _, x := range xs {
+ if int64(int32(x)) == x {
+ if want, got := int32(x)%int32(two4) == 0, divisible_int32_2to4(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to4(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := int32(x)%int32(two15) == 0, divisible_int32_2to15(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to15(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := int32(x)%int32(two26) == 0, divisible_int32_2to26(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to26(%d) = %v want %v", x, got, want)
+ }
+ }
+ // spot check for int64
+ if want, got := x%two4 == 0, divisible_int64_2to4(x); got != want {
+ t.Errorf("divisible_int64_2to4(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two15 == 0, divisible_int64_2to15(x); got != want {
+ t.Errorf("divisible_int64_2to15(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two26 == 0, divisible_int64_2to26(x); got != want {
+ t.Errorf("divisible_int64_2to26(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two34 == 0, divisible_int64_2to34(x); got != want {
+ t.Errorf("divisible_int64_2to34(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two48 == 0, divisible_int64_2to48(x); got != want {
+ t.Errorf("divisible_int64_2to48(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two57 == 0, divisible_int64_2to57(x); got != want {
+ t.Errorf("divisible_int64_2to57(%d) = %v want %v", x, got, want)
+ }
+ }
+}
+
+func div6_uint8(n uint8) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint16(n uint16) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint32(n uint32) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint64(n uint64) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div19_uint8(n uint8) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint16(n uint16) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint32(n uint32) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint64(n uint64) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div6_int8(n int8) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int16(n int16) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int32(n int32) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int64(n int64) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div19_int8(n int8) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int16(n int16) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int32(n int32) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int64(n int64) bool {
+ return n%19 == 0
+}
+
+// testDivisibility confirms that rewrite rules x%c ==0 for c constant are correct.
+func testDivisibility(t *testing.T) {
+ // unsigned tests
+ // test an even and an odd divisor
+ var sixU, nineteenU uint64 = 6, 19
+ // test all inputs for uint8, uint16
+ for i := uint64(0); i <= math.MaxUint16; i++ {
+ if i <= math.MaxUint8 {
+ if want, got := uint8(i)%uint8(sixU) == 0, div6_uint8(uint8(i)); got != want {
+ t.Errorf("div6_uint8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := uint8(i)%uint8(nineteenU) == 0, div19_uint8(uint8(i)); got != want {
+ t.Errorf("div6_uint19(%d) = %v want %v", i, got, want)
+ }
+ }
+ if want, got := uint16(i)%uint16(sixU) == 0, div6_uint16(uint16(i)); got != want {
+ t.Errorf("div6_uint16(%d) = %v want %v", i, got, want)
+ }
+ if want, got := uint16(i)%uint16(nineteenU) == 0, div19_uint16(uint16(i)); got != want {
+ t.Errorf("div19_uint16(%d) = %v want %v", i, got, want)
+ }
+ }
+ var maxU32, maxU64 uint64 = math.MaxUint32, math.MaxUint64
+ // spot check inputs for uint32 and uint64
+ xu := []uint64{
+ 0, 1, 2, 3, 4, 5,
+ sixU, 2 * sixU, 3 * sixU, 5 * sixU, 12345 * sixU,
+ sixU + 1, 2*sixU - 5, 3*sixU + 3, 5*sixU + 4, 12345*sixU - 2,
+ nineteenU, 2 * nineteenU, 3 * nineteenU, 5 * nineteenU, 12345 * nineteenU,
+ nineteenU + 1, 2*nineteenU - 5, 3*nineteenU + 3, 5*nineteenU + 4, 12345*nineteenU - 2,
+ maxU32, maxU32 - 1, maxU32 - 2, maxU32 - 3, maxU32 - 4,
+ maxU32 - 5, maxU32 - 6, maxU32 - 7, maxU32 - 8,
+ maxU32 - 9, maxU32 - 10, maxU32 - 11, maxU32 - 12,
+ maxU32 - 13, maxU32 - 14, maxU32 - 15, maxU32 - 16,
+ maxU32 - 17, maxU32 - 18, maxU32 - 19, maxU32 - 20,
+ maxU64, maxU64 - 1, maxU64 - 2, maxU64 - 3, maxU64 - 4,
+ maxU64 - 5, maxU64 - 6, maxU64 - 7, maxU64 - 8,
+ maxU64 - 9, maxU64 - 10, maxU64 - 11, maxU64 - 12,
+ maxU64 - 13, maxU64 - 14, maxU64 - 15, maxU64 - 16,
+ maxU64 - 17, maxU64 - 18, maxU64 - 19, maxU64 - 20,
+ }
+ for _, x := range xu {
+ if x <= maxU32 {
+ if want, got := uint32(x)%uint32(sixU) == 0, div6_uint32(uint32(x)); got != want {
+ t.Errorf("div6_uint32(%d) = %v want %v", x, got, want)
+ }
+ if want, got := uint32(x)%uint32(nineteenU) == 0, div19_uint32(uint32(x)); got != want {
+ t.Errorf("div19_uint32(%d) = %v want %v", x, got, want)
+ }
+ }
+ if want, got := x%sixU == 0, div6_uint64(x); got != want {
+ t.Errorf("div6_uint64(%d) = %v want %v", x, got, want)
+ }
+ if want, got := x%nineteenU == 0, div19_uint64(x); got != want {
+ t.Errorf("div19_uint64(%d) = %v want %v", x, got, want)
+ }
+ }
+
+ // signed tests
+ // test an even and an odd divisor
+ var sixS, nineteenS int64 = 6, 19
+ // test all inputs for int8, int16
+ for i := int64(math.MinInt16); i <= math.MaxInt16; i++ {
+ if math.MinInt8 <= i && i <= math.MaxInt8 {
+ if want, got := int8(i)%int8(sixS) == 0, div6_int8(int8(i)); got != want {
+ t.Errorf("div6_int8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(nineteenS) == 0, div19_int8(int8(i)); got != want {
+ t.Errorf("div6_int19(%d) = %v want %v", i, got, want)
+ }
+ }
+ if want, got := int16(i)%int16(sixS) == 0, div6_int16(int16(i)); got != want {
+ t.Errorf("div6_int16(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(nineteenS) == 0, div19_int16(int16(i)); got != want {
+ t.Errorf("div19_int16(%d) = %v want %v", i, got, want)
+ }
+ }
+ var minI32, maxI32, minI64, maxI64 int64 = math.MinInt32, math.MaxInt32, math.MinInt64, math.MaxInt64
+ // spot check inputs for int32 and int64
+ xs := []int64{
+ 0, 1, 2, 3, 4, 5,
+ -1, -2, -3, -4, -5,
+ sixS, 2 * sixS, 3 * sixS, 5 * sixS, 12345 * sixS,
+ sixS + 1, 2*sixS - 5, 3*sixS + 3, 5*sixS + 4, 12345*sixS - 2,
+ -sixS, -2 * sixS, -3 * sixS, -5 * sixS, -12345 * sixS,
+ -sixS + 1, -2*sixS - 5, -3*sixS + 3, -5*sixS + 4, -12345*sixS - 2,
+ nineteenS, 2 * nineteenS, 3 * nineteenS, 5 * nineteenS, 12345 * nineteenS,
+ nineteenS + 1, 2*nineteenS - 5, 3*nineteenS + 3, 5*nineteenS + 4, 12345*nineteenS - 2,
+ -nineteenS, -2 * nineteenS, -3 * nineteenS, -5 * nineteenS, -12345 * nineteenS,
+ -nineteenS + 1, -2*nineteenS - 5, -3*nineteenS + 3, -5*nineteenS + 4, -12345*nineteenS - 2,
+ minI32, minI32 + 1, minI32 + 2, minI32 + 3, minI32 + 4,
+ minI32 + 5, minI32 + 6, minI32 + 7, minI32 + 8,
+ minI32 + 9, minI32 + 10, minI32 + 11, minI32 + 12,
+ minI32 + 13, minI32 + 14, minI32 + 15, minI32 + 16,
+ minI32 + 17, minI32 + 18, minI32 + 19, minI32 + 20,
+ maxI32, maxI32 - 1, maxI32 - 2, maxI32 - 3, maxI32 - 4,
+ maxI32 - 5, maxI32 - 6, maxI32 - 7, maxI32 - 8,
+ maxI32 - 9, maxI32 - 10, maxI32 - 11, maxI32 - 12,
+ maxI32 - 13, maxI32 - 14, maxI32 - 15, maxI32 - 16,
+ maxI32 - 17, maxI32 - 18, maxI32 - 19, maxI32 - 20,
+ minI64, minI64 + 1, minI64 + 2, minI64 + 3, minI64 + 4,
+ minI64 + 5, minI64 + 6, minI64 + 7, minI64 + 8,
+ minI64 + 9, minI64 + 10, minI64 + 11, minI64 + 12,
+ minI64 + 13, minI64 + 14, minI64 + 15, minI64 + 16,
+ minI64 + 17, minI64 + 18, minI64 + 19, minI64 + 20,
+ maxI64, maxI64 - 1, maxI64 - 2, maxI64 - 3, maxI64 - 4,
+ maxI64 - 5, maxI64 - 6, maxI64 - 7, maxI64 - 8,
+ maxI64 - 9, maxI64 - 10, maxI64 - 11, maxI64 - 12,
+ maxI64 - 13, maxI64 - 14, maxI64 - 15, maxI64 - 16,
+ maxI64 - 17, maxI64 - 18, maxI64 - 19, maxI64 - 20,
+ }
+ for _, x := range xs {
+ if minI32 <= x && x <= maxI32 {
+ if want, got := int32(x)%int32(sixS) == 0, div6_int32(int32(x)); got != want {
+ t.Errorf("div6_int32(%d) = %v want %v", x, got, want)
+ }
+ if want, got := int32(x)%int32(nineteenS) == 0, div19_int32(int32(x)); got != want {
+ t.Errorf("div19_int32(%d) = %v want %v", x, got, want)
+ }
+ }
+ if want, got := x%sixS == 0, div6_int64(x); got != want {
+ t.Errorf("div6_int64(%d) = %v want %v", x, got, want)
+ }
+ if want, got := x%nineteenS == 0, div19_int64(x); got != want {
+ t.Errorf("div19_int64(%d) = %v want %v", x, got, want)
+ }
+ }
+}
+
+//go:noinline
+func genREV16_1(c uint64) uint64 {
+ b := ((c & 0xff00ff00ff00ff00) >> 8) | ((c & 0x00ff00ff00ff00ff) << 8)
+ return b
+}
+
+//go:noinline
+func genREV16_2(c uint64) uint64 {
+ b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8)
+ return b
+}
+
+//go:noinline
+func genREV16W(c uint32) uint32 {
+ b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8)
+ return b
+}
+
+func TestREV16(t *testing.T) {
+ x := uint64(0x8f7f6f5f4f3f2f1f)
+ want1 := uint64(0x7f8f5f6f3f4f1f2f)
+ want2 := uint64(0x3f4f1f2f)
+
+ got1 := genREV16_1(x)
+ if got1 != want1 {
+ t.Errorf("genREV16_1(%#x) = %#x want %#x", x, got1, want1)
+ }
+ got2 := genREV16_2(x)
+ if got2 != want2 {
+ t.Errorf("genREV16_2(%#x) = %#x want %#x", x, got2, want2)
+ }
+}
+
+func TestREV16W(t *testing.T) {
+ x := uint32(0x4f3f2f1f)
+ want := uint32(0x3f4f1f2f)
+
+ got := genREV16W(x)
+ if got != want {
+ t.Errorf("genREV16W(%#x) = %#x want %#x", x, got, want)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/array_test.go b/src/cmd/compile/internal/test/testdata/array_test.go
new file mode 100644
index 0000000..efa00d0
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/array_test.go
@@ -0,0 +1,132 @@
+package main
+
+import "testing"
+
+//go:noinline
+func testSliceLenCap12_ssa(a [10]int, i, j int) (int, int) {
+ b := a[i:j]
+ return len(b), cap(b)
+}
+
+//go:noinline
+func testSliceLenCap1_ssa(a [10]int, i, j int) (int, int) {
+ b := a[i:]
+ return len(b), cap(b)
+}
+
+//go:noinline
+func testSliceLenCap2_ssa(a [10]int, i, j int) (int, int) {
+ b := a[:j]
+ return len(b), cap(b)
+}
+
+func testSliceLenCap(t *testing.T) {
+ a := [10]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ tests := [...]struct {
+ fn func(a [10]int, i, j int) (int, int)
+ i, j int // slice range
+ l, c int // len, cap
+ }{
+ // -1 means the value is not used.
+ {testSliceLenCap12_ssa, 0, 0, 0, 10},
+ {testSliceLenCap12_ssa, 0, 1, 1, 10},
+ {testSliceLenCap12_ssa, 0, 10, 10, 10},
+ {testSliceLenCap12_ssa, 10, 10, 0, 0},
+ {testSliceLenCap12_ssa, 0, 5, 5, 10},
+ {testSliceLenCap12_ssa, 5, 5, 0, 5},
+ {testSliceLenCap12_ssa, 5, 10, 5, 5},
+ {testSliceLenCap1_ssa, 0, -1, 0, 10},
+ {testSliceLenCap1_ssa, 5, -1, 5, 5},
+ {testSliceLenCap1_ssa, 10, -1, 0, 0},
+ {testSliceLenCap2_ssa, -1, 0, 0, 10},
+ {testSliceLenCap2_ssa, -1, 5, 5, 10},
+ {testSliceLenCap2_ssa, -1, 10, 10, 10},
+ }
+
+ for i, test := range tests {
+ if l, c := test.fn(a, test.i, test.j); l != test.l && c != test.c {
+ t.Errorf("#%d len(a[%d:%d]), cap(a[%d:%d]) = %d %d, want %d %d", i, test.i, test.j, test.i, test.j, l, c, test.l, test.c)
+ }
+ }
+}
+
+//go:noinline
+func testSliceGetElement_ssa(a [10]int, i, j, p int) int {
+ return a[i:j][p]
+}
+
+func testSliceGetElement(t *testing.T) {
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ tests := [...]struct {
+ i, j, p int
+ want int // a[i:j][p]
+ }{
+ {0, 10, 2, 20},
+ {0, 5, 4, 40},
+ {5, 10, 3, 80},
+ {1, 9, 7, 80},
+ }
+
+ for i, test := range tests {
+ if got := testSliceGetElement_ssa(a, test.i, test.j, test.p); got != test.want {
+ t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want)
+ }
+ }
+}
+
+//go:noinline
+func testSliceSetElement_ssa(a *[10]int, i, j, p, x int) {
+ (*a)[i:j][p] = x
+}
+
+func testSliceSetElement(t *testing.T) {
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ tests := [...]struct {
+ i, j, p int
+ want int // a[i:j][p]
+ }{
+ {0, 10, 2, 17},
+ {0, 5, 4, 11},
+ {5, 10, 3, 28},
+ {1, 9, 7, 99},
+ }
+
+ for i, test := range tests {
+ testSliceSetElement_ssa(&a, test.i, test.j, test.p, test.want)
+ if got := a[test.i+test.p]; got != test.want {
+ t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want)
+ }
+ }
+}
+
+func testSlicePanic1(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ testSliceLenCap12_ssa(a, 3, 12)
+ t.Errorf("expected to panic, but didn't")
+}
+
+func testSlicePanic2(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ testSliceGetElement_ssa(a, 3, 7, 4)
+ t.Errorf("expected to panic, but didn't")
+}
+
+func TestArray(t *testing.T) {
+ testSliceLenCap(t)
+ testSliceGetElement(t)
+ testSliceSetElement(t)
+ testSlicePanic1(t)
+ testSlicePanic2(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/assert_test.go b/src/cmd/compile/internal/test/testdata/assert_test.go
new file mode 100644
index 0000000..4326be8
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/assert_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests type assertion expressions and statements
+
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+type (
+ S struct{}
+ U struct{}
+
+ I interface {
+ F()
+ }
+)
+
+var (
+ s *S
+ u *U
+)
+
+func (s *S) F() {}
+func (u *U) F() {}
+
+func e2t_ssa(e interface{}) *U {
+ return e.(*U)
+}
+
+func i2t_ssa(i I) *U {
+ return i.(*U)
+}
+
+func testAssertE2TOk(t *testing.T) {
+ if got := e2t_ssa(u); got != u {
+ t.Errorf("e2t_ssa(u)=%v want %v", got, u)
+ }
+}
+
+func testAssertE2TPanic(t *testing.T) {
+ var got *U
+ defer func() {
+ if got != nil {
+ t.Errorf("e2t_ssa(s)=%v want nil", got)
+ }
+ e := recover()
+ err, ok := e.(*runtime.TypeAssertionError)
+ if !ok {
+ t.Errorf("e2t_ssa(s) panic type %T", e)
+ }
+ want := "interface conversion: interface {} is *main.S, not *main.U"
+ if err.Error() != want {
+ t.Errorf("e2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error())
+ }
+ }()
+ got = e2t_ssa(s)
+ t.Errorf("e2t_ssa(s) should panic")
+
+}
+
+func testAssertI2TOk(t *testing.T) {
+ if got := i2t_ssa(u); got != u {
+ t.Errorf("i2t_ssa(u)=%v want %v", got, u)
+ }
+}
+
+func testAssertI2TPanic(t *testing.T) {
+ var got *U
+ defer func() {
+ if got != nil {
+ t.Errorf("i2t_ssa(s)=%v want nil", got)
+ }
+ e := recover()
+ err, ok := e.(*runtime.TypeAssertionError)
+ if !ok {
+ t.Errorf("i2t_ssa(s) panic type %T", e)
+ }
+ want := "interface conversion: main.I is *main.S, not *main.U"
+ if err.Error() != want {
+ t.Errorf("i2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error())
+ }
+ }()
+ got = i2t_ssa(s)
+ t.Errorf("i2t_ssa(s) should panic")
+}
+
+func e2t2_ssa(e interface{}) (*U, bool) {
+ u, ok := e.(*U)
+ return u, ok
+}
+
+func i2t2_ssa(i I) (*U, bool) {
+ u, ok := i.(*U)
+ return u, ok
+}
+
+func testAssertE2T2(t *testing.T) {
+ if got, ok := e2t2_ssa(u); !ok || got != u {
+ t.Errorf("e2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true)
+ }
+ if got, ok := e2t2_ssa(s); ok || got != nil {
+ t.Errorf("e2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false)
+ }
+}
+
+func testAssertI2T2(t *testing.T) {
+ if got, ok := i2t2_ssa(u); !ok || got != u {
+ t.Errorf("i2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true)
+ }
+ if got, ok := i2t2_ssa(s); ok || got != nil {
+ t.Errorf("i2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false)
+ }
+}
+
+// TestTypeAssertion tests type assertions.
+func TestTypeAssertion(t *testing.T) {
+ testAssertE2TOk(t)
+ testAssertE2TPanic(t)
+ testAssertI2TOk(t)
+ testAssertI2TPanic(t)
+ testAssertE2T2(t)
+ testAssertI2T2(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/break_test.go b/src/cmd/compile/internal/test/testdata/break_test.go
new file mode 100644
index 0000000..50245df
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/break_test.go
@@ -0,0 +1,250 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests continue and break.
+
+package main
+
+import "testing"
+
+func continuePlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ return n
+}
+
+func continueLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ return n
+}
+
+func continuePlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func continueLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func continueLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakPlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+func breakLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ return n
+}
+
+func breakPlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+var g, h int // globals to ensure optimizations don't collapse our switch statements
+
+func switchPlain_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ break
+ n = 2
+ }
+ return n
+}
+
+func switchLabeled_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ break Done
+ n = 2
+ }
+ return n
+}
+
+func switchPlainInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break
+ }
+ n = 2
+ }
+ return n
+}
+
+func switchLabeledInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ Done:
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+func switchLabeledOuter_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+// TestBreakContinue tests that continue and break statements do what they say.
+func TestBreakContinue(t *testing.T) {
+ tests := [...]struct {
+ name string
+ fn func() int
+ want int
+ }{
+ {"continuePlain_ssa", continuePlain_ssa, 9},
+ {"continueLabeled_ssa", continueLabeled_ssa, 9},
+ {"continuePlainInner_ssa", continuePlainInner_ssa, 29},
+ {"continueLabeledInner_ssa", continueLabeledInner_ssa, 29},
+ {"continueLabeledOuter_ssa", continueLabeledOuter_ssa, 5},
+
+ {"breakPlain_ssa", breakPlain_ssa, 5},
+ {"breakLabeled_ssa", breakLabeled_ssa, 5},
+ {"breakPlainInner_ssa", breakPlainInner_ssa, 25},
+ {"breakLabeledInner_ssa", breakLabeledInner_ssa, 25},
+ {"breakLabeledOuter_ssa", breakLabeledOuter_ssa, 5},
+
+ {"switchPlain_ssa", switchPlain_ssa, 1},
+ {"switchLabeled_ssa", switchLabeled_ssa, 1},
+ {"switchPlainInner_ssa", switchPlainInner_ssa, 2},
+ {"switchLabeledInner_ssa", switchLabeledInner_ssa, 2},
+ {"switchLabeledOuter_ssa", switchLabeledOuter_ssa, 11},
+
+ // no select tests; they're identical to switch
+ }
+
+ for _, test := range tests {
+ if got := test.fn(); got != test.want {
+ t.Errorf("%s()=%d, want %d", test.name, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/chan_test.go b/src/cmd/compile/internal/test/testdata/chan_test.go
new file mode 100644
index 0000000..628bd8f
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/chan_test.go
@@ -0,0 +1,63 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// chan.go tests chan operations.
+package main
+
+import "testing"
+
+//go:noinline
+func lenChan_ssa(v chan int) int {
+ return len(v)
+}
+
+//go:noinline
+func capChan_ssa(v chan int) int {
+ return cap(v)
+}
+
+func testLenChan(t *testing.T) {
+
+ v := make(chan int, 10)
+ v <- 1
+ v <- 1
+ v <- 1
+
+ if want, got := 3, lenChan_ssa(v); got != want {
+ t.Errorf("expected len(chan) = %d, got %d", want, got)
+ }
+}
+
+func testLenNilChan(t *testing.T) {
+
+ var v chan int
+ if want, got := 0, lenChan_ssa(v); got != want {
+ t.Errorf("expected len(nil) = %d, got %d", want, got)
+ }
+}
+
+func testCapChan(t *testing.T) {
+
+ v := make(chan int, 25)
+
+ if want, got := 25, capChan_ssa(v); got != want {
+ t.Errorf("expected cap(chan) = %d, got %d", want, got)
+ }
+}
+
+func testCapNilChan(t *testing.T) {
+
+ var v chan int
+ if want, got := 0, capChan_ssa(v); got != want {
+ t.Errorf("expected cap(nil) = %d, got %d", want, got)
+ }
+}
+
+func TestChan(t *testing.T) {
+ testLenChan(t)
+ testLenNilChan(t)
+
+ testCapChan(t)
+ testCapNilChan(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/closure_test.go b/src/cmd/compile/internal/test/testdata/closure_test.go
new file mode 100644
index 0000000..6cddc2d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/closure_test.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// closure.go tests closure operations.
+package main
+
+import "testing"
+
+//go:noinline
+func testCFunc_ssa() int {
+ a := 0
+ b := func() {
+ switch {
+ }
+ a++
+ }
+ b()
+ b()
+ return a
+}
+
+func testCFunc(t *testing.T) {
+ if want, got := 2, testCFunc_ssa(); got != want {
+ t.Errorf("expected %d, got %d", want, got)
+ }
+}
+
+// TestClosure tests closure related behavior.
+func TestClosure(t *testing.T) {
+ testCFunc(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/cmpConst_test.go b/src/cmd/compile/internal/test/testdata/cmpConst_test.go
new file mode 100644
index 0000000..9400ef4
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/cmpConst_test.go
@@ -0,0 +1,2209 @@
+// Code generated by gen/cmpConstGen.go. DO NOT EDIT.
+
+package main
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+// results show the expected result for the elements left of, equal to and right of the index.
+type result struct{ l, e, r bool }
+
+var (
+ eq = result{l: false, e: true, r: false}
+ ne = result{l: true, e: false, r: true}
+ lt = result{l: true, e: false, r: false}
+ le = result{l: true, e: true, r: false}
+ gt = result{l: false, e: false, r: true}
+ ge = result{l: false, e: true, r: true}
+)
+
+// uint64 tests
+var uint64_vals = []uint64{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+ 4294967296,
+ 1095216660480,
+ 9223372036854775806,
+ 9223372036854775807,
+ 9223372036854775808,
+ 18374686479671623680,
+ 18446744073709551614,
+ 18446744073709551615,
+}
+
+func lt_0_uint64(x uint64) bool { return x < 0 }
+func le_0_uint64(x uint64) bool { return x <= 0 }
+func gt_0_uint64(x uint64) bool { return x > 0 }
+func ge_0_uint64(x uint64) bool { return x >= 0 }
+func eq_0_uint64(x uint64) bool { return x == 0 }
+func ne_0_uint64(x uint64) bool { return x != 0 }
+func lt_1_uint64(x uint64) bool { return x < 1 }
+func le_1_uint64(x uint64) bool { return x <= 1 }
+func gt_1_uint64(x uint64) bool { return x > 1 }
+func ge_1_uint64(x uint64) bool { return x >= 1 }
+func eq_1_uint64(x uint64) bool { return x == 1 }
+func ne_1_uint64(x uint64) bool { return x != 1 }
+func lt_126_uint64(x uint64) bool { return x < 126 }
+func le_126_uint64(x uint64) bool { return x <= 126 }
+func gt_126_uint64(x uint64) bool { return x > 126 }
+func ge_126_uint64(x uint64) bool { return x >= 126 }
+func eq_126_uint64(x uint64) bool { return x == 126 }
+func ne_126_uint64(x uint64) bool { return x != 126 }
+func lt_127_uint64(x uint64) bool { return x < 127 }
+func le_127_uint64(x uint64) bool { return x <= 127 }
+func gt_127_uint64(x uint64) bool { return x > 127 }
+func ge_127_uint64(x uint64) bool { return x >= 127 }
+func eq_127_uint64(x uint64) bool { return x == 127 }
+func ne_127_uint64(x uint64) bool { return x != 127 }
+func lt_128_uint64(x uint64) bool { return x < 128 }
+func le_128_uint64(x uint64) bool { return x <= 128 }
+func gt_128_uint64(x uint64) bool { return x > 128 }
+func ge_128_uint64(x uint64) bool { return x >= 128 }
+func eq_128_uint64(x uint64) bool { return x == 128 }
+func ne_128_uint64(x uint64) bool { return x != 128 }
+func lt_254_uint64(x uint64) bool { return x < 254 }
+func le_254_uint64(x uint64) bool { return x <= 254 }
+func gt_254_uint64(x uint64) bool { return x > 254 }
+func ge_254_uint64(x uint64) bool { return x >= 254 }
+func eq_254_uint64(x uint64) bool { return x == 254 }
+func ne_254_uint64(x uint64) bool { return x != 254 }
+func lt_255_uint64(x uint64) bool { return x < 255 }
+func le_255_uint64(x uint64) bool { return x <= 255 }
+func gt_255_uint64(x uint64) bool { return x > 255 }
+func ge_255_uint64(x uint64) bool { return x >= 255 }
+func eq_255_uint64(x uint64) bool { return x == 255 }
+func ne_255_uint64(x uint64) bool { return x != 255 }
+func lt_256_uint64(x uint64) bool { return x < 256 }
+func le_256_uint64(x uint64) bool { return x <= 256 }
+func gt_256_uint64(x uint64) bool { return x > 256 }
+func ge_256_uint64(x uint64) bool { return x >= 256 }
+func eq_256_uint64(x uint64) bool { return x == 256 }
+func ne_256_uint64(x uint64) bool { return x != 256 }
+func lt_32766_uint64(x uint64) bool { return x < 32766 }
+func le_32766_uint64(x uint64) bool { return x <= 32766 }
+func gt_32766_uint64(x uint64) bool { return x > 32766 }
+func ge_32766_uint64(x uint64) bool { return x >= 32766 }
+func eq_32766_uint64(x uint64) bool { return x == 32766 }
+func ne_32766_uint64(x uint64) bool { return x != 32766 }
+func lt_32767_uint64(x uint64) bool { return x < 32767 }
+func le_32767_uint64(x uint64) bool { return x <= 32767 }
+func gt_32767_uint64(x uint64) bool { return x > 32767 }
+func ge_32767_uint64(x uint64) bool { return x >= 32767 }
+func eq_32767_uint64(x uint64) bool { return x == 32767 }
+func ne_32767_uint64(x uint64) bool { return x != 32767 }
+func lt_32768_uint64(x uint64) bool { return x < 32768 }
+func le_32768_uint64(x uint64) bool { return x <= 32768 }
+func gt_32768_uint64(x uint64) bool { return x > 32768 }
+func ge_32768_uint64(x uint64) bool { return x >= 32768 }
+func eq_32768_uint64(x uint64) bool { return x == 32768 }
+func ne_32768_uint64(x uint64) bool { return x != 32768 }
+func lt_65534_uint64(x uint64) bool { return x < 65534 }
+func le_65534_uint64(x uint64) bool { return x <= 65534 }
+func gt_65534_uint64(x uint64) bool { return x > 65534 }
+func ge_65534_uint64(x uint64) bool { return x >= 65534 }
+func eq_65534_uint64(x uint64) bool { return x == 65534 }
+func ne_65534_uint64(x uint64) bool { return x != 65534 }
+func lt_65535_uint64(x uint64) bool { return x < 65535 }
+func le_65535_uint64(x uint64) bool { return x <= 65535 }
+func gt_65535_uint64(x uint64) bool { return x > 65535 }
+func ge_65535_uint64(x uint64) bool { return x >= 65535 }
+func eq_65535_uint64(x uint64) bool { return x == 65535 }
+func ne_65535_uint64(x uint64) bool { return x != 65535 }
+func lt_65536_uint64(x uint64) bool { return x < 65536 }
+func le_65536_uint64(x uint64) bool { return x <= 65536 }
+func gt_65536_uint64(x uint64) bool { return x > 65536 }
+func ge_65536_uint64(x uint64) bool { return x >= 65536 }
+func eq_65536_uint64(x uint64) bool { return x == 65536 }
+func ne_65536_uint64(x uint64) bool { return x != 65536 }
+func lt_2147483646_uint64(x uint64) bool { return x < 2147483646 }
+func le_2147483646_uint64(x uint64) bool { return x <= 2147483646 }
+func gt_2147483646_uint64(x uint64) bool { return x > 2147483646 }
+func ge_2147483646_uint64(x uint64) bool { return x >= 2147483646 }
+func eq_2147483646_uint64(x uint64) bool { return x == 2147483646 }
+func ne_2147483646_uint64(x uint64) bool { return x != 2147483646 }
+func lt_2147483647_uint64(x uint64) bool { return x < 2147483647 }
+func le_2147483647_uint64(x uint64) bool { return x <= 2147483647 }
+func gt_2147483647_uint64(x uint64) bool { return x > 2147483647 }
+func ge_2147483647_uint64(x uint64) bool { return x >= 2147483647 }
+func eq_2147483647_uint64(x uint64) bool { return x == 2147483647 }
+func ne_2147483647_uint64(x uint64) bool { return x != 2147483647 }
+func lt_2147483648_uint64(x uint64) bool { return x < 2147483648 }
+func le_2147483648_uint64(x uint64) bool { return x <= 2147483648 }
+func gt_2147483648_uint64(x uint64) bool { return x > 2147483648 }
+func ge_2147483648_uint64(x uint64) bool { return x >= 2147483648 }
+func eq_2147483648_uint64(x uint64) bool { return x == 2147483648 }
+func ne_2147483648_uint64(x uint64) bool { return x != 2147483648 }
+func lt_4278190080_uint64(x uint64) bool { return x < 4278190080 }
+func le_4278190080_uint64(x uint64) bool { return x <= 4278190080 }
+func gt_4278190080_uint64(x uint64) bool { return x > 4278190080 }
+func ge_4278190080_uint64(x uint64) bool { return x >= 4278190080 }
+func eq_4278190080_uint64(x uint64) bool { return x == 4278190080 }
+func ne_4278190080_uint64(x uint64) bool { return x != 4278190080 }
+func lt_4294967294_uint64(x uint64) bool { return x < 4294967294 }
+func le_4294967294_uint64(x uint64) bool { return x <= 4294967294 }
+func gt_4294967294_uint64(x uint64) bool { return x > 4294967294 }
+func ge_4294967294_uint64(x uint64) bool { return x >= 4294967294 }
+func eq_4294967294_uint64(x uint64) bool { return x == 4294967294 }
+func ne_4294967294_uint64(x uint64) bool { return x != 4294967294 }
+func lt_4294967295_uint64(x uint64) bool { return x < 4294967295 }
+func le_4294967295_uint64(x uint64) bool { return x <= 4294967295 }
+func gt_4294967295_uint64(x uint64) bool { return x > 4294967295 }
+func ge_4294967295_uint64(x uint64) bool { return x >= 4294967295 }
+func eq_4294967295_uint64(x uint64) bool { return x == 4294967295 }
+func ne_4294967295_uint64(x uint64) bool { return x != 4294967295 }
+func lt_4294967296_uint64(x uint64) bool { return x < 4294967296 }
+func le_4294967296_uint64(x uint64) bool { return x <= 4294967296 }
+func gt_4294967296_uint64(x uint64) bool { return x > 4294967296 }
+func ge_4294967296_uint64(x uint64) bool { return x >= 4294967296 }
+func eq_4294967296_uint64(x uint64) bool { return x == 4294967296 }
+func ne_4294967296_uint64(x uint64) bool { return x != 4294967296 }
+func lt_1095216660480_uint64(x uint64) bool { return x < 1095216660480 }
+func le_1095216660480_uint64(x uint64) bool { return x <= 1095216660480 }
+func gt_1095216660480_uint64(x uint64) bool { return x > 1095216660480 }
+func ge_1095216660480_uint64(x uint64) bool { return x >= 1095216660480 }
+func eq_1095216660480_uint64(x uint64) bool { return x == 1095216660480 }
+func ne_1095216660480_uint64(x uint64) bool { return x != 1095216660480 }
+func lt_9223372036854775806_uint64(x uint64) bool { return x < 9223372036854775806 }
+func le_9223372036854775806_uint64(x uint64) bool { return x <= 9223372036854775806 }
+func gt_9223372036854775806_uint64(x uint64) bool { return x > 9223372036854775806 }
+func ge_9223372036854775806_uint64(x uint64) bool { return x >= 9223372036854775806 }
+func eq_9223372036854775806_uint64(x uint64) bool { return x == 9223372036854775806 }
+func ne_9223372036854775806_uint64(x uint64) bool { return x != 9223372036854775806 }
+func lt_9223372036854775807_uint64(x uint64) bool { return x < 9223372036854775807 }
+func le_9223372036854775807_uint64(x uint64) bool { return x <= 9223372036854775807 }
+func gt_9223372036854775807_uint64(x uint64) bool { return x > 9223372036854775807 }
+func ge_9223372036854775807_uint64(x uint64) bool { return x >= 9223372036854775807 }
+func eq_9223372036854775807_uint64(x uint64) bool { return x == 9223372036854775807 }
+func ne_9223372036854775807_uint64(x uint64) bool { return x != 9223372036854775807 }
+func lt_9223372036854775808_uint64(x uint64) bool { return x < 9223372036854775808 }
+func le_9223372036854775808_uint64(x uint64) bool { return x <= 9223372036854775808 }
+func gt_9223372036854775808_uint64(x uint64) bool { return x > 9223372036854775808 }
+func ge_9223372036854775808_uint64(x uint64) bool { return x >= 9223372036854775808 }
+func eq_9223372036854775808_uint64(x uint64) bool { return x == 9223372036854775808 }
+func ne_9223372036854775808_uint64(x uint64) bool { return x != 9223372036854775808 }
+func lt_18374686479671623680_uint64(x uint64) bool { return x < 18374686479671623680 }
+func le_18374686479671623680_uint64(x uint64) bool { return x <= 18374686479671623680 }
+func gt_18374686479671623680_uint64(x uint64) bool { return x > 18374686479671623680 }
+func ge_18374686479671623680_uint64(x uint64) bool { return x >= 18374686479671623680 }
+func eq_18374686479671623680_uint64(x uint64) bool { return x == 18374686479671623680 }
+func ne_18374686479671623680_uint64(x uint64) bool { return x != 18374686479671623680 }
+func lt_18446744073709551614_uint64(x uint64) bool { return x < 18446744073709551614 }
+func le_18446744073709551614_uint64(x uint64) bool { return x <= 18446744073709551614 }
+func gt_18446744073709551614_uint64(x uint64) bool { return x > 18446744073709551614 }
+func ge_18446744073709551614_uint64(x uint64) bool { return x >= 18446744073709551614 }
+func eq_18446744073709551614_uint64(x uint64) bool { return x == 18446744073709551614 }
+func ne_18446744073709551614_uint64(x uint64) bool { return x != 18446744073709551614 }
+func lt_18446744073709551615_uint64(x uint64) bool { return x < 18446744073709551615 }
+func le_18446744073709551615_uint64(x uint64) bool { return x <= 18446744073709551615 }
+func gt_18446744073709551615_uint64(x uint64) bool { return x > 18446744073709551615 }
+func ge_18446744073709551615_uint64(x uint64) bool { return x >= 18446744073709551615 }
+func eq_18446744073709551615_uint64(x uint64) bool { return x == 18446744073709551615 }
+func ne_18446744073709551615_uint64(x uint64) bool { return x != 18446744073709551615 }
+
+var uint64_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint64) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint64},
+ {idx: 0, exp: le, fn: le_0_uint64},
+ {idx: 0, exp: gt, fn: gt_0_uint64},
+ {idx: 0, exp: ge, fn: ge_0_uint64},
+ {idx: 0, exp: eq, fn: eq_0_uint64},
+ {idx: 0, exp: ne, fn: ne_0_uint64},
+ {idx: 1, exp: lt, fn: lt_1_uint64},
+ {idx: 1, exp: le, fn: le_1_uint64},
+ {idx: 1, exp: gt, fn: gt_1_uint64},
+ {idx: 1, exp: ge, fn: ge_1_uint64},
+ {idx: 1, exp: eq, fn: eq_1_uint64},
+ {idx: 1, exp: ne, fn: ne_1_uint64},
+ {idx: 2, exp: lt, fn: lt_126_uint64},
+ {idx: 2, exp: le, fn: le_126_uint64},
+ {idx: 2, exp: gt, fn: gt_126_uint64},
+ {idx: 2, exp: ge, fn: ge_126_uint64},
+ {idx: 2, exp: eq, fn: eq_126_uint64},
+ {idx: 2, exp: ne, fn: ne_126_uint64},
+ {idx: 3, exp: lt, fn: lt_127_uint64},
+ {idx: 3, exp: le, fn: le_127_uint64},
+ {idx: 3, exp: gt, fn: gt_127_uint64},
+ {idx: 3, exp: ge, fn: ge_127_uint64},
+ {idx: 3, exp: eq, fn: eq_127_uint64},
+ {idx: 3, exp: ne, fn: ne_127_uint64},
+ {idx: 4, exp: lt, fn: lt_128_uint64},
+ {idx: 4, exp: le, fn: le_128_uint64},
+ {idx: 4, exp: gt, fn: gt_128_uint64},
+ {idx: 4, exp: ge, fn: ge_128_uint64},
+ {idx: 4, exp: eq, fn: eq_128_uint64},
+ {idx: 4, exp: ne, fn: ne_128_uint64},
+ {idx: 5, exp: lt, fn: lt_254_uint64},
+ {idx: 5, exp: le, fn: le_254_uint64},
+ {idx: 5, exp: gt, fn: gt_254_uint64},
+ {idx: 5, exp: ge, fn: ge_254_uint64},
+ {idx: 5, exp: eq, fn: eq_254_uint64},
+ {idx: 5, exp: ne, fn: ne_254_uint64},
+ {idx: 6, exp: lt, fn: lt_255_uint64},
+ {idx: 6, exp: le, fn: le_255_uint64},
+ {idx: 6, exp: gt, fn: gt_255_uint64},
+ {idx: 6, exp: ge, fn: ge_255_uint64},
+ {idx: 6, exp: eq, fn: eq_255_uint64},
+ {idx: 6, exp: ne, fn: ne_255_uint64},
+ {idx: 7, exp: lt, fn: lt_256_uint64},
+ {idx: 7, exp: le, fn: le_256_uint64},
+ {idx: 7, exp: gt, fn: gt_256_uint64},
+ {idx: 7, exp: ge, fn: ge_256_uint64},
+ {idx: 7, exp: eq, fn: eq_256_uint64},
+ {idx: 7, exp: ne, fn: ne_256_uint64},
+ {idx: 8, exp: lt, fn: lt_32766_uint64},
+ {idx: 8, exp: le, fn: le_32766_uint64},
+ {idx: 8, exp: gt, fn: gt_32766_uint64},
+ {idx: 8, exp: ge, fn: ge_32766_uint64},
+ {idx: 8, exp: eq, fn: eq_32766_uint64},
+ {idx: 8, exp: ne, fn: ne_32766_uint64},
+ {idx: 9, exp: lt, fn: lt_32767_uint64},
+ {idx: 9, exp: le, fn: le_32767_uint64},
+ {idx: 9, exp: gt, fn: gt_32767_uint64},
+ {idx: 9, exp: ge, fn: ge_32767_uint64},
+ {idx: 9, exp: eq, fn: eq_32767_uint64},
+ {idx: 9, exp: ne, fn: ne_32767_uint64},
+ {idx: 10, exp: lt, fn: lt_32768_uint64},
+ {idx: 10, exp: le, fn: le_32768_uint64},
+ {idx: 10, exp: gt, fn: gt_32768_uint64},
+ {idx: 10, exp: ge, fn: ge_32768_uint64},
+ {idx: 10, exp: eq, fn: eq_32768_uint64},
+ {idx: 10, exp: ne, fn: ne_32768_uint64},
+ {idx: 11, exp: lt, fn: lt_65534_uint64},
+ {idx: 11, exp: le, fn: le_65534_uint64},
+ {idx: 11, exp: gt, fn: gt_65534_uint64},
+ {idx: 11, exp: ge, fn: ge_65534_uint64},
+ {idx: 11, exp: eq, fn: eq_65534_uint64},
+ {idx: 11, exp: ne, fn: ne_65534_uint64},
+ {idx: 12, exp: lt, fn: lt_65535_uint64},
+ {idx: 12, exp: le, fn: le_65535_uint64},
+ {idx: 12, exp: gt, fn: gt_65535_uint64},
+ {idx: 12, exp: ge, fn: ge_65535_uint64},
+ {idx: 12, exp: eq, fn: eq_65535_uint64},
+ {idx: 12, exp: ne, fn: ne_65535_uint64},
+ {idx: 13, exp: lt, fn: lt_65536_uint64},
+ {idx: 13, exp: le, fn: le_65536_uint64},
+ {idx: 13, exp: gt, fn: gt_65536_uint64},
+ {idx: 13, exp: ge, fn: ge_65536_uint64},
+ {idx: 13, exp: eq, fn: eq_65536_uint64},
+ {idx: 13, exp: ne, fn: ne_65536_uint64},
+ {idx: 14, exp: lt, fn: lt_2147483646_uint64},
+ {idx: 14, exp: le, fn: le_2147483646_uint64},
+ {idx: 14, exp: gt, fn: gt_2147483646_uint64},
+ {idx: 14, exp: ge, fn: ge_2147483646_uint64},
+ {idx: 14, exp: eq, fn: eq_2147483646_uint64},
+ {idx: 14, exp: ne, fn: ne_2147483646_uint64},
+ {idx: 15, exp: lt, fn: lt_2147483647_uint64},
+ {idx: 15, exp: le, fn: le_2147483647_uint64},
+ {idx: 15, exp: gt, fn: gt_2147483647_uint64},
+ {idx: 15, exp: ge, fn: ge_2147483647_uint64},
+ {idx: 15, exp: eq, fn: eq_2147483647_uint64},
+ {idx: 15, exp: ne, fn: ne_2147483647_uint64},
+ {idx: 16, exp: lt, fn: lt_2147483648_uint64},
+ {idx: 16, exp: le, fn: le_2147483648_uint64},
+ {idx: 16, exp: gt, fn: gt_2147483648_uint64},
+ {idx: 16, exp: ge, fn: ge_2147483648_uint64},
+ {idx: 16, exp: eq, fn: eq_2147483648_uint64},
+ {idx: 16, exp: ne, fn: ne_2147483648_uint64},
+ {idx: 17, exp: lt, fn: lt_4278190080_uint64},
+ {idx: 17, exp: le, fn: le_4278190080_uint64},
+ {idx: 17, exp: gt, fn: gt_4278190080_uint64},
+ {idx: 17, exp: ge, fn: ge_4278190080_uint64},
+ {idx: 17, exp: eq, fn: eq_4278190080_uint64},
+ {idx: 17, exp: ne, fn: ne_4278190080_uint64},
+ {idx: 18, exp: lt, fn: lt_4294967294_uint64},
+ {idx: 18, exp: le, fn: le_4294967294_uint64},
+ {idx: 18, exp: gt, fn: gt_4294967294_uint64},
+ {idx: 18, exp: ge, fn: ge_4294967294_uint64},
+ {idx: 18, exp: eq, fn: eq_4294967294_uint64},
+ {idx: 18, exp: ne, fn: ne_4294967294_uint64},
+ {idx: 19, exp: lt, fn: lt_4294967295_uint64},
+ {idx: 19, exp: le, fn: le_4294967295_uint64},
+ {idx: 19, exp: gt, fn: gt_4294967295_uint64},
+ {idx: 19, exp: ge, fn: ge_4294967295_uint64},
+ {idx: 19, exp: eq, fn: eq_4294967295_uint64},
+ {idx: 19, exp: ne, fn: ne_4294967295_uint64},
+ {idx: 20, exp: lt, fn: lt_4294967296_uint64},
+ {idx: 20, exp: le, fn: le_4294967296_uint64},
+ {idx: 20, exp: gt, fn: gt_4294967296_uint64},
+ {idx: 20, exp: ge, fn: ge_4294967296_uint64},
+ {idx: 20, exp: eq, fn: eq_4294967296_uint64},
+ {idx: 20, exp: ne, fn: ne_4294967296_uint64},
+ {idx: 21, exp: lt, fn: lt_1095216660480_uint64},
+ {idx: 21, exp: le, fn: le_1095216660480_uint64},
+ {idx: 21, exp: gt, fn: gt_1095216660480_uint64},
+ {idx: 21, exp: ge, fn: ge_1095216660480_uint64},
+ {idx: 21, exp: eq, fn: eq_1095216660480_uint64},
+ {idx: 21, exp: ne, fn: ne_1095216660480_uint64},
+ {idx: 22, exp: lt, fn: lt_9223372036854775806_uint64},
+ {idx: 22, exp: le, fn: le_9223372036854775806_uint64},
+ {idx: 22, exp: gt, fn: gt_9223372036854775806_uint64},
+ {idx: 22, exp: ge, fn: ge_9223372036854775806_uint64},
+ {idx: 22, exp: eq, fn: eq_9223372036854775806_uint64},
+ {idx: 22, exp: ne, fn: ne_9223372036854775806_uint64},
+ {idx: 23, exp: lt, fn: lt_9223372036854775807_uint64},
+ {idx: 23, exp: le, fn: le_9223372036854775807_uint64},
+ {idx: 23, exp: gt, fn: gt_9223372036854775807_uint64},
+ {idx: 23, exp: ge, fn: ge_9223372036854775807_uint64},
+ {idx: 23, exp: eq, fn: eq_9223372036854775807_uint64},
+ {idx: 23, exp: ne, fn: ne_9223372036854775807_uint64},
+ {idx: 24, exp: lt, fn: lt_9223372036854775808_uint64},
+ {idx: 24, exp: le, fn: le_9223372036854775808_uint64},
+ {idx: 24, exp: gt, fn: gt_9223372036854775808_uint64},
+ {idx: 24, exp: ge, fn: ge_9223372036854775808_uint64},
+ {idx: 24, exp: eq, fn: eq_9223372036854775808_uint64},
+ {idx: 24, exp: ne, fn: ne_9223372036854775808_uint64},
+ {idx: 25, exp: lt, fn: lt_18374686479671623680_uint64},
+ {idx: 25, exp: le, fn: le_18374686479671623680_uint64},
+ {idx: 25, exp: gt, fn: gt_18374686479671623680_uint64},
+ {idx: 25, exp: ge, fn: ge_18374686479671623680_uint64},
+ {idx: 25, exp: eq, fn: eq_18374686479671623680_uint64},
+ {idx: 25, exp: ne, fn: ne_18374686479671623680_uint64},
+ {idx: 26, exp: lt, fn: lt_18446744073709551614_uint64},
+ {idx: 26, exp: le, fn: le_18446744073709551614_uint64},
+ {idx: 26, exp: gt, fn: gt_18446744073709551614_uint64},
+ {idx: 26, exp: ge, fn: ge_18446744073709551614_uint64},
+ {idx: 26, exp: eq, fn: eq_18446744073709551614_uint64},
+ {idx: 26, exp: ne, fn: ne_18446744073709551614_uint64},
+ {idx: 27, exp: lt, fn: lt_18446744073709551615_uint64},
+ {idx: 27, exp: le, fn: le_18446744073709551615_uint64},
+ {idx: 27, exp: gt, fn: gt_18446744073709551615_uint64},
+ {idx: 27, exp: ge, fn: ge_18446744073709551615_uint64},
+ {idx: 27, exp: eq, fn: eq_18446744073709551615_uint64},
+ {idx: 27, exp: ne, fn: ne_18446744073709551615_uint64},
+}
+
+// uint32 tests
+var uint32_vals = []uint32{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+}
+
+func lt_0_uint32(x uint32) bool { return x < 0 }
+func le_0_uint32(x uint32) bool { return x <= 0 }
+func gt_0_uint32(x uint32) bool { return x > 0 }
+func ge_0_uint32(x uint32) bool { return x >= 0 }
+func eq_0_uint32(x uint32) bool { return x == 0 }
+func ne_0_uint32(x uint32) bool { return x != 0 }
+func lt_1_uint32(x uint32) bool { return x < 1 }
+func le_1_uint32(x uint32) bool { return x <= 1 }
+func gt_1_uint32(x uint32) bool { return x > 1 }
+func ge_1_uint32(x uint32) bool { return x >= 1 }
+func eq_1_uint32(x uint32) bool { return x == 1 }
+func ne_1_uint32(x uint32) bool { return x != 1 }
+func lt_126_uint32(x uint32) bool { return x < 126 }
+func le_126_uint32(x uint32) bool { return x <= 126 }
+func gt_126_uint32(x uint32) bool { return x > 126 }
+func ge_126_uint32(x uint32) bool { return x >= 126 }
+func eq_126_uint32(x uint32) bool { return x == 126 }
+func ne_126_uint32(x uint32) bool { return x != 126 }
+func lt_127_uint32(x uint32) bool { return x < 127 }
+func le_127_uint32(x uint32) bool { return x <= 127 }
+func gt_127_uint32(x uint32) bool { return x > 127 }
+func ge_127_uint32(x uint32) bool { return x >= 127 }
+func eq_127_uint32(x uint32) bool { return x == 127 }
+func ne_127_uint32(x uint32) bool { return x != 127 }
+func lt_128_uint32(x uint32) bool { return x < 128 }
+func le_128_uint32(x uint32) bool { return x <= 128 }
+func gt_128_uint32(x uint32) bool { return x > 128 }
+func ge_128_uint32(x uint32) bool { return x >= 128 }
+func eq_128_uint32(x uint32) bool { return x == 128 }
+func ne_128_uint32(x uint32) bool { return x != 128 }
+func lt_254_uint32(x uint32) bool { return x < 254 }
+func le_254_uint32(x uint32) bool { return x <= 254 }
+func gt_254_uint32(x uint32) bool { return x > 254 }
+func ge_254_uint32(x uint32) bool { return x >= 254 }
+func eq_254_uint32(x uint32) bool { return x == 254 }
+func ne_254_uint32(x uint32) bool { return x != 254 }
+func lt_255_uint32(x uint32) bool { return x < 255 }
+func le_255_uint32(x uint32) bool { return x <= 255 }
+func gt_255_uint32(x uint32) bool { return x > 255 }
+func ge_255_uint32(x uint32) bool { return x >= 255 }
+func eq_255_uint32(x uint32) bool { return x == 255 }
+func ne_255_uint32(x uint32) bool { return x != 255 }
+func lt_256_uint32(x uint32) bool { return x < 256 }
+func le_256_uint32(x uint32) bool { return x <= 256 }
+func gt_256_uint32(x uint32) bool { return x > 256 }
+func ge_256_uint32(x uint32) bool { return x >= 256 }
+func eq_256_uint32(x uint32) bool { return x == 256 }
+func ne_256_uint32(x uint32) bool { return x != 256 }
+func lt_32766_uint32(x uint32) bool { return x < 32766 }
+func le_32766_uint32(x uint32) bool { return x <= 32766 }
+func gt_32766_uint32(x uint32) bool { return x > 32766 }
+func ge_32766_uint32(x uint32) bool { return x >= 32766 }
+func eq_32766_uint32(x uint32) bool { return x == 32766 }
+func ne_32766_uint32(x uint32) bool { return x != 32766 }
+func lt_32767_uint32(x uint32) bool { return x < 32767 }
+func le_32767_uint32(x uint32) bool { return x <= 32767 }
+func gt_32767_uint32(x uint32) bool { return x > 32767 }
+func ge_32767_uint32(x uint32) bool { return x >= 32767 }
+func eq_32767_uint32(x uint32) bool { return x == 32767 }
+func ne_32767_uint32(x uint32) bool { return x != 32767 }
+func lt_32768_uint32(x uint32) bool { return x < 32768 }
+func le_32768_uint32(x uint32) bool { return x <= 32768 }
+func gt_32768_uint32(x uint32) bool { return x > 32768 }
+func ge_32768_uint32(x uint32) bool { return x >= 32768 }
+func eq_32768_uint32(x uint32) bool { return x == 32768 }
+func ne_32768_uint32(x uint32) bool { return x != 32768 }
+func lt_65534_uint32(x uint32) bool { return x < 65534 }
+func le_65534_uint32(x uint32) bool { return x <= 65534 }
+func gt_65534_uint32(x uint32) bool { return x > 65534 }
+func ge_65534_uint32(x uint32) bool { return x >= 65534 }
+func eq_65534_uint32(x uint32) bool { return x == 65534 }
+func ne_65534_uint32(x uint32) bool { return x != 65534 }
+func lt_65535_uint32(x uint32) bool { return x < 65535 }
+func le_65535_uint32(x uint32) bool { return x <= 65535 }
+func gt_65535_uint32(x uint32) bool { return x > 65535 }
+func ge_65535_uint32(x uint32) bool { return x >= 65535 }
+func eq_65535_uint32(x uint32) bool { return x == 65535 }
+func ne_65535_uint32(x uint32) bool { return x != 65535 }
+func lt_65536_uint32(x uint32) bool { return x < 65536 }
+func le_65536_uint32(x uint32) bool { return x <= 65536 }
+func gt_65536_uint32(x uint32) bool { return x > 65536 }
+func ge_65536_uint32(x uint32) bool { return x >= 65536 }
+func eq_65536_uint32(x uint32) bool { return x == 65536 }
+func ne_65536_uint32(x uint32) bool { return x != 65536 }
+func lt_2147483646_uint32(x uint32) bool { return x < 2147483646 }
+func le_2147483646_uint32(x uint32) bool { return x <= 2147483646 }
+func gt_2147483646_uint32(x uint32) bool { return x > 2147483646 }
+func ge_2147483646_uint32(x uint32) bool { return x >= 2147483646 }
+func eq_2147483646_uint32(x uint32) bool { return x == 2147483646 }
+func ne_2147483646_uint32(x uint32) bool { return x != 2147483646 }
+func lt_2147483647_uint32(x uint32) bool { return x < 2147483647 }
+func le_2147483647_uint32(x uint32) bool { return x <= 2147483647 }
+func gt_2147483647_uint32(x uint32) bool { return x > 2147483647 }
+func ge_2147483647_uint32(x uint32) bool { return x >= 2147483647 }
+func eq_2147483647_uint32(x uint32) bool { return x == 2147483647 }
+func ne_2147483647_uint32(x uint32) bool { return x != 2147483647 }
+func lt_2147483648_uint32(x uint32) bool { return x < 2147483648 }
+func le_2147483648_uint32(x uint32) bool { return x <= 2147483648 }
+func gt_2147483648_uint32(x uint32) bool { return x > 2147483648 }
+func ge_2147483648_uint32(x uint32) bool { return x >= 2147483648 }
+func eq_2147483648_uint32(x uint32) bool { return x == 2147483648 }
+func ne_2147483648_uint32(x uint32) bool { return x != 2147483648 }
+func lt_4278190080_uint32(x uint32) bool { return x < 4278190080 }
+func le_4278190080_uint32(x uint32) bool { return x <= 4278190080 }
+func gt_4278190080_uint32(x uint32) bool { return x > 4278190080 }
+func ge_4278190080_uint32(x uint32) bool { return x >= 4278190080 }
+func eq_4278190080_uint32(x uint32) bool { return x == 4278190080 }
+func ne_4278190080_uint32(x uint32) bool { return x != 4278190080 }
+func lt_4294967294_uint32(x uint32) bool { return x < 4294967294 }
+func le_4294967294_uint32(x uint32) bool { return x <= 4294967294 }
+func gt_4294967294_uint32(x uint32) bool { return x > 4294967294 }
+func ge_4294967294_uint32(x uint32) bool { return x >= 4294967294 }
+func eq_4294967294_uint32(x uint32) bool { return x == 4294967294 }
+func ne_4294967294_uint32(x uint32) bool { return x != 4294967294 }
+func lt_4294967295_uint32(x uint32) bool { return x < 4294967295 }
+func le_4294967295_uint32(x uint32) bool { return x <= 4294967295 }
+func gt_4294967295_uint32(x uint32) bool { return x > 4294967295 }
+func ge_4294967295_uint32(x uint32) bool { return x >= 4294967295 }
+func eq_4294967295_uint32(x uint32) bool { return x == 4294967295 }
+func ne_4294967295_uint32(x uint32) bool { return x != 4294967295 }
+
+var uint32_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint32) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint32},
+ {idx: 0, exp: le, fn: le_0_uint32},
+ {idx: 0, exp: gt, fn: gt_0_uint32},
+ {idx: 0, exp: ge, fn: ge_0_uint32},
+ {idx: 0, exp: eq, fn: eq_0_uint32},
+ {idx: 0, exp: ne, fn: ne_0_uint32},
+ {idx: 1, exp: lt, fn: lt_1_uint32},
+ {idx: 1, exp: le, fn: le_1_uint32},
+ {idx: 1, exp: gt, fn: gt_1_uint32},
+ {idx: 1, exp: ge, fn: ge_1_uint32},
+ {idx: 1, exp: eq, fn: eq_1_uint32},
+ {idx: 1, exp: ne, fn: ne_1_uint32},
+ {idx: 2, exp: lt, fn: lt_126_uint32},
+ {idx: 2, exp: le, fn: le_126_uint32},
+ {idx: 2, exp: gt, fn: gt_126_uint32},
+ {idx: 2, exp: ge, fn: ge_126_uint32},
+ {idx: 2, exp: eq, fn: eq_126_uint32},
+ {idx: 2, exp: ne, fn: ne_126_uint32},
+ {idx: 3, exp: lt, fn: lt_127_uint32},
+ {idx: 3, exp: le, fn: le_127_uint32},
+ {idx: 3, exp: gt, fn: gt_127_uint32},
+ {idx: 3, exp: ge, fn: ge_127_uint32},
+ {idx: 3, exp: eq, fn: eq_127_uint32},
+ {idx: 3, exp: ne, fn: ne_127_uint32},
+ {idx: 4, exp: lt, fn: lt_128_uint32},
+ {idx: 4, exp: le, fn: le_128_uint32},
+ {idx: 4, exp: gt, fn: gt_128_uint32},
+ {idx: 4, exp: ge, fn: ge_128_uint32},
+ {idx: 4, exp: eq, fn: eq_128_uint32},
+ {idx: 4, exp: ne, fn: ne_128_uint32},
+ {idx: 5, exp: lt, fn: lt_254_uint32},
+ {idx: 5, exp: le, fn: le_254_uint32},
+ {idx: 5, exp: gt, fn: gt_254_uint32},
+ {idx: 5, exp: ge, fn: ge_254_uint32},
+ {idx: 5, exp: eq, fn: eq_254_uint32},
+ {idx: 5, exp: ne, fn: ne_254_uint32},
+ {idx: 6, exp: lt, fn: lt_255_uint32},
+ {idx: 6, exp: le, fn: le_255_uint32},
+ {idx: 6, exp: gt, fn: gt_255_uint32},
+ {idx: 6, exp: ge, fn: ge_255_uint32},
+ {idx: 6, exp: eq, fn: eq_255_uint32},
+ {idx: 6, exp: ne, fn: ne_255_uint32},
+ {idx: 7, exp: lt, fn: lt_256_uint32},
+ {idx: 7, exp: le, fn: le_256_uint32},
+ {idx: 7, exp: gt, fn: gt_256_uint32},
+ {idx: 7, exp: ge, fn: ge_256_uint32},
+ {idx: 7, exp: eq, fn: eq_256_uint32},
+ {idx: 7, exp: ne, fn: ne_256_uint32},
+ {idx: 8, exp: lt, fn: lt_32766_uint32},
+ {idx: 8, exp: le, fn: le_32766_uint32},
+ {idx: 8, exp: gt, fn: gt_32766_uint32},
+ {idx: 8, exp: ge, fn: ge_32766_uint32},
+ {idx: 8, exp: eq, fn: eq_32766_uint32},
+ {idx: 8, exp: ne, fn: ne_32766_uint32},
+ {idx: 9, exp: lt, fn: lt_32767_uint32},
+ {idx: 9, exp: le, fn: le_32767_uint32},
+ {idx: 9, exp: gt, fn: gt_32767_uint32},
+ {idx: 9, exp: ge, fn: ge_32767_uint32},
+ {idx: 9, exp: eq, fn: eq_32767_uint32},
+ {idx: 9, exp: ne, fn: ne_32767_uint32},
+ {idx: 10, exp: lt, fn: lt_32768_uint32},
+ {idx: 10, exp: le, fn: le_32768_uint32},
+ {idx: 10, exp: gt, fn: gt_32768_uint32},
+ {idx: 10, exp: ge, fn: ge_32768_uint32},
+ {idx: 10, exp: eq, fn: eq_32768_uint32},
+ {idx: 10, exp: ne, fn: ne_32768_uint32},
+ {idx: 11, exp: lt, fn: lt_65534_uint32},
+ {idx: 11, exp: le, fn: le_65534_uint32},
+ {idx: 11, exp: gt, fn: gt_65534_uint32},
+ {idx: 11, exp: ge, fn: ge_65534_uint32},
+ {idx: 11, exp: eq, fn: eq_65534_uint32},
+ {idx: 11, exp: ne, fn: ne_65534_uint32},
+ {idx: 12, exp: lt, fn: lt_65535_uint32},
+ {idx: 12, exp: le, fn: le_65535_uint32},
+ {idx: 12, exp: gt, fn: gt_65535_uint32},
+ {idx: 12, exp: ge, fn: ge_65535_uint32},
+ {idx: 12, exp: eq, fn: eq_65535_uint32},
+ {idx: 12, exp: ne, fn: ne_65535_uint32},
+ {idx: 13, exp: lt, fn: lt_65536_uint32},
+ {idx: 13, exp: le, fn: le_65536_uint32},
+ {idx: 13, exp: gt, fn: gt_65536_uint32},
+ {idx: 13, exp: ge, fn: ge_65536_uint32},
+ {idx: 13, exp: eq, fn: eq_65536_uint32},
+ {idx: 13, exp: ne, fn: ne_65536_uint32},
+ {idx: 14, exp: lt, fn: lt_2147483646_uint32},
+ {idx: 14, exp: le, fn: le_2147483646_uint32},
+ {idx: 14, exp: gt, fn: gt_2147483646_uint32},
+ {idx: 14, exp: ge, fn: ge_2147483646_uint32},
+ {idx: 14, exp: eq, fn: eq_2147483646_uint32},
+ {idx: 14, exp: ne, fn: ne_2147483646_uint32},
+ {idx: 15, exp: lt, fn: lt_2147483647_uint32},
+ {idx: 15, exp: le, fn: le_2147483647_uint32},
+ {idx: 15, exp: gt, fn: gt_2147483647_uint32},
+ {idx: 15, exp: ge, fn: ge_2147483647_uint32},
+ {idx: 15, exp: eq, fn: eq_2147483647_uint32},
+ {idx: 15, exp: ne, fn: ne_2147483647_uint32},
+ {idx: 16, exp: lt, fn: lt_2147483648_uint32},
+ {idx: 16, exp: le, fn: le_2147483648_uint32},
+ {idx: 16, exp: gt, fn: gt_2147483648_uint32},
+ {idx: 16, exp: ge, fn: ge_2147483648_uint32},
+ {idx: 16, exp: eq, fn: eq_2147483648_uint32},
+ {idx: 16, exp: ne, fn: ne_2147483648_uint32},
+ {idx: 17, exp: lt, fn: lt_4278190080_uint32},
+ {idx: 17, exp: le, fn: le_4278190080_uint32},
+ {idx: 17, exp: gt, fn: gt_4278190080_uint32},
+ {idx: 17, exp: ge, fn: ge_4278190080_uint32},
+ {idx: 17, exp: eq, fn: eq_4278190080_uint32},
+ {idx: 17, exp: ne, fn: ne_4278190080_uint32},
+ {idx: 18, exp: lt, fn: lt_4294967294_uint32},
+ {idx: 18, exp: le, fn: le_4294967294_uint32},
+ {idx: 18, exp: gt, fn: gt_4294967294_uint32},
+ {idx: 18, exp: ge, fn: ge_4294967294_uint32},
+ {idx: 18, exp: eq, fn: eq_4294967294_uint32},
+ {idx: 18, exp: ne, fn: ne_4294967294_uint32},
+ {idx: 19, exp: lt, fn: lt_4294967295_uint32},
+ {idx: 19, exp: le, fn: le_4294967295_uint32},
+ {idx: 19, exp: gt, fn: gt_4294967295_uint32},
+ {idx: 19, exp: ge, fn: ge_4294967295_uint32},
+ {idx: 19, exp: eq, fn: eq_4294967295_uint32},
+ {idx: 19, exp: ne, fn: ne_4294967295_uint32},
+}
+
+// uint16 tests
+var uint16_vals = []uint16{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+}
+
+func lt_0_uint16(x uint16) bool { return x < 0 }
+func le_0_uint16(x uint16) bool { return x <= 0 }
+func gt_0_uint16(x uint16) bool { return x > 0 }
+func ge_0_uint16(x uint16) bool { return x >= 0 }
+func eq_0_uint16(x uint16) bool { return x == 0 }
+func ne_0_uint16(x uint16) bool { return x != 0 }
+func lt_1_uint16(x uint16) bool { return x < 1 }
+func le_1_uint16(x uint16) bool { return x <= 1 }
+func gt_1_uint16(x uint16) bool { return x > 1 }
+func ge_1_uint16(x uint16) bool { return x >= 1 }
+func eq_1_uint16(x uint16) bool { return x == 1 }
+func ne_1_uint16(x uint16) bool { return x != 1 }
+func lt_126_uint16(x uint16) bool { return x < 126 }
+func le_126_uint16(x uint16) bool { return x <= 126 }
+func gt_126_uint16(x uint16) bool { return x > 126 }
+func ge_126_uint16(x uint16) bool { return x >= 126 }
+func eq_126_uint16(x uint16) bool { return x == 126 }
+func ne_126_uint16(x uint16) bool { return x != 126 }
+func lt_127_uint16(x uint16) bool { return x < 127 }
+func le_127_uint16(x uint16) bool { return x <= 127 }
+func gt_127_uint16(x uint16) bool { return x > 127 }
+func ge_127_uint16(x uint16) bool { return x >= 127 }
+func eq_127_uint16(x uint16) bool { return x == 127 }
+func ne_127_uint16(x uint16) bool { return x != 127 }
+func lt_128_uint16(x uint16) bool { return x < 128 }
+func le_128_uint16(x uint16) bool { return x <= 128 }
+func gt_128_uint16(x uint16) bool { return x > 128 }
+func ge_128_uint16(x uint16) bool { return x >= 128 }
+func eq_128_uint16(x uint16) bool { return x == 128 }
+func ne_128_uint16(x uint16) bool { return x != 128 }
+func lt_254_uint16(x uint16) bool { return x < 254 }
+func le_254_uint16(x uint16) bool { return x <= 254 }
+func gt_254_uint16(x uint16) bool { return x > 254 }
+func ge_254_uint16(x uint16) bool { return x >= 254 }
+func eq_254_uint16(x uint16) bool { return x == 254 }
+func ne_254_uint16(x uint16) bool { return x != 254 }
+func lt_255_uint16(x uint16) bool { return x < 255 }
+func le_255_uint16(x uint16) bool { return x <= 255 }
+func gt_255_uint16(x uint16) bool { return x > 255 }
+func ge_255_uint16(x uint16) bool { return x >= 255 }
+func eq_255_uint16(x uint16) bool { return x == 255 }
+func ne_255_uint16(x uint16) bool { return x != 255 }
+func lt_256_uint16(x uint16) bool { return x < 256 }
+func le_256_uint16(x uint16) bool { return x <= 256 }
+func gt_256_uint16(x uint16) bool { return x > 256 }
+func ge_256_uint16(x uint16) bool { return x >= 256 }
+func eq_256_uint16(x uint16) bool { return x == 256 }
+func ne_256_uint16(x uint16) bool { return x != 256 }
+func lt_32766_uint16(x uint16) bool { return x < 32766 }
+func le_32766_uint16(x uint16) bool { return x <= 32766 }
+func gt_32766_uint16(x uint16) bool { return x > 32766 }
+func ge_32766_uint16(x uint16) bool { return x >= 32766 }
+func eq_32766_uint16(x uint16) bool { return x == 32766 }
+func ne_32766_uint16(x uint16) bool { return x != 32766 }
+func lt_32767_uint16(x uint16) bool { return x < 32767 }
+func le_32767_uint16(x uint16) bool { return x <= 32767 }
+func gt_32767_uint16(x uint16) bool { return x > 32767 }
+func ge_32767_uint16(x uint16) bool { return x >= 32767 }
+func eq_32767_uint16(x uint16) bool { return x == 32767 }
+func ne_32767_uint16(x uint16) bool { return x != 32767 }
+func lt_32768_uint16(x uint16) bool { return x < 32768 }
+func le_32768_uint16(x uint16) bool { return x <= 32768 }
+func gt_32768_uint16(x uint16) bool { return x > 32768 }
+func ge_32768_uint16(x uint16) bool { return x >= 32768 }
+func eq_32768_uint16(x uint16) bool { return x == 32768 }
+func ne_32768_uint16(x uint16) bool { return x != 32768 }
+func lt_65534_uint16(x uint16) bool { return x < 65534 }
+func le_65534_uint16(x uint16) bool { return x <= 65534 }
+func gt_65534_uint16(x uint16) bool { return x > 65534 }
+func ge_65534_uint16(x uint16) bool { return x >= 65534 }
+func eq_65534_uint16(x uint16) bool { return x == 65534 }
+func ne_65534_uint16(x uint16) bool { return x != 65534 }
+func lt_65535_uint16(x uint16) bool { return x < 65535 }
+func le_65535_uint16(x uint16) bool { return x <= 65535 }
+func gt_65535_uint16(x uint16) bool { return x > 65535 }
+func ge_65535_uint16(x uint16) bool { return x >= 65535 }
+func eq_65535_uint16(x uint16) bool { return x == 65535 }
+func ne_65535_uint16(x uint16) bool { return x != 65535 }
+
+var uint16_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint16) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint16},
+ {idx: 0, exp: le, fn: le_0_uint16},
+ {idx: 0, exp: gt, fn: gt_0_uint16},
+ {idx: 0, exp: ge, fn: ge_0_uint16},
+ {idx: 0, exp: eq, fn: eq_0_uint16},
+ {idx: 0, exp: ne, fn: ne_0_uint16},
+ {idx: 1, exp: lt, fn: lt_1_uint16},
+ {idx: 1, exp: le, fn: le_1_uint16},
+ {idx: 1, exp: gt, fn: gt_1_uint16},
+ {idx: 1, exp: ge, fn: ge_1_uint16},
+ {idx: 1, exp: eq, fn: eq_1_uint16},
+ {idx: 1, exp: ne, fn: ne_1_uint16},
+ {idx: 2, exp: lt, fn: lt_126_uint16},
+ {idx: 2, exp: le, fn: le_126_uint16},
+ {idx: 2, exp: gt, fn: gt_126_uint16},
+ {idx: 2, exp: ge, fn: ge_126_uint16},
+ {idx: 2, exp: eq, fn: eq_126_uint16},
+ {idx: 2, exp: ne, fn: ne_126_uint16},
+ {idx: 3, exp: lt, fn: lt_127_uint16},
+ {idx: 3, exp: le, fn: le_127_uint16},
+ {idx: 3, exp: gt, fn: gt_127_uint16},
+ {idx: 3, exp: ge, fn: ge_127_uint16},
+ {idx: 3, exp: eq, fn: eq_127_uint16},
+ {idx: 3, exp: ne, fn: ne_127_uint16},
+ {idx: 4, exp: lt, fn: lt_128_uint16},
+ {idx: 4, exp: le, fn: le_128_uint16},
+ {idx: 4, exp: gt, fn: gt_128_uint16},
+ {idx: 4, exp: ge, fn: ge_128_uint16},
+ {idx: 4, exp: eq, fn: eq_128_uint16},
+ {idx: 4, exp: ne, fn: ne_128_uint16},
+ {idx: 5, exp: lt, fn: lt_254_uint16},
+ {idx: 5, exp: le, fn: le_254_uint16},
+ {idx: 5, exp: gt, fn: gt_254_uint16},
+ {idx: 5, exp: ge, fn: ge_254_uint16},
+ {idx: 5, exp: eq, fn: eq_254_uint16},
+ {idx: 5, exp: ne, fn: ne_254_uint16},
+ {idx: 6, exp: lt, fn: lt_255_uint16},
+ {idx: 6, exp: le, fn: le_255_uint16},
+ {idx: 6, exp: gt, fn: gt_255_uint16},
+ {idx: 6, exp: ge, fn: ge_255_uint16},
+ {idx: 6, exp: eq, fn: eq_255_uint16},
+ {idx: 6, exp: ne, fn: ne_255_uint16},
+ {idx: 7, exp: lt, fn: lt_256_uint16},
+ {idx: 7, exp: le, fn: le_256_uint16},
+ {idx: 7, exp: gt, fn: gt_256_uint16},
+ {idx: 7, exp: ge, fn: ge_256_uint16},
+ {idx: 7, exp: eq, fn: eq_256_uint16},
+ {idx: 7, exp: ne, fn: ne_256_uint16},
+ {idx: 8, exp: lt, fn: lt_32766_uint16},
+ {idx: 8, exp: le, fn: le_32766_uint16},
+ {idx: 8, exp: gt, fn: gt_32766_uint16},
+ {idx: 8, exp: ge, fn: ge_32766_uint16},
+ {idx: 8, exp: eq, fn: eq_32766_uint16},
+ {idx: 8, exp: ne, fn: ne_32766_uint16},
+ {idx: 9, exp: lt, fn: lt_32767_uint16},
+ {idx: 9, exp: le, fn: le_32767_uint16},
+ {idx: 9, exp: gt, fn: gt_32767_uint16},
+ {idx: 9, exp: ge, fn: ge_32767_uint16},
+ {idx: 9, exp: eq, fn: eq_32767_uint16},
+ {idx: 9, exp: ne, fn: ne_32767_uint16},
+ {idx: 10, exp: lt, fn: lt_32768_uint16},
+ {idx: 10, exp: le, fn: le_32768_uint16},
+ {idx: 10, exp: gt, fn: gt_32768_uint16},
+ {idx: 10, exp: ge, fn: ge_32768_uint16},
+ {idx: 10, exp: eq, fn: eq_32768_uint16},
+ {idx: 10, exp: ne, fn: ne_32768_uint16},
+ {idx: 11, exp: lt, fn: lt_65534_uint16},
+ {idx: 11, exp: le, fn: le_65534_uint16},
+ {idx: 11, exp: gt, fn: gt_65534_uint16},
+ {idx: 11, exp: ge, fn: ge_65534_uint16},
+ {idx: 11, exp: eq, fn: eq_65534_uint16},
+ {idx: 11, exp: ne, fn: ne_65534_uint16},
+ {idx: 12, exp: lt, fn: lt_65535_uint16},
+ {idx: 12, exp: le, fn: le_65535_uint16},
+ {idx: 12, exp: gt, fn: gt_65535_uint16},
+ {idx: 12, exp: ge, fn: ge_65535_uint16},
+ {idx: 12, exp: eq, fn: eq_65535_uint16},
+ {idx: 12, exp: ne, fn: ne_65535_uint16},
+}
+
+// uint8 tests
+var uint8_vals = []uint8{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+}
+
+func lt_0_uint8(x uint8) bool { return x < 0 }
+func le_0_uint8(x uint8) bool { return x <= 0 }
+func gt_0_uint8(x uint8) bool { return x > 0 }
+func ge_0_uint8(x uint8) bool { return x >= 0 }
+func eq_0_uint8(x uint8) bool { return x == 0 }
+func ne_0_uint8(x uint8) bool { return x != 0 }
+func lt_1_uint8(x uint8) bool { return x < 1 }
+func le_1_uint8(x uint8) bool { return x <= 1 }
+func gt_1_uint8(x uint8) bool { return x > 1 }
+func ge_1_uint8(x uint8) bool { return x >= 1 }
+func eq_1_uint8(x uint8) bool { return x == 1 }
+func ne_1_uint8(x uint8) bool { return x != 1 }
+func lt_126_uint8(x uint8) bool { return x < 126 }
+func le_126_uint8(x uint8) bool { return x <= 126 }
+func gt_126_uint8(x uint8) bool { return x > 126 }
+func ge_126_uint8(x uint8) bool { return x >= 126 }
+func eq_126_uint8(x uint8) bool { return x == 126 }
+func ne_126_uint8(x uint8) bool { return x != 126 }
+func lt_127_uint8(x uint8) bool { return x < 127 }
+func le_127_uint8(x uint8) bool { return x <= 127 }
+func gt_127_uint8(x uint8) bool { return x > 127 }
+func ge_127_uint8(x uint8) bool { return x >= 127 }
+func eq_127_uint8(x uint8) bool { return x == 127 }
+func ne_127_uint8(x uint8) bool { return x != 127 }
+func lt_128_uint8(x uint8) bool { return x < 128 }
+func le_128_uint8(x uint8) bool { return x <= 128 }
+func gt_128_uint8(x uint8) bool { return x > 128 }
+func ge_128_uint8(x uint8) bool { return x >= 128 }
+func eq_128_uint8(x uint8) bool { return x == 128 }
+func ne_128_uint8(x uint8) bool { return x != 128 }
+func lt_254_uint8(x uint8) bool { return x < 254 }
+func le_254_uint8(x uint8) bool { return x <= 254 }
+func gt_254_uint8(x uint8) bool { return x > 254 }
+func ge_254_uint8(x uint8) bool { return x >= 254 }
+func eq_254_uint8(x uint8) bool { return x == 254 }
+func ne_254_uint8(x uint8) bool { return x != 254 }
+func lt_255_uint8(x uint8) bool { return x < 255 }
+func le_255_uint8(x uint8) bool { return x <= 255 }
+func gt_255_uint8(x uint8) bool { return x > 255 }
+func ge_255_uint8(x uint8) bool { return x >= 255 }
+func eq_255_uint8(x uint8) bool { return x == 255 }
+func ne_255_uint8(x uint8) bool { return x != 255 }
+
+var uint8_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint8) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint8},
+ {idx: 0, exp: le, fn: le_0_uint8},
+ {idx: 0, exp: gt, fn: gt_0_uint8},
+ {idx: 0, exp: ge, fn: ge_0_uint8},
+ {idx: 0, exp: eq, fn: eq_0_uint8},
+ {idx: 0, exp: ne, fn: ne_0_uint8},
+ {idx: 1, exp: lt, fn: lt_1_uint8},
+ {idx: 1, exp: le, fn: le_1_uint8},
+ {idx: 1, exp: gt, fn: gt_1_uint8},
+ {idx: 1, exp: ge, fn: ge_1_uint8},
+ {idx: 1, exp: eq, fn: eq_1_uint8},
+ {idx: 1, exp: ne, fn: ne_1_uint8},
+ {idx: 2, exp: lt, fn: lt_126_uint8},
+ {idx: 2, exp: le, fn: le_126_uint8},
+ {idx: 2, exp: gt, fn: gt_126_uint8},
+ {idx: 2, exp: ge, fn: ge_126_uint8},
+ {idx: 2, exp: eq, fn: eq_126_uint8},
+ {idx: 2, exp: ne, fn: ne_126_uint8},
+ {idx: 3, exp: lt, fn: lt_127_uint8},
+ {idx: 3, exp: le, fn: le_127_uint8},
+ {idx: 3, exp: gt, fn: gt_127_uint8},
+ {idx: 3, exp: ge, fn: ge_127_uint8},
+ {idx: 3, exp: eq, fn: eq_127_uint8},
+ {idx: 3, exp: ne, fn: ne_127_uint8},
+ {idx: 4, exp: lt, fn: lt_128_uint8},
+ {idx: 4, exp: le, fn: le_128_uint8},
+ {idx: 4, exp: gt, fn: gt_128_uint8},
+ {idx: 4, exp: ge, fn: ge_128_uint8},
+ {idx: 4, exp: eq, fn: eq_128_uint8},
+ {idx: 4, exp: ne, fn: ne_128_uint8},
+ {idx: 5, exp: lt, fn: lt_254_uint8},
+ {idx: 5, exp: le, fn: le_254_uint8},
+ {idx: 5, exp: gt, fn: gt_254_uint8},
+ {idx: 5, exp: ge, fn: ge_254_uint8},
+ {idx: 5, exp: eq, fn: eq_254_uint8},
+ {idx: 5, exp: ne, fn: ne_254_uint8},
+ {idx: 6, exp: lt, fn: lt_255_uint8},
+ {idx: 6, exp: le, fn: le_255_uint8},
+ {idx: 6, exp: gt, fn: gt_255_uint8},
+ {idx: 6, exp: ge, fn: ge_255_uint8},
+ {idx: 6, exp: eq, fn: eq_255_uint8},
+ {idx: 6, exp: ne, fn: ne_255_uint8},
+}
+
+// int64 tests
+var int64_vals = []int64{
+ -9223372036854775808,
+ -9223372036854775807,
+ -2147483649,
+ -2147483648,
+ -2147483647,
+ -32769,
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+ 4294967296,
+ 1095216660480,
+ 9223372036854775806,
+ 9223372036854775807,
+}
+
+func lt_neg9223372036854775808_int64(x int64) bool { return x < -9223372036854775808 }
+func le_neg9223372036854775808_int64(x int64) bool { return x <= -9223372036854775808 }
+func gt_neg9223372036854775808_int64(x int64) bool { return x > -9223372036854775808 }
+func ge_neg9223372036854775808_int64(x int64) bool { return x >= -9223372036854775808 }
+func eq_neg9223372036854775808_int64(x int64) bool { return x == -9223372036854775808 }
+func ne_neg9223372036854775808_int64(x int64) bool { return x != -9223372036854775808 }
+func lt_neg9223372036854775807_int64(x int64) bool { return x < -9223372036854775807 }
+func le_neg9223372036854775807_int64(x int64) bool { return x <= -9223372036854775807 }
+func gt_neg9223372036854775807_int64(x int64) bool { return x > -9223372036854775807 }
+func ge_neg9223372036854775807_int64(x int64) bool { return x >= -9223372036854775807 }
+func eq_neg9223372036854775807_int64(x int64) bool { return x == -9223372036854775807 }
+func ne_neg9223372036854775807_int64(x int64) bool { return x != -9223372036854775807 }
+func lt_neg2147483649_int64(x int64) bool { return x < -2147483649 }
+func le_neg2147483649_int64(x int64) bool { return x <= -2147483649 }
+func gt_neg2147483649_int64(x int64) bool { return x > -2147483649 }
+func ge_neg2147483649_int64(x int64) bool { return x >= -2147483649 }
+func eq_neg2147483649_int64(x int64) bool { return x == -2147483649 }
+func ne_neg2147483649_int64(x int64) bool { return x != -2147483649 }
+func lt_neg2147483648_int64(x int64) bool { return x < -2147483648 }
+func le_neg2147483648_int64(x int64) bool { return x <= -2147483648 }
+func gt_neg2147483648_int64(x int64) bool { return x > -2147483648 }
+func ge_neg2147483648_int64(x int64) bool { return x >= -2147483648 }
+func eq_neg2147483648_int64(x int64) bool { return x == -2147483648 }
+func ne_neg2147483648_int64(x int64) bool { return x != -2147483648 }
+func lt_neg2147483647_int64(x int64) bool { return x < -2147483647 }
+func le_neg2147483647_int64(x int64) bool { return x <= -2147483647 }
+func gt_neg2147483647_int64(x int64) bool { return x > -2147483647 }
+func ge_neg2147483647_int64(x int64) bool { return x >= -2147483647 }
+func eq_neg2147483647_int64(x int64) bool { return x == -2147483647 }
+func ne_neg2147483647_int64(x int64) bool { return x != -2147483647 }
+func lt_neg32769_int64(x int64) bool { return x < -32769 }
+func le_neg32769_int64(x int64) bool { return x <= -32769 }
+func gt_neg32769_int64(x int64) bool { return x > -32769 }
+func ge_neg32769_int64(x int64) bool { return x >= -32769 }
+func eq_neg32769_int64(x int64) bool { return x == -32769 }
+func ne_neg32769_int64(x int64) bool { return x != -32769 }
+func lt_neg32768_int64(x int64) bool { return x < -32768 }
+func le_neg32768_int64(x int64) bool { return x <= -32768 }
+func gt_neg32768_int64(x int64) bool { return x > -32768 }
+func ge_neg32768_int64(x int64) bool { return x >= -32768 }
+func eq_neg32768_int64(x int64) bool { return x == -32768 }
+func ne_neg32768_int64(x int64) bool { return x != -32768 }
+func lt_neg32767_int64(x int64) bool { return x < -32767 }
+func le_neg32767_int64(x int64) bool { return x <= -32767 }
+func gt_neg32767_int64(x int64) bool { return x > -32767 }
+func ge_neg32767_int64(x int64) bool { return x >= -32767 }
+func eq_neg32767_int64(x int64) bool { return x == -32767 }
+func ne_neg32767_int64(x int64) bool { return x != -32767 }
+func lt_neg129_int64(x int64) bool { return x < -129 }
+func le_neg129_int64(x int64) bool { return x <= -129 }
+func gt_neg129_int64(x int64) bool { return x > -129 }
+func ge_neg129_int64(x int64) bool { return x >= -129 }
+func eq_neg129_int64(x int64) bool { return x == -129 }
+func ne_neg129_int64(x int64) bool { return x != -129 }
+func lt_neg128_int64(x int64) bool { return x < -128 }
+func le_neg128_int64(x int64) bool { return x <= -128 }
+func gt_neg128_int64(x int64) bool { return x > -128 }
+func ge_neg128_int64(x int64) bool { return x >= -128 }
+func eq_neg128_int64(x int64) bool { return x == -128 }
+func ne_neg128_int64(x int64) bool { return x != -128 }
+func lt_neg127_int64(x int64) bool { return x < -127 }
+func le_neg127_int64(x int64) bool { return x <= -127 }
+func gt_neg127_int64(x int64) bool { return x > -127 }
+func ge_neg127_int64(x int64) bool { return x >= -127 }
+func eq_neg127_int64(x int64) bool { return x == -127 }
+func ne_neg127_int64(x int64) bool { return x != -127 }
+func lt_neg1_int64(x int64) bool { return x < -1 }
+func le_neg1_int64(x int64) bool { return x <= -1 }
+func gt_neg1_int64(x int64) bool { return x > -1 }
+func ge_neg1_int64(x int64) bool { return x >= -1 }
+func eq_neg1_int64(x int64) bool { return x == -1 }
+func ne_neg1_int64(x int64) bool { return x != -1 }
+func lt_0_int64(x int64) bool { return x < 0 }
+func le_0_int64(x int64) bool { return x <= 0 }
+func gt_0_int64(x int64) bool { return x > 0 }
+func ge_0_int64(x int64) bool { return x >= 0 }
+func eq_0_int64(x int64) bool { return x == 0 }
+func ne_0_int64(x int64) bool { return x != 0 }
+func lt_1_int64(x int64) bool { return x < 1 }
+func le_1_int64(x int64) bool { return x <= 1 }
+func gt_1_int64(x int64) bool { return x > 1 }
+func ge_1_int64(x int64) bool { return x >= 1 }
+func eq_1_int64(x int64) bool { return x == 1 }
+func ne_1_int64(x int64) bool { return x != 1 }
+func lt_126_int64(x int64) bool { return x < 126 }
+func le_126_int64(x int64) bool { return x <= 126 }
+func gt_126_int64(x int64) bool { return x > 126 }
+func ge_126_int64(x int64) bool { return x >= 126 }
+func eq_126_int64(x int64) bool { return x == 126 }
+func ne_126_int64(x int64) bool { return x != 126 }
+func lt_127_int64(x int64) bool { return x < 127 }
+func le_127_int64(x int64) bool { return x <= 127 }
+func gt_127_int64(x int64) bool { return x > 127 }
+func ge_127_int64(x int64) bool { return x >= 127 }
+func eq_127_int64(x int64) bool { return x == 127 }
+func ne_127_int64(x int64) bool { return x != 127 }
+func lt_128_int64(x int64) bool { return x < 128 }
+func le_128_int64(x int64) bool { return x <= 128 }
+func gt_128_int64(x int64) bool { return x > 128 }
+func ge_128_int64(x int64) bool { return x >= 128 }
+func eq_128_int64(x int64) bool { return x == 128 }
+func ne_128_int64(x int64) bool { return x != 128 }
+func lt_254_int64(x int64) bool { return x < 254 }
+func le_254_int64(x int64) bool { return x <= 254 }
+func gt_254_int64(x int64) bool { return x > 254 }
+func ge_254_int64(x int64) bool { return x >= 254 }
+func eq_254_int64(x int64) bool { return x == 254 }
+func ne_254_int64(x int64) bool { return x != 254 }
+func lt_255_int64(x int64) bool { return x < 255 }
+func le_255_int64(x int64) bool { return x <= 255 }
+func gt_255_int64(x int64) bool { return x > 255 }
+func ge_255_int64(x int64) bool { return x >= 255 }
+func eq_255_int64(x int64) bool { return x == 255 }
+func ne_255_int64(x int64) bool { return x != 255 }
+func lt_256_int64(x int64) bool { return x < 256 }
+func le_256_int64(x int64) bool { return x <= 256 }
+func gt_256_int64(x int64) bool { return x > 256 }
+func ge_256_int64(x int64) bool { return x >= 256 }
+func eq_256_int64(x int64) bool { return x == 256 }
+func ne_256_int64(x int64) bool { return x != 256 }
+func lt_32766_int64(x int64) bool { return x < 32766 }
+func le_32766_int64(x int64) bool { return x <= 32766 }
+func gt_32766_int64(x int64) bool { return x > 32766 }
+func ge_32766_int64(x int64) bool { return x >= 32766 }
+func eq_32766_int64(x int64) bool { return x == 32766 }
+func ne_32766_int64(x int64) bool { return x != 32766 }
+func lt_32767_int64(x int64) bool { return x < 32767 }
+func le_32767_int64(x int64) bool { return x <= 32767 }
+func gt_32767_int64(x int64) bool { return x > 32767 }
+func ge_32767_int64(x int64) bool { return x >= 32767 }
+func eq_32767_int64(x int64) bool { return x == 32767 }
+func ne_32767_int64(x int64) bool { return x != 32767 }
+func lt_32768_int64(x int64) bool { return x < 32768 }
+func le_32768_int64(x int64) bool { return x <= 32768 }
+func gt_32768_int64(x int64) bool { return x > 32768 }
+func ge_32768_int64(x int64) bool { return x >= 32768 }
+func eq_32768_int64(x int64) bool { return x == 32768 }
+func ne_32768_int64(x int64) bool { return x != 32768 }
+func lt_65534_int64(x int64) bool { return x < 65534 }
+func le_65534_int64(x int64) bool { return x <= 65534 }
+func gt_65534_int64(x int64) bool { return x > 65534 }
+func ge_65534_int64(x int64) bool { return x >= 65534 }
+func eq_65534_int64(x int64) bool { return x == 65534 }
+func ne_65534_int64(x int64) bool { return x != 65534 }
+func lt_65535_int64(x int64) bool { return x < 65535 }
+func le_65535_int64(x int64) bool { return x <= 65535 }
+func gt_65535_int64(x int64) bool { return x > 65535 }
+func ge_65535_int64(x int64) bool { return x >= 65535 }
+func eq_65535_int64(x int64) bool { return x == 65535 }
+func ne_65535_int64(x int64) bool { return x != 65535 }
+func lt_65536_int64(x int64) bool { return x < 65536 }
+func le_65536_int64(x int64) bool { return x <= 65536 }
+func gt_65536_int64(x int64) bool { return x > 65536 }
+func ge_65536_int64(x int64) bool { return x >= 65536 }
+func eq_65536_int64(x int64) bool { return x == 65536 }
+func ne_65536_int64(x int64) bool { return x != 65536 }
+func lt_2147483646_int64(x int64) bool { return x < 2147483646 }
+func le_2147483646_int64(x int64) bool { return x <= 2147483646 }
+func gt_2147483646_int64(x int64) bool { return x > 2147483646 }
+func ge_2147483646_int64(x int64) bool { return x >= 2147483646 }
+func eq_2147483646_int64(x int64) bool { return x == 2147483646 }
+func ne_2147483646_int64(x int64) bool { return x != 2147483646 }
+func lt_2147483647_int64(x int64) bool { return x < 2147483647 }
+func le_2147483647_int64(x int64) bool { return x <= 2147483647 }
+func gt_2147483647_int64(x int64) bool { return x > 2147483647 }
+func ge_2147483647_int64(x int64) bool { return x >= 2147483647 }
+func eq_2147483647_int64(x int64) bool { return x == 2147483647 }
+func ne_2147483647_int64(x int64) bool { return x != 2147483647 }
+func lt_2147483648_int64(x int64) bool { return x < 2147483648 }
+func le_2147483648_int64(x int64) bool { return x <= 2147483648 }
+func gt_2147483648_int64(x int64) bool { return x > 2147483648 }
+func ge_2147483648_int64(x int64) bool { return x >= 2147483648 }
+func eq_2147483648_int64(x int64) bool { return x == 2147483648 }
+func ne_2147483648_int64(x int64) bool { return x != 2147483648 }
+func lt_4278190080_int64(x int64) bool { return x < 4278190080 }
+func le_4278190080_int64(x int64) bool { return x <= 4278190080 }
+func gt_4278190080_int64(x int64) bool { return x > 4278190080 }
+func ge_4278190080_int64(x int64) bool { return x >= 4278190080 }
+func eq_4278190080_int64(x int64) bool { return x == 4278190080 }
+func ne_4278190080_int64(x int64) bool { return x != 4278190080 }
+func lt_4294967294_int64(x int64) bool { return x < 4294967294 }
+func le_4294967294_int64(x int64) bool { return x <= 4294967294 }
+func gt_4294967294_int64(x int64) bool { return x > 4294967294 }
+func ge_4294967294_int64(x int64) bool { return x >= 4294967294 }
+func eq_4294967294_int64(x int64) bool { return x == 4294967294 }
+func ne_4294967294_int64(x int64) bool { return x != 4294967294 }
+func lt_4294967295_int64(x int64) bool { return x < 4294967295 }
+func le_4294967295_int64(x int64) bool { return x <= 4294967295 }
+func gt_4294967295_int64(x int64) bool { return x > 4294967295 }
+func ge_4294967295_int64(x int64) bool { return x >= 4294967295 }
+func eq_4294967295_int64(x int64) bool { return x == 4294967295 }
+func ne_4294967295_int64(x int64) bool { return x != 4294967295 }
+func lt_4294967296_int64(x int64) bool { return x < 4294967296 }
+func le_4294967296_int64(x int64) bool { return x <= 4294967296 }
+func gt_4294967296_int64(x int64) bool { return x > 4294967296 }
+func ge_4294967296_int64(x int64) bool { return x >= 4294967296 }
+func eq_4294967296_int64(x int64) bool { return x == 4294967296 }
+func ne_4294967296_int64(x int64) bool { return x != 4294967296 }
+func lt_1095216660480_int64(x int64) bool { return x < 1095216660480 }
+func le_1095216660480_int64(x int64) bool { return x <= 1095216660480 }
+func gt_1095216660480_int64(x int64) bool { return x > 1095216660480 }
+func ge_1095216660480_int64(x int64) bool { return x >= 1095216660480 }
+func eq_1095216660480_int64(x int64) bool { return x == 1095216660480 }
+func ne_1095216660480_int64(x int64) bool { return x != 1095216660480 }
+func lt_9223372036854775806_int64(x int64) bool { return x < 9223372036854775806 }
+func le_9223372036854775806_int64(x int64) bool { return x <= 9223372036854775806 }
+func gt_9223372036854775806_int64(x int64) bool { return x > 9223372036854775806 }
+func ge_9223372036854775806_int64(x int64) bool { return x >= 9223372036854775806 }
+func eq_9223372036854775806_int64(x int64) bool { return x == 9223372036854775806 }
+func ne_9223372036854775806_int64(x int64) bool { return x != 9223372036854775806 }
+func lt_9223372036854775807_int64(x int64) bool { return x < 9223372036854775807 }
+func le_9223372036854775807_int64(x int64) bool { return x <= 9223372036854775807 }
+func gt_9223372036854775807_int64(x int64) bool { return x > 9223372036854775807 }
+func ge_9223372036854775807_int64(x int64) bool { return x >= 9223372036854775807 }
+func eq_9223372036854775807_int64(x int64) bool { return x == 9223372036854775807 }
+func ne_9223372036854775807_int64(x int64) bool { return x != 9223372036854775807 }
+
+var int64_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int64) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg9223372036854775808_int64},
+ {idx: 0, exp: le, fn: le_neg9223372036854775808_int64},
+ {idx: 0, exp: gt, fn: gt_neg9223372036854775808_int64},
+ {idx: 0, exp: ge, fn: ge_neg9223372036854775808_int64},
+ {idx: 0, exp: eq, fn: eq_neg9223372036854775808_int64},
+ {idx: 0, exp: ne, fn: ne_neg9223372036854775808_int64},
+ {idx: 1, exp: lt, fn: lt_neg9223372036854775807_int64},
+ {idx: 1, exp: le, fn: le_neg9223372036854775807_int64},
+ {idx: 1, exp: gt, fn: gt_neg9223372036854775807_int64},
+ {idx: 1, exp: ge, fn: ge_neg9223372036854775807_int64},
+ {idx: 1, exp: eq, fn: eq_neg9223372036854775807_int64},
+ {idx: 1, exp: ne, fn: ne_neg9223372036854775807_int64},
+ {idx: 2, exp: lt, fn: lt_neg2147483649_int64},
+ {idx: 2, exp: le, fn: le_neg2147483649_int64},
+ {idx: 2, exp: gt, fn: gt_neg2147483649_int64},
+ {idx: 2, exp: ge, fn: ge_neg2147483649_int64},
+ {idx: 2, exp: eq, fn: eq_neg2147483649_int64},
+ {idx: 2, exp: ne, fn: ne_neg2147483649_int64},
+ {idx: 3, exp: lt, fn: lt_neg2147483648_int64},
+ {idx: 3, exp: le, fn: le_neg2147483648_int64},
+ {idx: 3, exp: gt, fn: gt_neg2147483648_int64},
+ {idx: 3, exp: ge, fn: ge_neg2147483648_int64},
+ {idx: 3, exp: eq, fn: eq_neg2147483648_int64},
+ {idx: 3, exp: ne, fn: ne_neg2147483648_int64},
+ {idx: 4, exp: lt, fn: lt_neg2147483647_int64},
+ {idx: 4, exp: le, fn: le_neg2147483647_int64},
+ {idx: 4, exp: gt, fn: gt_neg2147483647_int64},
+ {idx: 4, exp: ge, fn: ge_neg2147483647_int64},
+ {idx: 4, exp: eq, fn: eq_neg2147483647_int64},
+ {idx: 4, exp: ne, fn: ne_neg2147483647_int64},
+ {idx: 5, exp: lt, fn: lt_neg32769_int64},
+ {idx: 5, exp: le, fn: le_neg32769_int64},
+ {idx: 5, exp: gt, fn: gt_neg32769_int64},
+ {idx: 5, exp: ge, fn: ge_neg32769_int64},
+ {idx: 5, exp: eq, fn: eq_neg32769_int64},
+ {idx: 5, exp: ne, fn: ne_neg32769_int64},
+ {idx: 6, exp: lt, fn: lt_neg32768_int64},
+ {idx: 6, exp: le, fn: le_neg32768_int64},
+ {idx: 6, exp: gt, fn: gt_neg32768_int64},
+ {idx: 6, exp: ge, fn: ge_neg32768_int64},
+ {idx: 6, exp: eq, fn: eq_neg32768_int64},
+ {idx: 6, exp: ne, fn: ne_neg32768_int64},
+ {idx: 7, exp: lt, fn: lt_neg32767_int64},
+ {idx: 7, exp: le, fn: le_neg32767_int64},
+ {idx: 7, exp: gt, fn: gt_neg32767_int64},
+ {idx: 7, exp: ge, fn: ge_neg32767_int64},
+ {idx: 7, exp: eq, fn: eq_neg32767_int64},
+ {idx: 7, exp: ne, fn: ne_neg32767_int64},
+ {idx: 8, exp: lt, fn: lt_neg129_int64},
+ {idx: 8, exp: le, fn: le_neg129_int64},
+ {idx: 8, exp: gt, fn: gt_neg129_int64},
+ {idx: 8, exp: ge, fn: ge_neg129_int64},
+ {idx: 8, exp: eq, fn: eq_neg129_int64},
+ {idx: 8, exp: ne, fn: ne_neg129_int64},
+ {idx: 9, exp: lt, fn: lt_neg128_int64},
+ {idx: 9, exp: le, fn: le_neg128_int64},
+ {idx: 9, exp: gt, fn: gt_neg128_int64},
+ {idx: 9, exp: ge, fn: ge_neg128_int64},
+ {idx: 9, exp: eq, fn: eq_neg128_int64},
+ {idx: 9, exp: ne, fn: ne_neg128_int64},
+ {idx: 10, exp: lt, fn: lt_neg127_int64},
+ {idx: 10, exp: le, fn: le_neg127_int64},
+ {idx: 10, exp: gt, fn: gt_neg127_int64},
+ {idx: 10, exp: ge, fn: ge_neg127_int64},
+ {idx: 10, exp: eq, fn: eq_neg127_int64},
+ {idx: 10, exp: ne, fn: ne_neg127_int64},
+ {idx: 11, exp: lt, fn: lt_neg1_int64},
+ {idx: 11, exp: le, fn: le_neg1_int64},
+ {idx: 11, exp: gt, fn: gt_neg1_int64},
+ {idx: 11, exp: ge, fn: ge_neg1_int64},
+ {idx: 11, exp: eq, fn: eq_neg1_int64},
+ {idx: 11, exp: ne, fn: ne_neg1_int64},
+ {idx: 12, exp: lt, fn: lt_0_int64},
+ {idx: 12, exp: le, fn: le_0_int64},
+ {idx: 12, exp: gt, fn: gt_0_int64},
+ {idx: 12, exp: ge, fn: ge_0_int64},
+ {idx: 12, exp: eq, fn: eq_0_int64},
+ {idx: 12, exp: ne, fn: ne_0_int64},
+ {idx: 13, exp: lt, fn: lt_1_int64},
+ {idx: 13, exp: le, fn: le_1_int64},
+ {idx: 13, exp: gt, fn: gt_1_int64},
+ {idx: 13, exp: ge, fn: ge_1_int64},
+ {idx: 13, exp: eq, fn: eq_1_int64},
+ {idx: 13, exp: ne, fn: ne_1_int64},
+ {idx: 14, exp: lt, fn: lt_126_int64},
+ {idx: 14, exp: le, fn: le_126_int64},
+ {idx: 14, exp: gt, fn: gt_126_int64},
+ {idx: 14, exp: ge, fn: ge_126_int64},
+ {idx: 14, exp: eq, fn: eq_126_int64},
+ {idx: 14, exp: ne, fn: ne_126_int64},
+ {idx: 15, exp: lt, fn: lt_127_int64},
+ {idx: 15, exp: le, fn: le_127_int64},
+ {idx: 15, exp: gt, fn: gt_127_int64},
+ {idx: 15, exp: ge, fn: ge_127_int64},
+ {idx: 15, exp: eq, fn: eq_127_int64},
+ {idx: 15, exp: ne, fn: ne_127_int64},
+ {idx: 16, exp: lt, fn: lt_128_int64},
+ {idx: 16, exp: le, fn: le_128_int64},
+ {idx: 16, exp: gt, fn: gt_128_int64},
+ {idx: 16, exp: ge, fn: ge_128_int64},
+ {idx: 16, exp: eq, fn: eq_128_int64},
+ {idx: 16, exp: ne, fn: ne_128_int64},
+ {idx: 17, exp: lt, fn: lt_254_int64},
+ {idx: 17, exp: le, fn: le_254_int64},
+ {idx: 17, exp: gt, fn: gt_254_int64},
+ {idx: 17, exp: ge, fn: ge_254_int64},
+ {idx: 17, exp: eq, fn: eq_254_int64},
+ {idx: 17, exp: ne, fn: ne_254_int64},
+ {idx: 18, exp: lt, fn: lt_255_int64},
+ {idx: 18, exp: le, fn: le_255_int64},
+ {idx: 18, exp: gt, fn: gt_255_int64},
+ {idx: 18, exp: ge, fn: ge_255_int64},
+ {idx: 18, exp: eq, fn: eq_255_int64},
+ {idx: 18, exp: ne, fn: ne_255_int64},
+ {idx: 19, exp: lt, fn: lt_256_int64},
+ {idx: 19, exp: le, fn: le_256_int64},
+ {idx: 19, exp: gt, fn: gt_256_int64},
+ {idx: 19, exp: ge, fn: ge_256_int64},
+ {idx: 19, exp: eq, fn: eq_256_int64},
+ {idx: 19, exp: ne, fn: ne_256_int64},
+ {idx: 20, exp: lt, fn: lt_32766_int64},
+ {idx: 20, exp: le, fn: le_32766_int64},
+ {idx: 20, exp: gt, fn: gt_32766_int64},
+ {idx: 20, exp: ge, fn: ge_32766_int64},
+ {idx: 20, exp: eq, fn: eq_32766_int64},
+ {idx: 20, exp: ne, fn: ne_32766_int64},
+ {idx: 21, exp: lt, fn: lt_32767_int64},
+ {idx: 21, exp: le, fn: le_32767_int64},
+ {idx: 21, exp: gt, fn: gt_32767_int64},
+ {idx: 21, exp: ge, fn: ge_32767_int64},
+ {idx: 21, exp: eq, fn: eq_32767_int64},
+ {idx: 21, exp: ne, fn: ne_32767_int64},
+ {idx: 22, exp: lt, fn: lt_32768_int64},
+ {idx: 22, exp: le, fn: le_32768_int64},
+ {idx: 22, exp: gt, fn: gt_32768_int64},
+ {idx: 22, exp: ge, fn: ge_32768_int64},
+ {idx: 22, exp: eq, fn: eq_32768_int64},
+ {idx: 22, exp: ne, fn: ne_32768_int64},
+ {idx: 23, exp: lt, fn: lt_65534_int64},
+ {idx: 23, exp: le, fn: le_65534_int64},
+ {idx: 23, exp: gt, fn: gt_65534_int64},
+ {idx: 23, exp: ge, fn: ge_65534_int64},
+ {idx: 23, exp: eq, fn: eq_65534_int64},
+ {idx: 23, exp: ne, fn: ne_65534_int64},
+ {idx: 24, exp: lt, fn: lt_65535_int64},
+ {idx: 24, exp: le, fn: le_65535_int64},
+ {idx: 24, exp: gt, fn: gt_65535_int64},
+ {idx: 24, exp: ge, fn: ge_65535_int64},
+ {idx: 24, exp: eq, fn: eq_65535_int64},
+ {idx: 24, exp: ne, fn: ne_65535_int64},
+ {idx: 25, exp: lt, fn: lt_65536_int64},
+ {idx: 25, exp: le, fn: le_65536_int64},
+ {idx: 25, exp: gt, fn: gt_65536_int64},
+ {idx: 25, exp: ge, fn: ge_65536_int64},
+ {idx: 25, exp: eq, fn: eq_65536_int64},
+ {idx: 25, exp: ne, fn: ne_65536_int64},
+ {idx: 26, exp: lt, fn: lt_2147483646_int64},
+ {idx: 26, exp: le, fn: le_2147483646_int64},
+ {idx: 26, exp: gt, fn: gt_2147483646_int64},
+ {idx: 26, exp: ge, fn: ge_2147483646_int64},
+ {idx: 26, exp: eq, fn: eq_2147483646_int64},
+ {idx: 26, exp: ne, fn: ne_2147483646_int64},
+ {idx: 27, exp: lt, fn: lt_2147483647_int64},
+ {idx: 27, exp: le, fn: le_2147483647_int64},
+ {idx: 27, exp: gt, fn: gt_2147483647_int64},
+ {idx: 27, exp: ge, fn: ge_2147483647_int64},
+ {idx: 27, exp: eq, fn: eq_2147483647_int64},
+ {idx: 27, exp: ne, fn: ne_2147483647_int64},
+ {idx: 28, exp: lt, fn: lt_2147483648_int64},
+ {idx: 28, exp: le, fn: le_2147483648_int64},
+ {idx: 28, exp: gt, fn: gt_2147483648_int64},
+ {idx: 28, exp: ge, fn: ge_2147483648_int64},
+ {idx: 28, exp: eq, fn: eq_2147483648_int64},
+ {idx: 28, exp: ne, fn: ne_2147483648_int64},
+ {idx: 29, exp: lt, fn: lt_4278190080_int64},
+ {idx: 29, exp: le, fn: le_4278190080_int64},
+ {idx: 29, exp: gt, fn: gt_4278190080_int64},
+ {idx: 29, exp: ge, fn: ge_4278190080_int64},
+ {idx: 29, exp: eq, fn: eq_4278190080_int64},
+ {idx: 29, exp: ne, fn: ne_4278190080_int64},
+ {idx: 30, exp: lt, fn: lt_4294967294_int64},
+ {idx: 30, exp: le, fn: le_4294967294_int64},
+ {idx: 30, exp: gt, fn: gt_4294967294_int64},
+ {idx: 30, exp: ge, fn: ge_4294967294_int64},
+ {idx: 30, exp: eq, fn: eq_4294967294_int64},
+ {idx: 30, exp: ne, fn: ne_4294967294_int64},
+ {idx: 31, exp: lt, fn: lt_4294967295_int64},
+ {idx: 31, exp: le, fn: le_4294967295_int64},
+ {idx: 31, exp: gt, fn: gt_4294967295_int64},
+ {idx: 31, exp: ge, fn: ge_4294967295_int64},
+ {idx: 31, exp: eq, fn: eq_4294967295_int64},
+ {idx: 31, exp: ne, fn: ne_4294967295_int64},
+ {idx: 32, exp: lt, fn: lt_4294967296_int64},
+ {idx: 32, exp: le, fn: le_4294967296_int64},
+ {idx: 32, exp: gt, fn: gt_4294967296_int64},
+ {idx: 32, exp: ge, fn: ge_4294967296_int64},
+ {idx: 32, exp: eq, fn: eq_4294967296_int64},
+ {idx: 32, exp: ne, fn: ne_4294967296_int64},
+ {idx: 33, exp: lt, fn: lt_1095216660480_int64},
+ {idx: 33, exp: le, fn: le_1095216660480_int64},
+ {idx: 33, exp: gt, fn: gt_1095216660480_int64},
+ {idx: 33, exp: ge, fn: ge_1095216660480_int64},
+ {idx: 33, exp: eq, fn: eq_1095216660480_int64},
+ {idx: 33, exp: ne, fn: ne_1095216660480_int64},
+ {idx: 34, exp: lt, fn: lt_9223372036854775806_int64},
+ {idx: 34, exp: le, fn: le_9223372036854775806_int64},
+ {idx: 34, exp: gt, fn: gt_9223372036854775806_int64},
+ {idx: 34, exp: ge, fn: ge_9223372036854775806_int64},
+ {idx: 34, exp: eq, fn: eq_9223372036854775806_int64},
+ {idx: 34, exp: ne, fn: ne_9223372036854775806_int64},
+ {idx: 35, exp: lt, fn: lt_9223372036854775807_int64},
+ {idx: 35, exp: le, fn: le_9223372036854775807_int64},
+ {idx: 35, exp: gt, fn: gt_9223372036854775807_int64},
+ {idx: 35, exp: ge, fn: ge_9223372036854775807_int64},
+ {idx: 35, exp: eq, fn: eq_9223372036854775807_int64},
+ {idx: 35, exp: ne, fn: ne_9223372036854775807_int64},
+}
+
+// int32 tests
+var int32_vals = []int32{
+ -2147483648,
+ -2147483647,
+ -32769,
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+}
+
+func lt_neg2147483648_int32(x int32) bool { return x < -2147483648 }
+func le_neg2147483648_int32(x int32) bool { return x <= -2147483648 }
+func gt_neg2147483648_int32(x int32) bool { return x > -2147483648 }
+func ge_neg2147483648_int32(x int32) bool { return x >= -2147483648 }
+func eq_neg2147483648_int32(x int32) bool { return x == -2147483648 }
+func ne_neg2147483648_int32(x int32) bool { return x != -2147483648 }
+func lt_neg2147483647_int32(x int32) bool { return x < -2147483647 }
+func le_neg2147483647_int32(x int32) bool { return x <= -2147483647 }
+func gt_neg2147483647_int32(x int32) bool { return x > -2147483647 }
+func ge_neg2147483647_int32(x int32) bool { return x >= -2147483647 }
+func eq_neg2147483647_int32(x int32) bool { return x == -2147483647 }
+func ne_neg2147483647_int32(x int32) bool { return x != -2147483647 }
+func lt_neg32769_int32(x int32) bool { return x < -32769 }
+func le_neg32769_int32(x int32) bool { return x <= -32769 }
+func gt_neg32769_int32(x int32) bool { return x > -32769 }
+func ge_neg32769_int32(x int32) bool { return x >= -32769 }
+func eq_neg32769_int32(x int32) bool { return x == -32769 }
+func ne_neg32769_int32(x int32) bool { return x != -32769 }
+func lt_neg32768_int32(x int32) bool { return x < -32768 }
+func le_neg32768_int32(x int32) bool { return x <= -32768 }
+func gt_neg32768_int32(x int32) bool { return x > -32768 }
+func ge_neg32768_int32(x int32) bool { return x >= -32768 }
+func eq_neg32768_int32(x int32) bool { return x == -32768 }
+func ne_neg32768_int32(x int32) bool { return x != -32768 }
+func lt_neg32767_int32(x int32) bool { return x < -32767 }
+func le_neg32767_int32(x int32) bool { return x <= -32767 }
+func gt_neg32767_int32(x int32) bool { return x > -32767 }
+func ge_neg32767_int32(x int32) bool { return x >= -32767 }
+func eq_neg32767_int32(x int32) bool { return x == -32767 }
+func ne_neg32767_int32(x int32) bool { return x != -32767 }
+func lt_neg129_int32(x int32) bool { return x < -129 }
+func le_neg129_int32(x int32) bool { return x <= -129 }
+func gt_neg129_int32(x int32) bool { return x > -129 }
+func ge_neg129_int32(x int32) bool { return x >= -129 }
+func eq_neg129_int32(x int32) bool { return x == -129 }
+func ne_neg129_int32(x int32) bool { return x != -129 }
+func lt_neg128_int32(x int32) bool { return x < -128 }
+func le_neg128_int32(x int32) bool { return x <= -128 }
+func gt_neg128_int32(x int32) bool { return x > -128 }
+func ge_neg128_int32(x int32) bool { return x >= -128 }
+func eq_neg128_int32(x int32) bool { return x == -128 }
+func ne_neg128_int32(x int32) bool { return x != -128 }
+func lt_neg127_int32(x int32) bool { return x < -127 }
+func le_neg127_int32(x int32) bool { return x <= -127 }
+func gt_neg127_int32(x int32) bool { return x > -127 }
+func ge_neg127_int32(x int32) bool { return x >= -127 }
+func eq_neg127_int32(x int32) bool { return x == -127 }
+func ne_neg127_int32(x int32) bool { return x != -127 }
+func lt_neg1_int32(x int32) bool { return x < -1 }
+func le_neg1_int32(x int32) bool { return x <= -1 }
+func gt_neg1_int32(x int32) bool { return x > -1 }
+func ge_neg1_int32(x int32) bool { return x >= -1 }
+func eq_neg1_int32(x int32) bool { return x == -1 }
+func ne_neg1_int32(x int32) bool { return x != -1 }
+func lt_0_int32(x int32) bool { return x < 0 }
+func le_0_int32(x int32) bool { return x <= 0 }
+func gt_0_int32(x int32) bool { return x > 0 }
+func ge_0_int32(x int32) bool { return x >= 0 }
+func eq_0_int32(x int32) bool { return x == 0 }
+func ne_0_int32(x int32) bool { return x != 0 }
+func lt_1_int32(x int32) bool { return x < 1 }
+func le_1_int32(x int32) bool { return x <= 1 }
+func gt_1_int32(x int32) bool { return x > 1 }
+func ge_1_int32(x int32) bool { return x >= 1 }
+func eq_1_int32(x int32) bool { return x == 1 }
+func ne_1_int32(x int32) bool { return x != 1 }
+func lt_126_int32(x int32) bool { return x < 126 }
+func le_126_int32(x int32) bool { return x <= 126 }
+func gt_126_int32(x int32) bool { return x > 126 }
+func ge_126_int32(x int32) bool { return x >= 126 }
+func eq_126_int32(x int32) bool { return x == 126 }
+func ne_126_int32(x int32) bool { return x != 126 }
+func lt_127_int32(x int32) bool { return x < 127 }
+func le_127_int32(x int32) bool { return x <= 127 }
+func gt_127_int32(x int32) bool { return x > 127 }
+func ge_127_int32(x int32) bool { return x >= 127 }
+func eq_127_int32(x int32) bool { return x == 127 }
+func ne_127_int32(x int32) bool { return x != 127 }
+func lt_128_int32(x int32) bool { return x < 128 }
+func le_128_int32(x int32) bool { return x <= 128 }
+func gt_128_int32(x int32) bool { return x > 128 }
+func ge_128_int32(x int32) bool { return x >= 128 }
+func eq_128_int32(x int32) bool { return x == 128 }
+func ne_128_int32(x int32) bool { return x != 128 }
+func lt_254_int32(x int32) bool { return x < 254 }
+func le_254_int32(x int32) bool { return x <= 254 }
+func gt_254_int32(x int32) bool { return x > 254 }
+func ge_254_int32(x int32) bool { return x >= 254 }
+func eq_254_int32(x int32) bool { return x == 254 }
+func ne_254_int32(x int32) bool { return x != 254 }
+func lt_255_int32(x int32) bool { return x < 255 }
+func le_255_int32(x int32) bool { return x <= 255 }
+func gt_255_int32(x int32) bool { return x > 255 }
+func ge_255_int32(x int32) bool { return x >= 255 }
+func eq_255_int32(x int32) bool { return x == 255 }
+func ne_255_int32(x int32) bool { return x != 255 }
+func lt_256_int32(x int32) bool { return x < 256 }
+func le_256_int32(x int32) bool { return x <= 256 }
+func gt_256_int32(x int32) bool { return x > 256 }
+func ge_256_int32(x int32) bool { return x >= 256 }
+func eq_256_int32(x int32) bool { return x == 256 }
+func ne_256_int32(x int32) bool { return x != 256 }
+func lt_32766_int32(x int32) bool { return x < 32766 }
+func le_32766_int32(x int32) bool { return x <= 32766 }
+func gt_32766_int32(x int32) bool { return x > 32766 }
+func ge_32766_int32(x int32) bool { return x >= 32766 }
+func eq_32766_int32(x int32) bool { return x == 32766 }
+func ne_32766_int32(x int32) bool { return x != 32766 }
+func lt_32767_int32(x int32) bool { return x < 32767 }
+func le_32767_int32(x int32) bool { return x <= 32767 }
+func gt_32767_int32(x int32) bool { return x > 32767 }
+func ge_32767_int32(x int32) bool { return x >= 32767 }
+func eq_32767_int32(x int32) bool { return x == 32767 }
+func ne_32767_int32(x int32) bool { return x != 32767 }
+func lt_32768_int32(x int32) bool { return x < 32768 }
+func le_32768_int32(x int32) bool { return x <= 32768 }
+func gt_32768_int32(x int32) bool { return x > 32768 }
+func ge_32768_int32(x int32) bool { return x >= 32768 }
+func eq_32768_int32(x int32) bool { return x == 32768 }
+func ne_32768_int32(x int32) bool { return x != 32768 }
+func lt_65534_int32(x int32) bool { return x < 65534 }
+func le_65534_int32(x int32) bool { return x <= 65534 }
+func gt_65534_int32(x int32) bool { return x > 65534 }
+func ge_65534_int32(x int32) bool { return x >= 65534 }
+func eq_65534_int32(x int32) bool { return x == 65534 }
+func ne_65534_int32(x int32) bool { return x != 65534 }
+func lt_65535_int32(x int32) bool { return x < 65535 }
+func le_65535_int32(x int32) bool { return x <= 65535 }
+func gt_65535_int32(x int32) bool { return x > 65535 }
+func ge_65535_int32(x int32) bool { return x >= 65535 }
+func eq_65535_int32(x int32) bool { return x == 65535 }
+func ne_65535_int32(x int32) bool { return x != 65535 }
+func lt_65536_int32(x int32) bool { return x < 65536 }
+func le_65536_int32(x int32) bool { return x <= 65536 }
+func gt_65536_int32(x int32) bool { return x > 65536 }
+func ge_65536_int32(x int32) bool { return x >= 65536 }
+func eq_65536_int32(x int32) bool { return x == 65536 }
+func ne_65536_int32(x int32) bool { return x != 65536 }
+func lt_2147483646_int32(x int32) bool { return x < 2147483646 }
+func le_2147483646_int32(x int32) bool { return x <= 2147483646 }
+func gt_2147483646_int32(x int32) bool { return x > 2147483646 }
+func ge_2147483646_int32(x int32) bool { return x >= 2147483646 }
+func eq_2147483646_int32(x int32) bool { return x == 2147483646 }
+func ne_2147483646_int32(x int32) bool { return x != 2147483646 }
+func lt_2147483647_int32(x int32) bool { return x < 2147483647 }
+func le_2147483647_int32(x int32) bool { return x <= 2147483647 }
+func gt_2147483647_int32(x int32) bool { return x > 2147483647 }
+func ge_2147483647_int32(x int32) bool { return x >= 2147483647 }
+func eq_2147483647_int32(x int32) bool { return x == 2147483647 }
+func ne_2147483647_int32(x int32) bool { return x != 2147483647 }
+
+var int32_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int32) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg2147483648_int32},
+ {idx: 0, exp: le, fn: le_neg2147483648_int32},
+ {idx: 0, exp: gt, fn: gt_neg2147483648_int32},
+ {idx: 0, exp: ge, fn: ge_neg2147483648_int32},
+ {idx: 0, exp: eq, fn: eq_neg2147483648_int32},
+ {idx: 0, exp: ne, fn: ne_neg2147483648_int32},
+ {idx: 1, exp: lt, fn: lt_neg2147483647_int32},
+ {idx: 1, exp: le, fn: le_neg2147483647_int32},
+ {idx: 1, exp: gt, fn: gt_neg2147483647_int32},
+ {idx: 1, exp: ge, fn: ge_neg2147483647_int32},
+ {idx: 1, exp: eq, fn: eq_neg2147483647_int32},
+ {idx: 1, exp: ne, fn: ne_neg2147483647_int32},
+ {idx: 2, exp: lt, fn: lt_neg32769_int32},
+ {idx: 2, exp: le, fn: le_neg32769_int32},
+ {idx: 2, exp: gt, fn: gt_neg32769_int32},
+ {idx: 2, exp: ge, fn: ge_neg32769_int32},
+ {idx: 2, exp: eq, fn: eq_neg32769_int32},
+ {idx: 2, exp: ne, fn: ne_neg32769_int32},
+ {idx: 3, exp: lt, fn: lt_neg32768_int32},
+ {idx: 3, exp: le, fn: le_neg32768_int32},
+ {idx: 3, exp: gt, fn: gt_neg32768_int32},
+ {idx: 3, exp: ge, fn: ge_neg32768_int32},
+ {idx: 3, exp: eq, fn: eq_neg32768_int32},
+ {idx: 3, exp: ne, fn: ne_neg32768_int32},
+ {idx: 4, exp: lt, fn: lt_neg32767_int32},
+ {idx: 4, exp: le, fn: le_neg32767_int32},
+ {idx: 4, exp: gt, fn: gt_neg32767_int32},
+ {idx: 4, exp: ge, fn: ge_neg32767_int32},
+ {idx: 4, exp: eq, fn: eq_neg32767_int32},
+ {idx: 4, exp: ne, fn: ne_neg32767_int32},
+ {idx: 5, exp: lt, fn: lt_neg129_int32},
+ {idx: 5, exp: le, fn: le_neg129_int32},
+ {idx: 5, exp: gt, fn: gt_neg129_int32},
+ {idx: 5, exp: ge, fn: ge_neg129_int32},
+ {idx: 5, exp: eq, fn: eq_neg129_int32},
+ {idx: 5, exp: ne, fn: ne_neg129_int32},
+ {idx: 6, exp: lt, fn: lt_neg128_int32},
+ {idx: 6, exp: le, fn: le_neg128_int32},
+ {idx: 6, exp: gt, fn: gt_neg128_int32},
+ {idx: 6, exp: ge, fn: ge_neg128_int32},
+ {idx: 6, exp: eq, fn: eq_neg128_int32},
+ {idx: 6, exp: ne, fn: ne_neg128_int32},
+ {idx: 7, exp: lt, fn: lt_neg127_int32},
+ {idx: 7, exp: le, fn: le_neg127_int32},
+ {idx: 7, exp: gt, fn: gt_neg127_int32},
+ {idx: 7, exp: ge, fn: ge_neg127_int32},
+ {idx: 7, exp: eq, fn: eq_neg127_int32},
+ {idx: 7, exp: ne, fn: ne_neg127_int32},
+ {idx: 8, exp: lt, fn: lt_neg1_int32},
+ {idx: 8, exp: le, fn: le_neg1_int32},
+ {idx: 8, exp: gt, fn: gt_neg1_int32},
+ {idx: 8, exp: ge, fn: ge_neg1_int32},
+ {idx: 8, exp: eq, fn: eq_neg1_int32},
+ {idx: 8, exp: ne, fn: ne_neg1_int32},
+ {idx: 9, exp: lt, fn: lt_0_int32},
+ {idx: 9, exp: le, fn: le_0_int32},
+ {idx: 9, exp: gt, fn: gt_0_int32},
+ {idx: 9, exp: ge, fn: ge_0_int32},
+ {idx: 9, exp: eq, fn: eq_0_int32},
+ {idx: 9, exp: ne, fn: ne_0_int32},
+ {idx: 10, exp: lt, fn: lt_1_int32},
+ {idx: 10, exp: le, fn: le_1_int32},
+ {idx: 10, exp: gt, fn: gt_1_int32},
+ {idx: 10, exp: ge, fn: ge_1_int32},
+ {idx: 10, exp: eq, fn: eq_1_int32},
+ {idx: 10, exp: ne, fn: ne_1_int32},
+ {idx: 11, exp: lt, fn: lt_126_int32},
+ {idx: 11, exp: le, fn: le_126_int32},
+ {idx: 11, exp: gt, fn: gt_126_int32},
+ {idx: 11, exp: ge, fn: ge_126_int32},
+ {idx: 11, exp: eq, fn: eq_126_int32},
+ {idx: 11, exp: ne, fn: ne_126_int32},
+ {idx: 12, exp: lt, fn: lt_127_int32},
+ {idx: 12, exp: le, fn: le_127_int32},
+ {idx: 12, exp: gt, fn: gt_127_int32},
+ {idx: 12, exp: ge, fn: ge_127_int32},
+ {idx: 12, exp: eq, fn: eq_127_int32},
+ {idx: 12, exp: ne, fn: ne_127_int32},
+ {idx: 13, exp: lt, fn: lt_128_int32},
+ {idx: 13, exp: le, fn: le_128_int32},
+ {idx: 13, exp: gt, fn: gt_128_int32},
+ {idx: 13, exp: ge, fn: ge_128_int32},
+ {idx: 13, exp: eq, fn: eq_128_int32},
+ {idx: 13, exp: ne, fn: ne_128_int32},
+ {idx: 14, exp: lt, fn: lt_254_int32},
+ {idx: 14, exp: le, fn: le_254_int32},
+ {idx: 14, exp: gt, fn: gt_254_int32},
+ {idx: 14, exp: ge, fn: ge_254_int32},
+ {idx: 14, exp: eq, fn: eq_254_int32},
+ {idx: 14, exp: ne, fn: ne_254_int32},
+ {idx: 15, exp: lt, fn: lt_255_int32},
+ {idx: 15, exp: le, fn: le_255_int32},
+ {idx: 15, exp: gt, fn: gt_255_int32},
+ {idx: 15, exp: ge, fn: ge_255_int32},
+ {idx: 15, exp: eq, fn: eq_255_int32},
+ {idx: 15, exp: ne, fn: ne_255_int32},
+ {idx: 16, exp: lt, fn: lt_256_int32},
+ {idx: 16, exp: le, fn: le_256_int32},
+ {idx: 16, exp: gt, fn: gt_256_int32},
+ {idx: 16, exp: ge, fn: ge_256_int32},
+ {idx: 16, exp: eq, fn: eq_256_int32},
+ {idx: 16, exp: ne, fn: ne_256_int32},
+ {idx: 17, exp: lt, fn: lt_32766_int32},
+ {idx: 17, exp: le, fn: le_32766_int32},
+ {idx: 17, exp: gt, fn: gt_32766_int32},
+ {idx: 17, exp: ge, fn: ge_32766_int32},
+ {idx: 17, exp: eq, fn: eq_32766_int32},
+ {idx: 17, exp: ne, fn: ne_32766_int32},
+ {idx: 18, exp: lt, fn: lt_32767_int32},
+ {idx: 18, exp: le, fn: le_32767_int32},
+ {idx: 18, exp: gt, fn: gt_32767_int32},
+ {idx: 18, exp: ge, fn: ge_32767_int32},
+ {idx: 18, exp: eq, fn: eq_32767_int32},
+ {idx: 18, exp: ne, fn: ne_32767_int32},
+ {idx: 19, exp: lt, fn: lt_32768_int32},
+ {idx: 19, exp: le, fn: le_32768_int32},
+ {idx: 19, exp: gt, fn: gt_32768_int32},
+ {idx: 19, exp: ge, fn: ge_32768_int32},
+ {idx: 19, exp: eq, fn: eq_32768_int32},
+ {idx: 19, exp: ne, fn: ne_32768_int32},
+ {idx: 20, exp: lt, fn: lt_65534_int32},
+ {idx: 20, exp: le, fn: le_65534_int32},
+ {idx: 20, exp: gt, fn: gt_65534_int32},
+ {idx: 20, exp: ge, fn: ge_65534_int32},
+ {idx: 20, exp: eq, fn: eq_65534_int32},
+ {idx: 20, exp: ne, fn: ne_65534_int32},
+ {idx: 21, exp: lt, fn: lt_65535_int32},
+ {idx: 21, exp: le, fn: le_65535_int32},
+ {idx: 21, exp: gt, fn: gt_65535_int32},
+ {idx: 21, exp: ge, fn: ge_65535_int32},
+ {idx: 21, exp: eq, fn: eq_65535_int32},
+ {idx: 21, exp: ne, fn: ne_65535_int32},
+ {idx: 22, exp: lt, fn: lt_65536_int32},
+ {idx: 22, exp: le, fn: le_65536_int32},
+ {idx: 22, exp: gt, fn: gt_65536_int32},
+ {idx: 22, exp: ge, fn: ge_65536_int32},
+ {idx: 22, exp: eq, fn: eq_65536_int32},
+ {idx: 22, exp: ne, fn: ne_65536_int32},
+ {idx: 23, exp: lt, fn: lt_2147483646_int32},
+ {idx: 23, exp: le, fn: le_2147483646_int32},
+ {idx: 23, exp: gt, fn: gt_2147483646_int32},
+ {idx: 23, exp: ge, fn: ge_2147483646_int32},
+ {idx: 23, exp: eq, fn: eq_2147483646_int32},
+ {idx: 23, exp: ne, fn: ne_2147483646_int32},
+ {idx: 24, exp: lt, fn: lt_2147483647_int32},
+ {idx: 24, exp: le, fn: le_2147483647_int32},
+ {idx: 24, exp: gt, fn: gt_2147483647_int32},
+ {idx: 24, exp: ge, fn: ge_2147483647_int32},
+ {idx: 24, exp: eq, fn: eq_2147483647_int32},
+ {idx: 24, exp: ne, fn: ne_2147483647_int32},
+}
+
+// int16 tests
+var int16_vals = []int16{
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+}
+
+func lt_neg32768_int16(x int16) bool { return x < -32768 }
+func le_neg32768_int16(x int16) bool { return x <= -32768 }
+func gt_neg32768_int16(x int16) bool { return x > -32768 }
+func ge_neg32768_int16(x int16) bool { return x >= -32768 }
+func eq_neg32768_int16(x int16) bool { return x == -32768 }
+func ne_neg32768_int16(x int16) bool { return x != -32768 }
+func lt_neg32767_int16(x int16) bool { return x < -32767 }
+func le_neg32767_int16(x int16) bool { return x <= -32767 }
+func gt_neg32767_int16(x int16) bool { return x > -32767 }
+func ge_neg32767_int16(x int16) bool { return x >= -32767 }
+func eq_neg32767_int16(x int16) bool { return x == -32767 }
+func ne_neg32767_int16(x int16) bool { return x != -32767 }
+func lt_neg129_int16(x int16) bool { return x < -129 }
+func le_neg129_int16(x int16) bool { return x <= -129 }
+func gt_neg129_int16(x int16) bool { return x > -129 }
+func ge_neg129_int16(x int16) bool { return x >= -129 }
+func eq_neg129_int16(x int16) bool { return x == -129 }
+func ne_neg129_int16(x int16) bool { return x != -129 }
+func lt_neg128_int16(x int16) bool { return x < -128 }
+func le_neg128_int16(x int16) bool { return x <= -128 }
+func gt_neg128_int16(x int16) bool { return x > -128 }
+func ge_neg128_int16(x int16) bool { return x >= -128 }
+func eq_neg128_int16(x int16) bool { return x == -128 }
+func ne_neg128_int16(x int16) bool { return x != -128 }
+func lt_neg127_int16(x int16) bool { return x < -127 }
+func le_neg127_int16(x int16) bool { return x <= -127 }
+func gt_neg127_int16(x int16) bool { return x > -127 }
+func ge_neg127_int16(x int16) bool { return x >= -127 }
+func eq_neg127_int16(x int16) bool { return x == -127 }
+func ne_neg127_int16(x int16) bool { return x != -127 }
+func lt_neg1_int16(x int16) bool { return x < -1 }
+func le_neg1_int16(x int16) bool { return x <= -1 }
+func gt_neg1_int16(x int16) bool { return x > -1 }
+func ge_neg1_int16(x int16) bool { return x >= -1 }
+func eq_neg1_int16(x int16) bool { return x == -1 }
+func ne_neg1_int16(x int16) bool { return x != -1 }
+func lt_0_int16(x int16) bool { return x < 0 }
+func le_0_int16(x int16) bool { return x <= 0 }
+func gt_0_int16(x int16) bool { return x > 0 }
+func ge_0_int16(x int16) bool { return x >= 0 }
+func eq_0_int16(x int16) bool { return x == 0 }
+func ne_0_int16(x int16) bool { return x != 0 }
+func lt_1_int16(x int16) bool { return x < 1 }
+func le_1_int16(x int16) bool { return x <= 1 }
+func gt_1_int16(x int16) bool { return x > 1 }
+func ge_1_int16(x int16) bool { return x >= 1 }
+func eq_1_int16(x int16) bool { return x == 1 }
+func ne_1_int16(x int16) bool { return x != 1 }
+func lt_126_int16(x int16) bool { return x < 126 }
+func le_126_int16(x int16) bool { return x <= 126 }
+func gt_126_int16(x int16) bool { return x > 126 }
+func ge_126_int16(x int16) bool { return x >= 126 }
+func eq_126_int16(x int16) bool { return x == 126 }
+func ne_126_int16(x int16) bool { return x != 126 }
+func lt_127_int16(x int16) bool { return x < 127 }
+func le_127_int16(x int16) bool { return x <= 127 }
+func gt_127_int16(x int16) bool { return x > 127 }
+func ge_127_int16(x int16) bool { return x >= 127 }
+func eq_127_int16(x int16) bool { return x == 127 }
+func ne_127_int16(x int16) bool { return x != 127 }
+func lt_128_int16(x int16) bool { return x < 128 }
+func le_128_int16(x int16) bool { return x <= 128 }
+func gt_128_int16(x int16) bool { return x > 128 }
+func ge_128_int16(x int16) bool { return x >= 128 }
+func eq_128_int16(x int16) bool { return x == 128 }
+func ne_128_int16(x int16) bool { return x != 128 }
+func lt_254_int16(x int16) bool { return x < 254 }
+func le_254_int16(x int16) bool { return x <= 254 }
+func gt_254_int16(x int16) bool { return x > 254 }
+func ge_254_int16(x int16) bool { return x >= 254 }
+func eq_254_int16(x int16) bool { return x == 254 }
+func ne_254_int16(x int16) bool { return x != 254 }
+func lt_255_int16(x int16) bool { return x < 255 }
+func le_255_int16(x int16) bool { return x <= 255 }
+func gt_255_int16(x int16) bool { return x > 255 }
+func ge_255_int16(x int16) bool { return x >= 255 }
+func eq_255_int16(x int16) bool { return x == 255 }
+func ne_255_int16(x int16) bool { return x != 255 }
+func lt_256_int16(x int16) bool { return x < 256 }
+func le_256_int16(x int16) bool { return x <= 256 }
+func gt_256_int16(x int16) bool { return x > 256 }
+func ge_256_int16(x int16) bool { return x >= 256 }
+func eq_256_int16(x int16) bool { return x == 256 }
+func ne_256_int16(x int16) bool { return x != 256 }
+func lt_32766_int16(x int16) bool { return x < 32766 }
+func le_32766_int16(x int16) bool { return x <= 32766 }
+func gt_32766_int16(x int16) bool { return x > 32766 }
+func ge_32766_int16(x int16) bool { return x >= 32766 }
+func eq_32766_int16(x int16) bool { return x == 32766 }
+func ne_32766_int16(x int16) bool { return x != 32766 }
+func lt_32767_int16(x int16) bool { return x < 32767 }
+func le_32767_int16(x int16) bool { return x <= 32767 }
+func gt_32767_int16(x int16) bool { return x > 32767 }
+func ge_32767_int16(x int16) bool { return x >= 32767 }
+func eq_32767_int16(x int16) bool { return x == 32767 }
+func ne_32767_int16(x int16) bool { return x != 32767 }
+
+var int16_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int16) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg32768_int16},
+ {idx: 0, exp: le, fn: le_neg32768_int16},
+ {idx: 0, exp: gt, fn: gt_neg32768_int16},
+ {idx: 0, exp: ge, fn: ge_neg32768_int16},
+ {idx: 0, exp: eq, fn: eq_neg32768_int16},
+ {idx: 0, exp: ne, fn: ne_neg32768_int16},
+ {idx: 1, exp: lt, fn: lt_neg32767_int16},
+ {idx: 1, exp: le, fn: le_neg32767_int16},
+ {idx: 1, exp: gt, fn: gt_neg32767_int16},
+ {idx: 1, exp: ge, fn: ge_neg32767_int16},
+ {idx: 1, exp: eq, fn: eq_neg32767_int16},
+ {idx: 1, exp: ne, fn: ne_neg32767_int16},
+ {idx: 2, exp: lt, fn: lt_neg129_int16},
+ {idx: 2, exp: le, fn: le_neg129_int16},
+ {idx: 2, exp: gt, fn: gt_neg129_int16},
+ {idx: 2, exp: ge, fn: ge_neg129_int16},
+ {idx: 2, exp: eq, fn: eq_neg129_int16},
+ {idx: 2, exp: ne, fn: ne_neg129_int16},
+ {idx: 3, exp: lt, fn: lt_neg128_int16},
+ {idx: 3, exp: le, fn: le_neg128_int16},
+ {idx: 3, exp: gt, fn: gt_neg128_int16},
+ {idx: 3, exp: ge, fn: ge_neg128_int16},
+ {idx: 3, exp: eq, fn: eq_neg128_int16},
+ {idx: 3, exp: ne, fn: ne_neg128_int16},
+ {idx: 4, exp: lt, fn: lt_neg127_int16},
+ {idx: 4, exp: le, fn: le_neg127_int16},
+ {idx: 4, exp: gt, fn: gt_neg127_int16},
+ {idx: 4, exp: ge, fn: ge_neg127_int16},
+ {idx: 4, exp: eq, fn: eq_neg127_int16},
+ {idx: 4, exp: ne, fn: ne_neg127_int16},
+ {idx: 5, exp: lt, fn: lt_neg1_int16},
+ {idx: 5, exp: le, fn: le_neg1_int16},
+ {idx: 5, exp: gt, fn: gt_neg1_int16},
+ {idx: 5, exp: ge, fn: ge_neg1_int16},
+ {idx: 5, exp: eq, fn: eq_neg1_int16},
+ {idx: 5, exp: ne, fn: ne_neg1_int16},
+ {idx: 6, exp: lt, fn: lt_0_int16},
+ {idx: 6, exp: le, fn: le_0_int16},
+ {idx: 6, exp: gt, fn: gt_0_int16},
+ {idx: 6, exp: ge, fn: ge_0_int16},
+ {idx: 6, exp: eq, fn: eq_0_int16},
+ {idx: 6, exp: ne, fn: ne_0_int16},
+ {idx: 7, exp: lt, fn: lt_1_int16},
+ {idx: 7, exp: le, fn: le_1_int16},
+ {idx: 7, exp: gt, fn: gt_1_int16},
+ {idx: 7, exp: ge, fn: ge_1_int16},
+ {idx: 7, exp: eq, fn: eq_1_int16},
+ {idx: 7, exp: ne, fn: ne_1_int16},
+ {idx: 8, exp: lt, fn: lt_126_int16},
+ {idx: 8, exp: le, fn: le_126_int16},
+ {idx: 8, exp: gt, fn: gt_126_int16},
+ {idx: 8, exp: ge, fn: ge_126_int16},
+ {idx: 8, exp: eq, fn: eq_126_int16},
+ {idx: 8, exp: ne, fn: ne_126_int16},
+ {idx: 9, exp: lt, fn: lt_127_int16},
+ {idx: 9, exp: le, fn: le_127_int16},
+ {idx: 9, exp: gt, fn: gt_127_int16},
+ {idx: 9, exp: ge, fn: ge_127_int16},
+ {idx: 9, exp: eq, fn: eq_127_int16},
+ {idx: 9, exp: ne, fn: ne_127_int16},
+ {idx: 10, exp: lt, fn: lt_128_int16},
+ {idx: 10, exp: le, fn: le_128_int16},
+ {idx: 10, exp: gt, fn: gt_128_int16},
+ {idx: 10, exp: ge, fn: ge_128_int16},
+ {idx: 10, exp: eq, fn: eq_128_int16},
+ {idx: 10, exp: ne, fn: ne_128_int16},
+ {idx: 11, exp: lt, fn: lt_254_int16},
+ {idx: 11, exp: le, fn: le_254_int16},
+ {idx: 11, exp: gt, fn: gt_254_int16},
+ {idx: 11, exp: ge, fn: ge_254_int16},
+ {idx: 11, exp: eq, fn: eq_254_int16},
+ {idx: 11, exp: ne, fn: ne_254_int16},
+ {idx: 12, exp: lt, fn: lt_255_int16},
+ {idx: 12, exp: le, fn: le_255_int16},
+ {idx: 12, exp: gt, fn: gt_255_int16},
+ {idx: 12, exp: ge, fn: ge_255_int16},
+ {idx: 12, exp: eq, fn: eq_255_int16},
+ {idx: 12, exp: ne, fn: ne_255_int16},
+ {idx: 13, exp: lt, fn: lt_256_int16},
+ {idx: 13, exp: le, fn: le_256_int16},
+ {idx: 13, exp: gt, fn: gt_256_int16},
+ {idx: 13, exp: ge, fn: ge_256_int16},
+ {idx: 13, exp: eq, fn: eq_256_int16},
+ {idx: 13, exp: ne, fn: ne_256_int16},
+ {idx: 14, exp: lt, fn: lt_32766_int16},
+ {idx: 14, exp: le, fn: le_32766_int16},
+ {idx: 14, exp: gt, fn: gt_32766_int16},
+ {idx: 14, exp: ge, fn: ge_32766_int16},
+ {idx: 14, exp: eq, fn: eq_32766_int16},
+ {idx: 14, exp: ne, fn: ne_32766_int16},
+ {idx: 15, exp: lt, fn: lt_32767_int16},
+ {idx: 15, exp: le, fn: le_32767_int16},
+ {idx: 15, exp: gt, fn: gt_32767_int16},
+ {idx: 15, exp: ge, fn: ge_32767_int16},
+ {idx: 15, exp: eq, fn: eq_32767_int16},
+ {idx: 15, exp: ne, fn: ne_32767_int16},
+}
+
+// int8 tests
+var int8_vals = []int8{
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+}
+
+func lt_neg128_int8(x int8) bool { return x < -128 }
+func le_neg128_int8(x int8) bool { return x <= -128 }
+func gt_neg128_int8(x int8) bool { return x > -128 }
+func ge_neg128_int8(x int8) bool { return x >= -128 }
+func eq_neg128_int8(x int8) bool { return x == -128 }
+func ne_neg128_int8(x int8) bool { return x != -128 }
+func lt_neg127_int8(x int8) bool { return x < -127 }
+func le_neg127_int8(x int8) bool { return x <= -127 }
+func gt_neg127_int8(x int8) bool { return x > -127 }
+func ge_neg127_int8(x int8) bool { return x >= -127 }
+func eq_neg127_int8(x int8) bool { return x == -127 }
+func ne_neg127_int8(x int8) bool { return x != -127 }
+func lt_neg1_int8(x int8) bool { return x < -1 }
+func le_neg1_int8(x int8) bool { return x <= -1 }
+func gt_neg1_int8(x int8) bool { return x > -1 }
+func ge_neg1_int8(x int8) bool { return x >= -1 }
+func eq_neg1_int8(x int8) bool { return x == -1 }
+func ne_neg1_int8(x int8) bool { return x != -1 }
+func lt_0_int8(x int8) bool { return x < 0 }
+func le_0_int8(x int8) bool { return x <= 0 }
+func gt_0_int8(x int8) bool { return x > 0 }
+func ge_0_int8(x int8) bool { return x >= 0 }
+func eq_0_int8(x int8) bool { return x == 0 }
+func ne_0_int8(x int8) bool { return x != 0 }
+func lt_1_int8(x int8) bool { return x < 1 }
+func le_1_int8(x int8) bool { return x <= 1 }
+func gt_1_int8(x int8) bool { return x > 1 }
+func ge_1_int8(x int8) bool { return x >= 1 }
+func eq_1_int8(x int8) bool { return x == 1 }
+func ne_1_int8(x int8) bool { return x != 1 }
+func lt_126_int8(x int8) bool { return x < 126 }
+func le_126_int8(x int8) bool { return x <= 126 }
+func gt_126_int8(x int8) bool { return x > 126 }
+func ge_126_int8(x int8) bool { return x >= 126 }
+func eq_126_int8(x int8) bool { return x == 126 }
+func ne_126_int8(x int8) bool { return x != 126 }
+func lt_127_int8(x int8) bool { return x < 127 }
+func le_127_int8(x int8) bool { return x <= 127 }
+func gt_127_int8(x int8) bool { return x > 127 }
+func ge_127_int8(x int8) bool { return x >= 127 }
+func eq_127_int8(x int8) bool { return x == 127 }
+func ne_127_int8(x int8) bool { return x != 127 }
+
+var int8_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int8) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg128_int8},
+ {idx: 0, exp: le, fn: le_neg128_int8},
+ {idx: 0, exp: gt, fn: gt_neg128_int8},
+ {idx: 0, exp: ge, fn: ge_neg128_int8},
+ {idx: 0, exp: eq, fn: eq_neg128_int8},
+ {idx: 0, exp: ne, fn: ne_neg128_int8},
+ {idx: 1, exp: lt, fn: lt_neg127_int8},
+ {idx: 1, exp: le, fn: le_neg127_int8},
+ {idx: 1, exp: gt, fn: gt_neg127_int8},
+ {idx: 1, exp: ge, fn: ge_neg127_int8},
+ {idx: 1, exp: eq, fn: eq_neg127_int8},
+ {idx: 1, exp: ne, fn: ne_neg127_int8},
+ {idx: 2, exp: lt, fn: lt_neg1_int8},
+ {idx: 2, exp: le, fn: le_neg1_int8},
+ {idx: 2, exp: gt, fn: gt_neg1_int8},
+ {idx: 2, exp: ge, fn: ge_neg1_int8},
+ {idx: 2, exp: eq, fn: eq_neg1_int8},
+ {idx: 2, exp: ne, fn: ne_neg1_int8},
+ {idx: 3, exp: lt, fn: lt_0_int8},
+ {idx: 3, exp: le, fn: le_0_int8},
+ {idx: 3, exp: gt, fn: gt_0_int8},
+ {idx: 3, exp: ge, fn: ge_0_int8},
+ {idx: 3, exp: eq, fn: eq_0_int8},
+ {idx: 3, exp: ne, fn: ne_0_int8},
+ {idx: 4, exp: lt, fn: lt_1_int8},
+ {idx: 4, exp: le, fn: le_1_int8},
+ {idx: 4, exp: gt, fn: gt_1_int8},
+ {idx: 4, exp: ge, fn: ge_1_int8},
+ {idx: 4, exp: eq, fn: eq_1_int8},
+ {idx: 4, exp: ne, fn: ne_1_int8},
+ {idx: 5, exp: lt, fn: lt_126_int8},
+ {idx: 5, exp: le, fn: le_126_int8},
+ {idx: 5, exp: gt, fn: gt_126_int8},
+ {idx: 5, exp: ge, fn: ge_126_int8},
+ {idx: 5, exp: eq, fn: eq_126_int8},
+ {idx: 5, exp: ne, fn: ne_126_int8},
+ {idx: 6, exp: lt, fn: lt_127_int8},
+ {idx: 6, exp: le, fn: le_127_int8},
+ {idx: 6, exp: gt, fn: gt_127_int8},
+ {idx: 6, exp: ge, fn: ge_127_int8},
+ {idx: 6, exp: eq, fn: eq_127_int8},
+ {idx: 6, exp: ne, fn: ne_127_int8},
+}
+
+// TestComparisonsConst tests results for comparison operations against constants.
+func TestComparisonsConst(t *testing.T) {
+ for i, test := range uint64_tests {
+ for j, x := range uint64_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint32_tests {
+ for j, x := range uint32_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint16_tests {
+ for j, x := range uint16_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint8_tests {
+ for j, x := range uint8_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int64_tests {
+ for j, x := range int64_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int32_tests {
+ for j, x := range int32_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int16_tests {
+ for j, x := range int16_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int8_tests {
+ for j, x := range int8_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/cmp_test.go b/src/cmd/compile/internal/test/testdata/cmp_test.go
new file mode 100644
index 0000000..06b58f2
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/cmp_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// cmp_ssa.go tests compare simplification operations.
+package main
+
+import "testing"
+
+//go:noinline
+func eq_ssa(a int64) bool {
+ return 4+a == 10
+}
+
+//go:noinline
+func neq_ssa(a int64) bool {
+ return 10 != a+4
+}
+
+func testCmp(t *testing.T) {
+ if wanted, got := true, eq_ssa(6); wanted != got {
+ t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := false, eq_ssa(7); wanted != got {
+ t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := false, neq_ssa(6); wanted != got {
+ t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := true, neq_ssa(7); wanted != got {
+ t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got)
+ }
+}
+
+func TestCmp(t *testing.T) {
+ testCmp(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/compound_test.go b/src/cmd/compile/internal/test/testdata/compound_test.go
new file mode 100644
index 0000000..4ae464d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/compound_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test compound objects
+
+package main
+
+import (
+ "testing"
+)
+
+func string_ssa(a, b string, x bool) string {
+ s := ""
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testString(t *testing.T) {
+ a := "foo"
+ b := "barz"
+ if want, got := a, string_ssa(a, b, true); got != want {
+ t.Errorf("string_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, string_ssa(a, b, false); got != want {
+ t.Errorf("string_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+//go:noinline
+func complex64_ssa(a, b complex64, x bool) complex64 {
+ var c complex64
+ if x {
+ c = a
+ } else {
+ c = b
+ }
+ return c
+}
+
+//go:noinline
+func complex128_ssa(a, b complex128, x bool) complex128 {
+ var c complex128
+ if x {
+ c = a
+ } else {
+ c = b
+ }
+ return c
+}
+
+func testComplex64(t *testing.T) {
+ var a complex64 = 1 + 2i
+ var b complex64 = 3 + 4i
+
+ if want, got := a, complex64_ssa(a, b, true); got != want {
+ t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, complex64_ssa(a, b, false); got != want {
+ t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func testComplex128(t *testing.T) {
+ var a complex128 = 1 + 2i
+ var b complex128 = 3 + 4i
+
+ if want, got := a, complex128_ssa(a, b, true); got != want {
+ t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, complex128_ssa(a, b, false); got != want {
+ t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func slice_ssa(a, b []byte, x bool) []byte {
+ var s []byte
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testSlice(t *testing.T) {
+ a := []byte{3, 4, 5}
+ b := []byte{7, 8, 9}
+ if want, got := byte(3), slice_ssa(a, b, true)[0]; got != want {
+ t.Errorf("slice_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := byte(7), slice_ssa(a, b, false)[0]; got != want {
+ t.Errorf("slice_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func interface_ssa(a, b interface{}, x bool) interface{} {
+ var s interface{}
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testInterface(t *testing.T) {
+ a := interface{}(3)
+ b := interface{}(4)
+ if want, got := 3, interface_ssa(a, b, true).(int); got != want {
+ t.Errorf("interface_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := 4, interface_ssa(a, b, false).(int); got != want {
+ t.Errorf("interface_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func TestCompound(t *testing.T) {
+ testString(t)
+ testSlice(t)
+ testInterface(t)
+ testComplex64(t)
+ testComplex128(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/copy_test.go b/src/cmd/compile/internal/test/testdata/copy_test.go
new file mode 100644
index 0000000..c29611d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/copy_test.go
@@ -0,0 +1,760 @@
+// Code generated by gen/copyGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type T1 struct {
+ pre [8]byte
+ mid [1]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1copy_ssa(y, x *[1]byte) {
+ *y = *x
+}
+func testCopy1(t *testing.T) {
+ a := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{0}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1]byte{100}
+ t1copy_ssa(&a.mid, &x)
+ want := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{100}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T2 struct {
+ pre [8]byte
+ mid [2]byte
+ post [8]byte
+}
+
+//go:noinline
+func t2copy_ssa(y, x *[2]byte) {
+ *y = *x
+}
+func testCopy2(t *testing.T) {
+ a := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{0, 1}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [2]byte{100, 101}
+ t2copy_ssa(&a.mid, &x)
+ want := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{100, 101}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t2copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T3 struct {
+ pre [8]byte
+ mid [3]byte
+ post [8]byte
+}
+
+//go:noinline
+func t3copy_ssa(y, x *[3]byte) {
+ *y = *x
+}
+func testCopy3(t *testing.T) {
+ a := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{0, 1, 2}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [3]byte{100, 101, 102}
+ t3copy_ssa(&a.mid, &x)
+ want := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{100, 101, 102}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t3copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T4 struct {
+ pre [8]byte
+ mid [4]byte
+ post [8]byte
+}
+
+//go:noinline
+func t4copy_ssa(y, x *[4]byte) {
+ *y = *x
+}
+func testCopy4(t *testing.T) {
+ a := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{0, 1, 2, 3}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [4]byte{100, 101, 102, 103}
+ t4copy_ssa(&a.mid, &x)
+ want := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{100, 101, 102, 103}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t4copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T5 struct {
+ pre [8]byte
+ mid [5]byte
+ post [8]byte
+}
+
+//go:noinline
+func t5copy_ssa(y, x *[5]byte) {
+ *y = *x
+}
+func testCopy5(t *testing.T) {
+ a := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{0, 1, 2, 3, 4}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [5]byte{100, 101, 102, 103, 104}
+ t5copy_ssa(&a.mid, &x)
+ want := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{100, 101, 102, 103, 104}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t5copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T6 struct {
+ pre [8]byte
+ mid [6]byte
+ post [8]byte
+}
+
+//go:noinline
+func t6copy_ssa(y, x *[6]byte) {
+ *y = *x
+}
+func testCopy6(t *testing.T) {
+ a := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{0, 1, 2, 3, 4, 5}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [6]byte{100, 101, 102, 103, 104, 105}
+ t6copy_ssa(&a.mid, &x)
+ want := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{100, 101, 102, 103, 104, 105}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t6copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T7 struct {
+ pre [8]byte
+ mid [7]byte
+ post [8]byte
+}
+
+//go:noinline
+func t7copy_ssa(y, x *[7]byte) {
+ *y = *x
+}
+func testCopy7(t *testing.T) {
+ a := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{0, 1, 2, 3, 4, 5, 6}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [7]byte{100, 101, 102, 103, 104, 105, 106}
+ t7copy_ssa(&a.mid, &x)
+ want := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{100, 101, 102, 103, 104, 105, 106}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t7copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T8 struct {
+ pre [8]byte
+ mid [8]byte
+ post [8]byte
+}
+
+//go:noinline
+func t8copy_ssa(y, x *[8]byte) {
+ *y = *x
+}
+func testCopy8(t *testing.T) {
+ a := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{0, 1, 2, 3, 4, 5, 6, 7}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [8]byte{100, 101, 102, 103, 104, 105, 106, 107}
+ t8copy_ssa(&a.mid, &x)
+ want := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{100, 101, 102, 103, 104, 105, 106, 107}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t8copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T9 struct {
+ pre [8]byte
+ mid [9]byte
+ post [8]byte
+}
+
+//go:noinline
+func t9copy_ssa(y, x *[9]byte) {
+ *y = *x
+}
+func testCopy9(t *testing.T) {
+ a := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}
+ t9copy_ssa(&a.mid, &x)
+ want := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t9copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T10 struct {
+ pre [8]byte
+ mid [10]byte
+ post [8]byte
+}
+
+//go:noinline
+func t10copy_ssa(y, x *[10]byte) {
+ *y = *x
+}
+func testCopy10(t *testing.T) {
+ a := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}
+ t10copy_ssa(&a.mid, &x)
+ want := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t10copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T15 struct {
+ pre [8]byte
+ mid [15]byte
+ post [8]byte
+}
+
+//go:noinline
+func t15copy_ssa(y, x *[15]byte) {
+ *y = *x
+}
+func testCopy15(t *testing.T) {
+ a := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}
+ t15copy_ssa(&a.mid, &x)
+ want := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t15copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T16 struct {
+ pre [8]byte
+ mid [16]byte
+ post [8]byte
+}
+
+//go:noinline
+func t16copy_ssa(y, x *[16]byte) {
+ *y = *x
+}
+func testCopy16(t *testing.T) {
+ a := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}
+ t16copy_ssa(&a.mid, &x)
+ want := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t16copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T17 struct {
+ pre [8]byte
+ mid [17]byte
+ post [8]byte
+}
+
+//go:noinline
+func t17copy_ssa(y, x *[17]byte) {
+ *y = *x
+}
+func testCopy17(t *testing.T) {
+ a := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}
+ t17copy_ssa(&a.mid, &x)
+ want := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t17copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T23 struct {
+ pre [8]byte
+ mid [23]byte
+ post [8]byte
+}
+
+//go:noinline
+func t23copy_ssa(y, x *[23]byte) {
+ *y = *x
+}
+func testCopy23(t *testing.T) {
+ a := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
+ t23copy_ssa(&a.mid, &x)
+ want := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t23copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T24 struct {
+ pre [8]byte
+ mid [24]byte
+ post [8]byte
+}
+
+//go:noinline
+func t24copy_ssa(y, x *[24]byte) {
+ *y = *x
+}
+func testCopy24(t *testing.T) {
+ a := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}
+ t24copy_ssa(&a.mid, &x)
+ want := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t24copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T25 struct {
+ pre [8]byte
+ mid [25]byte
+ post [8]byte
+}
+
+//go:noinline
+func t25copy_ssa(y, x *[25]byte) {
+ *y = *x
+}
+func testCopy25(t *testing.T) {
+ a := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}
+ t25copy_ssa(&a.mid, &x)
+ want := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t25copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T31 struct {
+ pre [8]byte
+ mid [31]byte
+ post [8]byte
+}
+
+//go:noinline
+func t31copy_ssa(y, x *[31]byte) {
+ *y = *x
+}
+func testCopy31(t *testing.T) {
+ a := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}
+ t31copy_ssa(&a.mid, &x)
+ want := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t31copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T32 struct {
+ pre [8]byte
+ mid [32]byte
+ post [8]byte
+}
+
+//go:noinline
+func t32copy_ssa(y, x *[32]byte) {
+ *y = *x
+}
+func testCopy32(t *testing.T) {
+ a := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}
+ t32copy_ssa(&a.mid, &x)
+ want := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t32copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T33 struct {
+ pre [8]byte
+ mid [33]byte
+ post [8]byte
+}
+
+//go:noinline
+func t33copy_ssa(y, x *[33]byte) {
+ *y = *x
+}
+func testCopy33(t *testing.T) {
+ a := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}
+ t33copy_ssa(&a.mid, &x)
+ want := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t33copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T63 struct {
+ pre [8]byte
+ mid [63]byte
+ post [8]byte
+}
+
+//go:noinline
+func t63copy_ssa(y, x *[63]byte) {
+ *y = *x
+}
+func testCopy63(t *testing.T) {
+ a := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}
+ t63copy_ssa(&a.mid, &x)
+ want := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t63copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T64 struct {
+ pre [8]byte
+ mid [64]byte
+ post [8]byte
+}
+
+//go:noinline
+func t64copy_ssa(y, x *[64]byte) {
+ *y = *x
+}
+func testCopy64(t *testing.T) {
+ a := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}
+ t64copy_ssa(&a.mid, &x)
+ want := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t64copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T65 struct {
+ pre [8]byte
+ mid [65]byte
+ post [8]byte
+}
+
+//go:noinline
+func t65copy_ssa(y, x *[65]byte) {
+ *y = *x
+}
+func testCopy65(t *testing.T) {
+ a := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}
+ t65copy_ssa(&a.mid, &x)
+ want := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t65copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1023 struct {
+ pre [8]byte
+ mid [1023]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1023copy_ssa(y, x *[1023]byte) {
+ *y = *x
+}
+func testCopy1023(t *testing.T) {
+ a := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
+ t1023copy_ssa(&a.mid, &x)
+ want := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1023copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1024 struct {
+ pre [8]byte
+ mid [1024]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1024copy_ssa(y, x *[1024]byte) {
+ *y = *x
+}
+func testCopy1024(t *testing.T) {
+ a := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}
+ t1024copy_ssa(&a.mid, &x)
+ want := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1024copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1025 struct {
+ pre [8]byte
+ mid [1025]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1025copy_ssa(y, x *[1025]byte) {
+ *y = *x
+}
+func testCopy1025(t *testing.T) {
+ a := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}
+ t1025copy_ssa(&a.mid, &x)
+ want := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1025copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1031 struct {
+ pre [8]byte
+ mid [1031]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1031copy_ssa(y, x *[1031]byte) {
+ *y = *x
+}
+func testCopy1031(t *testing.T) {
+ a := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}
+ t1031copy_ssa(&a.mid, &x)
+ want := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1031copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1032 struct {
+ pre [8]byte
+ mid [1032]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1032copy_ssa(y, x *[1032]byte) {
+ *y = *x
+}
+func testCopy1032(t *testing.T) {
+ a := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}
+ t1032copy_ssa(&a.mid, &x)
+ want := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1032copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1033 struct {
+ pre [8]byte
+ mid [1033]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1033copy_ssa(y, x *[1033]byte) {
+ *y = *x
+}
+func testCopy1033(t *testing.T) {
+ a := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}
+ t1033copy_ssa(&a.mid, &x)
+ want := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1033copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1039 struct {
+ pre [8]byte
+ mid [1039]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1039copy_ssa(y, x *[1039]byte) {
+ *y = *x
+}
+func testCopy1039(t *testing.T) {
+ a := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}
+ t1039copy_ssa(&a.mid, &x)
+ want := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1039copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1040 struct {
+ pre [8]byte
+ mid [1040]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1040copy_ssa(y, x *[1040]byte) {
+ *y = *x
+}
+func testCopy1040(t *testing.T) {
+ a := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}
+ t1040copy_ssa(&a.mid, &x)
+ want := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1040copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1041 struct {
+ pre [8]byte
+ mid [1041]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1041copy_ssa(y, x *[1041]byte) {
+ *y = *x
+}
+func testCopy1041(t *testing.T) {
+ a := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}
+ t1041copy_ssa(&a.mid, &x)
+ want := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1041copy got=%v, want %v\n", a, want)
+ }
+}
+
+//go:noinline
+func tu2copy_ssa(docopy bool, data [2]byte, x *[2]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy2(t *testing.T) {
+ var a [2]byte
+ t2 := [2]byte{2, 3}
+ tu2copy_ssa(true, t2, &a)
+ want2 := [2]byte{2, 3}
+ if a != want2 {
+ t.Errorf("tu2copy got=%v, want %v\n", a, want2)
+ }
+}
+
+//go:noinline
+func tu3copy_ssa(docopy bool, data [3]byte, x *[3]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy3(t *testing.T) {
+ var a [3]byte
+ t3 := [3]byte{3, 4, 5}
+ tu3copy_ssa(true, t3, &a)
+ want3 := [3]byte{3, 4, 5}
+ if a != want3 {
+ t.Errorf("tu3copy got=%v, want %v\n", a, want3)
+ }
+}
+
+//go:noinline
+func tu4copy_ssa(docopy bool, data [4]byte, x *[4]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy4(t *testing.T) {
+ var a [4]byte
+ t4 := [4]byte{4, 5, 6, 7}
+ tu4copy_ssa(true, t4, &a)
+ want4 := [4]byte{4, 5, 6, 7}
+ if a != want4 {
+ t.Errorf("tu4copy got=%v, want %v\n", a, want4)
+ }
+}
+
+//go:noinline
+func tu5copy_ssa(docopy bool, data [5]byte, x *[5]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy5(t *testing.T) {
+ var a [5]byte
+ t5 := [5]byte{5, 6, 7, 8, 9}
+ tu5copy_ssa(true, t5, &a)
+ want5 := [5]byte{5, 6, 7, 8, 9}
+ if a != want5 {
+ t.Errorf("tu5copy got=%v, want %v\n", a, want5)
+ }
+}
+
+//go:noinline
+func tu6copy_ssa(docopy bool, data [6]byte, x *[6]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy6(t *testing.T) {
+ var a [6]byte
+ t6 := [6]byte{6, 7, 8, 9, 10, 11}
+ tu6copy_ssa(true, t6, &a)
+ want6 := [6]byte{6, 7, 8, 9, 10, 11}
+ if a != want6 {
+ t.Errorf("tu6copy got=%v, want %v\n", a, want6)
+ }
+}
+
+//go:noinline
+func tu7copy_ssa(docopy bool, data [7]byte, x *[7]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy7(t *testing.T) {
+ var a [7]byte
+ t7 := [7]byte{7, 8, 9, 10, 11, 12, 13}
+ tu7copy_ssa(true, t7, &a)
+ want7 := [7]byte{7, 8, 9, 10, 11, 12, 13}
+ if a != want7 {
+ t.Errorf("tu7copy got=%v, want %v\n", a, want7)
+ }
+}
+func TestCopy(t *testing.T) {
+ testCopy1(t)
+ testCopy2(t)
+ testCopy3(t)
+ testCopy4(t)
+ testCopy5(t)
+ testCopy6(t)
+ testCopy7(t)
+ testCopy8(t)
+ testCopy9(t)
+ testCopy10(t)
+ testCopy15(t)
+ testCopy16(t)
+ testCopy17(t)
+ testCopy23(t)
+ testCopy24(t)
+ testCopy25(t)
+ testCopy31(t)
+ testCopy32(t)
+ testCopy33(t)
+ testCopy63(t)
+ testCopy64(t)
+ testCopy65(t)
+ testCopy1023(t)
+ testCopy1024(t)
+ testCopy1025(t)
+ testCopy1031(t)
+ testCopy1032(t)
+ testCopy1033(t)
+ testCopy1039(t)
+ testCopy1040(t)
+ testCopy1041(t)
+ testUnalignedCopy2(t)
+ testUnalignedCopy3(t)
+ testUnalignedCopy4(t)
+ testUnalignedCopy5(t)
+ testUnalignedCopy6(t)
+ testUnalignedCopy7(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go
new file mode 100644
index 0000000..501f79e
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/ctl_test.go
@@ -0,0 +1,148 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test control flow
+
+package main
+
+import "testing"
+
+// nor_ssa calculates NOR(a, b).
+// It is implemented in a way that generates
+// phi control values.
+func nor_ssa(a, b bool) bool {
+ var c bool
+ if a {
+ c = true
+ }
+ if b {
+ c = true
+ }
+ if c {
+ return false
+ }
+ return true
+}
+
+func testPhiControl(t *testing.T) {
+ tests := [...][3]bool{ // a, b, want
+ {false, false, true},
+ {true, false, false},
+ {false, true, false},
+ {true, true, false},
+ }
+ for _, test := range tests {
+ a, b := test[0], test[1]
+ got := nor_ssa(a, b)
+ want := test[2]
+ if want != got {
+ t.Errorf("nor(%t, %t)=%t got %t", a, b, want, got)
+ }
+ }
+}
+
+func emptyRange_ssa(b []byte) bool {
+ for _, x := range b {
+ _ = x
+ }
+ return true
+}
+
+func testEmptyRange(t *testing.T) {
+ if !emptyRange_ssa([]byte{}) {
+ t.Errorf("emptyRange_ssa([]byte{})=false, want true")
+ }
+}
+
+func switch_ssa(a int) int {
+ ret := 0
+ switch a {
+ case 5:
+ ret += 5
+ case 4:
+ ret += 4
+ case 3:
+ ret += 3
+ case 2:
+ ret += 2
+ case 1:
+ ret += 1
+ }
+ return ret
+}
+
+func fallthrough_ssa(a int) int {
+ ret := 0
+ switch a {
+ case 5:
+ ret++
+ fallthrough
+ case 4:
+ ret++
+ fallthrough
+ case 3:
+ ret++
+ fallthrough
+ case 2:
+ ret++
+ fallthrough
+ case 1:
+ ret++
+ }
+ return ret
+}
+
+func testFallthrough(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ if got := fallthrough_ssa(i); got != i {
+ t.Errorf("fallthrough_ssa(i) = %d, wanted %d", got, i)
+ }
+ }
+}
+
+func testSwitch(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ if got := switch_ssa(i); got != i {
+ t.Errorf("switch_ssa(i) = %d, wanted %d", got, i)
+ }
+ }
+}
+
+type junk struct {
+ step int
+}
+
+// flagOverwrite_ssa is intended to reproduce an issue seen where a XOR
+// was scheduled between a compare and branch, clearing flags.
+//
+//go:noinline
+func flagOverwrite_ssa(s *junk, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = 0
+ return 1
+ }
+ if c == 'e' || c == 'E' {
+ s.step = 0
+ return 2
+ }
+ s.step = 0
+ return 3
+}
+
+func testFlagOverwrite(t *testing.T) {
+ j := junk{}
+ if got := flagOverwrite_ssa(&j, ' '); got != 3 {
+ t.Errorf("flagOverwrite_ssa = %d, wanted 3", got)
+ }
+}
+
+func TestCtl(t *testing.T) {
+ testPhiControl(t)
+ testEmptyRange(t)
+
+ testSwitch(t)
+ testFallthrough(t)
+
+ testFlagOverwrite(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go b/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go
new file mode 100644
index 0000000..308e897
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that a defer in a function with no return
+// statement will compile correctly.
+
+package main
+
+import "testing"
+
+func deferNoReturn_ssa() {
+ defer func() { println("returned") }()
+ for {
+ println("loop")
+ }
+}
+
+func TestDeferNoReturn(t *testing.T) {
+ // This is a compile-time test, no runtime testing required.
+}
diff --git a/src/cmd/compile/internal/test/testdata/divbyzero_test.go b/src/cmd/compile/internal/test/testdata/divbyzero_test.go
new file mode 100644
index 0000000..ee848b3
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/divbyzero_test.go
@@ -0,0 +1,48 @@
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+func checkDivByZero(f func()) (divByZero bool) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: integer divide by zero" {
+ divByZero = true
+ }
+ }
+ }()
+ f()
+ return false
+}
+
+//go:noinline
+func div_a(i uint, s []int) int {
+ return s[i%uint(len(s))]
+}
+
+//go:noinline
+func div_b(i uint, j uint) uint {
+ return i / j
+}
+
+//go:noinline
+func div_c(i int) int {
+ return 7 / (i - i)
+}
+
+func TestDivByZero(t *testing.T) {
+ if got := checkDivByZero(func() { div_b(7, 0) }); !got {
+ t.Errorf("expected div by zero for b(7, 0), got no error\n")
+ }
+ if got := checkDivByZero(func() { div_b(7, 7) }); got {
+ t.Errorf("expected no error for b(7, 7), got div by zero\n")
+ }
+ if got := checkDivByZero(func() { div_a(4, nil) }); !got {
+ t.Errorf("expected div by zero for a(4, nil), got no error\n")
+ }
+ if got := checkDivByZero(func() { div_c(5) }); !got {
+ t.Errorf("expected div by zero for c(5), got no error\n")
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/dupLoad_test.go b/src/cmd/compile/internal/test/testdata/dupLoad_test.go
new file mode 100644
index 0000000..d859123
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/dupLoad_test.go
@@ -0,0 +1,83 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that we don't split a single
+// load up into two separate loads.
+
+package main
+
+import "testing"
+
+//go:noinline
+func read1(b []byte) (uint16, uint16) {
+ // There is only a single read of b[0]. The two
+ // returned values must have the same low byte.
+ v := b[0]
+ return uint16(v), uint16(v) | uint16(b[1])<<8
+}
+
+func main1(t *testing.T) {
+ const N = 100000
+ done := make(chan bool, 2)
+ b := make([]byte, 2)
+ go func() {
+ for i := 0; i < N; i++ {
+ b[0] = byte(i)
+ b[1] = byte(i)
+ }
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x, y := read1(b)
+ if byte(x) != byte(y) {
+ t.Errorf("x=%x y=%x\n", x, y)
+ done <- false
+ return
+ }
+ }
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+//go:noinline
+func read2(b []byte) (uint16, uint16) {
+ // There is only a single read of b[1]. The two
+ // returned values must have the same high byte.
+ v := uint16(b[1]) << 8
+ return v, uint16(b[0]) | v
+}
+
+func main2(t *testing.T) {
+ const N = 100000
+ done := make(chan bool, 2)
+ b := make([]byte, 2)
+ go func() {
+ for i := 0; i < N; i++ {
+ b[0] = byte(i)
+ b[1] = byte(i)
+ }
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x, y := read2(b)
+ if x&0xff00 != y&0xff00 {
+ t.Errorf("x=%x y=%x\n", x, y)
+ done <- false
+ return
+ }
+ }
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+func TestDupLoad(t *testing.T) {
+ main1(t)
+ main2(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go b/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go
new file mode 100644
index 0000000..ad22601
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go
@@ -0,0 +1,315 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "strings"
+)
+
+// make fake flow graph.
+
+// The blocks of the flow graph are designated with letters A
+// through Z, always including A (start block) and Z (exit
+// block) The specification of a flow graph is a comma-
+// separated list of block successor words, for blocks ordered
+// A, B, C etc, where each block except Z has one or two
+// successors, and any block except A can be a target. Within
+// the generated code, each block with two successors includes
+// a conditional testing x & 1 != 0 (x is the input parameter
+// to the generated function) and also unconditionally shifts x
+// right by one, so that different inputs generate different
+// execution paths, including loops. Every block inverts a
+// global binary to ensure it is not empty. For a flow graph
+// with J words (J+1 blocks), a J-1 bit serial number specifies
+// which blocks (not including A and Z) include an increment of
+// the return variable y by increasing powers of 10, and a
+// different version of the test function is created for each
+// of the 2-to-the-(J-1) serial numbers.
+
+// For each generated function a compact summary is also
+// created so that the generated function can be simulated
+// with a simple interpreter to sanity check the behavior of
+// the compiled code.
+
+// For example:
+
+// func BC_CD_BE_BZ_CZ101(x int64) int64 {
+// y := int64(0)
+// var b int64
+// _ = b
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto C
+// }
+// goto B
+// B:
+// glob_ = !glob_
+// y += 1
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto D
+// }
+// goto C
+// C:
+// glob_ = !glob_
+// // no y increment
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto E
+// }
+// goto B
+// D:
+// glob_ = !glob_
+// y += 10
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto Z
+// }
+// goto B
+// E:
+// glob_ = !glob_
+// // no y increment
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto Z
+// }
+// goto C
+// Z:
+// return y
+// }
+
+// {f:BC_CD_BE_BZ_CZ101,
+// maxin:32, blocks:[]blo{
+// blo{inc:0, cond:true, succs:[2]int64{1, 2}},
+// blo{inc:1, cond:true, succs:[2]int64{2, 3}},
+// blo{inc:0, cond:true, succs:[2]int64{1, 4}},
+// blo{inc:10, cond:true, succs:[2]int64{1, 25}},
+// blo{inc:0, cond:true, succs:[2]int64{2, 25}},}},
+
+var labels string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func blocks(spec string) (blocks []string, fnameBase string) {
+ spec = strings.ToUpper(spec)
+ blocks = strings.Split(spec, ",")
+ fnameBase = strings.Replace(spec, ",", "_", -1)
+ return
+}
+
+func makeFunctionFromFlowGraph(blocks []blo, fname string) string {
+ s := ""
+
+ for j := range blocks {
+ // begin block
+ if j == 0 {
+ // block A, implicit label
+ s += `
+func ` + fname + `(x int64) int64 {
+ y := int64(0)
+ var b int64
+ _ = b`
+ } else {
+ // block B,C, etc, explicit label w/ conditional increment
+ l := labels[j : j+1]
+ yeq := `
+ // no y increment`
+ if blocks[j].inc != 0 {
+ yeq = `
+ y += ` + fmt.Sprintf("%d", blocks[j].inc)
+ }
+
+ s += `
+` + l + `:
+ glob = !glob` + yeq
+ }
+
+ // edges to successors
+ if blocks[j].cond { // conditionally branch to second successor
+ s += `
+ b = x & 1
+ x = x >> 1
+ if b != 0 {` + `
+ goto ` + string(labels[blocks[j].succs[1]]) + `
+ }`
+
+ }
+ // branch to first successor
+ s += `
+ goto ` + string(labels[blocks[j].succs[0]])
+ }
+
+ // end block (Z)
+ s += `
+Z:
+ return y
+}
+`
+ return s
+}
+
+var graphs []string = []string{
+ "Z", "BZ,Z", "B,BZ", "BZ,BZ",
+ "ZB,Z", "B,ZB", "ZB,BZ", "ZB,ZB",
+
+ "BC,C,Z", "BC,BC,Z", "BC,BC,BZ",
+ "BC,Z,Z", "BC,ZC,Z", "BC,ZC,BZ",
+ "BZ,C,Z", "BZ,BC,Z", "BZ,CZ,Z",
+ "BZ,C,BZ", "BZ,BC,BZ", "BZ,CZ,BZ",
+ "BZ,C,CZ", "BZ,BC,CZ", "BZ,CZ,CZ",
+
+ "BC,CD,BE,BZ,CZ",
+ "BC,BD,CE,CZ,BZ",
+ "BC,BD,CE,FZ,GZ,F,G",
+ "BC,BD,CE,FZ,GZ,G,F",
+
+ "BC,DE,BE,FZ,FZ,Z",
+ "BC,DE,BE,FZ,ZF,Z",
+ "BC,DE,BE,ZF,FZ,Z",
+ "BC,DE,EB,FZ,FZ,Z",
+ "BC,ED,BE,FZ,FZ,Z",
+ "CB,DE,BE,FZ,FZ,Z",
+
+ "CB,ED,BE,FZ,FZ,Z",
+ "BC,ED,EB,FZ,ZF,Z",
+ "CB,DE,EB,ZF,FZ,Z",
+ "CB,ED,EB,FZ,FZ,Z",
+
+ "BZ,CD,CD,CE,BZ",
+ "EC,DF,FG,ZC,GB,BE,FD",
+ "BH,CF,DG,HE,BF,CG,DH,BZ",
+}
+
+// blo describes a block in the generated/interpreted code
+type blo struct {
+ inc int64 // increment amount
+ cond bool // block ends in conditional
+ succs [2]int64
+}
+
+// strings2blocks converts a slice of strings specifying
+// successors into a slice of blo encoding the blocks in a
+// common form easy to execute or interpret.
+func strings2blocks(blocks []string, fname string, i int) (bs []blo, cond uint) {
+ bs = make([]blo, len(blocks))
+ edge := int64(1)
+ cond = 0
+ k := uint(0)
+ for j, s := range blocks {
+ if j == 0 {
+ } else {
+ if (i>>k)&1 != 0 {
+ bs[j].inc = edge
+ edge *= 10
+ }
+ k++
+ }
+ if len(s) > 1 {
+ bs[j].succs[1] = int64(blocks[j][1] - 'A')
+ bs[j].cond = true
+ cond++
+ }
+ bs[j].succs[0] = int64(blocks[j][0] - 'A')
+ }
+ return bs, cond
+}
+
+// fmtBlocks writes out the blocks for consumption in the generated test
+func fmtBlocks(bs []blo) string {
+ s := "[]blo{"
+ for _, b := range bs {
+ s += fmt.Sprintf("blo{inc:%d, cond:%v, succs:[2]int64{%d, %d}},", b.inc, b.cond, b.succs[0], b.succs[1])
+ }
+ s += "}"
+ return s
+}
+
+func main() {
+ fmt.Printf(`// This is a machine-generated test file from flowgraph_generator1.go.
+package main
+import "fmt"
+var glob bool
+`)
+ s := "var funs []fun = []fun{"
+ for _, g := range graphs {
+ split, fnameBase := blocks(g)
+ nconfigs := 1 << uint(len(split)-1)
+
+ for i := 0; i < nconfigs; i++ {
+ fname := fnameBase + fmt.Sprintf("%b", i)
+ bs, k := strings2blocks(split, fname, i)
+ fmt.Printf("%s", makeFunctionFromFlowGraph(bs, fname))
+ s += `
+ {f:` + fname + `, maxin:` + fmt.Sprintf("%d", 1<<k) + `, blocks:` + fmtBlocks(bs) + `},`
+ }
+
+ }
+ s += `}
+`
+ // write types for name+array tables.
+ fmt.Printf("%s",
+ `
+type blo struct {
+ inc int64
+ cond bool
+ succs [2]int64
+}
+type fun struct {
+ f func(int64) int64
+ maxin int64
+ blocks []blo
+}
+`)
+ // write table of function names and blo arrays.
+ fmt.Printf("%s", s)
+
+ // write interpreter and main/test
+ fmt.Printf("%s", `
+func interpret(blocks []blo, x int64) (int64, bool) {
+ y := int64(0)
+ last := int64(25) // 'Z'-'A'
+ j := int64(0)
+ for i := 0; i < 4*len(blocks); i++ {
+ b := blocks[j]
+ y += b.inc
+ next := b.succs[0]
+ if b.cond {
+ c := x&1 != 0
+ x = x>>1
+ if c {
+ next = b.succs[1]
+ }
+ }
+ if next == last {
+ return y, true
+ }
+ j = next
+ }
+ return -1, false
+}
+
+func main() {
+ sum := int64(0)
+ for i, f := range funs {
+ for x := int64(0); x < 16*f.maxin; x++ {
+ y, ok := interpret(f.blocks, x)
+ if ok {
+ yy := f.f(x)
+ if y != yy {
+ fmt.Printf("y(%d) != yy(%d), x=%b, i=%d, blocks=%v\n", y, yy, x, i, f.blocks)
+ return
+ }
+ sum += y
+ }
+ }
+ }
+// fmt.Printf("Sum of all returns over all terminating inputs is %d\n", sum)
+}
+`)
+}
diff --git a/src/cmd/compile/internal/test/testdata/fp_test.go b/src/cmd/compile/internal/test/testdata/fp_test.go
new file mode 100644
index 0000000..b96ce84
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/fp_test.go
@@ -0,0 +1,1775 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests floating point arithmetic expressions
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+// manysub_ssa is designed to tickle bugs that depend on register
+// pressure or unfriendly operand ordering in registers (and at
+// least once it succeeded in this).
+//
+//go:noinline
+func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd float64) {
+ aa = a + 11.0 - a
+ ab = a - b
+ ac = a - c
+ ad = a - d
+ ba = b - a
+ bb = b + 22.0 - b
+ bc = b - c
+ bd = b - d
+ ca = c - a
+ cb = c - b
+ cc = c + 33.0 - c
+ cd = c - d
+ da = d - a
+ db = d - b
+ dc = d - c
+ dd = d + 44.0 - d
+ return
+}
+
+// fpspill_ssa attempts to trigger a bug where phis with floating point values
+// were stored in non-fp registers causing an error in doasm.
+//
+//go:noinline
+func fpspill_ssa(a int) float64 {
+
+ ret := -1.0
+ switch a {
+ case 0:
+ ret = 1.0
+ case 1:
+ ret = 1.1
+ case 2:
+ ret = 1.2
+ case 3:
+ ret = 1.3
+ case 4:
+ ret = 1.4
+ case 5:
+ ret = 1.5
+ case 6:
+ ret = 1.6
+ case 7:
+ ret = 1.7
+ case 8:
+ ret = 1.8
+ case 9:
+ ret = 1.9
+ case 10:
+ ret = 1.10
+ case 11:
+ ret = 1.11
+ case 12:
+ ret = 1.12
+ case 13:
+ ret = 1.13
+ case 14:
+ ret = 1.14
+ case 15:
+ ret = 1.15
+ case 16:
+ ret = 1.16
+ }
+ return ret
+}
+
+//go:noinline
+func add64_ssa(a, b float64) float64 {
+ return a + b
+}
+
+//go:noinline
+func mul64_ssa(a, b float64) float64 {
+ return a * b
+}
+
+//go:noinline
+func sub64_ssa(a, b float64) float64 {
+ return a - b
+}
+
+//go:noinline
+func div64_ssa(a, b float64) float64 {
+ return a / b
+}
+
+//go:noinline
+func neg64_ssa(a, b float64) float64 {
+ return -a + -1*b
+}
+
+//go:noinline
+func add32_ssa(a, b float32) float32 {
+ return a + b
+}
+
+//go:noinline
+func mul32_ssa(a, b float32) float32 {
+ return a * b
+}
+
+//go:noinline
+func sub32_ssa(a, b float32) float32 {
+ return a - b
+}
+
+//go:noinline
+func div32_ssa(a, b float32) float32 {
+ return a / b
+}
+
+//go:noinline
+func neg32_ssa(a, b float32) float32 {
+ return -a + -1*b
+}
+
+//go:noinline
+func conv2Float64_ssa(a int8, b uint8, c int16, d uint16,
+ e int32, f uint32, g int64, h uint64, i float32) (aa, bb, cc, dd, ee, ff, gg, hh, ii float64) {
+ aa = float64(a)
+ bb = float64(b)
+ cc = float64(c)
+ hh = float64(h)
+ dd = float64(d)
+ ee = float64(e)
+ ff = float64(f)
+ gg = float64(g)
+ ii = float64(i)
+ return
+}
+
+//go:noinline
+func conv2Float32_ssa(a int8, b uint8, c int16, d uint16,
+ e int32, f uint32, g int64, h uint64, i float64) (aa, bb, cc, dd, ee, ff, gg, hh, ii float32) {
+ aa = float32(a)
+ bb = float32(b)
+ cc = float32(c)
+ dd = float32(d)
+ ee = float32(e)
+ ff = float32(f)
+ gg = float32(g)
+ hh = float32(h)
+ ii = float32(i)
+ return
+}
+
+func integer2floatConversions(t *testing.T) {
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0)
+ expectAll64(t, "zero64", 0, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1)
+ expectAll64(t, "one64", 1, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0)
+ expectAll32(t, "zero32", 0, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1)
+ expectAll32(t, "one32", 1, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ // Check maximum values
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38)
+ expect64(t, "a", a, 127)
+ expect64(t, "b", b, 255)
+ expect64(t, "c", c, 32767)
+ expect64(t, "d", d, 65535)
+ expect64(t, "e", e, float64(int32(0x7fffffff)))
+ expect64(t, "f", f, float64(uint32(0xffffffff)))
+ expect64(t, "g", g, float64(int64(0x7fffffffffffffff)))
+ expect64(t, "h", h, float64(uint64(0xffffffffffffffff)))
+ expect64(t, "i", i, float64(float32(3.402823e38)))
+ }
+ {
+ // Check minimum values (and tweaks for unsigned)
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45)
+ expect64(t, "a", a, -128)
+ expect64(t, "b", b, 254)
+ expect64(t, "c", c, -32768)
+ expect64(t, "d", d, 65534)
+ expect64(t, "e", e, float64(^int32(0x7fffffff)))
+ expect64(t, "f", f, float64(uint32(0xfffffffe)))
+ expect64(t, "g", g, float64(^int64(0x7fffffffffffffff)))
+ expect64(t, "h", h, float64(uint64(0xfffffffffffff401)))
+ expect64(t, "i", i, float64(float32(1.5e-45)))
+ }
+ {
+ // Check maximum values
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38)
+ expect32(t, "a", a, 127)
+ expect32(t, "b", b, 255)
+ expect32(t, "c", c, 32767)
+ expect32(t, "d", d, 65535)
+ expect32(t, "e", e, float32(int32(0x7fffffff)))
+ expect32(t, "f", f, float32(uint32(0xffffffff)))
+ expect32(t, "g", g, float32(int64(0x7fffffffffffffff)))
+ expect32(t, "h", h, float32(uint64(0xffffffffffffffff)))
+ expect32(t, "i", i, float32(float64(3.402823e38)))
+ }
+ {
+ // Check minimum values (and tweaks for unsigned)
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45)
+ expect32(t, "a", a, -128)
+ expect32(t, "b", b, 254)
+ expect32(t, "c", c, -32768)
+ expect32(t, "d", d, 65534)
+ expect32(t, "e", e, float32(^int32(0x7fffffff)))
+ expect32(t, "f", f, float32(uint32(0xfffffffe)))
+ expect32(t, "g", g, float32(^int64(0x7fffffffffffffff)))
+ expect32(t, "h", h, float32(uint64(0xfffffffffffff401)))
+ expect32(t, "i", i, float32(float64(1.5e-45)))
+ }
+}
+
+func multiplyAdd(t *testing.T) {
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a float32() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct{ x, y, z, res float32 }{
+ {0.6046603, 0.9405091, 0.6645601, 1.2332485}, // fused multiply-add result: 1.2332486
+ {0.67908466, 0.21855305, 0.20318687, 0.3516029}, // fused multiply-add result: 0.35160288
+ {0.29311424, 0.29708257, 0.752573, 0.8396522}, // fused multiply-add result: 0.8396521
+ {0.5305857, 0.2535405, 0.282081, 0.41660595}, // fused multiply-add result: 0.41660598
+ {0.29711226, 0.89436173, 0.097454615, 0.36318043}, // fused multiply-add result: 0.36318046
+ {0.6810783, 0.24151509, 0.31152245, 0.47601312}, // fused multiply-add result: 0.47601315
+ {0.73023146, 0.18292491, 0.4283571, 0.5619346}, // fused multiply-add result: 0.56193465
+ {0.89634174, 0.32208398, 0.7211478, 1.009845}, // fused multiply-add result: 1.0098451
+ {0.6280982, 0.12675293, 0.2813303, 0.36094356}, // fused multiply-add result: 0.3609436
+ {0.29400632, 0.75316125, 0.15096405, 0.3723982}, // fused multiply-add result: 0.37239823
+ }
+ check := func(s string, got, expected float32) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("float32(%v * %v) + %v", t.x, t.y, t.z),
+ func(x, y, z float32) float32 {
+ return float32(x*y) + z
+ }(t.x, t.y, t.z),
+ t.res)
+
+ check(
+ fmt.Sprintf("%v += float32(%v * %v)", t.z, t.x, t.y),
+ func(x, y, z float32) float32 {
+ z += float32(x * y)
+ return z
+ }(t.x, t.y, t.z),
+ t.res)
+ }
+ }
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a float64() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct{ x, y, z, res float64 }{
+ {0.4688898449024232, 0.28303415118044517, 0.29310185733681576, 0.42581369658590373}, // fused multiply-add result: 0.4258136965859037
+ {0.7886049150193449, 0.3618054804803169, 0.8805431227416171, 1.1658647029293308}, // fused multiply-add result: 1.1658647029293305
+ {0.7302314772948083, 0.18292491645390843, 0.4283570818068078, 0.5619346137829748}, // fused multiply-add result: 0.5619346137829747
+ {0.6908388315056789, 0.7109071952999951, 0.5637795958152644, 1.0549018919252924}, // fused multiply-add result: 1.0549018919252926
+ {0.4584424785756506, 0.6001655953233308, 0.02626515060968944, 0.3014065536855481}, // fused multiply-add result: 0.30140655368554814
+ {0.539210105890946, 0.9756748149873165, 0.7507630564795985, 1.2768567767840384}, // fused multiply-add result: 1.2768567767840386
+ {0.7830349733960021, 0.3932509992288867, 0.1304138461737918, 0.4383431318929343}, // fused multiply-add result: 0.43834313189293433
+ {0.6841751300974551, 0.6530402051353608, 0.524499759549865, 0.9712936268572192}, // fused multiply-add result: 0.9712936268572193
+ {0.3691117091643448, 0.826454125634742, 0.34768170859156955, 0.6527356034505334}, // fused multiply-add result: 0.6527356034505333
+ {0.16867966833433606, 0.33136826030698385, 0.8279280961505588, 0.8838231843956668}, // fused multiply-add result: 0.8838231843956669
+ }
+ check := func(s string, got, expected float64) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("float64(%v * %v) + %v", t.x, t.y, t.z),
+ func(x, y, z float64) float64 {
+ return float64(x*y) + z
+ }(t.x, t.y, t.z),
+ t.res)
+
+ check(
+ fmt.Sprintf("%v += float64(%v * %v)", t.z, t.x, t.y),
+ func(x, y, z float64) float64 {
+ z += float64(x * y)
+ return z
+ }(t.x, t.y, t.z),
+ t.res)
+ }
+ }
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a complex128() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct {
+ x, y float64
+ res complex128
+ }{
+ {0.6046602879796196, 0.9405090880450124, (2.754489951983871 + 3i)}, // fused multiply-add result: (2.7544899519838713 + 3i)
+ {0.09696951891448456, 0.30091186058528707, (0.5918204173287407 + 3i)}, // fused multiply-add result: (0.5918204173287408 + 3i)
+ {0.544155573000885, 0.27850762181610883, (1.910974340818764 + 3i)}, // fused multiply-add result: (1.9109743408187638 + 3i)
+ {0.9769168685862624, 0.07429099894984302, (3.0050416047086297 + 3i)}, // fused multiply-add result: (3.00504160470863 + 3i)
+ {0.9269868035744142, 0.9549454404167818, (3.735905851140024 + 3i)}, // fused multiply-add result: (3.7359058511400245 + 3i)
+ {0.7109071952999951, 0.5637795958152644, (2.69650118171525 + 3i)}, // fused multiply-add result: (2.6965011817152496 + 3i)
+ {0.7558235074915978, 0.40380328579570035, (2.671273808270494 + 3i)}, // fused multiply-add result: (2.6712738082704934 + 3i)
+ {0.13065111702897217, 0.9859647293402467, (1.3779180804271633 + 3i)}, // fused multiply-add result: (1.3779180804271631 + 3i)
+ {0.8963417453962161, 0.3220839705208817, (3.0111092067095298 + 3i)}, // fused multiply-add result: (3.01110920670953 + 3i)
+ {0.39998376285699544, 0.497868113342702, (1.697819401913688 + 3i)}, // fused multiply-add result: (1.6978194019136883 + 3i)
+ }
+ check := func(s string, got, expected complex128) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %v, got %v\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("complex128(complex(%v, 1)*3) + complex(%v, 0)", t.x, t.y),
+ func(x, y float64) complex128 {
+ return complex128(complex(x, 1)*3) + complex(y, 0)
+ }(t.x, t.y),
+ t.res)
+
+ check(
+ fmt.Sprintf("z := complex(%v, 1); z += complex128(complex(%v, 1) * 3)", t.y, t.x),
+ func(x, y float64) complex128 {
+ z := complex(y, 0)
+ z += complex128(complex(x, 1) * 3)
+ return z
+ }(t.x, t.y),
+ t.res)
+ }
+ }
+}
+
+const (
+ aa = 0x1000000000000000
+ ab = 0x100000000000000
+ ac = 0x10000000000000
+ ad = 0x1000000000000
+ ba = 0x100000000000
+ bb = 0x10000000000
+ bc = 0x1000000000
+ bd = 0x100000000
+ ca = 0x10000000
+ cb = 0x1000000
+ cc = 0x100000
+ cd = 0x10000
+ da = 0x1000
+ db = 0x100
+ dc = 0x10
+ dd = 0x1
+)
+
+//go:noinline
+func compares64_ssa(a, b, c, d float64) (lt, le, eq, ne, ge, gt uint64) {
+ if a < a {
+ lt += aa
+ }
+ if a < b {
+ lt += ab
+ }
+ if a < c {
+ lt += ac
+ }
+ if a < d {
+ lt += ad
+ }
+
+ if b < a {
+ lt += ba
+ }
+ if b < b {
+ lt += bb
+ }
+ if b < c {
+ lt += bc
+ }
+ if b < d {
+ lt += bd
+ }
+
+ if c < a {
+ lt += ca
+ }
+ if c < b {
+ lt += cb
+ }
+ if c < c {
+ lt += cc
+ }
+ if c < d {
+ lt += cd
+ }
+
+ if d < a {
+ lt += da
+ }
+ if d < b {
+ lt += db
+ }
+ if d < c {
+ lt += dc
+ }
+ if d < d {
+ lt += dd
+ }
+
+ if a <= a {
+ le += aa
+ }
+ if a <= b {
+ le += ab
+ }
+ if a <= c {
+ le += ac
+ }
+ if a <= d {
+ le += ad
+ }
+
+ if b <= a {
+ le += ba
+ }
+ if b <= b {
+ le += bb
+ }
+ if b <= c {
+ le += bc
+ }
+ if b <= d {
+ le += bd
+ }
+
+ if c <= a {
+ le += ca
+ }
+ if c <= b {
+ le += cb
+ }
+ if c <= c {
+ le += cc
+ }
+ if c <= d {
+ le += cd
+ }
+
+ if d <= a {
+ le += da
+ }
+ if d <= b {
+ le += db
+ }
+ if d <= c {
+ le += dc
+ }
+ if d <= d {
+ le += dd
+ }
+
+ if a == a {
+ eq += aa
+ }
+ if a == b {
+ eq += ab
+ }
+ if a == c {
+ eq += ac
+ }
+ if a == d {
+ eq += ad
+ }
+
+ if b == a {
+ eq += ba
+ }
+ if b == b {
+ eq += bb
+ }
+ if b == c {
+ eq += bc
+ }
+ if b == d {
+ eq += bd
+ }
+
+ if c == a {
+ eq += ca
+ }
+ if c == b {
+ eq += cb
+ }
+ if c == c {
+ eq += cc
+ }
+ if c == d {
+ eq += cd
+ }
+
+ if d == a {
+ eq += da
+ }
+ if d == b {
+ eq += db
+ }
+ if d == c {
+ eq += dc
+ }
+ if d == d {
+ eq += dd
+ }
+
+ if a != a {
+ ne += aa
+ }
+ if a != b {
+ ne += ab
+ }
+ if a != c {
+ ne += ac
+ }
+ if a != d {
+ ne += ad
+ }
+
+ if b != a {
+ ne += ba
+ }
+ if b != b {
+ ne += bb
+ }
+ if b != c {
+ ne += bc
+ }
+ if b != d {
+ ne += bd
+ }
+
+ if c != a {
+ ne += ca
+ }
+ if c != b {
+ ne += cb
+ }
+ if c != c {
+ ne += cc
+ }
+ if c != d {
+ ne += cd
+ }
+
+ if d != a {
+ ne += da
+ }
+ if d != b {
+ ne += db
+ }
+ if d != c {
+ ne += dc
+ }
+ if d != d {
+ ne += dd
+ }
+
+ if a >= a {
+ ge += aa
+ }
+ if a >= b {
+ ge += ab
+ }
+ if a >= c {
+ ge += ac
+ }
+ if a >= d {
+ ge += ad
+ }
+
+ if b >= a {
+ ge += ba
+ }
+ if b >= b {
+ ge += bb
+ }
+ if b >= c {
+ ge += bc
+ }
+ if b >= d {
+ ge += bd
+ }
+
+ if c >= a {
+ ge += ca
+ }
+ if c >= b {
+ ge += cb
+ }
+ if c >= c {
+ ge += cc
+ }
+ if c >= d {
+ ge += cd
+ }
+
+ if d >= a {
+ ge += da
+ }
+ if d >= b {
+ ge += db
+ }
+ if d >= c {
+ ge += dc
+ }
+ if d >= d {
+ ge += dd
+ }
+
+ if a > a {
+ gt += aa
+ }
+ if a > b {
+ gt += ab
+ }
+ if a > c {
+ gt += ac
+ }
+ if a > d {
+ gt += ad
+ }
+
+ if b > a {
+ gt += ba
+ }
+ if b > b {
+ gt += bb
+ }
+ if b > c {
+ gt += bc
+ }
+ if b > d {
+ gt += bd
+ }
+
+ if c > a {
+ gt += ca
+ }
+ if c > b {
+ gt += cb
+ }
+ if c > c {
+ gt += cc
+ }
+ if c > d {
+ gt += cd
+ }
+
+ if d > a {
+ gt += da
+ }
+ if d > b {
+ gt += db
+ }
+ if d > c {
+ gt += dc
+ }
+ if d > d {
+ gt += dd
+ }
+
+ return
+}
+
+//go:noinline
+func compares32_ssa(a, b, c, d float32) (lt, le, eq, ne, ge, gt uint64) {
+ if a < a {
+ lt += aa
+ }
+ if a < b {
+ lt += ab
+ }
+ if a < c {
+ lt += ac
+ }
+ if a < d {
+ lt += ad
+ }
+
+ if b < a {
+ lt += ba
+ }
+ if b < b {
+ lt += bb
+ }
+ if b < c {
+ lt += bc
+ }
+ if b < d {
+ lt += bd
+ }
+
+ if c < a {
+ lt += ca
+ }
+ if c < b {
+ lt += cb
+ }
+ if c < c {
+ lt += cc
+ }
+ if c < d {
+ lt += cd
+ }
+
+ if d < a {
+ lt += da
+ }
+ if d < b {
+ lt += db
+ }
+ if d < c {
+ lt += dc
+ }
+ if d < d {
+ lt += dd
+ }
+
+ if a <= a {
+ le += aa
+ }
+ if a <= b {
+ le += ab
+ }
+ if a <= c {
+ le += ac
+ }
+ if a <= d {
+ le += ad
+ }
+
+ if b <= a {
+ le += ba
+ }
+ if b <= b {
+ le += bb
+ }
+ if b <= c {
+ le += bc
+ }
+ if b <= d {
+ le += bd
+ }
+
+ if c <= a {
+ le += ca
+ }
+ if c <= b {
+ le += cb
+ }
+ if c <= c {
+ le += cc
+ }
+ if c <= d {
+ le += cd
+ }
+
+ if d <= a {
+ le += da
+ }
+ if d <= b {
+ le += db
+ }
+ if d <= c {
+ le += dc
+ }
+ if d <= d {
+ le += dd
+ }
+
+ if a == a {
+ eq += aa
+ }
+ if a == b {
+ eq += ab
+ }
+ if a == c {
+ eq += ac
+ }
+ if a == d {
+ eq += ad
+ }
+
+ if b == a {
+ eq += ba
+ }
+ if b == b {
+ eq += bb
+ }
+ if b == c {
+ eq += bc
+ }
+ if b == d {
+ eq += bd
+ }
+
+ if c == a {
+ eq += ca
+ }
+ if c == b {
+ eq += cb
+ }
+ if c == c {
+ eq += cc
+ }
+ if c == d {
+ eq += cd
+ }
+
+ if d == a {
+ eq += da
+ }
+ if d == b {
+ eq += db
+ }
+ if d == c {
+ eq += dc
+ }
+ if d == d {
+ eq += dd
+ }
+
+ if a != a {
+ ne += aa
+ }
+ if a != b {
+ ne += ab
+ }
+ if a != c {
+ ne += ac
+ }
+ if a != d {
+ ne += ad
+ }
+
+ if b != a {
+ ne += ba
+ }
+ if b != b {
+ ne += bb
+ }
+ if b != c {
+ ne += bc
+ }
+ if b != d {
+ ne += bd
+ }
+
+ if c != a {
+ ne += ca
+ }
+ if c != b {
+ ne += cb
+ }
+ if c != c {
+ ne += cc
+ }
+ if c != d {
+ ne += cd
+ }
+
+ if d != a {
+ ne += da
+ }
+ if d != b {
+ ne += db
+ }
+ if d != c {
+ ne += dc
+ }
+ if d != d {
+ ne += dd
+ }
+
+ if a >= a {
+ ge += aa
+ }
+ if a >= b {
+ ge += ab
+ }
+ if a >= c {
+ ge += ac
+ }
+ if a >= d {
+ ge += ad
+ }
+
+ if b >= a {
+ ge += ba
+ }
+ if b >= b {
+ ge += bb
+ }
+ if b >= c {
+ ge += bc
+ }
+ if b >= d {
+ ge += bd
+ }
+
+ if c >= a {
+ ge += ca
+ }
+ if c >= b {
+ ge += cb
+ }
+ if c >= c {
+ ge += cc
+ }
+ if c >= d {
+ ge += cd
+ }
+
+ if d >= a {
+ ge += da
+ }
+ if d >= b {
+ ge += db
+ }
+ if d >= c {
+ ge += dc
+ }
+ if d >= d {
+ ge += dd
+ }
+
+ if a > a {
+ gt += aa
+ }
+ if a > b {
+ gt += ab
+ }
+ if a > c {
+ gt += ac
+ }
+ if a > d {
+ gt += ad
+ }
+
+ if b > a {
+ gt += ba
+ }
+ if b > b {
+ gt += bb
+ }
+ if b > c {
+ gt += bc
+ }
+ if b > d {
+ gt += bd
+ }
+
+ if c > a {
+ gt += ca
+ }
+ if c > b {
+ gt += cb
+ }
+ if c > c {
+ gt += cc
+ }
+ if c > d {
+ gt += cd
+ }
+
+ if d > a {
+ gt += da
+ }
+ if d > b {
+ gt += db
+ }
+ if d > c {
+ gt += dc
+ }
+ if d > d {
+ gt += dd
+ }
+
+ return
+}
+
+//go:noinline
+func le64_ssa(x, y float64) bool {
+ return x <= y
+}
+
+//go:noinline
+func ge64_ssa(x, y float64) bool {
+ return x >= y
+}
+
+//go:noinline
+func lt64_ssa(x, y float64) bool {
+ return x < y
+}
+
+//go:noinline
+func gt64_ssa(x, y float64) bool {
+ return x > y
+}
+
+//go:noinline
+func eq64_ssa(x, y float64) bool {
+ return x == y
+}
+
+//go:noinline
+func ne64_ssa(x, y float64) bool {
+ return x != y
+}
+
+//go:noinline
+func eqbr64_ssa(x, y float64) float64 {
+ if x == y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func nebr64_ssa(x, y float64) float64 {
+ if x != y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gebr64_ssa(x, y float64) float64 {
+ if x >= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func lebr64_ssa(x, y float64) float64 {
+ if x <= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func ltbr64_ssa(x, y float64) float64 {
+ if x < y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gtbr64_ssa(x, y float64) float64 {
+ if x > y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func le32_ssa(x, y float32) bool {
+ return x <= y
+}
+
+//go:noinline
+func ge32_ssa(x, y float32) bool {
+ return x >= y
+}
+
+//go:noinline
+func lt32_ssa(x, y float32) bool {
+ return x < y
+}
+
+//go:noinline
+func gt32_ssa(x, y float32) bool {
+ return x > y
+}
+
+//go:noinline
+func eq32_ssa(x, y float32) bool {
+ return x == y
+}
+
+//go:noinline
+func ne32_ssa(x, y float32) bool {
+ return x != y
+}
+
+//go:noinline
+func eqbr32_ssa(x, y float32) float32 {
+ if x == y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func nebr32_ssa(x, y float32) float32 {
+ if x != y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gebr32_ssa(x, y float32) float32 {
+ if x >= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func lebr32_ssa(x, y float32) float32 {
+ if x <= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func ltbr32_ssa(x, y float32) float32 {
+ if x < y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gtbr32_ssa(x, y float32) float32 {
+ if x > y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func F32toU8_ssa(x float32) uint8 {
+ return uint8(x)
+}
+
+//go:noinline
+func F32toI8_ssa(x float32) int8 {
+ return int8(x)
+}
+
+//go:noinline
+func F32toU16_ssa(x float32) uint16 {
+ return uint16(x)
+}
+
+//go:noinline
+func F32toI16_ssa(x float32) int16 {
+ return int16(x)
+}
+
+//go:noinline
+func F32toU32_ssa(x float32) uint32 {
+ return uint32(x)
+}
+
+//go:noinline
+func F32toI32_ssa(x float32) int32 {
+ return int32(x)
+}
+
+//go:noinline
+func F32toU64_ssa(x float32) uint64 {
+ return uint64(x)
+}
+
+//go:noinline
+func F32toI64_ssa(x float32) int64 {
+ return int64(x)
+}
+
+//go:noinline
+func F64toU8_ssa(x float64) uint8 {
+ return uint8(x)
+}
+
+//go:noinline
+func F64toI8_ssa(x float64) int8 {
+ return int8(x)
+}
+
+//go:noinline
+func F64toU16_ssa(x float64) uint16 {
+ return uint16(x)
+}
+
+//go:noinline
+func F64toI16_ssa(x float64) int16 {
+ return int16(x)
+}
+
+//go:noinline
+func F64toU32_ssa(x float64) uint32 {
+ return uint32(x)
+}
+
+//go:noinline
+func F64toI32_ssa(x float64) int32 {
+ return int32(x)
+}
+
+//go:noinline
+func F64toU64_ssa(x float64) uint64 {
+ return uint64(x)
+}
+
+//go:noinline
+func F64toI64_ssa(x float64) int64 {
+ return int64(x)
+}
+
+func floatsToInts(t *testing.T, x float64, expected int64) {
+ y := float32(x)
+ expectInt64(t, "F64toI8", int64(F64toI8_ssa(x)), expected)
+ expectInt64(t, "F64toI16", int64(F64toI16_ssa(x)), expected)
+ expectInt64(t, "F64toI32", int64(F64toI32_ssa(x)), expected)
+ expectInt64(t, "F64toI64", int64(F64toI64_ssa(x)), expected)
+ expectInt64(t, "F32toI8", int64(F32toI8_ssa(y)), expected)
+ expectInt64(t, "F32toI16", int64(F32toI16_ssa(y)), expected)
+ expectInt64(t, "F32toI32", int64(F32toI32_ssa(y)), expected)
+ expectInt64(t, "F32toI64", int64(F32toI64_ssa(y)), expected)
+}
+
+func floatsToUints(t *testing.T, x float64, expected uint64) {
+ y := float32(x)
+ expectUint64(t, "F64toU8", uint64(F64toU8_ssa(x)), expected)
+ expectUint64(t, "F64toU16", uint64(F64toU16_ssa(x)), expected)
+ expectUint64(t, "F64toU32", uint64(F64toU32_ssa(x)), expected)
+ expectUint64(t, "F64toU64", uint64(F64toU64_ssa(x)), expected)
+ expectUint64(t, "F32toU8", uint64(F32toU8_ssa(y)), expected)
+ expectUint64(t, "F32toU16", uint64(F32toU16_ssa(y)), expected)
+ expectUint64(t, "F32toU32", uint64(F32toU32_ssa(y)), expected)
+ expectUint64(t, "F32toU64", uint64(F32toU64_ssa(y)), expected)
+}
+
+func floatingToIntegerConversionsTest(t *testing.T) {
+ floatsToInts(t, 0.0, 0)
+ floatsToInts(t, 0.5, 0)
+ floatsToInts(t, 0.9, 0)
+ floatsToInts(t, 1.0, 1)
+ floatsToInts(t, 1.5, 1)
+ floatsToInts(t, 127.0, 127)
+ floatsToInts(t, -1.0, -1)
+ floatsToInts(t, -128.0, -128)
+
+ floatsToUints(t, 0.0, 0)
+ floatsToUints(t, 1.0, 1)
+ floatsToUints(t, 255.0, 255)
+
+ for j := uint(0); j < 24; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int64(1<<62) | int64(1<<(62-j))
+ w := uint64(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint64(t, "2**62...", F32toU64_ssa(f), w)
+ expectUint64(t, "2**62...", F64toU64_ssa(d), w)
+ expectInt64(t, "2**62...", F32toI64_ssa(f), v)
+ expectInt64(t, "2**62...", F64toI64_ssa(d), v)
+ expectInt64(t, "2**62...", F32toI64_ssa(-f), -v)
+ expectInt64(t, "2**62...", F64toI64_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint64(t, "2**63...", F32toU64_ssa(f), w)
+ expectUint64(t, "2**63...", F64toU64_ssa(d), w)
+ }
+
+ for j := uint(0); j < 16; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int32(1<<30) | int32(1<<(30-j))
+ w := uint32(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint32(t, "2**30...", F32toU32_ssa(f), w)
+ expectUint32(t, "2**30...", F64toU32_ssa(d), w)
+ expectInt32(t, "2**30...", F32toI32_ssa(f), v)
+ expectInt32(t, "2**30...", F64toI32_ssa(d), v)
+ expectInt32(t, "2**30...", F32toI32_ssa(-f), -v)
+ expectInt32(t, "2**30...", F64toI32_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint32(t, "2**31...", F32toU32_ssa(f), w)
+ expectUint32(t, "2**31...", F64toU32_ssa(d), w)
+ }
+
+ for j := uint(0); j < 15; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int16(1<<14) | int16(1<<(14-j))
+ w := uint16(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint16(t, "2**14...", F32toU16_ssa(f), w)
+ expectUint16(t, "2**14...", F64toU16_ssa(d), w)
+ expectInt16(t, "2**14...", F32toI16_ssa(f), v)
+ expectInt16(t, "2**14...", F64toI16_ssa(d), v)
+ expectInt16(t, "2**14...", F32toI16_ssa(-f), -v)
+ expectInt16(t, "2**14...", F64toI16_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint16(t, "2**15...", F32toU16_ssa(f), w)
+ expectUint16(t, "2**15...", F64toU16_ssa(d), w)
+ }
+
+ expectInt32(t, "-2147483648", F32toI32_ssa(-2147483648), -2147483648)
+
+ expectInt32(t, "-2147483648", F64toI32_ssa(-2147483648), -2147483648)
+ expectInt32(t, "-2147483647", F64toI32_ssa(-2147483647), -2147483647)
+ expectUint32(t, "4294967295", F64toU32_ssa(4294967295), 4294967295)
+
+ expectInt16(t, "-32768", F64toI16_ssa(-32768), -32768)
+ expectInt16(t, "-32768", F32toI16_ssa(-32768), -32768)
+
+ // NB more of a pain to do these for 32-bit because of lost bits in Float32 mantissa
+ expectInt16(t, "32767", F64toI16_ssa(32767), 32767)
+ expectInt16(t, "32767", F32toI16_ssa(32767), 32767)
+ expectUint16(t, "32767", F64toU16_ssa(32767), 32767)
+ expectUint16(t, "32767", F32toU16_ssa(32767), 32767)
+ expectUint16(t, "65535", F64toU16_ssa(65535), 65535)
+ expectUint16(t, "65535", F32toU16_ssa(65535), 65535)
+}
+
+func fail64(s string, f func(a, b float64) float64, a, b, e float64) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail64bool(s string, f func(a, b float64) bool, a, b float64, e bool) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail32(s string, f func(a, b float32) float32, a, b, e float32) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail32bool(s string, f func(a, b float32) bool, a, b float32, e bool) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func expect64(t *testing.T, s string, x, expected float64) {
+ if x != expected {
+ println("F64 Expected", expected, "for", s, ", got", x)
+ }
+}
+
+func expect32(t *testing.T, s string, x, expected float32) {
+ if x != expected {
+ println("F32 Expected", expected, "for", s, ", got", x)
+ }
+}
+
+func expectUint64(t *testing.T, s string, x, expected uint64) {
+ if x != expected {
+ fmt.Printf("U64 Expected 0x%016x for %s, got 0x%016x\n", expected, s, x)
+ }
+}
+
+func expectInt64(t *testing.T, s string, x, expected int64) {
+ if x != expected {
+ fmt.Printf("%s: Expected 0x%016x, got 0x%016x\n", s, expected, x)
+ }
+}
+
+func expectUint32(t *testing.T, s string, x, expected uint32) {
+ if x != expected {
+ fmt.Printf("U32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x)
+ }
+}
+
+func expectInt32(t *testing.T, s string, x, expected int32) {
+ if x != expected {
+ fmt.Printf("I32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x)
+ }
+}
+
+func expectUint16(t *testing.T, s string, x, expected uint16) {
+ if x != expected {
+ fmt.Printf("U16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x)
+ }
+}
+
+func expectInt16(t *testing.T, s string, x, expected int16) {
+ if x != expected {
+ fmt.Printf("I16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x)
+ }
+}
+
+func expectAll64(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float64) {
+ expect64(t, s+":a", a, expected)
+ expect64(t, s+":b", b, expected)
+ expect64(t, s+":c", c, expected)
+ expect64(t, s+":d", d, expected)
+ expect64(t, s+":e", e, expected)
+ expect64(t, s+":f", f, expected)
+ expect64(t, s+":g", g, expected)
+}
+
+func expectAll32(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float32) {
+ expect32(t, s+":a", a, expected)
+ expect32(t, s+":b", b, expected)
+ expect32(t, s+":c", c, expected)
+ expect32(t, s+":d", d, expected)
+ expect32(t, s+":e", e, expected)
+ expect32(t, s+":f", f, expected)
+ expect32(t, s+":g", g, expected)
+}
+
+var ev64 [2]float64 = [2]float64{42.0, 17.0}
+var ev32 [2]float32 = [2]float32{42.0, 17.0}
+
+func cmpOpTest(t *testing.T,
+ s string,
+ f func(a, b float64) bool,
+ g func(a, b float64) float64,
+ ff func(a, b float32) bool,
+ gg func(a, b float32) float32,
+ zero, one, inf, nan float64, result uint) {
+ fail64bool(s, f, zero, zero, result>>16&1 == 1)
+ fail64bool(s, f, zero, one, result>>12&1 == 1)
+ fail64bool(s, f, zero, inf, result>>8&1 == 1)
+ fail64bool(s, f, zero, nan, result>>4&1 == 1)
+ fail64bool(s, f, nan, nan, result&1 == 1)
+
+ fail64(s, g, zero, zero, ev64[result>>16&1])
+ fail64(s, g, zero, one, ev64[result>>12&1])
+ fail64(s, g, zero, inf, ev64[result>>8&1])
+ fail64(s, g, zero, nan, ev64[result>>4&1])
+ fail64(s, g, nan, nan, ev64[result>>0&1])
+
+ {
+ zero := float32(zero)
+ one := float32(one)
+ inf := float32(inf)
+ nan := float32(nan)
+ fail32bool(s, ff, zero, zero, (result>>16)&1 == 1)
+ fail32bool(s, ff, zero, one, (result>>12)&1 == 1)
+ fail32bool(s, ff, zero, inf, (result>>8)&1 == 1)
+ fail32bool(s, ff, zero, nan, (result>>4)&1 == 1)
+ fail32bool(s, ff, nan, nan, result&1 == 1)
+
+ fail32(s, gg, zero, zero, ev32[(result>>16)&1])
+ fail32(s, gg, zero, one, ev32[(result>>12)&1])
+ fail32(s, gg, zero, inf, ev32[(result>>8)&1])
+ fail32(s, gg, zero, nan, ev32[(result>>4)&1])
+ fail32(s, gg, nan, nan, ev32[(result>>0)&1])
+ }
+}
+
+func expectCx128(t *testing.T, s string, x, expected complex128) {
+ if x != expected {
+ t.Errorf("Cx 128 Expected %f for %s, got %f", expected, s, x)
+ }
+}
+
+func expectCx64(t *testing.T, s string, x, expected complex64) {
+ if x != expected {
+ t.Errorf("Cx 64 Expected %f for %s, got %f", expected, s, x)
+ }
+}
+
+//go:noinline
+func cx128sum_ssa(a, b complex128) complex128 {
+ return a + b
+}
+
+//go:noinline
+func cx128diff_ssa(a, b complex128) complex128 {
+ return a - b
+}
+
+//go:noinline
+func cx128prod_ssa(a, b complex128) complex128 {
+ return a * b
+}
+
+//go:noinline
+func cx128quot_ssa(a, b complex128) complex128 {
+ return a / b
+}
+
+//go:noinline
+func cx128neg_ssa(a complex128) complex128 {
+ return -a
+}
+
+//go:noinline
+func cx128real_ssa(a complex128) float64 {
+ return real(a)
+}
+
+//go:noinline
+func cx128imag_ssa(a complex128) float64 {
+ return imag(a)
+}
+
+//go:noinline
+func cx128cnst_ssa(a complex128) complex128 {
+ b := 2 + 3i
+ return a * b
+}
+
+//go:noinline
+func cx64sum_ssa(a, b complex64) complex64 {
+ return a + b
+}
+
+//go:noinline
+func cx64diff_ssa(a, b complex64) complex64 {
+ return a - b
+}
+
+//go:noinline
+func cx64prod_ssa(a, b complex64) complex64 {
+ return a * b
+}
+
+//go:noinline
+func cx64quot_ssa(a, b complex64) complex64 {
+ return a / b
+}
+
+//go:noinline
+func cx64neg_ssa(a complex64) complex64 {
+ return -a
+}
+
+//go:noinline
+func cx64real_ssa(a complex64) float32 {
+ return real(a)
+}
+
+//go:noinline
+func cx64imag_ssa(a complex64) float32 {
+ return imag(a)
+}
+
+//go:noinline
+func cx128eq_ssa(a, b complex128) bool {
+ return a == b
+}
+
+//go:noinline
+func cx128ne_ssa(a, b complex128) bool {
+ return a != b
+}
+
+//go:noinline
+func cx64eq_ssa(a, b complex64) bool {
+ return a == b
+}
+
+//go:noinline
+func cx64ne_ssa(a, b complex64) bool {
+ return a != b
+}
+
+func expectTrue(t *testing.T, s string, b bool) {
+ if !b {
+ t.Errorf("expected true for %s, got false", s)
+ }
+}
+func expectFalse(t *testing.T, s string, b bool) {
+ if b {
+ t.Errorf("expected false for %s, got true", s)
+ }
+}
+
+func complexTest128(t *testing.T) {
+ var a complex128 = 1 + 2i
+ var b complex128 = 3 + 6i
+ sum := cx128sum_ssa(b, a)
+ diff := cx128diff_ssa(b, a)
+ prod := cx128prod_ssa(b, a)
+ quot := cx128quot_ssa(b, a)
+ neg := cx128neg_ssa(a)
+ r := cx128real_ssa(a)
+ i := cx128imag_ssa(a)
+ cnst := cx128cnst_ssa(a)
+ c1 := cx128eq_ssa(a, a)
+ c2 := cx128eq_ssa(a, b)
+ c3 := cx128ne_ssa(a, a)
+ c4 := cx128ne_ssa(a, b)
+
+ expectCx128(t, "sum", sum, 4+8i)
+ expectCx128(t, "diff", diff, 2+4i)
+ expectCx128(t, "prod", prod, -9+12i)
+ expectCx128(t, "quot", quot, 3+0i)
+ expectCx128(t, "neg", neg, -1-2i)
+ expect64(t, "real", r, 1)
+ expect64(t, "imag", i, 2)
+ expectCx128(t, "cnst", cnst, -4+7i)
+ expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1)
+ expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2)
+ expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3)
+ expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4)
+}
+
+func complexTest64(t *testing.T) {
+ var a complex64 = 1 + 2i
+ var b complex64 = 3 + 6i
+ sum := cx64sum_ssa(b, a)
+ diff := cx64diff_ssa(b, a)
+ prod := cx64prod_ssa(b, a)
+ quot := cx64quot_ssa(b, a)
+ neg := cx64neg_ssa(a)
+ r := cx64real_ssa(a)
+ i := cx64imag_ssa(a)
+ c1 := cx64eq_ssa(a, a)
+ c2 := cx64eq_ssa(a, b)
+ c3 := cx64ne_ssa(a, a)
+ c4 := cx64ne_ssa(a, b)
+
+ expectCx64(t, "sum", sum, 4+8i)
+ expectCx64(t, "diff", diff, 2+4i)
+ expectCx64(t, "prod", prod, -9+12i)
+ expectCx64(t, "quot", quot, 3+0i)
+ expectCx64(t, "neg", neg, -1-2i)
+ expect32(t, "real", r, 1)
+ expect32(t, "imag", i, 2)
+ expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1)
+ expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2)
+ expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3)
+ expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4)
+}
+
+// TestFP tests that we get the right answer for floating point expressions.
+func TestFP(t *testing.T) {
+ a := 3.0
+ b := 4.0
+
+ c := float32(3.0)
+ d := float32(4.0)
+
+ tiny := float32(1.5e-45) // smallest f32 denorm = 2**(-149)
+ dtiny := float64(tiny) // well within range of f64
+
+ fail64("+", add64_ssa, a, b, 7.0)
+ fail64("*", mul64_ssa, a, b, 12.0)
+ fail64("-", sub64_ssa, a, b, -1.0)
+ fail64("/", div64_ssa, a, b, 0.75)
+ fail64("neg", neg64_ssa, a, b, -7)
+
+ fail32("+", add32_ssa, c, d, 7.0)
+ fail32("*", mul32_ssa, c, d, 12.0)
+ fail32("-", sub32_ssa, c, d, -1.0)
+ fail32("/", div32_ssa, c, d, 0.75)
+ fail32("neg", neg32_ssa, c, d, -7)
+
+ // denorm-squared should underflow to zero.
+ fail32("*", mul32_ssa, tiny, tiny, 0)
+
+ // but should not underflow in float and in fact is exactly representable.
+ fail64("*", mul64_ssa, dtiny, dtiny, 1.9636373861190906e-90)
+
+ // Intended to create register pressure which forces
+ // asymmetric op into different code paths.
+ aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd := manysub_ssa(1000.0, 100.0, 10.0, 1.0)
+
+ expect64(t, "aa", aa, 11.0)
+ expect64(t, "ab", ab, 900.0)
+ expect64(t, "ac", ac, 990.0)
+ expect64(t, "ad", ad, 999.0)
+
+ expect64(t, "ba", ba, -900.0)
+ expect64(t, "bb", bb, 22.0)
+ expect64(t, "bc", bc, 90.0)
+ expect64(t, "bd", bd, 99.0)
+
+ expect64(t, "ca", ca, -990.0)
+ expect64(t, "cb", cb, -90.0)
+ expect64(t, "cc", cc, 33.0)
+ expect64(t, "cd", cd, 9.0)
+
+ expect64(t, "da", da, -999.0)
+ expect64(t, "db", db, -99.0)
+ expect64(t, "dc", dc, -9.0)
+ expect64(t, "dd", dd, 44.0)
+
+ integer2floatConversions(t)
+
+ multiplyAdd(t)
+
+ var zero64 float64 = 0.0
+ var one64 float64 = 1.0
+ var inf64 float64 = 1.0 / zero64
+ var nan64 float64 = sub64_ssa(inf64, inf64)
+
+ cmpOpTest(t, "!=", ne64_ssa, nebr64_ssa, ne32_ssa, nebr32_ssa, zero64, one64, inf64, nan64, 0x01111)
+ cmpOpTest(t, "==", eq64_ssa, eqbr64_ssa, eq32_ssa, eqbr32_ssa, zero64, one64, inf64, nan64, 0x10000)
+ cmpOpTest(t, "<=", le64_ssa, lebr64_ssa, le32_ssa, lebr32_ssa, zero64, one64, inf64, nan64, 0x11100)
+ cmpOpTest(t, "<", lt64_ssa, ltbr64_ssa, lt32_ssa, ltbr32_ssa, zero64, one64, inf64, nan64, 0x01100)
+ cmpOpTest(t, ">", gt64_ssa, gtbr64_ssa, gt32_ssa, gtbr32_ssa, zero64, one64, inf64, nan64, 0x00000)
+ cmpOpTest(t, ">=", ge64_ssa, gebr64_ssa, ge32_ssa, gebr32_ssa, zero64, one64, inf64, nan64, 0x10000)
+
+ {
+ lt, le, eq, ne, ge, gt := compares64_ssa(0.0, 1.0, inf64, nan64)
+ expectUint64(t, "lt", lt, 0x0110001000000000)
+ expectUint64(t, "le", le, 0x1110011000100000)
+ expectUint64(t, "eq", eq, 0x1000010000100000)
+ expectUint64(t, "ne", ne, 0x0111101111011111)
+ expectUint64(t, "ge", ge, 0x1000110011100000)
+ expectUint64(t, "gt", gt, 0x0000100011000000)
+ // fmt.Printf("lt=0x%016x, le=0x%016x, eq=0x%016x, ne=0x%016x, ge=0x%016x, gt=0x%016x\n",
+ // lt, le, eq, ne, ge, gt)
+ }
+ {
+ lt, le, eq, ne, ge, gt := compares32_ssa(0.0, 1.0, float32(inf64), float32(nan64))
+ expectUint64(t, "lt", lt, 0x0110001000000000)
+ expectUint64(t, "le", le, 0x1110011000100000)
+ expectUint64(t, "eq", eq, 0x1000010000100000)
+ expectUint64(t, "ne", ne, 0x0111101111011111)
+ expectUint64(t, "ge", ge, 0x1000110011100000)
+ expectUint64(t, "gt", gt, 0x0000100011000000)
+ }
+
+ floatingToIntegerConversionsTest(t)
+ complexTest128(t)
+ complexTest64(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go b/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go
new file mode 100644
index 0000000..b03c105
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go
@@ -0,0 +1,208 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle some special cases. The test file should be
+// generated with a known working version of go.
+// launch with `go run arithBoundaryGen.go` a file called arithBoundary.go
+// will be written into the parent directory containing the tests
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "log"
+ "text/template"
+)
+
+// used for interpolation in a text template
+type tmplData struct {
+ Name, Stype, Symbol string
+}
+
+// used to work around an issue with the mod symbol being
+// interpreted as part of a format string
+func (s tmplData) SymFirst() string {
+ return string(s.Symbol[0])
+}
+
+// ucast casts an unsigned int to the size in s
+func ucast(i uint64, s sizedTestData) uint64 {
+ switch s.name {
+ case "uint32":
+ return uint64(uint32(i))
+ case "uint16":
+ return uint64(uint16(i))
+ case "uint8":
+ return uint64(uint8(i))
+ }
+ return i
+}
+
+// icast casts a signed int to the size in s
+func icast(i int64, s sizedTestData) int64 {
+ switch s.name {
+ case "int32":
+ return int64(int32(i))
+ case "int16":
+ return int64(int16(i))
+ case "int8":
+ return int64(int8(i))
+ }
+ return i
+}
+
+type sizedTestData struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+}
+
+// values to generate tests. these should include the smallest and largest values, along
+// with any other values that might cause issues. we generate n^2 tests for each size to
+// cover all cases.
+var szs = []sizedTestData{
+ sizedTestData{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}},
+ sizedTestData{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+
+ sizedTestData{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ sizedTestData{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+
+ sizedTestData{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ sizedTestData{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ sizedTestData{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ sizedTestData{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+type op struct {
+ name, symbol string
+}
+
+// ops that we will be generating tests for
+var ops = []op{op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mod", "%%"}, op{"mul", "*"}}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, sz := range []int{64, 32, 16, 8} {
+ fmt.Fprintf(w, "type utd%d struct {\n", sz)
+ fmt.Fprintf(w, " a,b uint%d\n", sz)
+ fmt.Fprintf(w, " add,sub,mul,div,mod uint%d\n", sz)
+ fmt.Fprintf(w, "}\n")
+
+ fmt.Fprintf(w, "type itd%d struct {\n", sz)
+ fmt.Fprintf(w, " a,b int%d\n", sz)
+ fmt.Fprintf(w, " add,sub,mul,div,mod int%d\n", sz)
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // the function being tested
+ testFunc, err := template.New("testFunc").Parse(
+ `//go:noinline
+ func {{.Name}}_{{.Stype}}_ssa(a, b {{.Stype}}) {{.Stype}} {
+ return a {{.SymFirst}} b
+}
+`)
+ if err != nil {
+ panic(err)
+ }
+
+ // generate our functions to be tested
+ for _, s := range szs {
+ for _, o := range ops {
+ fd := tmplData{o.name, s.name, o.symbol}
+ err = testFunc.Execute(w, fd)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ // generate the test data
+ for _, s := range szs {
+ if len(s.u) > 0 {
+ fmt.Fprintf(w, "var %s_data []utd%s = []utd%s{", s.name, s.sn, s.sn)
+ for _, i := range s.u {
+ for _, j := range s.u {
+ fmt.Fprintf(w, "utd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, ucast(i+j, s), ucast(i-j, s), ucast(i*j, s))
+ if j != 0 {
+ fmt.Fprintf(w, ", div: %d, mod: %d", ucast(i/j, s), ucast(i%j, s))
+ }
+ fmt.Fprint(w, "},\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ } else {
+ // TODO: clean up this duplication
+ fmt.Fprintf(w, "var %s_data []itd%s = []itd%s{", s.name, s.sn, s.sn)
+ for _, i := range s.i {
+ for _, j := range s.i {
+ fmt.Fprintf(w, "itd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, icast(i+j, s), icast(i-j, s), icast(i*j, s))
+ if j != 0 {
+ fmt.Fprintf(w, ", div: %d, mod: %d", icast(i/j, s), icast(i%j, s))
+ }
+ fmt.Fprint(w, "},\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+ fmt.Fprintf(w, "//TestArithmeticBoundary tests boundary results for arithmetic operations.\n")
+ fmt.Fprintf(w, "func TestArithmeticBoundary(t *testing.T) {\n\n")
+
+ verify, err := template.New("tst").Parse(
+ `if got := {{.Name}}_{{.Stype}}_ssa(v.a, v.b); got != v.{{.Name}} {
+ t.Errorf("{{.Name}}_{{.Stype}} %d{{.Symbol}}%d = %d, wanted %d\n",v.a,v.b,got,v.{{.Name}})
+}
+`)
+
+ for _, s := range szs {
+ fmt.Fprintf(w, "for _, v := range %s_data {\n", s.name)
+
+ for _, o := range ops {
+ // avoid generating tests that divide by zero
+ if o.name == "div" || o.name == "mod" {
+ fmt.Fprint(w, "if v.b != 0 {")
+ }
+
+ err = verify.Execute(w, tmplData{o.name, s.name, o.symbol})
+
+ if o.name == "div" || o.name == "mod" {
+ fmt.Fprint(w, "\n}\n")
+ }
+
+ if err != nil {
+ panic(err)
+ }
+
+ }
+ fmt.Fprint(w, " }\n")
+ }
+
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = os.WriteFile("../arithBoundary_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go
new file mode 100644
index 0000000..1649f46
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go
@@ -0,0 +1,345 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle const cases. The test file should be
+// generated with a known working version of go.
+// launch with `go run arithConstGen.go` a file called arithConst.go
+// will be written into the parent directory containing the tests
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "log"
+ "strings"
+ "text/template"
+)
+
+type op struct {
+ name, symbol string
+}
+type szD struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+ oponly string
+}
+
+var szs = []szD{
+ {name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0x8000000000000000, 0xffffFFFFffffFFFF}},
+ {name: "uint64", sn: "64", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+ {name: "int64", sn: "64", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ {name: "uint32", sn: "32", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+ {name: "int32", sn: "32", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ {name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ {name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ {name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+ {"add", "+"},
+ {"sub", "-"},
+ {"div", "/"},
+ {"mul", "*"},
+ {"lsh", "<<"},
+ {"rsh", ">>"},
+ {"mod", "%"},
+ {"and", "&"},
+ {"or", "|"},
+ {"xor", "^"},
+}
+
+// compute the result of i op j, cast as type t.
+func ansU(i, j uint64, t, op string) string {
+ var ans uint64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << j
+ case ">>":
+ ans = i >> j
+ case "&":
+ ans = i & j
+ case "|":
+ ans = i | j
+ case "^":
+ ans = i ^ j
+ }
+ switch t {
+ case "uint32":
+ ans = uint64(uint32(ans))
+ case "uint16":
+ ans = uint64(uint16(ans))
+ case "uint8":
+ ans = uint64(uint8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+// compute the result of i op j, cast as type t.
+func ansS(i, j int64, t, op string) string {
+ var ans int64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << uint64(j)
+ case ">>":
+ ans = i >> uint64(j)
+ case "&":
+ ans = i & j
+ case "|":
+ ans = i | j
+ case "^":
+ ans = i ^ j
+ }
+ switch t {
+ case "int32":
+ ans = int64(int32(ans))
+ case "int16":
+ ans = int64(int16(ans))
+ case "int8":
+ ans = int64(int8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/arithConstGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ fncCnst1 := template.Must(template.New("fnc").Parse(
+ `//go:noinline
+func {{.Name}}_{{.Type_}}_{{.FNumber}}(a {{.Type_}}) {{.Type_}} { return a {{.Symbol}} {{.Number}} }
+`))
+ fncCnst2 := template.Must(template.New("fnc").Parse(
+ `//go:noinline
+func {{.Name}}_{{.FNumber}}_{{.Type_}}(a {{.Type_}}) {{.Type_}} { return {{.Number}} {{.Symbol}} a }
+`))
+
+ type fncData struct {
+ Name, Type_, Symbol, FNumber, Number string
+ }
+
+ for _, s := range szs {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ fd := fncData{o.name, s.name, o.symbol, "", ""}
+
+ // unsigned test cases
+ if len(s.u) > 0 {
+ for _, i := range s.u {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // avoid division by zero
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ // introduce uint64 cast for rhs shift operands
+ // if they are too large for default uint type
+ number := fd.Number
+ if (o.name == "lsh" || o.name == "rsh") && uint64(uint32(i)) != i {
+ fd.Number = fmt.Sprintf("uint64(%s)", number)
+ }
+ fncCnst1.Execute(w, fd)
+ fd.Number = number
+ }
+
+ fncCnst2.Execute(w, fd)
+ }
+ }
+
+ // signed test cases
+ if len(s.i) > 0 {
+ // don't generate tests for shifts by signed integers
+ if o.name == "lsh" || o.name == "rsh" {
+ continue
+ }
+ for _, i := range s.i {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // avoid division by zero
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fncCnst1.Execute(w, fd)
+ }
+ fncCnst2.Execute(w, fd)
+ }
+ }
+ }
+ }
+
+ vrf1 := template.Must(template.New("vrf1").Parse(`
+ test_{{.Size}}{fn: {{.Name}}_{{.FNumber}}_{{.Type_}}, fnname: "{{.Name}}_{{.FNumber}}_{{.Type_}}", in: {{.Input}}, want: {{.Ans}}},`))
+
+ vrf2 := template.Must(template.New("vrf2").Parse(`
+ test_{{.Size}}{fn: {{.Name}}_{{.Type_}}_{{.FNumber}}, fnname: "{{.Name}}_{{.Type_}}_{{.FNumber}}", in: {{.Input}}, want: {{.Ans}}},`))
+
+ type cfncData struct {
+ Size, Name, Type_, Symbol, FNumber, Number string
+ Ans, Input string
+ }
+ for _, s := range szs {
+ fmt.Fprintf(w, `
+type test_%[1]s%[2]s struct {
+ fn func (%[1]s) %[1]s
+ fnname string
+ in %[1]s
+ want %[1]s
+}
+`, s.name, s.oponly)
+ fmt.Fprintf(w, "var tests_%[1]s%[2]s =[]test_%[1]s {\n\n", s.name, s.oponly)
+
+ if len(s.u) > 0 {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""}
+ for _, i := range s.u {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // unsigned
+ for _, j := range s.u {
+
+ if o.name != "mod" && o.name != "div" || j != 0 {
+ fd.Ans = ansU(i, j, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf1.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fd.Ans = ansU(j, i, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf2.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ }
+ }
+
+ }
+ }
+
+ // signed
+ if len(s.i) > 0 {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ // don't generate tests for shifts by signed integers
+ if o.name == "lsh" || o.name == "rsh" {
+ continue
+ }
+ fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""}
+ for _, i := range s.i {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+ for _, j := range s.i {
+ if o.name != "mod" && o.name != "div" || j != 0 {
+ fd.Ans = ansS(i, j, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf1.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fd.Ans = ansS(j, i, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf2.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ }
+ }
+
+ }
+ }
+
+ fmt.Fprintf(w, "}\n\n")
+ }
+
+ fmt.Fprint(w, `
+
+// TestArithmeticConst tests results for arithmetic operations against constants.
+func TestArithmeticConst(t *testing.T) {
+`)
+
+ for _, s := range szs {
+ fmt.Fprintf(w, `for _, test := range tests_%s%s {`, s.name, s.oponly)
+ // Use WriteString here to avoid a vet warning about formatting directives.
+ w.WriteString(`if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+`)
+ }
+
+ fmt.Fprint(w, `
+}
+`)
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = os.WriteFile("../arithConst_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go b/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go
new file mode 100644
index 0000000..dcdafc0
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go
@@ -0,0 +1,246 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard comparison
+// operators properly handle one const operand. The test file should be
+// generated with a known working version of go.
+// launch with `go run cmpConstGen.go` a file called cmpConst.go
+// will be written into the parent directory containing the tests
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "log"
+ "math/big"
+ "sort"
+)
+
+const (
+ maxU64 = (1 << 64) - 1
+ maxU32 = (1 << 32) - 1
+ maxU16 = (1 << 16) - 1
+ maxU8 = (1 << 8) - 1
+
+ maxI64 = (1 << 63) - 1
+ maxI32 = (1 << 31) - 1
+ maxI16 = (1 << 15) - 1
+ maxI8 = (1 << 7) - 1
+
+ minI64 = -(1 << 63)
+ minI32 = -(1 << 31)
+ minI16 = -(1 << 15)
+ minI8 = -(1 << 7)
+)
+
+func cmp(left *big.Int, op string, right *big.Int) bool {
+ switch left.Cmp(right) {
+ case -1: // less than
+ return op == "<" || op == "<=" || op == "!="
+ case 0: // equal
+ return op == "==" || op == "<=" || op == ">="
+ case 1: // greater than
+ return op == ">" || op == ">=" || op == "!="
+ }
+ panic("unexpected comparison value")
+}
+
+func inRange(typ string, val *big.Int) bool {
+ min, max := &big.Int{}, &big.Int{}
+ switch typ {
+ case "uint64":
+ max = max.SetUint64(maxU64)
+ case "uint32":
+ max = max.SetUint64(maxU32)
+ case "uint16":
+ max = max.SetUint64(maxU16)
+ case "uint8":
+ max = max.SetUint64(maxU8)
+ case "int64":
+ min = min.SetInt64(minI64)
+ max = max.SetInt64(maxI64)
+ case "int32":
+ min = min.SetInt64(minI32)
+ max = max.SetInt64(maxI32)
+ case "int16":
+ min = min.SetInt64(minI16)
+ max = max.SetInt64(maxI16)
+ case "int8":
+ min = min.SetInt64(minI8)
+ max = max.SetInt64(maxI8)
+ default:
+ panic("unexpected type")
+ }
+ return cmp(min, "<=", val) && cmp(val, "<=", max)
+}
+
+func getValues(typ string) []*big.Int {
+ Uint := func(v uint64) *big.Int { return big.NewInt(0).SetUint64(v) }
+ Int := func(v int64) *big.Int { return big.NewInt(0).SetInt64(v) }
+ values := []*big.Int{
+ // limits
+ Uint(maxU64),
+ Uint(maxU64 - 1),
+ Uint(maxI64 + 1),
+ Uint(maxI64),
+ Uint(maxI64 - 1),
+ Uint(maxU32 + 1),
+ Uint(maxU32),
+ Uint(maxU32 - 1),
+ Uint(maxI32 + 1),
+ Uint(maxI32),
+ Uint(maxI32 - 1),
+ Uint(maxU16 + 1),
+ Uint(maxU16),
+ Uint(maxU16 - 1),
+ Uint(maxI16 + 1),
+ Uint(maxI16),
+ Uint(maxI16 - 1),
+ Uint(maxU8 + 1),
+ Uint(maxU8),
+ Uint(maxU8 - 1),
+ Uint(maxI8 + 1),
+ Uint(maxI8),
+ Uint(maxI8 - 1),
+ Uint(0),
+ Int(minI8 + 1),
+ Int(minI8),
+ Int(minI8 - 1),
+ Int(minI16 + 1),
+ Int(minI16),
+ Int(minI16 - 1),
+ Int(minI32 + 1),
+ Int(minI32),
+ Int(minI32 - 1),
+ Int(minI64 + 1),
+ Int(minI64),
+
+ // other possibly interesting values
+ Uint(1),
+ Int(-1),
+ Uint(0xff << 56),
+ Uint(0xff << 32),
+ Uint(0xff << 24),
+ }
+ sort.Slice(values, func(i, j int) bool { return values[i].Cmp(values[j]) == -1 })
+ var ret []*big.Int
+ for _, val := range values {
+ if !inRange(typ, val) {
+ continue
+ }
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+func sigString(v *big.Int) string {
+ var t big.Int
+ t.Abs(v)
+ if v.Sign() == -1 {
+ return "neg" + t.String()
+ }
+ return t.String()
+}
+
+func main() {
+ types := []string{
+ "uint64", "uint32", "uint16", "uint8",
+ "int64", "int32", "int16", "int8",
+ }
+
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/cmpConstGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import (\"testing\"; \"reflect\"; \"runtime\";)\n")
+ fmt.Fprintf(w, "// results show the expected result for the elements left of, equal to and right of the index.\n")
+ fmt.Fprintf(w, "type result struct{l, e, r bool}\n")
+ fmt.Fprintf(w, "var (\n")
+ fmt.Fprintf(w, " eq = result{l: false, e: true, r: false}\n")
+ fmt.Fprintf(w, " ne = result{l: true, e: false, r: true}\n")
+ fmt.Fprintf(w, " lt = result{l: true, e: false, r: false}\n")
+ fmt.Fprintf(w, " le = result{l: true, e: true, r: false}\n")
+ fmt.Fprintf(w, " gt = result{l: false, e: false, r: true}\n")
+ fmt.Fprintf(w, " ge = result{l: false, e: true, r: true}\n")
+ fmt.Fprintf(w, ")\n")
+
+ operators := []struct{ op, name string }{
+ {"<", "lt"},
+ {"<=", "le"},
+ {">", "gt"},
+ {">=", "ge"},
+ {"==", "eq"},
+ {"!=", "ne"},
+ }
+
+ for _, typ := range types {
+ // generate a slice containing valid values for this type
+ fmt.Fprintf(w, "\n// %v tests\n", typ)
+ values := getValues(typ)
+ fmt.Fprintf(w, "var %v_vals = []%v{\n", typ, typ)
+ for _, val := range values {
+ fmt.Fprintf(w, "%v,\n", val.String())
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // generate test functions
+ for _, r := range values {
+ // TODO: could also test constant on lhs.
+ sig := sigString(r)
+ for _, op := range operators {
+ // no need for go:noinline because the function is called indirectly
+ fmt.Fprintf(w, "func %v_%v_%v(x %v) bool { return x %v %v; }\n", op.name, sig, typ, typ, op.op, r.String())
+ }
+ }
+
+ // generate a table of test cases
+ fmt.Fprintf(w, "var %v_tests = []struct{\n", typ)
+ fmt.Fprintf(w, " idx int // index of the constant used\n")
+ fmt.Fprintf(w, " exp result // expected results\n")
+ fmt.Fprintf(w, " fn func(%v) bool\n", typ)
+ fmt.Fprintf(w, "}{\n")
+ for i, r := range values {
+ sig := sigString(r)
+ for _, op := range operators {
+ fmt.Fprintf(w, "{idx: %v,", i)
+ fmt.Fprintf(w, "exp: %v,", op.name)
+ fmt.Fprintf(w, "fn: %v_%v_%v},\n", op.name, sig, typ)
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // emit the main function, looping over all test cases
+ fmt.Fprintf(w, "// TestComparisonsConst tests results for comparison operations against constants.\n")
+ fmt.Fprintf(w, "func TestComparisonsConst(t *testing.T) {\n")
+ for _, typ := range types {
+ fmt.Fprintf(w, "for i, test := range %v_tests {\n", typ)
+ fmt.Fprintf(w, " for j, x := range %v_vals {\n", typ)
+ fmt.Fprintf(w, " want := test.exp.l\n")
+ fmt.Fprintf(w, " if j == test.idx {\nwant = test.exp.e\n}")
+ fmt.Fprintf(w, " else if j > test.idx {\nwant = test.exp.r\n}\n")
+ fmt.Fprintf(w, " if test.fn(x) != want {\n")
+ fmt.Fprintf(w, " fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()\n")
+ fmt.Fprintf(w, " t.Errorf(\"test failed: %%v(%%v) != %%v [type=%v i=%%v j=%%v idx=%%v]\", fn, x, want, i, j, test.idx)\n", typ)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = os.WriteFile("../cmpConst_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go b/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go
new file mode 100644
index 0000000..7079422
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go
@@ -0,0 +1,307 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle constant folding. The test file should be
+// generated with a known working version of go.
+// launch with `go run constFoldGen.go` a file called constFold_test.go
+// will be written into the grandparent directory containing the tests.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+)
+
+type op struct {
+ name, symbol string
+}
+type szD struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+}
+
+var szs []szD = []szD{
+ szD{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}},
+ szD{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+
+ szD{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ szD{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+
+ szD{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ szD{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ szD{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ szD{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+ op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mul", "*"},
+ op{"lsh", "<<"}, op{"rsh", ">>"}, op{"mod", "%"},
+}
+
+// compute the result of i op j, cast as type t.
+func ansU(i, j uint64, t, op string) string {
+ var ans uint64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << j
+ case ">>":
+ ans = i >> j
+ }
+ switch t {
+ case "uint32":
+ ans = uint64(uint32(ans))
+ case "uint16":
+ ans = uint64(uint16(ans))
+ case "uint8":
+ ans = uint64(uint8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+// compute the result of i op j, cast as type t.
+func ansS(i, j int64, t, op string) string {
+ var ans int64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << uint64(j)
+ case ">>":
+ ans = i >> uint64(j)
+ }
+ switch t {
+ case "int32":
+ ans = int64(int32(ans))
+ case "int16":
+ ans = int64(int16(ans))
+ case "int8":
+ ans = int64(int8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// run\n")
+ fmt.Fprintf(w, "// Code generated by gen/constFoldGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package gc\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range szs {
+ for _, o := range ops {
+ if o.symbol == "<<" || o.symbol == ">>" {
+ // shifts handled separately below, as they can have
+ // different types on the LHS and RHS.
+ continue
+ }
+ fmt.Fprintf(w, "func TestConstFold%s%s(t *testing.T) {\n", s.name, o.name)
+ fmt.Fprintf(w, "\tvar x, y, r %s\n", s.name)
+ // unsigned test cases
+ for _, c := range s.u {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range s.u {
+ if d == 0 && (o.symbol == "/" || o.symbol == "%") {
+ continue
+ }
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansU(c, d, s.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ // signed test cases
+ for _, c := range s.i {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range s.i {
+ if d == 0 && (o.symbol == "/" || o.symbol == "%") {
+ continue
+ }
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansS(c, d, s.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+ // Special signed/unsigned cases for shifts
+ for _, ls := range szs {
+ for _, rs := range szs {
+ if rs.name[0] != 'u' {
+ continue
+ }
+ for _, o := range ops {
+ if o.symbol != "<<" && o.symbol != ">>" {
+ continue
+ }
+ fmt.Fprintf(w, "func TestConstFold%s%s%s(t *testing.T) {\n", ls.name, rs.name, o.name)
+ fmt.Fprintf(w, "\tvar x, r %s\n", ls.name)
+ fmt.Fprintf(w, "\tvar y %s\n", rs.name)
+ // unsigned LHS
+ for _, c := range ls.u {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range rs.u {
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansU(c, d, ls.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ // signed LHS
+ for _, c := range ls.i {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range rs.u {
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansS(c, int64(d), ls.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+ }
+
+ // Constant folding for comparisons
+ for _, s := range szs {
+ fmt.Fprintf(w, "func TestConstFoldCompare%s(t *testing.T) {\n", s.name)
+ for _, x := range s.i {
+ for _, y := range s.i {
+ fmt.Fprintf(w, "\t{\n")
+ fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x)
+ fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y)
+ if x == y {
+ fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n")
+ }
+ if x != y {
+ fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n")
+ }
+ if x < y {
+ fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n")
+ }
+ if x > y {
+ fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n")
+ }
+ if x <= y {
+ fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n")
+ }
+ if x >= y {
+ fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n")
+ }
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ for _, x := range s.u {
+ for _, y := range s.u {
+ fmt.Fprintf(w, "\t{\n")
+ fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x)
+ fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y)
+ if x == y {
+ fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n")
+ }
+ if x != y {
+ fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n")
+ }
+ if x < y {
+ fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n")
+ }
+ if x > y {
+ fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n")
+ }
+ if x <= y {
+ fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n")
+ }
+ if x >= y {
+ fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n")
+ }
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = os.WriteFile("../../constFold_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/copyGen.go b/src/cmd/compile/internal/test/testdata/gen/copyGen.go
new file mode 100644
index 0000000..dd09b3b
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/copyGen.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+)
+
+// This program generates tests to verify that copying operations
+// copy the data they are supposed to and clobber no adjacent values.
+
+// run as `go run copyGen.go`. A file called copy.go
+// will be written into the parent directory containing the tests.
+
+var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025, 1024 + 7, 1024 + 8, 1024 + 9, 1024 + 15, 1024 + 16, 1024 + 17}
+
+var usizes = [...]int{2, 3, 4, 5, 6, 7}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/copyGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range sizes {
+ // type for test
+ fmt.Fprintf(w, "type T%d struct {\n", s)
+ fmt.Fprintf(w, " pre [8]byte\n")
+ fmt.Fprintf(w, " mid [%d]byte\n", s)
+ fmt.Fprintf(w, " post [8]byte\n")
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func t%dcopy_ssa(y, x *[%d]byte) {\n", s, s)
+ fmt.Fprintf(w, " *y = *x\n")
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testCopy%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", i%100)
+ }
+ fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n")
+ fmt.Fprintf(w, " x := [%d]byte{", s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", 100+i%100)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " t%dcopy_ssa(&a.mid, &x)\n", s)
+ fmt.Fprintf(w, " want := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", 100+i%100)
+ }
+ fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"t%dcopy got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ for _, s := range usizes {
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func tu%dcopy_ssa(docopy bool, data [%d]byte, x *[%d]byte) {\n", s, s, s)
+ fmt.Fprintf(w, " if docopy {\n")
+ fmt.Fprintf(w, " *x = data\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testUnalignedCopy%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " var a [%d]byte\n", s)
+ fmt.Fprintf(w, " t%d := [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, " %d,", s+i)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " tu%dcopy_ssa(true, t%d, &a)\n", s, s)
+ fmt.Fprintf(w, " want%d := [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, " %d,", s+i)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " if a != want%d {\n", s)
+ fmt.Fprintf(w, " t.Errorf(\"tu%dcopy got=%%v, want %%v\\n\", a, want%d)\n", s, s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // boilerplate at end
+ fmt.Fprintf(w, "func TestCopy(t *testing.T) {\n")
+ for _, s := range sizes {
+ fmt.Fprintf(w, " testCopy%d(t)\n", s)
+ }
+ for _, s := range usizes {
+ fmt.Fprintf(w, " testUnalignedCopy%d(t)\n", s)
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = os.WriteFile("../copy_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/zeroGen.go b/src/cmd/compile/internal/test/testdata/gen/zeroGen.go
new file mode 100644
index 0000000..f3dcaa1
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/zeroGen.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "log"
+ "os"
+)
+
+// This program generates tests to verify that zeroing operations
+// zero the data they are supposed to and clobber no adjacent values.
+
+// run as `go run zeroGen.go`. A file called zero.go
+// will be written into the parent directory containing the tests.
+
+var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025}
+var usizes = [...]int{8, 16, 24, 32, 64, 256}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/zeroGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range sizes {
+ // type for test
+ fmt.Fprintf(w, "type Z%d struct {\n", s)
+ fmt.Fprintf(w, " pre [8]byte\n")
+ fmt.Fprintf(w, " mid [%d]byte\n", s)
+ fmt.Fprintf(w, " post [8]byte\n")
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%d_ssa(x *[%d]byte) {\n", s, s)
+ fmt.Fprintf(w, " *x = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testZero%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n")
+ fmt.Fprintf(w, " zero%d_ssa(&a.mid)\n", s)
+ fmt.Fprintf(w, " want := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%d got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ for _, s := range usizes {
+ // type for test
+ fmt.Fprintf(w, "type Z%du1 struct {\n", s)
+ fmt.Fprintf(w, " b bool\n")
+ fmt.Fprintf(w, " val [%d]byte\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ fmt.Fprintf(w, "type Z%du2 struct {\n", s)
+ fmt.Fprintf(w, " i uint16\n")
+ fmt.Fprintf(w, " val [%d]byte\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%du1_ssa(t *Z%du1) {\n", s, s)
+ fmt.Fprintf(w, " t.val = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%du2_ssa(t *Z%du2) {\n", s, s)
+ fmt.Fprintf(w, " t.val = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testZero%du(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := Z%du1{false, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " zero%du1_ssa(&a)\n", s)
+ fmt.Fprintf(w, " want := Z%du1{false, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%du2 got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, " b := Z%du2{15, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " zero%du2_ssa(&b)\n", s)
+ fmt.Fprintf(w, " wantb := Z%du2{15, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " if b != wantb {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%du2 got=%%v, want %%v\\n\", b, wantb)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // boilerplate at end
+ fmt.Fprintf(w, "func TestZero(t *testing.T) {\n")
+ for _, s := range sizes {
+ fmt.Fprintf(w, " testZero%d(t)\n", s)
+ }
+ for _, s := range usizes {
+ fmt.Fprintf(w, " testZero%du(t)\n", s)
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = os.WriteFile("../zero_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/loadstore_test.go b/src/cmd/compile/internal/test/testdata/loadstore_test.go
new file mode 100644
index 0000000..0521728
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/loadstore_test.go
@@ -0,0 +1,205 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests load/store ordering
+
+package main
+
+import "testing"
+
+// testLoadStoreOrder tests for reordering of stores/loads.
+func testLoadStoreOrder(t *testing.T) {
+ z := uint32(1000)
+ if testLoadStoreOrder_ssa(&z, 100) == 0 {
+ t.Errorf("testLoadStoreOrder failed")
+ }
+}
+
+//go:noinline
+func testLoadStoreOrder_ssa(z *uint32, prec uint) int {
+ old := *z // load
+ *z = uint32(prec) // store
+ if *z < old { // load
+ return 1
+ }
+ return 0
+}
+
+func testStoreSize(t *testing.T) {
+ a := [4]uint16{11, 22, 33, 44}
+ testStoreSize_ssa(&a[0], &a[2], 77)
+ want := [4]uint16{77, 22, 33, 44}
+ if a != want {
+ t.Errorf("testStoreSize failed. want = %d, got = %d", want, a)
+ }
+}
+
+//go:noinline
+func testStoreSize_ssa(p *uint16, q *uint16, v uint32) {
+ // Test to make sure that (Store ptr (Trunc32to16 val) mem)
+ // does not end up as a 32-bit store. It must stay a 16 bit store
+ // even when Trunc32to16 is rewritten to be a nop.
+ // To ensure that we get rewrite the Trunc32to16 before
+ // we rewrite the Store, we force the truncate into an
+ // earlier basic block by using it on both branches.
+ w := uint16(v)
+ if p != nil {
+ *p = w
+ } else {
+ *q = w
+ }
+}
+
+//go:noinline
+func testExtStore_ssa(p *byte, b bool) int {
+ x := *p
+ *p = 7
+ if b {
+ return int(x)
+ }
+ return 0
+}
+
+func testExtStore(t *testing.T) {
+ const start = 8
+ var b byte = start
+ if got := testExtStore_ssa(&b, true); got != start {
+ t.Errorf("testExtStore failed. want = %d, got = %d", start, got)
+ }
+}
+
+var b int
+
+// testDeadStorePanic_ssa ensures that we don't optimize away stores
+// that could be read by after recover(). Modeled after fixedbugs/issue1304.
+//
+//go:noinline
+func testDeadStorePanic_ssa(a int) (r int) {
+ defer func() {
+ recover()
+ r = a
+ }()
+ a = 2 // store
+ b := a - a // optimized to zero
+ c := 4
+ a = c / b // store, but panics
+ a = 3 // store
+ r = a
+ return
+}
+
+func testDeadStorePanic(t *testing.T) {
+ if want, got := 2, testDeadStorePanic_ssa(1); want != got {
+ t.Errorf("testDeadStorePanic failed. want = %d, got = %d", want, got)
+ }
+}
+
+//go:noinline
+func loadHitStore8(x int8, p *int8) int32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU8(x uint8, p *uint8) uint32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStore16(x int16, p *int16) int32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU16(x uint16, p *uint16) uint32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStore32(x int32, p *int32) int64 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int64(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU32(x uint32, p *uint32) uint64 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint64(*p) // load and cast
+}
+
+func testLoadHitStore(t *testing.T) {
+ // Test that sign/zero extensions are kept when a load-hit-store
+ // is replaced by a register-register move.
+ {
+ var in int8 = (1 << 6) + 1
+ var p int8
+ got := loadHitStore8(in, &p)
+ want := int32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int8) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint8 = (1 << 6) + 1
+ var p uint8
+ got := loadHitStoreU8(in, &p)
+ want := uint32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint8) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in int16 = (1 << 10) + 1
+ var p int16
+ got := loadHitStore16(in, &p)
+ want := int32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int16) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint16 = (1 << 10) + 1
+ var p uint16
+ got := loadHitStoreU16(in, &p)
+ want := uint32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint16) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in int32 = (1 << 30) + 1
+ var p int32
+ got := loadHitStore32(in, &p)
+ want := int64(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int32) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint32 = (1 << 30) + 1
+ var p uint32
+ got := loadHitStoreU32(in, &p)
+ want := uint64(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint32) failed. want = %d, got = %d", want, got)
+ }
+ }
+}
+
+func TestLoadStore(t *testing.T) {
+ testLoadStoreOrder(t)
+ testStoreSize(t)
+ testExtStore(t)
+ testDeadStorePanic(t)
+ testLoadHitStore(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/map_test.go b/src/cmd/compile/internal/test/testdata/map_test.go
new file mode 100644
index 0000000..71dc820
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/map_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// map.go tests map operations.
+package main
+
+import "testing"
+
+//go:noinline
+func lenMap_ssa(v map[int]int) int {
+ return len(v)
+}
+
+func testLenMap(t *testing.T) {
+
+ v := make(map[int]int)
+ v[0] = 0
+ v[1] = 0
+ v[2] = 0
+
+ if want, got := 3, lenMap_ssa(v); got != want {
+ t.Errorf("expected len(map) = %d, got %d", want, got)
+ }
+}
+
+func testLenNilMap(t *testing.T) {
+
+ var v map[int]int
+ if want, got := 0, lenMap_ssa(v); got != want {
+ t.Errorf("expected len(nil) = %d, got %d", want, got)
+ }
+}
+func TestMap(t *testing.T) {
+ testLenMap(t)
+ testLenNilMap(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/mysort/mysort.go b/src/cmd/compile/internal/test/testdata/mysort/mysort.go
new file mode 100644
index 0000000..14852c8
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/mysort/mysort.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Generic sort function, tested with two different pointer types.
+
+package mysort
+
+import (
+ "fmt"
+)
+
+type LessConstraint[T any] interface {
+ Less(T) bool
+}
+
+//go:noinline
+func Sort[T LessConstraint[T]](x []T) {
+ n := len(x)
+ for i := 1; i < n; i++ {
+ for j := i; j > 0 && x[j].Less(x[j-1]); j-- {
+ x[j], x[j-1] = x[j-1], x[j]
+ }
+ }
+}
+
+type MyInt struct {
+ Value int
+}
+
+func (a *MyInt) Less(b *MyInt) bool {
+ return a.Value < b.Value
+}
+
+//go:noinline
+func F() {
+ sl1 := []*MyInt{&MyInt{4}, &MyInt{3}, &MyInt{8}, &MyInt{7}}
+ Sort(sl1)
+ fmt.Printf("%v %v %v %v\n", sl1[0], sl1[1], sl1[2], sl1[3])
+}
diff --git a/src/cmd/compile/internal/test/testdata/namedReturn_test.go b/src/cmd/compile/internal/test/testdata/namedReturn_test.go
new file mode 100644
index 0000000..b07e225
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/namedReturn_test.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that naming named
+// return variables in a return statement works.
+// See issue #14904.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+// Our heap-allocated object that will be GC'd incorrectly.
+// Note that we always check the second word because that's
+// where 0xdeaddeaddeaddead is written.
+type B [4]int
+
+// small (SSAable) array
+type A1 [3]*B
+
+//go:noinline
+func f1() (t A1) {
+ t[0] = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// large (non-SSAable) array
+type A2 [8]*B
+
+//go:noinline
+func f2() (t A2) {
+ t[0] = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// small (SSAable) struct
+type A3 struct {
+ a, b, c *B
+}
+
+//go:noinline
+func f3() (t A3) {
+ t.a = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// large (non-SSAable) struct
+type A4 struct {
+ a, b, c, d, e, f *B
+}
+
+//go:noinline
+func f4() (t A4) {
+ t.a = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+var sink *B
+
+func f5() int {
+ b := &B{91, 92, 93, 94}
+ t := A4{b, nil, nil, nil, nil, nil}
+ sink = b // make sure b is heap allocated ...
+ sink = nil // ... but not live
+ runtime.GC()
+ t = t
+ return t.a[1]
+}
+
+func TestNamedReturn(t *testing.T) {
+ if v := f1()[0][1]; v != 92 {
+ t.Errorf("f1()[0][1]=%d, want 92\n", v)
+ }
+ if v := f2()[0][1]; v != 92 {
+ t.Errorf("f2()[0][1]=%d, want 92\n", v)
+ }
+ if v := f3().a[1]; v != 92 {
+ t.Errorf("f3().a[1]=%d, want 92\n", v)
+ }
+ if v := f4().a[1]; v != 92 {
+ t.Errorf("f4().a[1]=%d, want 92\n", v)
+ }
+ if v := f5(); v != 92 {
+ t.Errorf("f5()=%d, want 92\n", v)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
new file mode 100644
index 0000000..ac238f6
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go
@@ -0,0 +1,252 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file. If this file needs to be updated,
+// then a new devirt.pprof file should be generated:
+//
+// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/devirtualize/
+// $ go mod init example.com/pgo/devirtualize
+// $ go test -bench=. -cpuprofile ./devirt.pprof
+
+package devirt
+
+// Devirtualization of callees from transitive dependencies should work even if
+// they aren't directly referenced in the package. See #61577.
+//
+// Dots in the last package path component are escaped in symbol names. Use one
+// to ensure the escaping doesn't break lookup.
+import (
+ "fmt"
+
+ "example.com/pgo/devirtualize/mult.pkg"
+)
+
+var sink int
+
+type Adder interface {
+ Add(a, b int) int
+}
+
+type Add struct{}
+
+func (Add) Add(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a + b
+}
+
+type Sub struct{}
+
+func (Sub) Add(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a - b
+}
+
+// ExerciseIface calls mostly a1 and m1.
+//
+//go:noinline
+func ExerciseIface(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) int {
+ // The call below must evaluate selectA() to determine the receiver to
+ // use. This should happen exactly once per iteration. Assert that is
+ // the case to ensure the IR manipulation does not result in over- or
+ // under-evaluation.
+ selectI := 0
+ selectA := func(gotI int) Adder {
+ if gotI != selectI {
+ panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI))
+ }
+ selectI++
+
+ if gotI%10 == 0 {
+ return a2
+ }
+ return a1
+ }
+ oneI := 0
+ one := func(gotI int) int {
+ if gotI != oneI {
+ panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI))
+ }
+ oneI++
+
+ // The function value must be evaluated before arguments, so
+ // selectI must have been incremented already.
+ if selectI != oneI {
+ panic(fmt.Sprintf("selectA not called before not called before one; got i %d want %d", selectI, oneI))
+ }
+
+ return 1
+ }
+
+ val := 0
+ for i := 0; i < iter; i++ {
+ m := m1
+ if i%10 == 0 {
+ m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // interfaces and implementations are mutually exclusive,
+ // devirtualization can still select the correct callee for
+ // each.
+ //
+ // If they were not mutually exclusive (for example, two Add
+ // calls), then we could not definitively select the correct
+ // callee.
+ val += m.Multiply(42, selectA(i).Add(one(i), 2))
+ }
+ return val
+}
+
+type AddFunc func(int, int) int
+
+func AddFn(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a + b
+}
+
+func SubFn(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a - b
+}
+
+// ExerciseFuncConcrete calls mostly a1 and m1.
+//
+//go:noinline
+func ExerciseFuncConcrete(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+ // The call below must evaluate selectA() to determine the function to
+ // call. This should happen exactly once per iteration. Assert that is
+ // the case to ensure the IR manipulation does not result in over- or
+ // under-evaluation.
+ selectI := 0
+ selectA := func(gotI int) AddFunc {
+ if gotI != selectI {
+ panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI))
+ }
+ selectI++
+
+ if gotI%10 == 0 {
+ return a2
+ }
+ return a1
+ }
+ oneI := 0
+ one := func(gotI int) int {
+ if gotI != oneI {
+ panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI))
+ }
+ oneI++
+
+ // The function value must be evaluated before arguments, so
+ // selectI must have been incremented already.
+ if selectI != oneI {
+ panic(fmt.Sprintf("selectA not called before not called before one; got i %d want %d", selectI, oneI))
+ }
+
+ return 1
+ }
+
+ val := 0
+ for i := 0; i < iter; i++ {
+ m := m1
+ if i%10 == 0 {
+ m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // function types are mutually exclusive, devirtualization can
+ // still select the correct callee for each.
+ //
+ // If they were not mutually exclusive (for example, two
+ // AddFunc calls), then we could not definitively select the
+ // correct callee.
+ val += int(m(42, int64(selectA(i)(one(i), 2))))
+ }
+ return val
+}
+
+// ExerciseFuncField calls mostly a1 and m1.
+//
+// This is a simplified version of ExerciseFuncConcrete, but accessing the
+// function values via a struct field.
+//
+//go:noinline
+func ExerciseFuncField(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+ ops := struct {
+ a AddFunc
+ m mult.MultFunc
+ }{}
+
+ val := 0
+ for i := 0; i < iter; i++ {
+ ops.a = a1
+ ops.m = m1
+ if i%10 == 0 {
+ ops.a = a2
+ ops.m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // function types are mutually exclusive, devirtualization can
+ // still select the correct callee for each.
+ //
+ // If they were not mutually exclusive (for example, two
+ // AddFunc calls), then we could not definitively select the
+ // correct callee.
+ val += int(ops.m(42, int64(ops.a(1, 2))))
+ }
+ return val
+}
+
+//go:noinline
+func AddClosure() AddFunc {
+ // Implicit closure by capturing the receiver.
+ var a Add
+ return a.Add
+}
+
+//go:noinline
+func SubClosure() AddFunc {
+ var s Sub
+ return s.Add
+}
+
+// ExerciseFuncClosure calls mostly a1 and m1.
+//
+// This is a simplified version of ExerciseFuncConcrete, but we need two
+// distinct call sites to test two different types of function values.
+//
+//go:noinline
+func ExerciseFuncClosure(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int {
+ val := 0
+ for i := 0; i < iter; i++ {
+ a := a1
+ m := m1
+ if i%10 == 0 {
+ a = a2
+ m = m2
+ }
+
+ // N.B. Profiles only distinguish calls on a per-line level,
+ // making the two calls ambiguous. However because the
+ // function types are mutually exclusive, devirtualization can
+ // still select the correct callee for each.
+ //
+ // If they were not mutually exclusive (for example, two
+ // AddFunc calls), then we could not definitively select the
+ // correct callee.
+ val += int(m(42, int64(a(1, 2))))
+ }
+ return val
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
new file mode 100644
index 0000000..2a27f1b
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof
Binary files differ
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
new file mode 100644
index 0000000..59b565d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go
@@ -0,0 +1,73 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file. If this file needs to be updated,
+// then a new devirt.pprof file should be generated:
+//
+// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/devirtualize/
+// $ go mod init example.com/pgo/devirtualize
+// $ go test -bench=. -cpuprofile ./devirt.pprof
+
+package devirt
+
+import (
+ "testing"
+
+ "example.com/pgo/devirtualize/mult.pkg"
+)
+
+func BenchmarkDevirtIface(b *testing.B) {
+ var (
+ a1 Add
+ a2 Sub
+ m1 mult.Mult
+ m2 mult.NegMult
+ )
+
+ ExerciseIface(b.N, a1, a2, m1, m2)
+}
+
+// Verify that devirtualization doesn't result in calls or side effects applying more than once.
+func TestDevirtIface(t *testing.T) {
+ var (
+ a1 Add
+ a2 Sub
+ m1 mult.Mult
+ m2 mult.NegMult
+ )
+
+ if v := ExerciseIface(10, a1, a2, m1, m2); v != 1176 {
+ t.Errorf("ExerciseIface(10) got %d want 1176", v)
+ }
+}
+
+func BenchmarkDevirtFuncConcrete(b *testing.B) {
+ ExerciseFuncConcrete(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn)
+}
+
+func TestDevirtFuncConcrete(t *testing.T) {
+ if v := ExerciseFuncConcrete(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 {
+ t.Errorf("ExerciseFuncConcrete(10) got %d want 1176", v)
+ }
+}
+
+func BenchmarkDevirtFuncField(b *testing.B) {
+ ExerciseFuncField(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn)
+}
+
+func TestDevirtFuncField(t *testing.T) {
+ if v := ExerciseFuncField(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 {
+ t.Errorf("ExerciseFuncField(10) got %d want 1176", v)
+ }
+}
+
+func BenchmarkDevirtFuncClosure(b *testing.B) {
+ ExerciseFuncClosure(b.N, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure())
+}
+
+func TestDevirtFuncClosure(t *testing.T) {
+ if v := ExerciseFuncClosure(10, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()); v != 1176 {
+ t.Errorf("ExerciseFuncClosure(10) got %d want 1176", v)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go
new file mode 100644
index 0000000..113a5e1
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go
@@ -0,0 +1,72 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file.
+// See the warning in ../devirt.go for more details.
+
+package mult
+
+var sink int
+
+type Multiplier interface {
+ Multiply(a, b int) int
+}
+
+type Mult struct{}
+
+func (Mult) Multiply(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a * b
+}
+
+type NegMult struct{}
+
+func (NegMult) Multiply(a, b int) int {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return -1 * a * b
+}
+
+// N.B. Different types than AddFunc to test intra-line disambiguation.
+type MultFunc func(int64, int64) int64
+
+func MultFn(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a * b
+}
+
+func NegMultFn(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return -1 * a * b
+}
+
+//go:noinline
+func MultClosure() MultFunc {
+ // Explicit closure to differentiate from AddClosure.
+ c := 1
+ return func(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return a * b * int64(c)
+ }
+}
+
+//go:noinline
+func NegMultClosure() MultFunc {
+ c := 1
+ return func(a, b int64) int64 {
+ for i := 0; i < 1000; i++ {
+ sink++
+ }
+ return -1 * a * b * int64(c)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go
new file mode 100644
index 0000000..9a462fd
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go
@@ -0,0 +1,90 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file. If this file needs to be updated,
+// then a new inline_hot.pprof file should be generated:
+//
+// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/inline/
+// $ go test -bench=. -cpuprofile ./inline_hot.pprof
+package main
+
+import (
+ "time"
+)
+
+type BS struct {
+ length uint
+ s []uint64
+}
+
+const wSize = uint(64)
+const lWSize = uint(6)
+
+func D(i uint) int {
+ return int((i + (wSize - 1)) >> lWSize)
+}
+
+func N(length uint) (bs *BS) {
+ bs = &BS{
+ length,
+ make([]uint64, D(length)),
+ }
+
+ return bs
+}
+
+func (b *BS) S(i uint) *BS {
+ b.s[i>>lWSize] |= 1 << (i & (wSize - 1))
+ return b
+}
+
+var jn = [...]byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+func T(v uint64) uint {
+ return uint(jn[((v&-v)*0x03f79d71b4ca8b09)>>58])
+}
+
+func (b *BS) NS(i uint) (uint, bool) {
+ x := int(i >> lWSize)
+ if x >= len(b.s) {
+ return 0, false
+ }
+ w := b.s[x]
+ w = w >> (i & (wSize - 1))
+ if w != 0 {
+ return i + T(w), true
+ }
+ x = x + 1
+ for x < len(b.s) {
+ if b.s[x] != 0 {
+ return uint(x)*wSize + T(b.s[x]), true
+ }
+ x = x + 1
+
+ }
+ return 0, false
+}
+
+func A() {
+ s := N(100000)
+ for i := 0; i < 1000; i += 30 {
+ s.S(uint(i))
+ }
+ for j := 0; j < 1000; j++ {
+ c := uint(0)
+ for i, e := s.NS(0); e; i, e = s.NS(i + 1) {
+ c++
+ }
+ }
+}
+
+func main() {
+ time.Sleep(time.Second)
+ A()
+}
diff --git a/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof
new file mode 100644
index 0000000..1b55ed1
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof
Binary files differ
diff --git a/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go
new file mode 100644
index 0000000..2725c57
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// WARNING: Please avoid updating this file. If this file needs to be updated,
+// then a new inline_hot.pprof file should be generated:
+//
+// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/inline/
+// $ go test -bench=. -cpuprofile ./inline_hot.pprof
+package main
+
+import "testing"
+
+func BenchmarkA(b *testing.B) {
+ benchmarkB(b)
+}
+func benchmarkB(b *testing.B) {
+
+ for i := 0; true; {
+ A()
+ i = i + 1
+ if i >= b.N {
+ break
+ }
+ A()
+ i = i + 1
+ if i >= b.N {
+ break
+ }
+ A()
+ i = i + 1
+ if i >= b.N {
+ break
+ }
+ A()
+ i = i + 1
+ if i >= b.N {
+ break
+ }
+ A()
+ i = i + 1
+ if i >= b.N {
+ break
+ }
+ A()
+ i = i + 1
+ if i >= b.N {
+ break
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/phi_test.go b/src/cmd/compile/internal/test/testdata/phi_test.go
new file mode 100644
index 0000000..c8a73ff
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/phi_test.go
@@ -0,0 +1,99 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Test to make sure spills of cast-shortened values
+// don't end up spilling the pre-shortened size instead
+// of the post-shortened size.
+
+import (
+ "runtime"
+ "testing"
+)
+
+var data1 [26]int32
+var data2 [26]int64
+
+func init() {
+ for i := 0; i < 26; i++ {
+ // If we spill all 8 bytes of this datum, the 1 in the high-order 4 bytes
+ // will overwrite some other variable in the stack frame.
+ data2[i] = 0x100000000
+ }
+}
+
+func foo() int32 {
+ var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int32
+ if always {
+ a = data1[0]
+ b = data1[1]
+ c = data1[2]
+ d = data1[3]
+ e = data1[4]
+ f = data1[5]
+ g = data1[6]
+ h = data1[7]
+ i = data1[8]
+ j = data1[9]
+ k = data1[10]
+ l = data1[11]
+ m = data1[12]
+ n = data1[13]
+ o = data1[14]
+ p = data1[15]
+ q = data1[16]
+ r = data1[17]
+ s = data1[18]
+ t = data1[19]
+ u = data1[20]
+ v = data1[21]
+ w = data1[22]
+ x = data1[23]
+ y = data1[24]
+ z = data1[25]
+ } else {
+ a = int32(data2[0])
+ b = int32(data2[1])
+ c = int32(data2[2])
+ d = int32(data2[3])
+ e = int32(data2[4])
+ f = int32(data2[5])
+ g = int32(data2[6])
+ h = int32(data2[7])
+ i = int32(data2[8])
+ j = int32(data2[9])
+ k = int32(data2[10])
+ l = int32(data2[11])
+ m = int32(data2[12])
+ n = int32(data2[13])
+ o = int32(data2[14])
+ p = int32(data2[15])
+ q = int32(data2[16])
+ r = int32(data2[17])
+ s = int32(data2[18])
+ t = int32(data2[19])
+ u = int32(data2[20])
+ v = int32(data2[21])
+ w = int32(data2[22])
+ x = int32(data2[23])
+ y = int32(data2[24])
+ z = int32(data2[25])
+ }
+ // Lots of phis of the form phi(int32,int64) of type int32 happen here.
+ // Some will be stack phis. For those stack phis, make sure the spill
+ // of the second argument uses the phi's width (4 bytes), not its width
+ // (8 bytes). Otherwise, a random stack slot gets clobbered.
+
+ runtime.Gosched()
+ return a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r + s + t + u + v + w + x + y + z
+}
+
+func TestPhi(t *testing.T) {
+ want := int32(0)
+ got := foo()
+ if got != want {
+ t.Fatalf("want %d, got %d\n", want, got)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/ptrsort.go b/src/cmd/compile/internal/test/testdata/ptrsort.go
new file mode 100644
index 0000000..d26ba58
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/ptrsort.go
@@ -0,0 +1,30 @@
+package main
+
+// Test generic sort function with two different pointer types in different packages,
+// make sure only one instantiation is created.
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/test/testdata/mysort"
+)
+
+type MyString struct {
+ string
+}
+
+func (a *MyString) Less(b *MyString) bool {
+ return a.string < b.string
+}
+
+func main() {
+ mysort.F()
+
+ sl1 := []*mysort.MyInt{{7}, {1}, {4}, {6}}
+ mysort.Sort(sl1)
+ fmt.Printf("%v %v %v %v\n", sl1[0], sl1[1], sl1[2], sl1[3])
+
+ sl2 := []*MyString{{"when"}, {"in"}, {"the"}, {"course"}, {"of"}}
+ mysort.Sort(sl2)
+ fmt.Printf("%v %v %v %v %v\n", sl2[0], sl2[1], sl2[2], sl2[3], sl2[4])
+}
diff --git a/src/cmd/compile/internal/test/testdata/ptrsort.out b/src/cmd/compile/internal/test/testdata/ptrsort.out
new file mode 100644
index 0000000..41f1621
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/ptrsort.out
@@ -0,0 +1,3 @@
+&{3} &{4} &{7} &{8}
+&{1} &{4} &{6} &{7}
+&{course} &{in} &{of} &{the} &{when}
diff --git a/src/cmd/compile/internal/test/testdata/regalloc_test.go b/src/cmd/compile/internal/test/testdata/regalloc_test.go
new file mode 100644
index 0000000..577f8e7
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/regalloc_test.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests phi implementation
+
+package main
+
+import "testing"
+
+func phiOverwrite_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+func phiOverwrite(t *testing.T) {
+ want := 5
+ got := phiOverwrite_ssa()
+ if got != want {
+ t.Errorf("phiOverwrite_ssa()= %d, got %d", want, got)
+ }
+}
+
+func phiOverwriteBig_ssa() int {
+ var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int
+ a = 1
+ for idx := 0; idx < 26; idx++ {
+ a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a
+ }
+ return a*1 + b*2 + c*3 + d*4 + e*5 + f*6 + g*7 + h*8 + i*9 + j*10 + k*11 + l*12 + m*13 + n*14 + o*15 + p*16 + q*17 + r*18 + s*19 + t*20 + u*21 + v*22 + w*23 + x*24 + y*25 + z*26
+}
+
+func phiOverwriteBig(t *testing.T) {
+ want := 1
+ got := phiOverwriteBig_ssa()
+ if got != want {
+ t.Errorf("phiOverwriteBig_ssa()= %d, got %d", want, got)
+ }
+}
+
+func TestRegalloc(t *testing.T) {
+ phiOverwrite(t)
+ phiOverwriteBig(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go b/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go
new file mode 100644
index 0000000..3db0b8a
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var (
+ i0 uint8
+ b0 byte
+
+ i1 *uint8
+ b1 *byte
+
+ i2 **uint8
+ b2 **byte
+
+ i3 ***uint8
+ b3 ***byte
+
+ i4 ****uint8
+ b4 ****byte
+
+ i5 *****uint8
+ b5 *****byte
+
+ i6 ******uint8
+ b6 ******byte
+
+ i7 *******uint8
+ b7 *******byte
+
+ i8 ********uint8
+ b8 ********byte
+)
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go b/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go
new file mode 100644
index 0000000..817f4a6
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func A(arg interface{}) {
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+}
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go b/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go
new file mode 100644
index 0000000..7b5de2c
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func A(x interface {
+ X() int
+}) int {
+ return x.X()
+}
+
+func B(x interface {
+ X() int
+}) int {
+ return x.X()
+}
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
new file mode 100644
index 0000000..b87daed
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
@@ -0,0 +1,70 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue38068
+
+// A type with a couple of inlinable, non-pointer-receiver methods
+// that have params and local variables.
+type A struct {
+ s string
+ next *A
+ prev *A
+}
+
+// Inlinable, value-received method with locals and parms.
+func (a A) double(x string, y int) string {
+ if y == 191 {
+ a.s = ""
+ }
+ q := a.s + "a"
+ r := a.s + "b"
+ return q + r
+}
+
+// Inlinable, value-received method with locals and parms.
+func (a A) triple(x string, y int) string {
+ q := a.s
+ if y == 998877 {
+ a.s = x
+ }
+ r := a.s + a.s
+ return q + r
+}
+
+type methods struct {
+ m1 func(a *A, x string, y int) string
+ m2 func(a *A, x string, y int) string
+}
+
+// Now a function that makes references to the methods via pointers,
+// which should trigger the wrapper generation.
+func P(a *A, ms *methods) {
+ if a != nil {
+ defer func() { println("done") }()
+ }
+ println(ms.m1(a, "a", 2))
+ println(ms.m2(a, "b", 3))
+}
+
+func G(x *A, n int) {
+ if n <= 0 {
+ println(n)
+ return
+ }
+ // Address-taken local of type A, which will insure that the
+ // compiler's writeType() routine will create a method wrapper.
+ var a, b A
+ a.next = x
+ a.prev = &b
+ x = &a
+ G(x, n-2)
+}
+
+var M methods
+
+func F() {
+ M.m1 = (*A).double
+ M.m2 = (*A).triple
+ G(nil, 100)
+}
diff --git a/src/cmd/compile/internal/test/testdata/short_test.go b/src/cmd/compile/internal/test/testdata/short_test.go
new file mode 100644
index 0000000..7a743b5
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/short_test.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests short circuiting.
+
+package main
+
+import "testing"
+
+func and_ssa(arg1, arg2 bool) bool {
+ return arg1 && rightCall(arg2)
+}
+
+func or_ssa(arg1, arg2 bool) bool {
+ return arg1 || rightCall(arg2)
+}
+
+var rightCalled bool
+
+//go:noinline
+func rightCall(v bool) bool {
+ rightCalled = true
+ return v
+ panic("unreached")
+}
+
+func testAnd(t *testing.T, arg1, arg2, wantRes bool) {
+ testShortCircuit(t, "AND", arg1, arg2, and_ssa, arg1, wantRes)
+}
+func testOr(t *testing.T, arg1, arg2, wantRes bool) {
+ testShortCircuit(t, "OR", arg1, arg2, or_ssa, !arg1, wantRes)
+}
+
+func testShortCircuit(t *testing.T, opName string, arg1, arg2 bool, fn func(bool, bool) bool, wantRightCall, wantRes bool) {
+ rightCalled = false
+ got := fn(arg1, arg2)
+ if rightCalled != wantRightCall {
+ t.Errorf("failed for %t %s %t; rightCalled=%t want=%t", arg1, opName, arg2, rightCalled, wantRightCall)
+ }
+ if wantRes != got {
+ t.Errorf("failed for %t %s %t; res=%t want=%t", arg1, opName, arg2, got, wantRes)
+ }
+}
+
+// TestShortCircuit tests OANDAND and OOROR expressions and short circuiting.
+func TestShortCircuit(t *testing.T) {
+ testAnd(t, false, false, false)
+ testAnd(t, false, true, false)
+ testAnd(t, true, false, false)
+ testAnd(t, true, true, true)
+
+ testOr(t, false, false, false)
+ testOr(t, false, true, true)
+ testOr(t, true, false, true)
+ testOr(t, true, true, true)
+}
diff --git a/src/cmd/compile/internal/test/testdata/slice_test.go b/src/cmd/compile/internal/test/testdata/slice_test.go
new file mode 100644
index 0000000..c134578
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/slice_test.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that t.s = t.s[0:x] doesn't write
+// either the slice pointer or the capacity.
+// See issue #14855.
+
+package main
+
+import "testing"
+
+const N = 1000000
+
+type X struct {
+ s []int
+}
+
+func TestSlice(t *testing.T) {
+ done := make(chan struct{})
+ a := make([]int, N+10)
+
+ x := &X{a}
+
+ go func() {
+ for i := 0; i < N; i++ {
+ x.s = x.s[1:9]
+ }
+ done <- struct{}{}
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x.s = x.s[0:8] // should only write len
+ }
+ done <- struct{}{}
+ }()
+ <-done
+ <-done
+
+ if cap(x.s) != cap(a)-N {
+ t.Errorf("wanted cap=%d, got %d\n", cap(a)-N, cap(x.s))
+ }
+ if &x.s[0] != &a[N] {
+ t.Errorf("wanted ptr=%p, got %p\n", &a[N], &x.s[0])
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/sqrtConst_test.go b/src/cmd/compile/internal/test/testdata/sqrtConst_test.go
new file mode 100644
index 0000000..5b7a149
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/sqrtConst_test.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "math"
+ "testing"
+)
+
+var tests = [...]struct {
+ name string
+ in float64 // used for error messages, not an input
+ got float64
+ want float64
+}{
+ {"sqrt0", 0, math.Sqrt(0), 0},
+ {"sqrt1", 1, math.Sqrt(1), 1},
+ {"sqrt2", 2, math.Sqrt(2), math.Sqrt2},
+ {"sqrt4", 4, math.Sqrt(4), 2},
+ {"sqrt100", 100, math.Sqrt(100), 10},
+ {"sqrt101", 101, math.Sqrt(101), 10.04987562112089},
+}
+
+var nanTests = [...]struct {
+ name string
+ in float64 // used for error messages, not an input
+ got float64
+}{
+ {"sqrtNaN", math.NaN(), math.Sqrt(math.NaN())},
+ {"sqrtNegative", -1, math.Sqrt(-1)},
+ {"sqrtNegInf", math.Inf(-1), math.Sqrt(math.Inf(-1))},
+}
+
+func TestSqrtConst(t *testing.T) {
+ for _, test := range tests {
+ if test.got != test.want {
+ t.Errorf("%s: math.Sqrt(%f): got %f, want %f\n", test.name, test.in, test.got, test.want)
+ }
+ }
+ for _, test := range nanTests {
+ if math.IsNaN(test.got) != true {
+ t.Errorf("%s: math.Sqrt(%f): got %f, want NaN\n", test.name, test.in, test.got)
+ }
+ }
+ if got := math.Sqrt(math.Inf(1)); !math.IsInf(got, 1) {
+ t.Errorf("math.Sqrt(+Inf), got %f, want +Inf\n", got)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/string_test.go b/src/cmd/compile/internal/test/testdata/string_test.go
new file mode 100644
index 0000000..5d086f0
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/string_test.go
@@ -0,0 +1,207 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// string_ssa.go tests string operations.
+package main
+
+import "testing"
+
+//go:noinline
+func testStringSlice1_ssa(a string, i, j int) string {
+ return a[i:]
+}
+
+//go:noinline
+func testStringSlice2_ssa(a string, i, j int) string {
+ return a[:j]
+}
+
+//go:noinline
+func testStringSlice12_ssa(a string, i, j int) string {
+ return a[i:j]
+}
+
+func testStringSlice(t *testing.T) {
+ tests := [...]struct {
+ fn func(string, int, int) string
+ s string
+ low, high int
+ want string
+ }{
+ // -1 means the value is not used.
+ {testStringSlice1_ssa, "foobar", 0, -1, "foobar"},
+ {testStringSlice1_ssa, "foobar", 3, -1, "bar"},
+ {testStringSlice1_ssa, "foobar", 6, -1, ""},
+ {testStringSlice2_ssa, "foobar", -1, 0, ""},
+ {testStringSlice2_ssa, "foobar", -1, 3, "foo"},
+ {testStringSlice2_ssa, "foobar", -1, 6, "foobar"},
+ {testStringSlice12_ssa, "foobar", 0, 6, "foobar"},
+ {testStringSlice12_ssa, "foobar", 0, 0, ""},
+ {testStringSlice12_ssa, "foobar", 6, 6, ""},
+ {testStringSlice12_ssa, "foobar", 1, 5, "ooba"},
+ {testStringSlice12_ssa, "foobar", 3, 3, ""},
+ {testStringSlice12_ssa, "", 0, 0, ""},
+ }
+
+ for i, test := range tests {
+ if got := test.fn(test.s, test.low, test.high); test.want != got {
+ t.Errorf("#%d %s[%d,%d] = %s, want %s", i, test.s, test.low, test.high, got, test.want)
+ }
+ }
+}
+
+type prefix struct {
+ prefix string
+}
+
+func (p *prefix) slice_ssa() {
+ p.prefix = p.prefix[:3]
+}
+
+//go:noinline
+func testStructSlice(t *testing.T) {
+ p := &prefix{"prefix"}
+ p.slice_ssa()
+ if "pre" != p.prefix {
+ t.Errorf("wrong field slice: wanted %s got %s", "pre", p.prefix)
+ }
+}
+
+func testStringSlicePanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %s and expected to panic, but didn't", testStringSlice12_ssa(str, 3, 9))
+}
+
+const _Accuracy_name = "BelowExactAbove"
+
+var _Accuracy_index = [...]uint8{0, 5, 10, 15}
+
+//go:noinline
+func testSmallIndexType_ssa(i int) string {
+ return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]]
+}
+
+func testSmallIndexType(t *testing.T) {
+ tests := []struct {
+ i int
+ want string
+ }{
+ {0, "Below"},
+ {1, "Exact"},
+ {2, "Above"},
+ }
+
+ for i, test := range tests {
+ if got := testSmallIndexType_ssa(test.i); got != test.want {
+ t.Errorf("#%d got %s wanted %s", i, got, test.want)
+ }
+ }
+}
+
+//go:noinline
+func testInt64Index_ssa(s string, i int64) byte {
+ return s[i]
+}
+
+//go:noinline
+func testInt64Slice_ssa(s string, i, j int64) string {
+ return s[i:j]
+}
+
+func testInt64Index(t *testing.T) {
+ tests := []struct {
+ i int64
+ j int64
+ b byte
+ s string
+ }{
+ {0, 5, 'B', "Below"},
+ {5, 10, 'E', "Exact"},
+ {10, 15, 'A', "Above"},
+ }
+
+ str := "BelowExactAbove"
+ for i, test := range tests {
+ if got := testInt64Index_ssa(str, test.i); got != test.b {
+ t.Errorf("#%d got %d wanted %d", i, got, test.b)
+ }
+ if got := testInt64Slice_ssa(str, test.i, test.j); got != test.s {
+ t.Errorf("#%d got %s wanted %s", i, got, test.s)
+ }
+ }
+}
+
+func testInt64IndexPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %d and expected to panic, but didn't", testInt64Index_ssa(str, 1<<32+1))
+}
+
+func testInt64SlicePanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %s and expected to panic, but didn't", testInt64Slice_ssa(str, 1<<32, 1<<32+1))
+}
+
+//go:noinline
+func testStringElem_ssa(s string, i int) byte {
+ return s[i]
+}
+
+func testStringElem(t *testing.T) {
+ tests := []struct {
+ s string
+ i int
+ n byte
+ }{
+ {"foobar", 3, 98},
+ {"foobar", 0, 102},
+ {"foobar", 5, 114},
+ }
+ for _, test := range tests {
+ if got := testStringElem_ssa(test.s, test.i); got != test.n {
+ t.Errorf("testStringElem \"%s\"[%d] = %d, wanted %d", test.s, test.i, got, test.n)
+ }
+ }
+}
+
+//go:noinline
+func testStringElemConst_ssa(i int) byte {
+ s := "foobar"
+ return s[i]
+}
+
+func testStringElemConst(t *testing.T) {
+ if got := testStringElemConst_ssa(3); got != 98 {
+ t.Errorf("testStringElemConst= %d, wanted 98", got)
+ }
+}
+
+func TestString(t *testing.T) {
+ testStringSlice(t)
+ testStringSlicePanic(t)
+ testStructSlice(t)
+ testSmallIndexType(t)
+ testStringElem(t)
+ testStringElemConst(t)
+ testInt64Index(t)
+ testInt64IndexPanic(t)
+ testInt64SlicePanic(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/unsafe_test.go b/src/cmd/compile/internal/test/testdata/unsafe_test.go
new file mode 100644
index 0000000..37599d3
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/unsafe_test.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+// global pointer slot
+var a *[8]uint
+
+// unfoldable true
+var always = true
+
+// Test to make sure that a pointer value which is alive
+// across a call is retained, even when there are matching
+// conversions to/from uintptr around the call.
+// We arrange things very carefully to have to/from
+// conversions on either side of the call which cannot be
+// combined with any other conversions.
+func f_ssa() *[8]uint {
+ // Make x a uintptr pointing to where a points.
+ var x uintptr
+ if always {
+ x = uintptr(unsafe.Pointer(a))
+ } else {
+ x = 0
+ }
+ // Clobber the global pointer. The only live ref
+ // to the allocated object is now x.
+ a = nil
+
+ // Convert to pointer so it should hold
+ // the object live across GC call.
+ p := unsafe.Pointer(x)
+
+ // Call gc.
+ runtime.GC()
+
+ // Convert back to uintptr.
+ y := uintptr(p)
+
+ // Mess with y so that the subsequent cast
+ // to unsafe.Pointer can't be combined with the
+ // uintptr cast above.
+ var z uintptr
+ if always {
+ z = y
+ } else {
+ z = 0
+ }
+ return (*[8]uint)(unsafe.Pointer(z))
+}
+
+// g_ssa is the same as f_ssa, but with a bit of pointer
+// arithmetic for added insanity.
+func g_ssa() *[7]uint {
+ // Make x a uintptr pointing to where a points.
+ var x uintptr
+ if always {
+ x = uintptr(unsafe.Pointer(a))
+ } else {
+ x = 0
+ }
+ // Clobber the global pointer. The only live ref
+ // to the allocated object is now x.
+ a = nil
+
+ // Offset x by one int.
+ x += unsafe.Sizeof(int(0))
+
+ // Convert to pointer so it should hold
+ // the object live across GC call.
+ p := unsafe.Pointer(x)
+
+ // Call gc.
+ runtime.GC()
+
+ // Convert back to uintptr.
+ y := uintptr(p)
+
+ // Mess with y so that the subsequent cast
+ // to unsafe.Pointer can't be combined with the
+ // uintptr cast above.
+ var z uintptr
+ if always {
+ z = y
+ } else {
+ z = 0
+ }
+ return (*[7]uint)(unsafe.Pointer(z))
+}
+
+func testf(t *testing.T) {
+ a = new([8]uint)
+ for i := 0; i < 8; i++ {
+ a[i] = 0xabcd
+ }
+ c := f_ssa()
+ for i := 0; i < 8; i++ {
+ if c[i] != 0xabcd {
+ t.Fatalf("%d:%x\n", i, c[i])
+ }
+ }
+}
+
+func testg(t *testing.T) {
+ a = new([8]uint)
+ for i := 0; i < 8; i++ {
+ a[i] = 0xabcd
+ }
+ c := g_ssa()
+ for i := 0; i < 7; i++ {
+ if c[i] != 0xabcd {
+ t.Fatalf("%d:%x\n", i, c[i])
+ }
+ }
+}
+
+func alias_ssa(ui64 *uint64, ui32 *uint32) uint32 {
+ *ui32 = 0xffffffff
+ *ui64 = 0 // store
+ ret := *ui32 // load from same address, should be zero
+ *ui64 = 0xffffffffffffffff // store
+ return ret
+}
+func testdse(t *testing.T) {
+ x := int64(-1)
+ // construct two pointers that alias one another
+ ui64 := (*uint64)(unsafe.Pointer(&x))
+ ui32 := (*uint32)(unsafe.Pointer(&x))
+ if want, got := uint32(0), alias_ssa(ui64, ui32); got != want {
+ t.Fatalf("alias_ssa: wanted %d, got %d\n", want, got)
+ }
+}
+
+func TestUnsafe(t *testing.T) {
+ testf(t)
+ testg(t)
+ testdse(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/zero_test.go b/src/cmd/compile/internal/test/testdata/zero_test.go
new file mode 100644
index 0000000..64fa25e
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/zero_test.go
@@ -0,0 +1,711 @@
+// Code generated by gen/zeroGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type Z1 struct {
+ pre [8]byte
+ mid [1]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1_ssa(x *[1]byte) {
+ *x = [1]byte{}
+}
+func testZero1(t *testing.T) {
+ a := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1_ssa(&a.mid)
+ want := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z2 struct {
+ pre [8]byte
+ mid [2]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero2_ssa(x *[2]byte) {
+ *x = [2]byte{}
+}
+func testZero2(t *testing.T) {
+ a := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero2_ssa(&a.mid)
+ want := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero2 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z3 struct {
+ pre [8]byte
+ mid [3]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero3_ssa(x *[3]byte) {
+ *x = [3]byte{}
+}
+func testZero3(t *testing.T) {
+ a := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero3_ssa(&a.mid)
+ want := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero3 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z4 struct {
+ pre [8]byte
+ mid [4]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero4_ssa(x *[4]byte) {
+ *x = [4]byte{}
+}
+func testZero4(t *testing.T) {
+ a := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero4_ssa(&a.mid)
+ want := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero4 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z5 struct {
+ pre [8]byte
+ mid [5]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero5_ssa(x *[5]byte) {
+ *x = [5]byte{}
+}
+func testZero5(t *testing.T) {
+ a := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero5_ssa(&a.mid)
+ want := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero5 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z6 struct {
+ pre [8]byte
+ mid [6]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero6_ssa(x *[6]byte) {
+ *x = [6]byte{}
+}
+func testZero6(t *testing.T) {
+ a := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero6_ssa(&a.mid)
+ want := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero6 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z7 struct {
+ pre [8]byte
+ mid [7]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero7_ssa(x *[7]byte) {
+ *x = [7]byte{}
+}
+func testZero7(t *testing.T) {
+ a := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero7_ssa(&a.mid)
+ want := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero7 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z8 struct {
+ pre [8]byte
+ mid [8]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero8_ssa(x *[8]byte) {
+ *x = [8]byte{}
+}
+func testZero8(t *testing.T) {
+ a := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8_ssa(&a.mid)
+ want := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero8 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z9 struct {
+ pre [8]byte
+ mid [9]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero9_ssa(x *[9]byte) {
+ *x = [9]byte{}
+}
+func testZero9(t *testing.T) {
+ a := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero9_ssa(&a.mid)
+ want := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero9 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z10 struct {
+ pre [8]byte
+ mid [10]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero10_ssa(x *[10]byte) {
+ *x = [10]byte{}
+}
+func testZero10(t *testing.T) {
+ a := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero10_ssa(&a.mid)
+ want := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero10 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z15 struct {
+ pre [8]byte
+ mid [15]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero15_ssa(x *[15]byte) {
+ *x = [15]byte{}
+}
+func testZero15(t *testing.T) {
+ a := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero15_ssa(&a.mid)
+ want := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero15 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z16 struct {
+ pre [8]byte
+ mid [16]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero16_ssa(x *[16]byte) {
+ *x = [16]byte{}
+}
+func testZero16(t *testing.T) {
+ a := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16_ssa(&a.mid)
+ want := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero16 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z17 struct {
+ pre [8]byte
+ mid [17]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero17_ssa(x *[17]byte) {
+ *x = [17]byte{}
+}
+func testZero17(t *testing.T) {
+ a := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero17_ssa(&a.mid)
+ want := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero17 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z23 struct {
+ pre [8]byte
+ mid [23]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero23_ssa(x *[23]byte) {
+ *x = [23]byte{}
+}
+func testZero23(t *testing.T) {
+ a := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero23_ssa(&a.mid)
+ want := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero23 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z24 struct {
+ pre [8]byte
+ mid [24]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero24_ssa(x *[24]byte) {
+ *x = [24]byte{}
+}
+func testZero24(t *testing.T) {
+ a := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24_ssa(&a.mid)
+ want := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero24 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z25 struct {
+ pre [8]byte
+ mid [25]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero25_ssa(x *[25]byte) {
+ *x = [25]byte{}
+}
+func testZero25(t *testing.T) {
+ a := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero25_ssa(&a.mid)
+ want := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero25 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z31 struct {
+ pre [8]byte
+ mid [31]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero31_ssa(x *[31]byte) {
+ *x = [31]byte{}
+}
+func testZero31(t *testing.T) {
+ a := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero31_ssa(&a.mid)
+ want := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero31 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z32 struct {
+ pre [8]byte
+ mid [32]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero32_ssa(x *[32]byte) {
+ *x = [32]byte{}
+}
+func testZero32(t *testing.T) {
+ a := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32_ssa(&a.mid)
+ want := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero32 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z33 struct {
+ pre [8]byte
+ mid [33]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero33_ssa(x *[33]byte) {
+ *x = [33]byte{}
+}
+func testZero33(t *testing.T) {
+ a := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero33_ssa(&a.mid)
+ want := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero33 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z63 struct {
+ pre [8]byte
+ mid [63]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero63_ssa(x *[63]byte) {
+ *x = [63]byte{}
+}
+func testZero63(t *testing.T) {
+ a := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero63_ssa(&a.mid)
+ want := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero63 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z64 struct {
+ pre [8]byte
+ mid [64]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero64_ssa(x *[64]byte) {
+ *x = [64]byte{}
+}
+func testZero64(t *testing.T) {
+ a := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64_ssa(&a.mid)
+ want := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero64 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z65 struct {
+ pre [8]byte
+ mid [65]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero65_ssa(x *[65]byte) {
+ *x = [65]byte{}
+}
+func testZero65(t *testing.T) {
+ a := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero65_ssa(&a.mid)
+ want := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero65 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1023 struct {
+ pre [8]byte
+ mid [1023]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1023_ssa(x *[1023]byte) {
+ *x = [1023]byte{}
+}
+func testZero1023(t *testing.T) {
+ a := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1023_ssa(&a.mid)
+ want := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1023 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1024 struct {
+ pre [8]byte
+ mid [1024]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1024_ssa(x *[1024]byte) {
+ *x = [1024]byte{}
+}
+func testZero1024(t *testing.T) {
+ a := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1024_ssa(&a.mid)
+ want := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1024 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1025 struct {
+ pre [8]byte
+ mid [1025]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1025_ssa(x *[1025]byte) {
+ *x = [1025]byte{}
+}
+func testZero1025(t *testing.T) {
+ a := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1025_ssa(&a.mid)
+ want := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1025 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z8u1 struct {
+ b bool
+ val [8]byte
+}
+type Z8u2 struct {
+ i uint16
+ val [8]byte
+}
+
+//go:noinline
+func zero8u1_ssa(t *Z8u1) {
+ t.val = [8]byte{}
+}
+
+//go:noinline
+func zero8u2_ssa(t *Z8u2) {
+ t.val = [8]byte{}
+}
+func testZero8u(t *testing.T) {
+ a := Z8u1{false, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8u1_ssa(&a)
+ want := Z8u1{false, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero8u2 got=%v, want %v\n", a, want)
+ }
+ b := Z8u2{15, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8u2_ssa(&b)
+ wantb := Z8u2{15, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero8u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z16u1 struct {
+ b bool
+ val [16]byte
+}
+type Z16u2 struct {
+ i uint16
+ val [16]byte
+}
+
+//go:noinline
+func zero16u1_ssa(t *Z16u1) {
+ t.val = [16]byte{}
+}
+
+//go:noinline
+func zero16u2_ssa(t *Z16u2) {
+ t.val = [16]byte{}
+}
+func testZero16u(t *testing.T) {
+ a := Z16u1{false, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16u1_ssa(&a)
+ want := Z16u1{false, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero16u2 got=%v, want %v\n", a, want)
+ }
+ b := Z16u2{15, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16u2_ssa(&b)
+ wantb := Z16u2{15, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero16u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z24u1 struct {
+ b bool
+ val [24]byte
+}
+type Z24u2 struct {
+ i uint16
+ val [24]byte
+}
+
+//go:noinline
+func zero24u1_ssa(t *Z24u1) {
+ t.val = [24]byte{}
+}
+
+//go:noinline
+func zero24u2_ssa(t *Z24u2) {
+ t.val = [24]byte{}
+}
+func testZero24u(t *testing.T) {
+ a := Z24u1{false, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24u1_ssa(&a)
+ want := Z24u1{false, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero24u2 got=%v, want %v\n", a, want)
+ }
+ b := Z24u2{15, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24u2_ssa(&b)
+ wantb := Z24u2{15, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero24u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z32u1 struct {
+ b bool
+ val [32]byte
+}
+type Z32u2 struct {
+ i uint16
+ val [32]byte
+}
+
+//go:noinline
+func zero32u1_ssa(t *Z32u1) {
+ t.val = [32]byte{}
+}
+
+//go:noinline
+func zero32u2_ssa(t *Z32u2) {
+ t.val = [32]byte{}
+}
+func testZero32u(t *testing.T) {
+ a := Z32u1{false, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32u1_ssa(&a)
+ want := Z32u1{false, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero32u2 got=%v, want %v\n", a, want)
+ }
+ b := Z32u2{15, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32u2_ssa(&b)
+ wantb := Z32u2{15, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero32u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z64u1 struct {
+ b bool
+ val [64]byte
+}
+type Z64u2 struct {
+ i uint16
+ val [64]byte
+}
+
+//go:noinline
+func zero64u1_ssa(t *Z64u1) {
+ t.val = [64]byte{}
+}
+
+//go:noinline
+func zero64u2_ssa(t *Z64u2) {
+ t.val = [64]byte{}
+}
+func testZero64u(t *testing.T) {
+ a := Z64u1{false, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64u1_ssa(&a)
+ want := Z64u1{false, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero64u2 got=%v, want %v\n", a, want)
+ }
+ b := Z64u2{15, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64u2_ssa(&b)
+ wantb := Z64u2{15, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero64u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z256u1 struct {
+ b bool
+ val [256]byte
+}
+type Z256u2 struct {
+ i uint16
+ val [256]byte
+}
+
+//go:noinline
+func zero256u1_ssa(t *Z256u1) {
+ t.val = [256]byte{}
+}
+
+//go:noinline
+func zero256u2_ssa(t *Z256u2) {
+ t.val = [256]byte{}
+}
+func testZero256u(t *testing.T) {
+ a := Z256u1{false, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero256u1_ssa(&a)
+ want := Z256u1{false, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero256u2 got=%v, want %v\n", a, want)
+ }
+ b := Z256u2{15, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero256u2_ssa(&b)
+ wantb := Z256u2{15, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero256u2 got=%v, want %v\n", b, wantb)
+ }
+}
+func TestZero(t *testing.T) {
+ testZero1(t)
+ testZero2(t)
+ testZero3(t)
+ testZero4(t)
+ testZero5(t)
+ testZero6(t)
+ testZero7(t)
+ testZero8(t)
+ testZero9(t)
+ testZero10(t)
+ testZero15(t)
+ testZero16(t)
+ testZero17(t)
+ testZero23(t)
+ testZero24(t)
+ testZero25(t)
+ testZero31(t)
+ testZero32(t)
+ testZero33(t)
+ testZero63(t)
+ testZero64(t)
+ testZero65(t)
+ testZero1023(t)
+ testZero1024(t)
+ testZero1025(t)
+ testZero8u(t)
+ testZero16u(t)
+ testZero24u(t)
+ testZero32u(t)
+ testZero64u(t)
+ testZero256u(t)
+}
diff --git a/src/cmd/compile/internal/test/truncconst_test.go b/src/cmd/compile/internal/test/truncconst_test.go
new file mode 100644
index 0000000..7705042
--- /dev/null
+++ b/src/cmd/compile/internal/test/truncconst_test.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+var f52want float64 = 1.0 / (1 << 52)
+var f53want float64 = 1.0 / (1 << 53)
+
+func TestTruncFlt(t *testing.T) {
+ const f52 = 1 + 1.0/(1<<52)
+ const f53 = 1 + 1.0/(1<<53)
+
+ if got := f52 - 1; got != f52want {
+ t.Errorf("f52-1 = %g, want %g", got, f52want)
+ }
+ if got := float64(f52) - 1; got != f52want {
+ t.Errorf("float64(f52)-1 = %g, want %g", got, f52want)
+ }
+ if got := f53 - 1; got != f53want {
+ t.Errorf("f53-1 = %g, want %g", got, f53want)
+ }
+ if got := float64(f53) - 1; got != 0 {
+ t.Errorf("float64(f53)-1 = %g, want 0", got)
+ }
+}
+
+func TestTruncCmplx(t *testing.T) {
+ const r52 = complex(1+1.0/(1<<52), 0)
+ const r53 = complex(1+1.0/(1<<53), 0)
+
+ if got := real(r52 - 1); got != f52want {
+ t.Errorf("real(r52-1) = %g, want %g", got, f52want)
+ }
+ if got := real(complex128(r52) - 1); got != f52want {
+ t.Errorf("real(complex128(r52)-1) = %g, want %g", got, f52want)
+ }
+ if got := real(r53 - 1); got != f53want {
+ t.Errorf("real(r53-1) = %g, want %g", got, f53want)
+ }
+ if got := real(complex128(r53) - 1); got != 0 {
+ t.Errorf("real(complex128(r53)-1) = %g, want 0", got)
+ }
+
+ const i52 = complex(0, 1+1.0/(1<<52))
+ const i53 = complex(0, 1+1.0/(1<<53))
+
+ if got := imag(i52 - 1i); got != f52want {
+ t.Errorf("imag(i52-1i) = %g, want %g", got, f52want)
+ }
+ if got := imag(complex128(i52) - 1i); got != f52want {
+ t.Errorf("imag(complex128(i52)-1i) = %g, want %g", got, f52want)
+ }
+ if got := imag(i53 - 1i); got != f53want {
+ t.Errorf("imag(i53-1i) = %g, want %g", got, f53want)
+ }
+ if got := imag(complex128(i53) - 1i); got != 0 {
+ t.Errorf("imag(complex128(i53)-1i) = %g, want 0", got)
+ }
+
+}
diff --git a/src/cmd/compile/internal/test/zerorange_test.go b/src/cmd/compile/internal/test/zerorange_test.go
new file mode 100644
index 0000000..e92b5d3
--- /dev/null
+++ b/src/cmd/compile/internal/test/zerorange_test.go
@@ -0,0 +1,184 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var glob = 3
+var globp *int64
+
+// Testing compilation of arch.ZeroRange of various sizes.
+
+// By storing a pointer to an int64 output param in a global, the compiler must
+// ensure that output param is allocated on the heap. Also, since there is a
+// defer, the pointer to each output param must be zeroed in the prologue (see
+// plive.go:epilogue()). So, we will get a block of one or more stack slots that
+// need to be zeroed. Hence, we are testing compilation completes successfully when
+// zerorange calls of various sizes (8-136 bytes) are generated. We are not
+// testing runtime correctness (which is hard to do for the current uses of
+// ZeroRange).
+
+func TestZeroRange(t *testing.T) {
+ testZeroRange8(t)
+ testZeroRange16(t)
+ testZeroRange32(t)
+ testZeroRange64(t)
+ testZeroRange136(t)
+}
+
+func testZeroRange8(t *testing.T) (r int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ return
+}
+
+func testZeroRange16(t *testing.T) (r, s int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ return
+}
+
+func testZeroRange32(t *testing.T) (r, s, t2, u int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ return
+}
+
+func testZeroRange64(t *testing.T) (r, s, t2, u, v, w, x, y int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ globp = &v
+ globp = &w
+ globp = &x
+ globp = &y
+ return
+}
+
+func testZeroRange136(t *testing.T) (r, s, t2, u, v, w, x, y, r1, s1, t1, u1, v1, w1, x1, y1, z1 int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ globp = &v
+ globp = &w
+ globp = &x
+ globp = &y
+ globp = &r1
+ globp = &s1
+ globp = &t1
+ globp = &u1
+ globp = &v1
+ globp = &w1
+ globp = &x1
+ globp = &y1
+ globp = &z1
+ return
+}
+
+type S struct {
+ x [2]uint64
+ p *uint64
+ y [2]uint64
+ q uint64
+}
+
+type M struct {
+ x [8]uint64
+ p *uint64
+ y [8]uint64
+ q uint64
+}
+
+type L struct {
+ x [4096]uint64
+ p *uint64
+ y [4096]uint64
+ q uint64
+}
+
+//go:noinline
+func triggerZerorangeLarge(f, g, h uint64) (rv0 uint64) {
+ ll := L{p: &f}
+ da := f
+ rv0 = f + g + h
+ defer func(dl L, i uint64) {
+ rv0 += dl.q + i
+ }(ll, da)
+ return rv0
+}
+
+//go:noinline
+func triggerZerorangeMedium(f, g, h uint64) (rv0 uint64) {
+ ll := M{p: &f}
+ rv0 = f + g + h
+ defer func(dm M, i uint64) {
+ rv0 += dm.q + i
+ }(ll, f)
+ return rv0
+}
+
+//go:noinline
+func triggerZerorangeSmall(f, g, h uint64) (rv0 uint64) {
+ ll := S{p: &f}
+ rv0 = f + g + h
+ defer func(ds S, i uint64) {
+ rv0 += ds.q + i
+ }(ll, f)
+ return rv0
+}
+
+// This test was created as a follow up to issue #45372, to help
+// improve coverage of the compiler's arch-specific "zerorange"
+// function, which is invoked to zero out ambiguously live portions of
+// the stack frame in certain specific circumstances.
+//
+// In the current compiler implementation, for zerorange to be
+// invoked, we need to have an ambiguously live variable that needs
+// zeroing. One way to trigger this is to have a function with an
+// open-coded defer, where the opendefer function has an argument that
+// contains a pointer (this is what's used below).
+//
+// At the moment this test doesn't do any specific checking for
+// code sequence, or verification that things were properly set to zero,
+// this seems as though it would be too tricky and would result
+// in a "brittle" test.
+//
+// The small/medium/large scenarios below are inspired by the amd64
+// implementation of zerorange, which generates different code
+// depending on the size of the thing that needs to be zeroed out
+// (I've verified at the time of the writing of this test that it
+// exercises the various cases).
+func TestZerorange45372(t *testing.T) {
+ if r := triggerZerorangeLarge(101, 303, 505); r != 1010 {
+ t.Errorf("large: wanted %d got %d", 1010, r)
+ }
+ if r := triggerZerorangeMedium(101, 303, 505); r != 1010 {
+ t.Errorf("medium: wanted %d got %d", 1010, r)
+ }
+ if r := triggerZerorangeSmall(101, 303, 505); r != 1010 {
+ t.Errorf("small: wanted %d got %d", 1010, r)
+ }
+
+}
diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go
new file mode 100644
index 0000000..b07f437
--- /dev/null
+++ b/src/cmd/compile/internal/typebits/typebits.go
@@ -0,0 +1,96 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typebits
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/types"
+)
+
+// NOTE: The bitmap for a specific type t could be cached in t after
+// the first run and then simply copied into bv at the correct offset
+// on future calls with the same type t.
+func Set(t *types.Type, off int64, bv bitvec.BitVec) {
+ set(t, off, bv, false)
+}
+
+// SetNoCheck is like Set, but do not check for alignment.
+func SetNoCheck(t *types.Type, off int64, bv bitvec.BitVec) {
+ set(t, off, bv, true)
+}
+
+func set(t *types.Type, off int64, bv bitvec.BitVec, skip bool) {
+ if !skip && uint8(t.Alignment()) > 0 && off&int64(uint8(t.Alignment())-1) != 0 {
+ base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, uint8(t.Alignment()), off)
+ }
+ if !t.HasPointers() {
+ // Note: this case ensures that pointers to not-in-heap types
+ // are not considered pointers by garbage collection and stack copying.
+ return
+ }
+
+ switch t.Kind() {
+ case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer
+
+ case types.TSTRING:
+ // struct { byte *str; intgo len; }
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot
+
+ case types.TINTER:
+ // struct { Itab *tab; void *data; }
+ // or, when isnilinter(t)==true:
+ // struct { Type *type; void *data; }
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
+ }
+ // The first word of an interface is a pointer, but we don't
+ // treat it as such.
+ // 1. If it is a non-empty interface, the pointer points to an itab
+ // which is always in persistentalloc space.
+ // 2. If it is an empty interface, the pointer points to a _type.
+ // a. If it is a compile-time-allocated type, it points into
+ // the read-only data section.
+ // b. If it is a reflect-allocated type, it points into the Go heap.
+ // Reflect is responsible for keeping a reference to
+ // the underlying type so it won't be GCd.
+ // If we ever have a moving GC, we need to change this for 2b (as
+ // well as scan itabs to update their itab._type fields).
+ bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot
+
+ case types.TSLICE:
+ // struct { byte *array; uintgo len; uintgo cap; }
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
+
+ case types.TARRAY:
+ elt := t.Elem()
+ if elt.Size() == 0 {
+ // Short-circuit for #20739.
+ break
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ set(elt, off, bv, skip)
+ off += elt.Size()
+ }
+
+ case types.TSTRUCT:
+ for _, f := range t.Fields() {
+ set(f.Type, off+f.Offset, bv, skip)
+ }
+
+ default:
+ base.Fatalf("typebits.Set: unexpected type, %v", t)
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/_builtin/coverage.go b/src/cmd/compile/internal/typecheck/_builtin/coverage.go
new file mode 100644
index 0000000..0222635
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/_builtin/coverage.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NOTE: If you change this file you must run "go generate"
+// to update builtin.go. This is not done automatically
+// to avoid depending on having a working compiler binary.
+
+//go:build ignore
+
+package coverage
+
+func initHook(istest bool)
diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
new file mode 100644
index 0000000..4211529
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go
@@ -0,0 +1,286 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NOTE: If you change this file you must run "go generate"
+// to update builtin.go. This is not done automatically
+// to avoid depending on having a working compiler binary.
+
+//go:build ignore
+
+package runtime
+
+// emitted by compiler, not referred to by go programs
+
+import "unsafe"
+
+func newobject(typ *byte) *any
+func mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+func panicdivide()
+func panicshift()
+func panicmakeslicelen()
+func panicmakeslicecap()
+func throwinit()
+func panicwrap()
+
+func gopanic(interface{})
+func gorecover(*int32) interface{}
+func goschedguarded()
+
+// Note: these declarations are just for wasm port.
+// Other ports call assembly stubs instead.
+func goPanicIndex(x int, y int)
+func goPanicIndexU(x uint, y int)
+func goPanicSliceAlen(x int, y int)
+func goPanicSliceAlenU(x uint, y int)
+func goPanicSliceAcap(x int, y int)
+func goPanicSliceAcapU(x uint, y int)
+func goPanicSliceB(x int, y int)
+func goPanicSliceBU(x uint, y int)
+func goPanicSlice3Alen(x int, y int)
+func goPanicSlice3AlenU(x uint, y int)
+func goPanicSlice3Acap(x int, y int)
+func goPanicSlice3AcapU(x uint, y int)
+func goPanicSlice3B(x int, y int)
+func goPanicSlice3BU(x uint, y int)
+func goPanicSlice3C(x int, y int)
+func goPanicSlice3CU(x uint, y int)
+func goPanicSliceConvert(x int, y int)
+
+func printbool(bool)
+func printfloat(float64)
+func printint(int64)
+func printhex(uint64)
+func printuint(uint64)
+func printcomplex(complex128)
+func printstring(string)
+func printpointer(any)
+func printuintptr(uintptr)
+func printiface(any)
+func printeface(any)
+func printslice(any)
+func printnl()
+func printsp()
+func printlock()
+func printunlock()
+
+func concatstring2(*[32]byte, string, string) string
+func concatstring3(*[32]byte, string, string, string) string
+func concatstring4(*[32]byte, string, string, string, string) string
+func concatstring5(*[32]byte, string, string, string, string, string) string
+func concatstrings(*[32]byte, []string) string
+
+func cmpstring(string, string) int
+func intstring(*[4]byte, int64) string
+func slicebytetostring(buf *[32]byte, ptr *byte, n int) string
+func slicebytetostringtmp(ptr *byte, n int) string
+func slicerunetostring(*[32]byte, []rune) string
+func stringtoslicebyte(*[32]byte, string) []byte
+func stringtoslicerune(*[32]rune, string) []rune
+func slicecopy(toPtr *any, toLen int, fromPtr *any, fromLen int, wid uintptr) int
+
+func decoderune(string, int) (retv rune, retk int)
+func countrunes(string) int
+
+// Convert non-interface type to the data word of a (empty or nonempty) interface.
+func convT(typ *byte, elem *any) unsafe.Pointer
+
+// Same as convT, for types with no pointers in them.
+func convTnoptr(typ *byte, elem *any) unsafe.Pointer
+
+// Specialized versions of convT for specific types.
+// These functions take concrete types in the runtime. But they may
+// be used for a wider range of types, which have the same memory
+// layout as the parameter type. The compiler converts the
+// to-be-converted type to the parameter type before calling the
+// runtime function. This way, the call is ABI-insensitive.
+func convT16(val uint16) unsafe.Pointer
+func convT32(val uint32) unsafe.Pointer
+func convT64(val uint64) unsafe.Pointer
+func convTstring(val string) unsafe.Pointer
+func convTslice(val []uint8) unsafe.Pointer
+
+// interface type assertions x.(T)
+func assertE2I(inter *byte, typ *byte) *byte
+func assertE2I2(inter *byte, typ *byte) *byte
+func panicdottypeE(have, want, iface *byte)
+func panicdottypeI(have, want, iface *byte)
+func panicnildottype(want *byte)
+func typeAssert(s *byte, typ *byte) *byte
+
+// interface switches
+func interfaceSwitch(s *byte, t *byte) (int, *byte)
+
+// interface equality. Type/itab pointers are already known to be equal, so
+// we only need to pass one.
+func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+
+// panic for iteration after exit in range func
+func panicrangeexit()
+
+// defer in range over func
+func deferrangefunc() interface{}
+
+func rand32() uint32
+
+// *byte is really *runtime.Type
+func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any)
+func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any)
+func makemap_small() (hmap map[any]any)
+func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapaccess1_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any)
+func mapaccess1_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
+func mapaccess1_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
+func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any)
+func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool)
+func mapaccess2_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any, pres bool)
+func mapaccess2_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any, pres bool)
+func mapaccess2_faststr(mapType *byte, hmap map[any]any, key string) (val *any, pres bool)
+func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
+func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapassign_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
+func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
+func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
+func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
+func mapdelete(mapType *byte, hmap map[any]any, key *any)
+func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32)
+func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64)
+func mapdelete_faststr(mapType *byte, hmap map[any]any, key string)
+func mapiternext(hiter *any)
+func mapclear(mapType *byte, hmap map[any]any)
+
+// *byte is really *runtime.Type
+func makechan64(chanType *byte, size int64) (hchan chan any)
+func makechan(chanType *byte, size int) (hchan chan any)
+func chanrecv1(hchan <-chan any, elem *any)
+func chanrecv2(hchan <-chan any, elem *any) bool
+func chansend1(hchan chan<- any, elem *any)
+func closechan(hchan any)
+
+var writeBarrier struct {
+ enabled bool
+ pad [3]byte
+ cgo bool
+ alignme uint64
+}
+
+// *byte is really *runtime.Type
+func typedmemmove(typ *byte, dst *any, src *any)
+func typedmemclr(typ *byte, dst *any)
+func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+
+func selectnbsend(hchan chan<- any, elem *any) bool
+func selectnbrecv(elem *any, hchan <-chan any) (bool, bool)
+
+func selectsetpc(pc *uintptr)
+func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)
+func block()
+
+func makeslice(typ *byte, len int, cap int) unsafe.Pointer
+func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer
+func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+func growslice(oldPtr *any, newLen, oldCap, num int, et *byte) (ary []any)
+func unsafeslicecheckptr(typ *byte, ptr unsafe.Pointer, len int64)
+func panicunsafeslicelen()
+func panicunsafeslicenilptr()
+func unsafestringcheckptr(ptr unsafe.Pointer, len int64)
+func panicunsafestringlen()
+func panicunsafestringnilptr()
+
+func memmove(to *any, frm *any, length uintptr)
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
+
+func memequal(x, y *any, size uintptr) bool
+func memequal0(x, y *any) bool
+func memequal8(x, y *any) bool
+func memequal16(x, y *any) bool
+func memequal32(x, y *any) bool
+func memequal64(x, y *any) bool
+func memequal128(x, y *any) bool
+func f32equal(p, q unsafe.Pointer) bool
+func f64equal(p, q unsafe.Pointer) bool
+func c64equal(p, q unsafe.Pointer) bool
+func c128equal(p, q unsafe.Pointer) bool
+func strequal(p, q unsafe.Pointer) bool
+func interequal(p, q unsafe.Pointer) bool
+func nilinterequal(p, q unsafe.Pointer) bool
+
+func memhash(x *any, h uintptr, size uintptr) uintptr
+func memhash0(p unsafe.Pointer, h uintptr) uintptr
+func memhash8(p unsafe.Pointer, h uintptr) uintptr
+func memhash16(p unsafe.Pointer, h uintptr) uintptr
+func memhash32(p unsafe.Pointer, h uintptr) uintptr
+func memhash64(p unsafe.Pointer, h uintptr) uintptr
+func memhash128(p unsafe.Pointer, h uintptr) uintptr
+func f32hash(p *any, h uintptr) uintptr
+func f64hash(p *any, h uintptr) uintptr
+func c64hash(p *any, h uintptr) uintptr
+func c128hash(p *any, h uintptr) uintptr
+func strhash(a *any, h uintptr) uintptr
+func interhash(p *any, h uintptr) uintptr
+func nilinterhash(p *any, h uintptr) uintptr
+
+// only used on 32-bit
+func int64div(int64, int64) int64
+func uint64div(uint64, uint64) uint64
+func int64mod(int64, int64) int64
+func uint64mod(uint64, uint64) uint64
+func float64toint64(float64) int64
+func float64touint64(float64) uint64
+func float64touint32(float64) uint32
+func int64tofloat64(int64) float64
+func int64tofloat32(int64) float32
+func uint64tofloat64(uint64) float64
+func uint64tofloat32(uint64) float32
+func uint32tofloat64(uint32) float64
+
+func complex128div(num complex128, den complex128) (quo complex128)
+
+func getcallerpc() uintptr
+func getcallersp() uintptr
+
+// race detection
+func racefuncenter(uintptr)
+func racefuncexit()
+func raceread(uintptr)
+func racewrite(uintptr)
+func racereadrange(addr, size uintptr)
+func racewriterange(addr, size uintptr)
+
+// memory sanitizer
+func msanread(addr, size uintptr)
+func msanwrite(addr, size uintptr)
+func msanmove(dst, src, size uintptr)
+
+// address sanitizer
+func asanread(addr, size uintptr)
+func asanwrite(addr, size uintptr)
+
+func checkptrAlignment(unsafe.Pointer, *byte, uintptr)
+func checkptrArithmetic(unsafe.Pointer, []unsafe.Pointer)
+
+func libfuzzerTraceCmp1(uint8, uint8, uint)
+func libfuzzerTraceCmp2(uint16, uint16, uint)
+func libfuzzerTraceCmp4(uint32, uint32, uint)
+func libfuzzerTraceCmp8(uint64, uint64, uint)
+func libfuzzerTraceConstCmp1(uint8, uint8, uint)
+func libfuzzerTraceConstCmp2(uint16, uint16, uint)
+func libfuzzerTraceConstCmp4(uint32, uint32, uint)
+func libfuzzerTraceConstCmp8(uint64, uint64, uint)
+func libfuzzerHookStrCmp(string, string, uint)
+func libfuzzerHookEqualFold(string, string, uint)
+
+func addCovMeta(p unsafe.Pointer, len uint32, hash [16]byte, pkpath string, pkgId int, cmode uint8, cgran uint8) uint32
+
+// architecture variants
+var x86HasPOPCNT bool
+var x86HasSSE41 bool
+var x86HasFMA bool
+var armHasVFPv4 bool
+var arm64HasATOMICS bool
+
+func asanregisterglobals(unsafe.Pointer, uintptr)
diff --git a/src/cmd/compile/internal/typecheck/bexport.go b/src/cmd/compile/internal/typecheck/bexport.go
new file mode 100644
index 0000000..ed9a011
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/bexport.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+// Tags. Must be < 0.
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+)
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
new file mode 100644
index 0000000..09f60c6
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -0,0 +1,408 @@
+// Code generated by mkbuiltin.go. DO NOT EDIT.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Not inlining this function removes a significant chunk of init code.
+//
+//go:noinline
+func newSig(params, results []*types.Field) *types.Type {
+ return types.NewSignature(nil, params, results)
+}
+
+func params(tlist ...*types.Type) []*types.Field {
+ flist := make([]*types.Field, len(tlist))
+ for i, typ := range tlist {
+ flist[i] = types.NewField(src.NoXPos, nil, typ)
+ }
+ return flist
+}
+
+var runtimeDecls = [...]struct {
+ name string
+ tag int
+ typ int
+}{
+ {"newobject", funcTag, 4},
+ {"mallocgc", funcTag, 8},
+ {"panicdivide", funcTag, 9},
+ {"panicshift", funcTag, 9},
+ {"panicmakeslicelen", funcTag, 9},
+ {"panicmakeslicecap", funcTag, 9},
+ {"throwinit", funcTag, 9},
+ {"panicwrap", funcTag, 9},
+ {"gopanic", funcTag, 11},
+ {"gorecover", funcTag, 14},
+ {"goschedguarded", funcTag, 9},
+ {"goPanicIndex", funcTag, 16},
+ {"goPanicIndexU", funcTag, 18},
+ {"goPanicSliceAlen", funcTag, 16},
+ {"goPanicSliceAlenU", funcTag, 18},
+ {"goPanicSliceAcap", funcTag, 16},
+ {"goPanicSliceAcapU", funcTag, 18},
+ {"goPanicSliceB", funcTag, 16},
+ {"goPanicSliceBU", funcTag, 18},
+ {"goPanicSlice3Alen", funcTag, 16},
+ {"goPanicSlice3AlenU", funcTag, 18},
+ {"goPanicSlice3Acap", funcTag, 16},
+ {"goPanicSlice3AcapU", funcTag, 18},
+ {"goPanicSlice3B", funcTag, 16},
+ {"goPanicSlice3BU", funcTag, 18},
+ {"goPanicSlice3C", funcTag, 16},
+ {"goPanicSlice3CU", funcTag, 18},
+ {"goPanicSliceConvert", funcTag, 16},
+ {"printbool", funcTag, 19},
+ {"printfloat", funcTag, 21},
+ {"printint", funcTag, 23},
+ {"printhex", funcTag, 25},
+ {"printuint", funcTag, 25},
+ {"printcomplex", funcTag, 27},
+ {"printstring", funcTag, 29},
+ {"printpointer", funcTag, 30},
+ {"printuintptr", funcTag, 31},
+ {"printiface", funcTag, 30},
+ {"printeface", funcTag, 30},
+ {"printslice", funcTag, 30},
+ {"printnl", funcTag, 9},
+ {"printsp", funcTag, 9},
+ {"printlock", funcTag, 9},
+ {"printunlock", funcTag, 9},
+ {"concatstring2", funcTag, 34},
+ {"concatstring3", funcTag, 35},
+ {"concatstring4", funcTag, 36},
+ {"concatstring5", funcTag, 37},
+ {"concatstrings", funcTag, 39},
+ {"cmpstring", funcTag, 40},
+ {"intstring", funcTag, 43},
+ {"slicebytetostring", funcTag, 44},
+ {"slicebytetostringtmp", funcTag, 45},
+ {"slicerunetostring", funcTag, 48},
+ {"stringtoslicebyte", funcTag, 50},
+ {"stringtoslicerune", funcTag, 53},
+ {"slicecopy", funcTag, 54},
+ {"decoderune", funcTag, 55},
+ {"countrunes", funcTag, 56},
+ {"convT", funcTag, 57},
+ {"convTnoptr", funcTag, 57},
+ {"convT16", funcTag, 59},
+ {"convT32", funcTag, 61},
+ {"convT64", funcTag, 62},
+ {"convTstring", funcTag, 63},
+ {"convTslice", funcTag, 66},
+ {"assertE2I", funcTag, 67},
+ {"assertE2I2", funcTag, 67},
+ {"panicdottypeE", funcTag, 68},
+ {"panicdottypeI", funcTag, 68},
+ {"panicnildottype", funcTag, 69},
+ {"typeAssert", funcTag, 67},
+ {"interfaceSwitch", funcTag, 70},
+ {"ifaceeq", funcTag, 72},
+ {"efaceeq", funcTag, 72},
+ {"panicrangeexit", funcTag, 9},
+ {"deferrangefunc", funcTag, 73},
+ {"rand32", funcTag, 74},
+ {"makemap64", funcTag, 76},
+ {"makemap", funcTag, 77},
+ {"makemap_small", funcTag, 78},
+ {"mapaccess1", funcTag, 79},
+ {"mapaccess1_fast32", funcTag, 80},
+ {"mapaccess1_fast64", funcTag, 81},
+ {"mapaccess1_faststr", funcTag, 82},
+ {"mapaccess1_fat", funcTag, 83},
+ {"mapaccess2", funcTag, 84},
+ {"mapaccess2_fast32", funcTag, 85},
+ {"mapaccess2_fast64", funcTag, 86},
+ {"mapaccess2_faststr", funcTag, 87},
+ {"mapaccess2_fat", funcTag, 88},
+ {"mapassign", funcTag, 79},
+ {"mapassign_fast32", funcTag, 80},
+ {"mapassign_fast32ptr", funcTag, 89},
+ {"mapassign_fast64", funcTag, 81},
+ {"mapassign_fast64ptr", funcTag, 89},
+ {"mapassign_faststr", funcTag, 82},
+ {"mapiterinit", funcTag, 90},
+ {"mapdelete", funcTag, 90},
+ {"mapdelete_fast32", funcTag, 91},
+ {"mapdelete_fast64", funcTag, 92},
+ {"mapdelete_faststr", funcTag, 93},
+ {"mapiternext", funcTag, 94},
+ {"mapclear", funcTag, 95},
+ {"makechan64", funcTag, 97},
+ {"makechan", funcTag, 98},
+ {"chanrecv1", funcTag, 100},
+ {"chanrecv2", funcTag, 101},
+ {"chansend1", funcTag, 103},
+ {"closechan", funcTag, 30},
+ {"writeBarrier", varTag, 105},
+ {"typedmemmove", funcTag, 106},
+ {"typedmemclr", funcTag, 107},
+ {"typedslicecopy", funcTag, 108},
+ {"selectnbsend", funcTag, 109},
+ {"selectnbrecv", funcTag, 110},
+ {"selectsetpc", funcTag, 111},
+ {"selectgo", funcTag, 112},
+ {"block", funcTag, 9},
+ {"makeslice", funcTag, 113},
+ {"makeslice64", funcTag, 114},
+ {"makeslicecopy", funcTag, 115},
+ {"growslice", funcTag, 117},
+ {"unsafeslicecheckptr", funcTag, 118},
+ {"panicunsafeslicelen", funcTag, 9},
+ {"panicunsafeslicenilptr", funcTag, 9},
+ {"unsafestringcheckptr", funcTag, 119},
+ {"panicunsafestringlen", funcTag, 9},
+ {"panicunsafestringnilptr", funcTag, 9},
+ {"memmove", funcTag, 120},
+ {"memclrNoHeapPointers", funcTag, 121},
+ {"memclrHasPointers", funcTag, 121},
+ {"memequal", funcTag, 122},
+ {"memequal0", funcTag, 123},
+ {"memequal8", funcTag, 123},
+ {"memequal16", funcTag, 123},
+ {"memequal32", funcTag, 123},
+ {"memequal64", funcTag, 123},
+ {"memequal128", funcTag, 123},
+ {"f32equal", funcTag, 124},
+ {"f64equal", funcTag, 124},
+ {"c64equal", funcTag, 124},
+ {"c128equal", funcTag, 124},
+ {"strequal", funcTag, 124},
+ {"interequal", funcTag, 124},
+ {"nilinterequal", funcTag, 124},
+ {"memhash", funcTag, 125},
+ {"memhash0", funcTag, 126},
+ {"memhash8", funcTag, 126},
+ {"memhash16", funcTag, 126},
+ {"memhash32", funcTag, 126},
+ {"memhash64", funcTag, 126},
+ {"memhash128", funcTag, 126},
+ {"f32hash", funcTag, 127},
+ {"f64hash", funcTag, 127},
+ {"c64hash", funcTag, 127},
+ {"c128hash", funcTag, 127},
+ {"strhash", funcTag, 127},
+ {"interhash", funcTag, 127},
+ {"nilinterhash", funcTag, 127},
+ {"int64div", funcTag, 128},
+ {"uint64div", funcTag, 129},
+ {"int64mod", funcTag, 128},
+ {"uint64mod", funcTag, 129},
+ {"float64toint64", funcTag, 130},
+ {"float64touint64", funcTag, 131},
+ {"float64touint32", funcTag, 132},
+ {"int64tofloat64", funcTag, 133},
+ {"int64tofloat32", funcTag, 135},
+ {"uint64tofloat64", funcTag, 136},
+ {"uint64tofloat32", funcTag, 137},
+ {"uint32tofloat64", funcTag, 138},
+ {"complex128div", funcTag, 139},
+ {"getcallerpc", funcTag, 140},
+ {"getcallersp", funcTag, 140},
+ {"racefuncenter", funcTag, 31},
+ {"racefuncexit", funcTag, 9},
+ {"raceread", funcTag, 31},
+ {"racewrite", funcTag, 31},
+ {"racereadrange", funcTag, 141},
+ {"racewriterange", funcTag, 141},
+ {"msanread", funcTag, 141},
+ {"msanwrite", funcTag, 141},
+ {"msanmove", funcTag, 142},
+ {"asanread", funcTag, 141},
+ {"asanwrite", funcTag, 141},
+ {"checkptrAlignment", funcTag, 143},
+ {"checkptrArithmetic", funcTag, 145},
+ {"libfuzzerTraceCmp1", funcTag, 146},
+ {"libfuzzerTraceCmp2", funcTag, 147},
+ {"libfuzzerTraceCmp4", funcTag, 148},
+ {"libfuzzerTraceCmp8", funcTag, 149},
+ {"libfuzzerTraceConstCmp1", funcTag, 146},
+ {"libfuzzerTraceConstCmp2", funcTag, 147},
+ {"libfuzzerTraceConstCmp4", funcTag, 148},
+ {"libfuzzerTraceConstCmp8", funcTag, 149},
+ {"libfuzzerHookStrCmp", funcTag, 150},
+ {"libfuzzerHookEqualFold", funcTag, 150},
+ {"addCovMeta", funcTag, 152},
+ {"x86HasPOPCNT", varTag, 6},
+ {"x86HasSSE41", varTag, 6},
+ {"x86HasFMA", varTag, 6},
+ {"armHasVFPv4", varTag, 6},
+ {"arm64HasATOMICS", varTag, 6},
+ {"asanregisterglobals", funcTag, 121},
+}
+
+func runtimeTypes() []*types.Type {
+ var typs [153]*types.Type
+ typs[0] = types.ByteType
+ typs[1] = types.NewPtr(typs[0])
+ typs[2] = types.Types[types.TANY]
+ typs[3] = types.NewPtr(typs[2])
+ typs[4] = newSig(params(typs[1]), params(typs[3]))
+ typs[5] = types.Types[types.TUINTPTR]
+ typs[6] = types.Types[types.TBOOL]
+ typs[7] = types.Types[types.TUNSAFEPTR]
+ typs[8] = newSig(params(typs[5], typs[1], typs[6]), params(typs[7]))
+ typs[9] = newSig(nil, nil)
+ typs[10] = types.Types[types.TINTER]
+ typs[11] = newSig(params(typs[10]), nil)
+ typs[12] = types.Types[types.TINT32]
+ typs[13] = types.NewPtr(typs[12])
+ typs[14] = newSig(params(typs[13]), params(typs[10]))
+ typs[15] = types.Types[types.TINT]
+ typs[16] = newSig(params(typs[15], typs[15]), nil)
+ typs[17] = types.Types[types.TUINT]
+ typs[18] = newSig(params(typs[17], typs[15]), nil)
+ typs[19] = newSig(params(typs[6]), nil)
+ typs[20] = types.Types[types.TFLOAT64]
+ typs[21] = newSig(params(typs[20]), nil)
+ typs[22] = types.Types[types.TINT64]
+ typs[23] = newSig(params(typs[22]), nil)
+ typs[24] = types.Types[types.TUINT64]
+ typs[25] = newSig(params(typs[24]), nil)
+ typs[26] = types.Types[types.TCOMPLEX128]
+ typs[27] = newSig(params(typs[26]), nil)
+ typs[28] = types.Types[types.TSTRING]
+ typs[29] = newSig(params(typs[28]), nil)
+ typs[30] = newSig(params(typs[2]), nil)
+ typs[31] = newSig(params(typs[5]), nil)
+ typs[32] = types.NewArray(typs[0], 32)
+ typs[33] = types.NewPtr(typs[32])
+ typs[34] = newSig(params(typs[33], typs[28], typs[28]), params(typs[28]))
+ typs[35] = newSig(params(typs[33], typs[28], typs[28], typs[28]), params(typs[28]))
+ typs[36] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28]), params(typs[28]))
+ typs[37] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28], typs[28]), params(typs[28]))
+ typs[38] = types.NewSlice(typs[28])
+ typs[39] = newSig(params(typs[33], typs[38]), params(typs[28]))
+ typs[40] = newSig(params(typs[28], typs[28]), params(typs[15]))
+ typs[41] = types.NewArray(typs[0], 4)
+ typs[42] = types.NewPtr(typs[41])
+ typs[43] = newSig(params(typs[42], typs[22]), params(typs[28]))
+ typs[44] = newSig(params(typs[33], typs[1], typs[15]), params(typs[28]))
+ typs[45] = newSig(params(typs[1], typs[15]), params(typs[28]))
+ typs[46] = types.RuneType
+ typs[47] = types.NewSlice(typs[46])
+ typs[48] = newSig(params(typs[33], typs[47]), params(typs[28]))
+ typs[49] = types.NewSlice(typs[0])
+ typs[50] = newSig(params(typs[33], typs[28]), params(typs[49]))
+ typs[51] = types.NewArray(typs[46], 32)
+ typs[52] = types.NewPtr(typs[51])
+ typs[53] = newSig(params(typs[52], typs[28]), params(typs[47]))
+ typs[54] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15]))
+ typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15]))
+ typs[56] = newSig(params(typs[28]), params(typs[15]))
+ typs[57] = newSig(params(typs[1], typs[3]), params(typs[7]))
+ typs[58] = types.Types[types.TUINT16]
+ typs[59] = newSig(params(typs[58]), params(typs[7]))
+ typs[60] = types.Types[types.TUINT32]
+ typs[61] = newSig(params(typs[60]), params(typs[7]))
+ typs[62] = newSig(params(typs[24]), params(typs[7]))
+ typs[63] = newSig(params(typs[28]), params(typs[7]))
+ typs[64] = types.Types[types.TUINT8]
+ typs[65] = types.NewSlice(typs[64])
+ typs[66] = newSig(params(typs[65]), params(typs[7]))
+ typs[67] = newSig(params(typs[1], typs[1]), params(typs[1]))
+ typs[68] = newSig(params(typs[1], typs[1], typs[1]), nil)
+ typs[69] = newSig(params(typs[1]), nil)
+ typs[70] = newSig(params(typs[1], typs[1]), params(typs[15], typs[1]))
+ typs[71] = types.NewPtr(typs[5])
+ typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6]))
+ typs[73] = newSig(nil, params(typs[10]))
+ typs[74] = newSig(nil, params(typs[60]))
+ typs[75] = types.NewMap(typs[2], typs[2])
+ typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75]))
+ typs[77] = newSig(params(typs[1], typs[15], typs[3]), params(typs[75]))
+ typs[78] = newSig(nil, params(typs[75]))
+ typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3]))
+ typs[80] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3]))
+ typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3]))
+ typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3]))
+ typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3]))
+ typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6]))
+ typs[85] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3], typs[6]))
+ typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6]))
+ typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6]))
+ typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6]))
+ typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3]))
+ typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil)
+ typs[91] = newSig(params(typs[1], typs[75], typs[60]), nil)
+ typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil)
+ typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil)
+ typs[94] = newSig(params(typs[3]), nil)
+ typs[95] = newSig(params(typs[1], typs[75]), nil)
+ typs[96] = types.NewChan(typs[2], types.Cboth)
+ typs[97] = newSig(params(typs[1], typs[22]), params(typs[96]))
+ typs[98] = newSig(params(typs[1], typs[15]), params(typs[96]))
+ typs[99] = types.NewChan(typs[2], types.Crecv)
+ typs[100] = newSig(params(typs[99], typs[3]), nil)
+ typs[101] = newSig(params(typs[99], typs[3]), params(typs[6]))
+ typs[102] = types.NewChan(typs[2], types.Csend)
+ typs[103] = newSig(params(typs[102], typs[3]), nil)
+ typs[104] = types.NewArray(typs[0], 3)
+ typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+ typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil)
+ typs[107] = newSig(params(typs[1], typs[3]), nil)
+ typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
+ typs[109] = newSig(params(typs[102], typs[3]), params(typs[6]))
+ typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6]))
+ typs[111] = newSig(params(typs[71]), nil)
+ typs[112] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
+ typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
+ typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
+ typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
+ typs[116] = types.NewSlice(typs[2])
+ typs[117] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[116]))
+ typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil)
+ typs[119] = newSig(params(typs[7], typs[22]), nil)
+ typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil)
+ typs[121] = newSig(params(typs[7], typs[5]), nil)
+ typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+ typs[123] = newSig(params(typs[3], typs[3]), params(typs[6]))
+ typs[124] = newSig(params(typs[7], typs[7]), params(typs[6]))
+ typs[125] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5]))
+ typs[126] = newSig(params(typs[7], typs[5]), params(typs[5]))
+ typs[127] = newSig(params(typs[3], typs[5]), params(typs[5]))
+ typs[128] = newSig(params(typs[22], typs[22]), params(typs[22]))
+ typs[129] = newSig(params(typs[24], typs[24]), params(typs[24]))
+ typs[130] = newSig(params(typs[20]), params(typs[22]))
+ typs[131] = newSig(params(typs[20]), params(typs[24]))
+ typs[132] = newSig(params(typs[20]), params(typs[60]))
+ typs[133] = newSig(params(typs[22]), params(typs[20]))
+ typs[134] = types.Types[types.TFLOAT32]
+ typs[135] = newSig(params(typs[22]), params(typs[134]))
+ typs[136] = newSig(params(typs[24]), params(typs[20]))
+ typs[137] = newSig(params(typs[24]), params(typs[134]))
+ typs[138] = newSig(params(typs[60]), params(typs[20]))
+ typs[139] = newSig(params(typs[26], typs[26]), params(typs[26]))
+ typs[140] = newSig(nil, params(typs[5]))
+ typs[141] = newSig(params(typs[5], typs[5]), nil)
+ typs[142] = newSig(params(typs[5], typs[5], typs[5]), nil)
+ typs[143] = newSig(params(typs[7], typs[1], typs[5]), nil)
+ typs[144] = types.NewSlice(typs[7])
+ typs[145] = newSig(params(typs[7], typs[144]), nil)
+ typs[146] = newSig(params(typs[64], typs[64], typs[17]), nil)
+ typs[147] = newSig(params(typs[58], typs[58], typs[17]), nil)
+ typs[148] = newSig(params(typs[60], typs[60], typs[17]), nil)
+ typs[149] = newSig(params(typs[24], typs[24], typs[17]), nil)
+ typs[150] = newSig(params(typs[28], typs[28], typs[17]), nil)
+ typs[151] = types.NewArray(typs[0], 16)
+ typs[152] = newSig(params(typs[7], typs[60], typs[151], typs[28], typs[15], typs[64], typs[64]), params(typs[60]))
+ return typs[:]
+}
+
+var coverageDecls = [...]struct {
+ name string
+ tag int
+ typ int
+}{
+ {"initHook", funcTag, 1},
+}
+
+func coverageTypes() []*types.Type {
+ var typs [2]*types.Type
+ typs[0] = types.Types[types.TBOOL]
+ typs[1] = newSig(params(typs[0]), nil)
+ return typs[:]
+}
diff --git a/src/cmd/compile/internal/typecheck/builtin_test.go b/src/cmd/compile/internal/typecheck/builtin_test.go
new file mode 100644
index 0000000..3c0d6b8
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/builtin_test.go
@@ -0,0 +1,31 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "bytes"
+ "internal/testenv"
+ "os"
+ "testing"
+)
+
+func TestBuiltin(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ old, err := os.ReadFile("builtin.go")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ new, err := testenv.Command(t, testenv.GoToolPath(t), "run", "mkbuiltin.go", "-stdout").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(old, new) {
+ t.Fatal("builtin.go out of date; run mkbuiltin.go")
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
new file mode 100644
index 0000000..e7f9ec5
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -0,0 +1,486 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math"
+ "math/big"
+ "unicode"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+func roundFloat(v constant.Value, sz int64) constant.Value {
+ switch sz {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return makeFloat64(float64(f))
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return makeFloat64(f)
+ }
+ base.Fatalf("unexpected size: %v", sz)
+ panic("unreachable")
+}
+
+// truncate float literal fv to 32-bit or 64-bit precision
+// according to type; return truncated value.
+func truncfltlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() {
+ return v
+ }
+
+ return roundFloat(v, t.Size())
+}
+
+// truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit
+// precision, according to type; return truncated value. In case of
+// overflow, calls Errorf but does not truncate the input value.
+func trunccmplxlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() {
+ return v
+ }
+
+ fsz := t.Size() / 2
+ return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz))
+}
+
+// TODO(mdempsky): Replace these with better APIs.
+func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func DefaultLit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+
+// convlit1 converts an untyped expression n to type t. If n already
+// has a type, convlit1 has no effect.
+//
+// For explicit conversions, t must be non-nil, and integer-to-string
+// conversions are allowed.
+//
+// For implicit conversions (e.g., assignments), t may be nil; if so,
+// n is converted to its default type.
+//
+// If there's an error converting n to t, context is used in the error
+// message.
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
+ if explicit && t == nil {
+ base.Fatalf("explicit conversion missing type")
+ }
+ if t != nil && t.IsUntyped() {
+ base.Fatalf("bad conversion to untyped: %v", t)
+ }
+
+ if n == nil || n.Type() == nil {
+ // Allow sloppy callers.
+ return n
+ }
+ if !n.Type().IsUntyped() {
+ // Already typed; nothing to do.
+ return n
+ }
+
+ // Nil is technically not a constant, so handle it specially.
+ if n.Type().Kind() == types.TNIL {
+ if n.Op() != ir.ONIL {
+ base.Fatalf("unexpected op: %v (%v)", n, n.Op())
+ }
+ n = ir.Copy(n)
+ if t == nil {
+ base.Fatalf("use of untyped nil")
+ }
+
+ if !t.HasNil() {
+ // Leave for caller to handle.
+ return n
+ }
+
+ n.SetType(t)
+ return n
+ }
+
+ if t == nil || !ir.OKForConst[t.Kind()] {
+ t = defaultType(n.Type())
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected untyped expression: %v", n)
+
+ case ir.OLITERAL:
+ v := ConvertVal(n.Val(), t, explicit)
+ if v.Kind() == constant.Unknown {
+ n = ir.NewConstExpr(n.Val(), n)
+ break
+ }
+ n = ir.NewConstExpr(v, n)
+ n.SetType(t)
+ return n
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
+ ot := operandType(n.Op(), t)
+ if ot == nil {
+ n = DefaultLit(n, nil)
+ break
+ }
+
+ n := n.(*ir.UnaryExpr)
+ n.X = convlit(n.X, ot)
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(t)
+ return n
+
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
+ ot := operandType(n.Op(), t)
+ if ot == nil {
+ n = DefaultLit(n, nil)
+ break
+ }
+
+ var l, r ir.Node
+ switch n := n.(type) {
+ case *ir.BinaryExpr:
+ n.X = convlit(n.X, ot)
+ n.Y = convlit(n.Y, ot)
+ l, r = n.X, n.Y
+ case *ir.LogicalExpr:
+ n.X = convlit(n.X, ot)
+ n.Y = convlit(n.Y, ot)
+ l, r = n.X, n.Y
+ }
+
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t)
+ return n
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ if !t.IsBoolean() {
+ break
+ }
+ n.SetType(t)
+ return n
+
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ n.X = convlit1(n.X, t, explicit, nil)
+ n.SetType(n.X.Type())
+ if n.Type() != nil && !n.Type().IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
+ n.SetType(nil)
+ }
+ return n
+ }
+
+ if explicit {
+ base.Fatalf("cannot convert %L to type %v", n, t)
+ } else if context != nil {
+ base.Fatalf("cannot use %L as type %v in %s", n, t, context())
+ } else {
+ base.Fatalf("cannot use %L as type %v", n, t)
+ }
+
+ n.SetType(nil)
+ return n
+}
+
+func operandType(op ir.Op, t *types.Type) *types.Type {
+ switch op {
+ case ir.OCOMPLEX:
+ if t.IsComplex() {
+ return types.FloatForComplex(t)
+ }
+ case ir.OREAL, ir.OIMAG:
+ if t.IsFloat() {
+ return types.ComplexForFloat(t)
+ }
+ default:
+ if okfor[op][t.Kind()] {
+ return t
+ }
+ }
+ return nil
+}
+
+// ConvertVal converts v into a representation appropriate for t. If
+// no such representation exists, it returns constant.MakeUnknown()
+// instead.
+//
+// If explicit is true, then conversions from integer to string are
+// also allowed.
+func ConvertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+ switch ct := v.Kind(); ct {
+ case constant.Bool:
+ if t.IsBoolean() {
+ return v
+ }
+
+ case constant.String:
+ if t.IsString() {
+ return v
+ }
+
+ case constant.Int:
+ if explicit && t.IsString() {
+ return tostr(v)
+ }
+ fallthrough
+ case constant.Float, constant.Complex:
+ switch {
+ case t.IsInteger():
+ v = toint(v)
+ return v
+ case t.IsFloat():
+ v = toflt(v)
+ v = truncfltlit(v, t)
+ return v
+ case t.IsComplex():
+ v = tocplx(v)
+ v = trunccmplxlit(v, t)
+ return v
+ }
+ }
+
+ return constant.MakeUnknown()
+}
+
+func tocplx(v constant.Value) constant.Value {
+ return constant.ToComplex(v)
+}
+
+func toflt(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ v = constant.Real(v)
+ }
+
+ return constant.ToFloat(v)
+}
+
+func toint(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ v = constant.Real(v)
+ }
+
+ if v := constant.ToInt(v); v.Kind() == constant.Int {
+ return v
+ }
+
+ // The value of v cannot be represented as an integer;
+ // so we need to print an error message.
+ // Unfortunately some float values cannot be
+ // reasonably formatted for inclusion in an error
+ // message (example: 1 + 1e-100), so first we try to
+ // format the float; if the truncation resulted in
+ // something that looks like an integer we omit the
+ // value from the error message.
+ // (See issue #11371).
+ f := ir.BigFloat(v)
+ if f.MantExp(nil) > 2*ir.ConstPrec {
+ base.Errorf("integer too large")
+ } else {
+ var t big.Float
+ t.Parse(fmt.Sprint(v), 0)
+ if t.IsInt() {
+ base.Errorf("constant truncated to integer")
+ } else {
+ base.Errorf("constant %v truncated to integer", v)
+ }
+ }
+
+ // Prevent follow-on errors.
+ return constant.MakeUnknown()
+}
+
+func tostr(v constant.Value) constant.Value {
+ if v.Kind() == constant.Int {
+ r := unicode.ReplacementChar
+ if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune {
+ r = rune(x)
+ }
+ v = constant.MakeString(string(r))
+ }
+ return v
+}
+
+func makeFloat64(f float64) constant.Value {
+ if math.IsInf(f, 0) {
+ base.Fatalf("infinity is not a valid constant")
+ }
+ return constant.MakeFloat64(f)
+}
+
+func makeComplex(real, imag constant.Value) constant.Value {
+ return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
+}
+
+// DefaultLit on both nodes simultaneously;
+// if they're both ideal going in they better
+// get the same type going out.
+// force means must assign concrete (non-ideal) type.
+// The results of defaultlit2 MUST be assigned back to l and r, e.g.
+//
+// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
+ if l.Type() == nil || r.Type() == nil {
+ return l, r
+ }
+
+ if !l.Type().IsInterface() && !r.Type().IsInterface() {
+ // Can't mix bool with non-bool, string with non-string.
+ if l.Type().IsBoolean() != r.Type().IsBoolean() {
+ return l, r
+ }
+ if l.Type().IsString() != r.Type().IsString() {
+ return l, r
+ }
+ }
+
+ if !l.Type().IsUntyped() {
+ r = convlit(r, l.Type())
+ return l, r
+ }
+
+ if !r.Type().IsUntyped() {
+ l = convlit(l, r.Type())
+ return l, r
+ }
+
+ if !force {
+ return l, r
+ }
+
+ // Can't mix nil with anything untyped.
+ if ir.IsNil(l) || ir.IsNil(r) {
+ return l, r
+ }
+ t := defaultType(mixUntyped(l.Type(), r.Type()))
+ l = convlit(l, t)
+ r = convlit(r, t)
+ return l, r
+}
+
+func mixUntyped(t1, t2 *types.Type) *types.Type {
+ if t1 == t2 {
+ return t1
+ }
+
+ rank := func(t *types.Type) int {
+ switch t {
+ case types.UntypedInt:
+ return 0
+ case types.UntypedRune:
+ return 1
+ case types.UntypedFloat:
+ return 2
+ case types.UntypedComplex:
+ return 3
+ }
+ base.Fatalf("bad type %v", t)
+ panic("unreachable")
+ }
+
+ if rank(t2) > rank(t1) {
+ return t2
+ }
+ return t1
+}
+
+func defaultType(t *types.Type) *types.Type {
+ if !t.IsUntyped() || t.Kind() == types.TNIL {
+ return t
+ }
+
+ switch t {
+ case types.UntypedBool:
+ return types.Types[types.TBOOL]
+ case types.UntypedString:
+ return types.Types[types.TSTRING]
+ case types.UntypedInt:
+ return types.Types[types.TINT]
+ case types.UntypedRune:
+ return types.RuneType
+ case types.UntypedFloat:
+ return types.Types[types.TFLOAT64]
+ case types.UntypedComplex:
+ return types.Types[types.TCOMPLEX128]
+ }
+
+ base.Fatalf("bad type %v", t)
+ return nil
+}
+
+// IndexConst checks if Node n contains a constant expression
+// representable as a non-negative int and returns its value.
+// If n is not a constant expression, not representable as an
+// integer, or negative, it returns -1. If n is too large, it
+// returns -2.
+func IndexConst(n ir.Node) int64 {
+ if n.Op() != ir.OLITERAL {
+ return -1
+ }
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ return -1
+ }
+
+ v := toint(n.Val())
+ if v.Kind() != constant.Int || constant.Sign(v) < 0 {
+ return -1
+ }
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ return -2
+ }
+ return ir.IntVal(types.Types[types.TINT], v)
+}
+
+// callOrChan reports whether n is a call or channel operation.
+func callOrChan(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OAPPEND,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCAP,
+ ir.OCLEAR,
+ ir.OCLOSE,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.OMAX,
+ ir.OMIN,
+ ir.ONEW,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTLN,
+ ir.OREAL,
+ ir.ORECOVER,
+ ir.ORECOVERFP,
+ ir.ORECV,
+ ir.OUNSAFEADD,
+ ir.OUNSAFESLICE,
+ ir.OUNSAFESLICEDATA,
+ ir.OUNSAFESTRING,
+ ir.OUNSAFESTRINGDATA:
+ return true
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
new file mode 100644
index 0000000..4a847e8
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -0,0 +1,125 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+var funcStack []*ir.Func // stack of previous values of ir.CurFunc
+
+// DeclFunc declares the parameters for fn and adds it to
+// Target.Funcs.
+//
+// Before returning, it sets CurFunc to fn. When the caller is done
+// constructing fn, it must call FinishFuncBody to restore CurFunc.
+func DeclFunc(fn *ir.Func) {
+ fn.DeclareParams(true)
+ fn.Nname.Defn = fn
+ Target.Funcs = append(Target.Funcs, fn)
+
+ funcStack = append(funcStack, ir.CurFunc)
+ ir.CurFunc = fn
+}
+
+// FinishFuncBody restores ir.CurFunc to its state before the last
+// call to DeclFunc.
+func FinishFuncBody() {
+ funcStack, ir.CurFunc = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
+}
+
+func CheckFuncStack() {
+ if len(funcStack) != 0 {
+ base.Fatalf("funcStack is non-empty: %v", len(funcStack))
+ }
+}
+
+// make a new Node off the books.
+func TempAt(pos src.XPos, curfn *ir.Func, typ *types.Type) *ir.Name {
+ if curfn == nil {
+ base.FatalfAt(pos, "no curfn for TempAt")
+ }
+ if typ == nil {
+ base.FatalfAt(pos, "TempAt called with nil type")
+ }
+ if typ.Kind() == types.TFUNC && typ.Recv() != nil {
+ base.FatalfAt(pos, "misuse of method type: %v", typ)
+ }
+ types.CalcSize(typ)
+
+ sym := &types.Sym{
+ Name: autotmpname(len(curfn.Dcl)),
+ Pkg: types.LocalPkg,
+ }
+ name := curfn.NewLocal(pos, sym, typ)
+ name.SetEsc(ir.EscNever)
+ name.SetUsed(true)
+ name.SetAutoTemp(true)
+
+ return name
+}
+
+var (
+ autotmpnamesmu sync.Mutex
+ autotmpnames []string
+)
+
+// autotmpname returns the name for an autotmp variable numbered n.
+func autotmpname(n int) string {
+ autotmpnamesmu.Lock()
+ defer autotmpnamesmu.Unlock()
+
+ // Grow autotmpnames, if needed.
+ if n >= len(autotmpnames) {
+ autotmpnames = append(autotmpnames, make([]string, n+1-len(autotmpnames))...)
+ autotmpnames = autotmpnames[:cap(autotmpnames)]
+ }
+
+ s := autotmpnames[n]
+ if s == "" {
+ // Give each tmp a different name so that they can be registerized.
+ // Add a preceding . to avoid clashing with legal names.
+ prefix := ".autotmp_%d"
+
+ s = fmt.Sprintf(prefix, n)
+ autotmpnames[n] = s
+ }
+ return s
+}
+
+// f is method type, with receiver.
+// return function type, receiver as first argument (or not).
+func NewMethodType(sig *types.Type, recv *types.Type) *types.Type {
+ nrecvs := 0
+ if recv != nil {
+ nrecvs++
+ }
+
+ // TODO(mdempsky): Move this function to types.
+ // TODO(mdempsky): Preserve positions, names, and package from sig+recv.
+
+ params := make([]*types.Field, nrecvs+sig.NumParams())
+ if recv != nil {
+ params[0] = types.NewField(base.Pos, nil, recv)
+ }
+ for i, param := range sig.Params() {
+ d := types.NewField(base.Pos, nil, param.Type)
+ d.SetIsDDD(param.IsDDD())
+ params[nrecvs+i] = d
+ }
+
+ results := make([]*types.Field, sig.NumResults())
+ for i, t := range sig.Results() {
+ results[i] = types.NewField(base.Pos, nil, t.Type)
+ }
+
+ return types.NewSignature(nil, params, results)
+}
diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
new file mode 100644
index 0000000..585c1b7
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -0,0 +1,33 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// importfunc declares symbol s as an imported function with type t.
+func importfunc(s *types.Sym, t *types.Type) {
+ fn := ir.NewFunc(src.NoXPos, src.NoXPos, s, t)
+ importsym(fn.Nname)
+}
+
+// importvar declares symbol s as an imported variable with type t.
+func importvar(s *types.Sym, t *types.Type) {
+ n := ir.NewNameAt(src.NoXPos, s, t)
+ n.Class = ir.PEXTERN
+ importsym(n)
+}
+
+func importsym(name *ir.Name) {
+ sym := name.Sym()
+ if sym.Def != nil {
+ base.Fatalf("importsym of symbol that already exists: %v", sym.Def)
+ }
+ sym.Def = name
+}
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
new file mode 100644
index 0000000..12d1743
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -0,0 +1,933 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "internal/types/errors"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+ if l.Type() == nil || r.Type() == nil {
+ return l, r, nil
+ }
+
+ r = DefaultLit(r, types.Types[types.TUINT])
+ t := r.Type()
+ if !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+ return l, r, nil
+ }
+ t = l.Type()
+ if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, t)
+ return l, r, nil
+ }
+
+ // no DefaultLit for left
+ // the outer context gives the type
+ t = l.Type()
+ if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+ t = types.UntypedInt
+ }
+ return l, r, t
+}
+
+// tcArith typechecks operands of a binary arithmetic expression.
+// The result of tcArith MUST be assigned back to original operands,
+// t is the type of the expression, and should be set by the caller. e.g:
+//
+// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
+// n.SetType(t)
+func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+ l, r = defaultlit2(l, r, false)
+ if l.Type() == nil || r.Type() == nil {
+ return l, r, nil
+ }
+ t := l.Type()
+ if t.Kind() == types.TIDEAL {
+ t = r.Type()
+ }
+ aop := ir.OXXX
+ if n.Op().IsCmp() && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ // comparison is okay as long as one side is
+ // assignable to the other. convert so they have
+ // the same type.
+ //
+ // the only conversion that isn't a no-op is concrete == interface.
+ // in that case, check comparability of the concrete type.
+ // The conversion allocates, so only do it if the concrete type is huge.
+ converted := false
+ if r.Type().Kind() != types.TBLANK {
+ aop, _ = assignOp(l.Type(), r.Type())
+ if aop != ir.OXXX {
+ if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+ return l, r, nil
+ }
+
+ types.CalcSize(l.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Size() >= 1<<16 {
+ l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
+ l.SetTypecheck(1)
+ }
+
+ t = r.Type()
+ converted = true
+ }
+ }
+
+ if !converted && l.Type().Kind() != types.TBLANK {
+ aop, _ = assignOp(r.Type(), l.Type())
+ if aop != ir.OXXX {
+ if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+ return l, r, nil
+ }
+
+ types.CalcSize(r.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Size() >= 1<<16 {
+ r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
+ r.SetTypecheck(1)
+ }
+
+ t = l.Type()
+ }
+ }
+ }
+
+ if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ l, r = defaultlit2(l, r, true)
+ if l.Type() == nil || r.Type() == nil {
+ return l, r, nil
+ }
+ if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ return l, r, nil
+ }
+ }
+
+ if t.Kind() == types.TIDEAL {
+ t = mixUntyped(l.Type(), r.Type())
+ }
+ if dt := defaultType(t); !okfor[op][dt.Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+ return l, r, nil
+ }
+
+ // okfor allows any array == array, map == map, func == func.
+ // restrict to slice/map/func == nil and nil == slice/map/func.
+ if l.Type().IsArray() && !types.IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+ return l, r, nil
+ }
+
+ if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
+ return l, r, nil
+ }
+
+ if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
+ return l, r, nil
+ }
+
+ if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
+ return l, r, nil
+ }
+
+ if l.Type().IsStruct() {
+ if f := types.IncomparableField(l.Type()); f != nil {
+ base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+ return l, r, nil
+ }
+ }
+
+ return l, r, t
+}
+
+// The result of tcCompLit MUST be assigned back to n, e.g.
+//
+// n.Left = tcCompLit(n.Left)
+func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcCompLit", n)(&res)
+ }
+
+ lno := base.Pos
+ defer func() {
+ base.Pos = lno
+ }()
+
+ ir.SetPos(n)
+
+ t := n.Type()
+ base.AssertfAt(t != nil, n.Pos(), "missing type in composite literal")
+
+ switch t.Kind() {
+ default:
+ base.Errorf("invalid composite literal type %v", t)
+ n.SetType(nil)
+
+ case types.TARRAY:
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal")
+ n.SetOp(ir.OARRAYLIT)
+
+ case types.TSLICE:
+ length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal")
+ n.SetOp(ir.OSLICELIT)
+ n.Len = length
+
+ case types.TMAP:
+ for i3, l := range n.List {
+ ir.SetPos(l)
+ if l.Op() != ir.OKEY {
+ n.List[i3] = Expr(l)
+ base.Errorf("missing key in map literal")
+ continue
+ }
+ l := l.(*ir.KeyExpr)
+
+ r := l.Key
+ r = Expr(r)
+ l.Key = AssignConv(r, t.Key(), "map key")
+
+ r = l.Value
+ r = Expr(r)
+ l.Value = AssignConv(r, t.Elem(), "map value")
+ }
+
+ n.SetOp(ir.OMAPLIT)
+
+ case types.TSTRUCT:
+ // Need valid field offsets for Xoffset below.
+ types.CalcSize(t)
+
+ errored := false
+ if len(n.List) != 0 && nokeys(n.List) {
+ // simple list of variables
+ ls := n.List
+ for i, n1 := range ls {
+ ir.SetPos(n1)
+ n1 = Expr(n1)
+ ls[i] = n1
+ if i >= t.NumFields() {
+ if !errored {
+ base.Errorf("too many values in %v", n)
+ errored = true
+ }
+ continue
+ }
+
+ f := t.Field(i)
+ s := f.Sym
+
+ // Do the test for assigning to unexported fields.
+ // But if this is an instantiated function, then
+ // the function has already been typechecked. In
+ // that case, don't do the test, since it can fail
+ // for the closure structs created in
+ // walkClosure(), because the instantiated
+ // function is compiled as if in the source
+ // package of the generic function.
+ if !(ir.CurFunc != nil && strings.Contains(ir.CurFunc.Nname.Sym().Name, "[")) {
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
+ }
+ // No pushtype allowed here. Must name fields for that.
+ n1 = AssignConv(n1, f.Type, "field value")
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
+ }
+ if len(ls) < t.NumFields() {
+ base.Errorf("too few values in %v", n)
+ }
+ } else {
+ hash := make(map[string]bool)
+
+ // keyed list
+ ls := n.List
+ for i, n := range ls {
+ ir.SetPos(n)
+
+ sk, ok := n.(*ir.StructKeyExpr)
+ if !ok {
+ kv, ok := n.(*ir.KeyExpr)
+ if !ok {
+ if !errored {
+ base.Errorf("mixture of field:value and value initializers")
+ errored = true
+ }
+ ls[i] = Expr(n)
+ continue
+ }
+
+ sk = tcStructLitKey(t, kv)
+ if sk == nil {
+ continue
+ }
+
+ fielddup(sk.Sym().Name, hash)
+ }
+
+ // No pushtype allowed here. Tried and rejected.
+ sk.Value = Expr(sk.Value)
+ sk.Value = AssignConv(sk.Value, sk.Field.Type, "field value")
+ ls[i] = sk
+ }
+ }
+
+ n.SetOp(ir.OSTRUCTLIT)
+ }
+
+ return n
+}
+
+// tcStructLitKey typechecks an OKEY node that appeared within a
+// struct literal.
+func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr {
+ key := kv.Key
+
+ sym := key.Sym()
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if sym == nil || sym.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || sym.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ return nil
+ }
+
+ if f := Lookdot1(nil, sym, typ, typ.Fields(), 0); f != nil {
+ return ir.NewStructKeyExpr(kv.Pos(), f, kv.Value)
+ }
+
+ if ci := Lookdot1(nil, sym, typ, typ.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", sym, typ, ci.Sym)
+ } else if nonexported(sym) && sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", sym, typ)
+ } else {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ }
+ return nil
+ }
+
+ var f *types.Field
+ p, _ := dotpath(sym, typ, &f, true)
+ if p == nil || f.IsMethod() {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ return nil
+ }
+
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, sym.Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), typ)
+ return nil
+}
+
+// tcConv typechecks an OCONV node.
+func tcConv(n *ir.ConvExpr) ir.Node {
+ types.CheckSize(n.Type()) // ensure width is calculated for backend
+ n.X = Expr(n.X)
+ n.X = convlit1(n.X, n.Type(), true, nil)
+ t := n.X.Type()
+ if t == nil || n.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op, why := convertOp(n.X.Op() == ir.OLITERAL, t, n.Type())
+ if op == ir.OXXX {
+ // Due to //go:nointerface, we may be stricter than types2 here (#63333).
+ base.ErrorfAt(n.Pos(), errors.InvalidConversion, "cannot convert %L to type %v%s", n.X, n.Type(), why)
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetOp(op)
+ switch n.Op() {
+ case ir.OCONVNOP:
+ if t.Kind() == n.Type().Kind() {
+ switch t.Kind() {
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+ // Floating point casts imply rounding and
+ // so the conversion must be kept.
+ n.SetOp(ir.OCONV)
+ }
+ }
+
+ // do not convert to []byte literal. See CL 125796.
+ // generated code and compiler memory footprint is better without it.
+ case ir.OSTR2BYTES:
+ // ok
+
+ case ir.OSTR2RUNES:
+ if n.X.Op() == ir.OLITERAL {
+ return stringtoruneslit(n)
+ }
+
+ case ir.OBYTES2STR:
+ if t.Elem() != types.ByteType && t.Elem() != types.Types[types.TUINT8] {
+ // If t is a slice of a user-defined byte type B (not uint8
+ // or byte), then add an extra CONVNOP from []B to []byte, so
+ // that the call to slicebytetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.ByteType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ case ir.ORUNES2STR:
+ if t.Elem() != types.RuneType && t.Elem() != types.Types[types.TINT32] {
+ // If t is a slice of a user-defined rune type B (not uint32
+ // or rune), then add an extra CONVNOP from []B to []rune, so
+ // that the call to slicerunetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.RuneType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ }
+ return n
+}
+
+// DotField returns a field selector expression that selects the
+// index'th field of the given expression, which must be of struct or
+// pointer-to-struct type.
+func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
+ op, typ := ir.ODOT, x.Type()
+ if typ.IsPtr() {
+ op, typ = ir.ODOTPTR, typ.Elem()
+ }
+ if !typ.IsStruct() {
+ base.FatalfAt(pos, "DotField of non-struct: %L", x)
+ }
+
+ // TODO(mdempsky): This is the backend's responsibility.
+ types.CalcSize(typ)
+
+ field := typ.Field(index)
+ return dot(pos, field.Type, op, x, field)
+}
+
+func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr {
+ n := ir.NewSelectorExpr(pos, op, x, selection.Sym)
+ n.Selection = selection
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ return n
+}
+
+// XDotMethod returns an expression representing the field selection
+// x.sym. If any implicit field selection are necessary, those are
+// inserted too.
+func XDotField(pos src.XPos, x ir.Node, sym *types.Sym) *ir.SelectorExpr {
+ n := Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr)
+ if n.Op() != ir.ODOT && n.Op() != ir.ODOTPTR {
+ base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+ }
+ return n
+}
+
+// XDotMethod returns an expression representing the method value
+// x.sym (i.e., x is a value, not a type). If any implicit field
+// selection are necessary, those are inserted too.
+//
+// If callee is true, the result is an ODOTMETH/ODOTINTER, otherwise
+// an OMETHVALUE.
+func XDotMethod(pos src.XPos, x ir.Node, sym *types.Sym, callee bool) *ir.SelectorExpr {
+ n := ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)
+ if callee {
+ n = Callee(n).(*ir.SelectorExpr)
+ if n.Op() != ir.ODOTMETH && n.Op() != ir.ODOTINTER {
+ base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+ }
+ } else {
+ n = Expr(n).(*ir.SelectorExpr)
+ if n.Op() != ir.OMETHVALUE {
+ base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n)
+ }
+ }
+ return n
+}
+
+// tcDot typechecks an OXDOT or ODOT node.
+func tcDot(n *ir.SelectorExpr, top int) ir.Node {
+ if n.Op() == ir.OXDOT {
+ n = AddImplicitDots(n)
+ n.SetOp(ir.ODOT)
+ if n.X == nil {
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+
+ t := n.X.Type()
+ if t == nil {
+ base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n))
+ n.SetType(nil)
+ return n
+ }
+
+ if n.X.Op() == ir.OTYPE {
+ base.FatalfAt(n.Pos(), "use NewMethodExpr to construct OMETHEXPR")
+ }
+
+ if t.IsPtr() && !t.Elem().IsInterface() {
+ t = t.Elem()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetOp(ir.ODOTPTR)
+ types.CheckSize(t)
+ }
+
+ if n.Sel.IsBlank() {
+ base.Errorf("cannot refer to blank field or method")
+ n.SetType(nil)
+ return n
+ }
+
+ if Lookdot(n, t, 0) == nil {
+ // Legitimate field or method lookup failed, try to explain the error
+ switch {
+ case t.IsEmptyInterface():
+ base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type())
+
+ case t.IsPtr() && t.Elem().IsInterface():
+ // Pointer to interface is almost always a mistake.
+ base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type())
+
+ case Lookdot(n, t, 1) != nil:
+ // Field or method matches by name, but it is not exported.
+ base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel)
+
+ default:
+ if mt := Lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
+ base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym)
+ } else {
+ base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel)
+ }
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
+ n.SetOp(ir.OMETHVALUE)
+ n.SetType(NewMethodType(n.Type(), nil))
+ }
+ return n
+}
+
+// tcDotType typechecks an ODOTTYPE node.
+func tcDotType(n *ir.TypeAssertExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsInterface() {
+ base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ base.AssertfAt(n.Type() != nil, n.Pos(), "missing type: %v", n)
+
+ if n.Type() != nil && !n.Type().IsInterface() {
+ why := ImplementsExplain(n.Type(), t)
+ if why != "" {
+ base.Fatalf("impossible type assertion:\n\t%s", why)
+ n.SetType(nil)
+ return n
+ }
+ }
+ return n
+}
+
+// tcITab typechecks an OITAB node.
+func tcITab(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ t := n.X.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsInterface() {
+ base.Fatalf("OITAB of %v", t)
+ }
+ n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
+ return n
+}
+
+// tcIndex typechecks an OINDEX node.
+func tcIndex(n *ir.IndexExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ n.X = implicitstar(n.X)
+ l := n.X
+ n.Index = Expr(n.Index)
+ r := n.Index
+ t := l.Type()
+ if t == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ switch t.Kind() {
+ default:
+ base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
+ n.SetType(nil)
+ return n
+
+ case types.TSTRING, types.TARRAY, types.TSLICE:
+ n.Index = indexlit(n.Index)
+ if t.IsString() {
+ n.SetType(types.ByteType)
+ } else {
+ n.SetType(t.Elem())
+ }
+ why := "string"
+ if t.IsArray() {
+ why = "array"
+ } else if t.IsSlice() {
+ why = "slice"
+ }
+
+ if n.Index.Type() != nil && !n.Index.Type().IsInteger() {
+ base.Errorf("non-integer %s index %v", why, n.Index)
+ return n
+ }
+
+ if !n.Bounded() && ir.IsConst(n.Index, constant.Int) {
+ x := n.Index.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index)
+ } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
+ base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem())
+ } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) {
+ base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X)))
+ } else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid %s index %v (index too large)", why, n.Index)
+ }
+ }
+
+ case types.TMAP:
+ n.Index = AssignConv(n.Index, t.Key(), "map index")
+ n.SetType(t.Elem())
+ n.SetOp(ir.OINDEXMAP)
+ n.Assigned = false
+ }
+ return n
+}
+
+// tcLenCap typechecks an OLEN or OCAP node.
+func tcLenCap(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ n.X = implicitstar(n.X)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ var ok bool
+ if n.Op() == ir.OLEN {
+ ok = okforlen[t.Kind()]
+ } else {
+ ok = okforcap[t.Kind()]
+ }
+ if !ok {
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(types.Types[types.TINT])
+ return n
+}
+
+// tcUnsafeData typechecks an OUNSAFESLICEDATA or OUNSAFESTRINGDATA node.
+func tcUnsafeData(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ var kind types.Kind
+ if n.Op() == ir.OUNSAFESLICEDATA {
+ kind = types.TSLICE
+ } else {
+ /* kind is string */
+ kind = types.TSTRING
+ }
+
+ if t.Kind() != kind {
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
+ return n
+ }
+
+ if kind == types.TSTRING {
+ t = types.ByteType
+ } else {
+ t = t.Elem()
+ }
+ n.SetType(types.NewPtr(t))
+ return n
+}
+
+// tcRecv typechecks an ORECV node.
+func tcRecv(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsChan() {
+ base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if !t.ChanDir().CanRecv() {
+ base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t.Elem())
+ return n
+}
+
+// tcSPtr typechecks an OSPTR node.
+func tcSPtr(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ t := n.X.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsSlice() && !t.IsString() {
+ base.Fatalf("OSPTR of %v", t)
+ }
+ if t.IsString() {
+ n.SetType(types.NewPtr(types.Types[types.TUINT8]))
+ } else {
+ n.SetType(types.NewPtr(t.Elem()))
+ }
+ return n
+}
+
+// tcSlice typechecks an OSLICE or OSLICE3 node.
+func tcSlice(n *ir.SliceExpr) ir.Node {
+ n.X = DefaultLit(Expr(n.X), nil)
+ n.Low = indexlit(Expr(n.Low))
+ n.High = indexlit(Expr(n.High))
+ n.Max = indexlit(Expr(n.Max))
+ hasmax := n.Op().IsSlice3()
+ l := n.X
+ if l.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if l.Type().IsArray() {
+ if !ir.IsAddressable(n.X) {
+ base.Errorf("invalid operation %v (slice of unaddressable value)", n)
+ n.SetType(nil)
+ return n
+ }
+
+ addr := NodAddr(n.X)
+ addr.SetImplicit(true)
+ n.X = Expr(addr)
+ l = n.X
+ }
+ t := l.Type()
+ var tp *types.Type
+ if t.IsString() {
+ if hasmax {
+ base.Errorf("invalid operation %v (3-index slice of string)", n)
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(t)
+ n.SetOp(ir.OSLICESTR)
+ } else if t.IsPtr() && t.Elem().IsArray() {
+ tp = t.Elem()
+ n.SetType(types.NewSlice(tp.Elem()))
+ types.CalcSize(n.Type())
+ if hasmax {
+ n.SetOp(ir.OSLICE3ARR)
+ } else {
+ n.SetOp(ir.OSLICEARR)
+ }
+ } else if t.IsSlice() {
+ n.SetType(t)
+ } else {
+ base.Errorf("cannot slice %v (type %v)", l, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if n.Low != nil && !checksliceindex(l, n.Low, tp) {
+ n.SetType(nil)
+ return n
+ }
+ if n.High != nil && !checksliceindex(l, n.High, tp) {
+ n.SetType(nil)
+ return n
+ }
+ if n.Max != nil && !checksliceindex(l, n.Max, tp) {
+ n.SetType(nil)
+ return n
+ }
+ if !checksliceconst(n.Low, n.High) || !checksliceconst(n.Low, n.Max) || !checksliceconst(n.High, n.Max) {
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcSliceHeader typechecks an OSLICEHEADER node.
+func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node {
+ // Errors here are Fatalf instead of Errorf because only the compiler
+ // can construct an OSLICEHEADER node.
+ // Components used in OSLICEHEADER that are supplied by parsed source code
+ // have already been typechecked in e.g. OMAKESLICE earlier.
+ t := n.Type()
+ if t == nil {
+ base.Fatalf("no type specified for OSLICEHEADER")
+ }
+
+ if !t.IsSlice() {
+ base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
+ }
+
+ if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() {
+ base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
+ }
+
+ n.Ptr = Expr(n.Ptr)
+ n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT])
+ n.Cap = DefaultLit(Expr(n.Cap), types.Types[types.TINT])
+
+ if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 {
+ base.Fatalf("len for OSLICEHEADER must be non-negative")
+ }
+
+ if ir.IsConst(n.Cap, constant.Int) && ir.Int64Val(n.Cap) < 0 {
+ base.Fatalf("cap for OSLICEHEADER must be non-negative")
+ }
+
+ if ir.IsConst(n.Len, constant.Int) && ir.IsConst(n.Cap, constant.Int) && constant.Compare(n.Len.Val(), token.GTR, n.Cap.Val()) {
+ base.Fatalf("len larger than cap for OSLICEHEADER")
+ }
+
+ return n
+}
+
+// tcStringHeader typechecks an OSTRINGHEADER node.
+func tcStringHeader(n *ir.StringHeaderExpr) ir.Node {
+ t := n.Type()
+ if t == nil {
+ base.Fatalf("no type specified for OSTRINGHEADER")
+ }
+
+ if !t.IsString() {
+ base.Fatalf("invalid type %v for OSTRINGHEADER", n.Type())
+ }
+
+ if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() {
+ base.Fatalf("need unsafe.Pointer for OSTRINGHEADER")
+ }
+
+ n.Ptr = Expr(n.Ptr)
+ n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT])
+
+ if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 {
+ base.Fatalf("len for OSTRINGHEADER must be non-negative")
+ }
+
+ return n
+}
+
+// tcStar typechecks an ODEREF node, which may be an expression or a type.
+func tcStar(n *ir.StarExpr, top int) ir.Node {
+ n.X = typecheck(n.X, ctxExpr|ctxType)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ // TODO(mdempsky): Remove (along with ctxType above) once I'm
+ // confident this code path isn't needed any more.
+ if l.Op() == ir.OTYPE {
+ base.Fatalf("unexpected type in deref expression: %v", l)
+ }
+
+ if !t.IsPtr() {
+ if top&(ctxExpr|ctxStmt) != 0 {
+ base.Errorf("invalid indirect of %L", n.X)
+ n.SetType(nil)
+ return n
+ }
+ base.Errorf("%v is not a type", l)
+ return n
+ }
+
+ n.SetType(t.Elem())
+ return n
+}
+
+// tcUnaryArith typechecks a unary arithmetic expression.
+func tcUnaryArith(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !okfor[n.Op()][defaultType(t).Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t)
+ return n
+}
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
new file mode 100644
index 0000000..5c54a5b
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -0,0 +1,834 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+
+ "fmt"
+ "go/constant"
+ "go/token"
+)
+
+// MakeDotArgs package all the arguments that match a ... T parameter into a []T.
+func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
+ if len(args) == 0 {
+ return ir.NewNilExpr(pos, typ)
+ }
+
+ args = append([]ir.Node(nil), args...)
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args)
+ lit.SetImplicit(true)
+
+ n := Expr(lit)
+ if n.Type() == nil {
+ base.FatalfAt(pos, "mkdotargslice: typecheck failed")
+ }
+ return n
+}
+
+// FixVariadicCall rewrites calls to variadic functions to use an
+// explicit ... argument if one is not already present.
+func FixVariadicCall(call *ir.CallExpr) {
+ fntype := call.Fun.Type()
+ if !fntype.IsVariadic() || call.IsDDD {
+ return
+ }
+
+ vi := fntype.NumParams() - 1
+ vt := fntype.Param(vi).Type
+
+ args := call.Args
+ extra := args[vi:]
+ slice := MakeDotArgs(call.Pos(), vt, extra)
+ for i := range extra {
+ extra[i] = nil // allow GC
+ }
+
+ call.Args = append(args[:vi], slice)
+ call.IsDDD = true
+}
+
+// FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...).
+func FixMethodCall(call *ir.CallExpr) {
+ if call.Fun.Op() != ir.ODOTMETH {
+ return
+ }
+
+ dot := call.Fun.(*ir.SelectorExpr)
+
+ fn := NewMethodExpr(dot.Pos(), dot.X.Type(), dot.Selection.Sym)
+
+ args := make([]ir.Node, 1+len(call.Args))
+ args[0] = dot.X
+ copy(args[1:], call.Args)
+
+ call.SetOp(ir.OCALLFUNC)
+ call.Fun = fn
+ call.Args = args
+}
+
+func AssertFixedCall(call *ir.CallExpr) {
+ if call.Fun.Type().IsVariadic() && !call.IsDDD {
+ base.FatalfAt(call.Pos(), "missed FixVariadicCall")
+ }
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "missed FixMethodCall")
+ }
+}
+
+// ClosureType returns the struct type used to hold all the information
+// needed in the closure for clo (clo must be a OCLOSURE node).
+// The address of a variable of the returned type can be cast to a func.
+func ClosureType(clo *ir.ClosureExpr) *types.Type {
+ // Create closure in the form of a composite literal.
+ // supposing the closure captures an int i and a string s
+ // and has one float64 argument and no results,
+ // the generated code looks like:
+ //
+ // clos = &struct{F uintptr; X0 *int; X1 *string}{func.1, &i, &s}
+ //
+ // The use of the struct provides type information to the garbage
+ // collector so that it can walk the closure. We could use (in this
+ // case) [3]unsafe.Pointer instead, but that would leave the gc in
+ // the dark. The information appears in the binary in the form of
+ // type descriptors; the struct is unnamed and uses exported field
+ // names so that closures in multiple packages with the same struct
+ // type can share the descriptor.
+
+ fields := make([]*types.Field, 1+len(clo.Func.ClosureVars))
+ fields[0] = types.NewField(base.AutogeneratedPos, types.LocalPkg.Lookup("F"), types.Types[types.TUINTPTR])
+ for i, v := range clo.Func.ClosureVars {
+ typ := v.Type()
+ if !v.Byval() {
+ typ = types.NewPtr(typ)
+ }
+ fields[1+i] = types.NewField(base.AutogeneratedPos, types.LocalPkg.LookupNum("X", i), typ)
+ }
+ typ := types.NewStruct(fields)
+ typ.SetNoalg(true)
+ return typ
+}
+
+// MethodValueType returns the struct type used to hold all the information
+// needed in the closure for a OMETHVALUE node. The address of a variable of
+// the returned type can be cast to a func.
+func MethodValueType(n *ir.SelectorExpr) *types.Type {
+ t := types.NewStruct([]*types.Field{
+ types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, Lookup("R"), n.X.Type()),
+ })
+ t.SetNoalg(true)
+ return t
+}
+
+// type check function definition
+// To be called by typecheck, not directly.
+// (Call typecheck.Func instead.)
+func tcFunc(n *ir.Func) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcFunc", n)(nil)
+ }
+
+ if name := n.Nname; name.Typecheck() == 0 {
+ base.AssertfAt(name.Type() != nil, n.Pos(), "missing type: %v", name)
+ name.SetTypecheck(1)
+ }
+}
+
+// tcCall typechecks an OCALL node.
+func tcCall(n *ir.CallExpr, top int) ir.Node {
+ Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
+ n.Fun = typecheck(n.Fun, ctxExpr|ctxType|ctxCallee)
+
+ l := n.Fun
+
+ if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 {
+ l := l.(*ir.Name)
+ if n.IsDDD && l.BuiltinOp != ir.OAPPEND {
+ base.Errorf("invalid use of ... with builtin %v", l)
+ }
+
+ // builtin: OLEN, OCAP, etc.
+ switch l.BuiltinOp {
+ default:
+ base.Fatalf("unknown builtin %v", l)
+
+ case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER:
+ n.SetOp(l.BuiltinOp)
+ n.Fun = nil
+ n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
+ return typecheck(n, top)
+
+ case ir.OCAP, ir.OCLEAR, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
+ typecheckargs(n)
+ fallthrough
+ case ir.ONEW:
+ arg, ok := needOneArg(n, "%v", n.Op())
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+ u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg)
+ return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init
+
+ case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING:
+ typecheckargs(n)
+ arg1, arg2, ok := needTwoArgs(n)
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+ b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2)
+ return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init
+ }
+ panic("unreachable")
+ }
+
+ n.Fun = DefaultLit(n.Fun, nil)
+ l = n.Fun
+ if l.Op() == ir.OTYPE {
+ if n.IsDDD {
+ base.Fatalf("invalid use of ... in type conversion to %v", l.Type())
+ }
+
+ // pick off before type-checking arguments
+ arg, ok := needOneArg(n, "conversion to %v", l.Type())
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+
+ n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg)
+ n.SetType(l.Type())
+ return tcConv(n)
+ }
+
+ RewriteNonNameCall(n)
+ typecheckargs(n)
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ types.CheckSize(t)
+
+ switch l.Op() {
+ case ir.ODOTINTER:
+ n.SetOp(ir.OCALLINTER)
+
+ case ir.ODOTMETH:
+ l := l.(*ir.SelectorExpr)
+ n.SetOp(ir.OCALLMETH)
+
+ // typecheckaste was used here but there wasn't enough
+ // information further down the call chain to know if we
+ // were testing a method receiver for unexported fields.
+ // It isn't necessary, so just do a sanity check.
+ tp := t.Recv().Type
+
+ if l.X == nil || !types.Identical(l.X.Type(), tp) {
+ base.Fatalf("method receiver")
+ }
+
+ default:
+ n.SetOp(ir.OCALLFUNC)
+ if t.Kind() != types.TFUNC {
+ if o := l; o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+ // be more specific when the non-function
+ // name matches a predeclared function
+ base.Errorf("cannot call non-function %L, declared at %s",
+ l, base.FmtPos(o.Name().Pos()))
+ } else {
+ base.Errorf("cannot call non-function %L", l)
+ }
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ typecheckaste(ir.OCALL, n.Fun, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.Fun) })
+ FixVariadicCall(n)
+ FixMethodCall(n)
+ if t.NumResults() == 0 {
+ return n
+ }
+ if t.NumResults() == 1 {
+ n.SetType(l.Type().Result(0).Type)
+
+ if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME {
+ if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" {
+ // Emit code for runtime.getg() directly instead of calling function.
+ // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+ // so that the ordering pass can make sure to preserve the semantics of the original code
+ // (in particular, the exact time of the function call) by introducing temporaries.
+ // In this case, we know getg() always returns the same result within a given function
+ // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+ n.SetOp(ir.OGETG)
+ }
+ }
+ return n
+ }
+
+ // multiple return
+ if top&(ctxMultiOK|ctxStmt) == 0 {
+ base.Errorf("multiple-value %v() in single-value context", l)
+ return n
+ }
+
+ n.SetType(l.Type().ResultsTuple())
+ return n
+}
+
+// tcAppend typechecks an OAPPEND node.
+func tcAppend(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ args := n.Args
+ if len(args) == 0 {
+ base.Errorf("missing arguments to append")
+ n.SetType(nil)
+ return n
+ }
+
+ t := args[0].Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t)
+ if !t.IsSlice() {
+ if ir.IsNil(args[0]) {
+ base.Errorf("first argument to append must be typed slice; have untyped nil")
+ n.SetType(nil)
+ return n
+ }
+
+ base.Errorf("first argument to append must be slice; have %L", t)
+ n.SetType(nil)
+ return n
+ }
+
+ if n.IsDDD {
+ if len(args) == 1 {
+ base.Errorf("cannot use ... on first argument to append")
+ n.SetType(nil)
+ return n
+ }
+
+ if len(args) != 2 {
+ base.Errorf("too many arguments to append")
+ n.SetType(nil)
+ return n
+ }
+
+ // AssignConv is of args[1] not required here, as the
+ // types of args[0] and args[1] don't need to match
+ // (They will both have an underlying type which are
+ // slices of identical base types, or be []byte and string.)
+ // See issue 53888.
+ return n
+ }
+
+ as := args[1:]
+ for i, n := range as {
+ if n.Type() == nil {
+ continue
+ }
+ as[i] = AssignConv(n, t.Elem(), "append")
+ types.CheckSize(as[i].Type()) // ensure width is calculated for backend
+ }
+ return n
+}
+
+// tcClear typechecks an OCLEAR node.
+func tcClear(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ switch {
+ case t.IsMap(), t.IsSlice():
+ default:
+ base.Errorf("invalid operation: %v (argument must be a map or slice)", n)
+ n.SetType(nil)
+ return n
+ }
+
+ return n
+}
+
+// tcClose typechecks an OCLOSE node.
+func tcClose(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsChan() {
+ base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if !t.ChanDir().CanSend() {
+ base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcComplex typechecks an OCOMPLEX node.
+func tcComplex(n *ir.BinaryExpr) ir.Node {
+ l := Expr(n.X)
+ r := Expr(n.Y)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ l, r = defaultlit2(l, r, false)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.X = l
+ n.Y = r
+
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ var t *types.Type
+ switch l.Type().Kind() {
+ default:
+ base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+ n.SetType(nil)
+ return n
+
+ case types.TIDEAL:
+ t = types.UntypedComplex
+
+ case types.TFLOAT32:
+ t = types.Types[types.TCOMPLEX64]
+
+ case types.TFLOAT64:
+ t = types.Types[types.TCOMPLEX128]
+ }
+ n.SetType(t)
+ return n
+}
+
+// tcCopy typechecks an OCOPY node.
+func tcCopy(n *ir.BinaryExpr) ir.Node {
+ n.SetType(types.Types[types.TINT])
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ n.Y = Expr(n.Y)
+ n.Y = DefaultLit(n.Y, nil)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ // copy([]byte, string)
+ if n.X.Type().IsSlice() && n.Y.Type().IsString() {
+ if types.Identical(n.X.Type().Elem(), types.ByteType) {
+ return n
+ }
+ base.Errorf("arguments to copy have different element types: %L and string", n.X.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() {
+ if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() {
+ base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type())
+ } else if !n.X.Type().IsSlice() {
+ base.Errorf("first argument to copy should be slice; have %L", n.X.Type())
+ } else {
+ base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type())
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) {
+ base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type())
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcDelete typechecks an ODELETE node.
+func tcDelete(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ args := n.Args
+ if len(args) == 0 {
+ base.Errorf("missing arguments to delete")
+ n.SetType(nil)
+ return n
+ }
+
+ if len(args) == 1 {
+ base.Errorf("missing second (key) argument to delete")
+ n.SetType(nil)
+ return n
+ }
+
+ if len(args) != 2 {
+ base.Errorf("too many arguments to delete")
+ n.SetType(nil)
+ return n
+ }
+
+ l := args[0]
+ r := args[1]
+ if l.Type() != nil && !l.Type().IsMap() {
+ base.Errorf("first argument to delete must be map; have %L", l.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ args[1] = AssignConv(r, l.Type().Key(), "delete")
+ return n
+}
+
+// tcMake typechecks an OMAKE node.
+func tcMake(n *ir.CallExpr) ir.Node {
+ args := n.Args
+ if len(args) == 0 {
+ base.Errorf("missing argument to make")
+ n.SetType(nil)
+ return n
+ }
+
+ n.Args = nil
+ l := args[0]
+ l = typecheck(l, ctxType)
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ i := 1
+ var nn ir.Node
+ switch t.Kind() {
+ default:
+ base.Errorf("cannot make type %v", t)
+ n.SetType(nil)
+ return n
+
+ case types.TSLICE:
+ if i >= len(args) {
+ base.Errorf("missing len argument to make(%v)", t)
+ n.SetType(nil)
+ return n
+ }
+
+ l = args[i]
+ i++
+ l = Expr(l)
+ var r ir.Node
+ if i < len(args) {
+ r = args[i]
+ i++
+ r = Expr(r)
+ }
+
+ if l.Type() == nil || (r != nil && r.Type() == nil) {
+ n.SetType(nil)
+ return n
+ }
+ if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
+ n.SetType(nil)
+ return n
+ }
+ if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+ base.Errorf("len larger than cap in make(%v)", t)
+ n.SetType(nil)
+ return n
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r)
+
+ case types.TMAP:
+ if i < len(args) {
+ l = args[i]
+ i++
+ l = Expr(l)
+ l = DefaultLit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !checkmake(t, "size", &l) {
+ n.SetType(nil)
+ return n
+ }
+ } else {
+ l = ir.NewInt(base.Pos, 0)
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil)
+ nn.SetEsc(n.Esc())
+
+ case types.TCHAN:
+ l = nil
+ if i < len(args) {
+ l = args[i]
+ i++
+ l = Expr(l)
+ l = DefaultLit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !checkmake(t, "buffer", &l) {
+ n.SetType(nil)
+ return n
+ }
+ } else {
+ l = ir.NewInt(base.Pos, 0)
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil)
+ }
+
+ if i < len(args) {
+ base.Errorf("too many arguments to make(%v)", t)
+ n.SetType(nil)
+ return n
+ }
+
+ nn.SetType(t)
+ return nn
+}
+
+// tcMakeSliceCopy typechecks an OMAKESLICECOPY node.
+func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node {
+ // Errors here are Fatalf instead of Errorf because only the compiler
+ // can construct an OMAKESLICECOPY node.
+ // Components used in OMAKESCLICECOPY that are supplied by parsed source code
+ // have already been typechecked in OMAKE and OCOPY earlier.
+ t := n.Type()
+
+ if t == nil {
+ base.Fatalf("no type specified for OMAKESLICECOPY")
+ }
+
+ if !t.IsSlice() {
+ base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
+ }
+
+ if n.Len == nil {
+ base.Fatalf("missing len argument for OMAKESLICECOPY")
+ }
+
+ if n.Cap == nil {
+ base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+ }
+
+ n.Len = Expr(n.Len)
+ n.Cap = Expr(n.Cap)
+
+ n.Len = DefaultLit(n.Len, types.Types[types.TINT])
+
+ if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer len argument in OMAKESLICECOPY")
+ }
+
+ if ir.IsConst(n.Len, constant.Int) {
+ if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) {
+ base.Fatalf("len for OMAKESLICECOPY too large")
+ }
+ if constant.Sign(n.Len.Val()) < 0 {
+ base.Fatalf("len for OMAKESLICECOPY must be non-negative")
+ }
+ }
+ return n
+}
+
+// tcNew typechecks an ONEW node.
+func tcNew(n *ir.UnaryExpr) ir.Node {
+ if n.X == nil {
+ // Fatalf because the OCALL above checked for us,
+ // so this must be an internally-generated mistake.
+ base.Fatalf("missing argument to new")
+ }
+ l := n.X
+ l = typecheck(l, ctxType)
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.X = l
+ n.SetType(types.NewPtr(t))
+ return n
+}
+
+// tcPanic typechecks an OPANIC node.
+func tcPanic(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic")
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcPrint typechecks an OPRINT or OPRINTN node.
+func tcPrint(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ ls := n.Args
+ for i1, n1 := range ls {
+ // Special case for print: int constant is int64, not int.
+ if ir.IsConst(n1, constant.Int) {
+ ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64])
+ } else {
+ ls[i1] = DefaultLit(ls[i1], nil)
+ }
+ }
+ return n
+}
+
+// tcMinMax typechecks an OMIN or OMAX node.
+func tcMinMax(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ arg0 := n.Args[0]
+ for _, arg := range n.Args[1:] {
+ if !types.Identical(arg.Type(), arg0.Type()) {
+ base.FatalfAt(n.Pos(), "mismatched arguments: %L and %L", arg0, arg)
+ }
+ }
+ n.SetType(arg0.Type())
+ return n
+}
+
+// tcRealImag typechecks an OREAL or OIMAG node.
+func tcRealImag(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ // Determine result type.
+ switch t.Kind() {
+ case types.TIDEAL:
+ n.SetType(types.UntypedFloat)
+ case types.TCOMPLEX64:
+ n.SetType(types.Types[types.TFLOAT32])
+ case types.TCOMPLEX128:
+ n.SetType(types.Types[types.TFLOAT64])
+ default:
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcRecover typechecks an ORECOVER node.
+func tcRecover(n *ir.CallExpr) ir.Node {
+ if len(n.Args) != 0 {
+ base.Errorf("too many arguments to recover")
+ n.SetType(nil)
+ return n
+ }
+
+ // FP is equal to caller's SP plus FixedFrameSize.
+ var fp ir.Node = ir.NewCallExpr(n.Pos(), ir.OGETCALLERSP, nil, nil)
+ if off := base.Ctxt.Arch.FixedFrameSize; off != 0 {
+ fp = ir.NewBinaryExpr(n.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off))
+ }
+ // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
+ fp = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
+
+ n.SetOp(ir.ORECOVERFP)
+ n.SetType(types.Types[types.TINTER])
+ n.Args = []ir.Node{Expr(fp)}
+ return n
+}
+
+// tcUnsafeAdd typechecks an OUNSAFEADD node.
+func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr {
+ n.X = AssignConv(Expr(n.X), types.Types[types.TUNSAFEPTR], "argument to unsafe.Add")
+ n.Y = DefaultLit(Expr(n.Y), types.Types[types.TINT])
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !n.Y.Type().IsInteger() {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(n.X.Type())
+ return n
+}
+
+// tcUnsafeSlice typechecks an OUNSAFESLICE node.
+func tcUnsafeSlice(n *ir.BinaryExpr) *ir.BinaryExpr {
+ n.X = Expr(n.X)
+ n.Y = Expr(n.Y)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ t := n.X.Type()
+ if !t.IsPtr() {
+ base.Errorf("first argument to unsafe.Slice must be pointer; have %L", t)
+ } else if t.Elem().NotInHeap() {
+ // TODO(mdempsky): This can be relaxed, but should only affect the
+ // Go runtime itself. End users should only see not-in-heap
+ // types due to incomplete C structs in cgo, and those types don't
+ // have a meaningful size anyway.
+ base.Errorf("unsafe.Slice of incomplete (or unallocatable) type not allowed")
+ }
+
+ if !checkunsafesliceorstring(n.Op(), &n.Y) {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(types.NewSlice(t.Elem()))
+ return n
+}
+
+// tcUnsafeString typechecks an OUNSAFESTRING node.
+func tcUnsafeString(n *ir.BinaryExpr) *ir.BinaryExpr {
+ n.X = Expr(n.X)
+ n.Y = Expr(n.Y)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ t := n.X.Type()
+ if !t.IsPtr() || !types.Identical(t.Elem(), types.Types[types.TUINT8]) {
+ base.Errorf("first argument to unsafe.String must be *byte; have %L", t)
+ }
+
+ if !checkunsafesliceorstring(n.Op(), &n.Y) {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(types.Types[types.TSTRING])
+ return n
+}
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
new file mode 100644
index 0000000..83d35b3
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -0,0 +1,260 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A'
+// Pos Pos
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+// Tag itag // interfaceType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+//
+//
+// Compiler-specific details.
+//
+// cmd/compile writes out a second index for inline bodies and also
+// appends additional compiler-specific details after declarations.
+// Third-party tools are not expected to depend on these details and
+// they're expected to change much more rapidly, so they're omitted
+// here. See exportWriter's varExt/funcExt/etc methods for details.
+
+package typecheck
+
+import (
+ "strings"
+)
+
+const blankMarker = "$"
+
+// TparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See TparamExportName
+// for details.
+func TparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ return ""
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
+// The name used for dictionary parameters or local variables.
+const LocalDictName = ".dict"
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
new file mode 100644
index 0000000..cb3feb1
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// HaveInlineBody reports whether we have fn's inline body available
+// for inlining.
+//
+// It's a function literal so that it can be overridden for
+// GOEXPERIMENT=unified.
+var HaveInlineBody = func(fn *ir.Func) bool {
+ base.Fatalf("HaveInlineBody not overridden")
+ panic("unreachable")
+}
+
+func SetBaseTypeIndex(t *types.Type, i, pi int64) {
+ if t.Obj() == nil {
+ base.Fatalf("SetBaseTypeIndex on non-defined type %v", t)
+ }
+ if i != -1 && pi != -1 {
+ typeSymIdx[t] = [2]int64{i, pi}
+ }
+}
+
+// Map imported type T to the index of type descriptor symbols of T and *T,
+// so we can use index to reference the symbol.
+// TODO(mdempsky): Store this information directly in the Type's Name.
+var typeSymIdx = make(map[*types.Type][2]int64)
+
+func BaseTypeIndex(t *types.Type) int64 {
+ tbase := t
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+ tbase = t.Elem()
+ }
+ i, ok := typeSymIdx[tbase]
+ if !ok {
+ return -1
+ }
+ if t != tbase {
+ return i[1]
+ }
+ return i[0]
+}
diff --git a/src/cmd/compile/internal/typecheck/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go
new file mode 100644
index 0000000..28afac5
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// Generate builtin.go from builtin/runtime.go.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var stdout = flag.Bool("stdout", false, "write to stdout instead of builtin.go")
+var nofmt = flag.Bool("nofmt", false, "skip formatting builtin.go")
+
+func main() {
+ flag.Parse()
+
+ var b bytes.Buffer
+ fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, "package typecheck")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, `import (`)
+ fmt.Fprintln(&b, ` "cmd/compile/internal/types"`)
+ fmt.Fprintln(&b, ` "cmd/internal/src"`)
+ fmt.Fprintln(&b, `)`)
+
+ fmt.Fprintln(&b, `
+// Not inlining this function removes a significant chunk of init code.
+//go:noinline
+func newSig(params, results []*types.Field) *types.Type {
+ return types.NewSignature(nil, params, results)
+}
+
+func params(tlist ...*types.Type) []*types.Field {
+ flist := make([]*types.Field, len(tlist))
+ for i, typ := range tlist {
+ flist[i] = types.NewField(src.NoXPos, nil, typ)
+ }
+ return flist
+}
+`)
+
+ mkbuiltin(&b, "runtime")
+ mkbuiltin(&b, "coverage")
+
+ var err error
+ out := b.Bytes()
+ if !*nofmt {
+ out, err = format.Source(out)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ if *stdout {
+ _, err = os.Stdout.Write(out)
+ } else {
+ err = os.WriteFile("builtin.go", out, 0666)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func mkbuiltin(w io.Writer, name string) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, filepath.Join("_builtin", name+".go"), nil, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var interner typeInterner
+
+ fmt.Fprintf(w, "var %sDecls = [...]struct { name string; tag int; typ int }{\n", name)
+ for _, decl := range f.Decls {
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ if decl.Recv != nil {
+ log.Fatal("methods unsupported")
+ }
+ if decl.Body != nil {
+ log.Fatal("unexpected function body")
+ }
+ fmt.Fprintf(w, "{%q, funcTag, %d},\n", decl.Name.Name, interner.intern(decl.Type))
+ case *ast.GenDecl:
+ if decl.Tok == token.IMPORT {
+ if len(decl.Specs) != 1 || decl.Specs[0].(*ast.ImportSpec).Path.Value != "\"unsafe\"" {
+ log.Fatal("runtime cannot import other package")
+ }
+ continue
+ }
+ if decl.Tok != token.VAR {
+ log.Fatal("unhandled declaration kind", decl.Tok)
+ }
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Values) != 0 {
+ log.Fatal("unexpected values")
+ }
+ typ := interner.intern(spec.Type)
+ for _, name := range spec.Names {
+ fmt.Fprintf(w, "{%q, varTag, %d},\n", name.Name, typ)
+ }
+ }
+ default:
+ log.Fatal("unhandled decl type", decl)
+ }
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprintln(w)
+ fmt.Fprintf(w, "func %sTypes() []*types.Type {\n", name)
+ fmt.Fprintf(w, "var typs [%d]*types.Type\n", len(interner.typs))
+ for i, typ := range interner.typs {
+ fmt.Fprintf(w, "typs[%d] = %s\n", i, typ)
+ }
+ fmt.Fprintln(w, "return typs[:]")
+ fmt.Fprintln(w, "}")
+}
+
+// typeInterner maps Go type expressions to compiler code that
+// constructs the denoted type. It recognizes and reuses common
+// subtype expressions.
+type typeInterner struct {
+ typs []string
+ hash map[string]int
+}
+
+func (i *typeInterner) intern(t ast.Expr) int {
+ x := i.mktype(t)
+ v, ok := i.hash[x]
+ if !ok {
+ v = len(i.typs)
+ if i.hash == nil {
+ i.hash = make(map[string]int)
+ }
+ i.hash[x] = v
+ i.typs = append(i.typs, x)
+ }
+ return v
+}
+
+func (i *typeInterner) subtype(t ast.Expr) string {
+ return fmt.Sprintf("typs[%d]", i.intern(t))
+}
+
+func (i *typeInterner) mktype(t ast.Expr) string {
+ switch t := t.(type) {
+ case *ast.Ident:
+ switch t.Name {
+ case "byte":
+ return "types.ByteType"
+ case "rune":
+ return "types.RuneType"
+ }
+ return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
+ case *ast.SelectorExpr:
+ if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
+ log.Fatalf("unhandled type: %#v", t)
+ }
+ return "types.Types[types.TUNSAFEPTR]"
+
+ case *ast.ArrayType:
+ if t.Len == nil {
+ return fmt.Sprintf("types.NewSlice(%s)", i.subtype(t.Elt))
+ }
+ return fmt.Sprintf("types.NewArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len))
+ case *ast.ChanType:
+ dir := "types.Cboth"
+ switch t.Dir {
+ case ast.SEND:
+ dir = "types.Csend"
+ case ast.RECV:
+ dir = "types.Crecv"
+ }
+ return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
+ case *ast.FuncType:
+ return fmt.Sprintf("newSig(%s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
+ case *ast.InterfaceType:
+ if len(t.Methods.List) != 0 {
+ log.Fatal("non-empty interfaces unsupported")
+ }
+ return "types.Types[types.TINTER]"
+ case *ast.MapType:
+ return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
+ case *ast.StarExpr:
+ return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X))
+ case *ast.StructType:
+ return fmt.Sprintf("types.NewStruct(%s)", i.fields(t.Fields, true))
+
+ default:
+ log.Fatalf("unhandled type: %#v", t)
+ panic("unreachable")
+ }
+}
+
+func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
+ if fl == nil || len(fl.List) == 0 {
+ return "nil"
+ }
+
+ var res []string
+ for _, f := range fl.List {
+ typ := i.subtype(f.Type)
+ if len(f.Names) == 0 {
+ res = append(res, typ)
+ } else {
+ for _, name := range f.Names {
+ if keepNames {
+ res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, Lookup(%q), %s)", name.Name, typ))
+ } else {
+ res = append(res, typ)
+ }
+ }
+ }
+ }
+
+ if keepNames {
+ return fmt.Sprintf("[]*types.Field{%s}", strings.Join(res, ", "))
+ }
+ return fmt.Sprintf("params(%s)", strings.Join(res, ", "))
+}
+
+func intconst(e ast.Expr) int64 {
+ switch e := e.(type) {
+ case *ast.BasicLit:
+ if e.Kind != token.INT {
+ log.Fatalf("expected INT, got %v", e.Kind)
+ }
+ x, err := strconv.ParseInt(e.Value, 0, 64)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return x
+ default:
+ log.Fatalf("unhandled expr: %#v", e)
+ panic("unreachable")
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
new file mode 100644
index 0000000..8d79248
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -0,0 +1,727 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "internal/types/errors"
+)
+
+func RangeExprType(t *types.Type) *types.Type {
+ if t.IsPtr() && t.Elem().IsArray() {
+ return t.Elem()
+ }
+ return t
+}
+
+func typecheckrangeExpr(n *ir.RangeStmt) {
+}
+
+// type check assignment.
+// if this assignment is the definition of a var on the left side,
+// fill in the var's type.
+func tcAssign(n *ir.AssignStmt) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcAssign", n)(nil)
+ }
+
+ if n.Y == nil {
+ n.X = AssignExpr(n.X)
+ return
+ }
+
+ lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y}
+ assign(n, lhs, rhs)
+ n.X, n.Y = lhs[0], rhs[0]
+
+ // TODO(mdempsky): This seems out of place.
+ if !ir.IsBlank(n.X) {
+ types.CheckSize(n.X.Type()) // ensure width is calculated for backend
+ }
+}
+
+func tcAssignList(n *ir.AssignListStmt) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcAssignList", n)(nil)
+ }
+
+ assign(n, n.Lhs, n.Rhs)
+}
+
+func assign(stmt ir.Node, lhs, rhs []ir.Node) {
+ // delicate little dance.
+ // the definition of lhs may refer to this assignment
+ // as its definition, in which case it will call tcAssign.
+ // in that case, do not call typecheck back, or it will cycle.
+ // if the variable has a type (ntype) then typechecking
+ // will not look at defn, so it is okay (and desirable,
+ // so that the conversion below happens).
+
+ checkLHS := func(i int, typ *types.Type) {
+ if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Type() == nil {
+ base.Assertf(typ.Kind() == types.TNIL, "unexpected untyped nil")
+ n.SetType(defaultType(typ))
+ }
+ if lhs[i].Typecheck() == 0 {
+ lhs[i] = AssignExpr(lhs[i])
+ }
+ checkassign(lhs[i])
+ }
+
+ assignType := func(i int, typ *types.Type) {
+ checkLHS(i, typ)
+ if typ != nil {
+ checkassignto(typ, lhs[i])
+ }
+ }
+
+ cr := len(rhs)
+ if len(rhs) == 1 {
+ rhs[0] = typecheck(rhs[0], ctxExpr|ctxMultiOK)
+ if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() {
+ cr = rtyp.NumFields()
+ }
+ } else {
+ Exprs(rhs)
+ }
+
+ // x, ok = y
+assignOK:
+ for len(lhs) == 2 && cr == 1 {
+ stmt := stmt.(*ir.AssignListStmt)
+ r := rhs[0]
+
+ switch r.Op() {
+ case ir.OINDEXMAP:
+ stmt.SetOp(ir.OAS2MAPR)
+ case ir.ORECV:
+ stmt.SetOp(ir.OAS2RECV)
+ case ir.ODOTTYPE:
+ r := r.(*ir.TypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
+ case ir.ODYNAMICDOTTYPE:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODYNAMICDOTTYPE2)
+ default:
+ break assignOK
+ }
+
+ assignType(0, r.Type())
+ assignType(1, types.UntypedBool)
+ return
+ }
+
+ if len(lhs) != cr {
+ if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 {
+ if r.Type() != nil {
+ base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.Fun, cr, plural(cr))
+ }
+ } else {
+ base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), plural(len(rhs)))
+ }
+
+ for i := range lhs {
+ checkLHS(i, nil)
+ }
+ return
+ }
+
+ // x,y,z = f()
+ if cr > len(rhs) {
+ stmt := stmt.(*ir.AssignListStmt)
+ stmt.SetOp(ir.OAS2FUNC)
+ r := rhs[0].(*ir.CallExpr)
+ rtyp := r.Type()
+
+ mismatched := false
+ failed := false
+ for i := range lhs {
+ result := rtyp.Field(i).Type
+ assignType(i, result)
+
+ if lhs[i].Type() == nil || result == nil {
+ failed = true
+ } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
+ mismatched = true
+ }
+ }
+ if mismatched && !failed {
+ RewriteMultiValueCall(stmt, r)
+ }
+ return
+ }
+
+ for i, r := range rhs {
+ checkLHS(i, r.Type())
+ if lhs[i].Type() != nil {
+ rhs[i] = AssignConv(r, lhs[i].Type(), "assignment")
+ }
+ }
+}
+
+func plural(n int) string {
+ if n == 1 {
+ return ""
+ }
+ return "s"
+}
+
+// tcCheckNil typechecks an OCHECKNIL node.
+func tcCheckNil(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ if !n.X.Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.X)
+ }
+ return n
+}
+
+// tcFor typechecks an OFOR node.
+func tcFor(n *ir.ForStmt) ir.Node {
+ Stmts(n.Init())
+ n.Cond = Expr(n.Cond)
+ n.Cond = DefaultLit(n.Cond, nil)
+ if n.Cond != nil {
+ t := n.Cond.Type()
+ if t != nil && !t.IsBoolean() {
+ base.Errorf("non-bool %L used as for condition", n.Cond)
+ }
+ }
+ n.Post = Stmt(n.Post)
+ Stmts(n.Body)
+ return n
+}
+
+// tcGoDefer typechecks (normalizes) an OGO/ODEFER statement.
+func tcGoDefer(n *ir.GoDeferStmt) {
+ call := normalizeGoDeferCall(n.Pos(), n.Op(), n.Call, n.PtrInit())
+ call.GoDefer = true
+ n.Call = call
+}
+
+// normalizeGoDeferCall normalizes call into a normal function call
+// with no arguments and no results, suitable for use in an OGO/ODEFER
+// statement.
+//
+// For example, it normalizes:
+//
+// f(x, y)
+//
+// into:
+//
+// x1, y1 := x, y // added to init
+// func() { f(x1, y1) }() // result
+func normalizeGoDeferCall(pos src.XPos, op ir.Op, call ir.Node, init *ir.Nodes) *ir.CallExpr {
+ init.Append(ir.TakeInit(call)...)
+
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() == 0 {
+ return call // already in normal form
+ }
+ }
+
+ // Create a new wrapper function without parameters or results.
+ wrapperFn := ir.NewClosureFunc(pos, pos, op, types.NewSignature(nil, nil, nil), ir.CurFunc, Target)
+ wrapperFn.DeclareParams(true)
+ wrapperFn.SetWrapper(true)
+
+ // argps collects the list of operands within the call expression
+ // that must be evaluated at the go/defer statement.
+ var argps []*ir.Node
+
+ var visit func(argp *ir.Node)
+ visit = func(argp *ir.Node) {
+ arg := *argp
+ if arg == nil {
+ return
+ }
+
+ // Recognize a few common expressions that can be evaluated within
+ // the wrapper, so we don't need to allocate space for them within
+ // the closure.
+ switch arg.Op() {
+ case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR, ir.ONEW:
+ return
+ case ir.ONAME:
+ arg := arg.(*ir.Name)
+ if arg.Class == ir.PFUNC {
+ return // reference to global function
+ }
+ case ir.OADDR:
+ arg := arg.(*ir.AddrExpr)
+ if arg.X.Op() == ir.OLINKSYMOFFSET {
+ return // address of global symbol
+ }
+
+ case ir.OCONVNOP:
+ arg := arg.(*ir.ConvExpr)
+
+ // For unsafe.Pointer->uintptr conversion arguments, save the
+ // unsafe.Pointer argument. This is necessary to handle cases
+ // like fixedbugs/issue24491a.go correctly.
+ //
+ // TODO(mdempsky): Limit to static callees with
+ // //go:uintptr{escapes,keepalive}?
+ if arg.Type().IsUintptr() && arg.X.Type().IsUnsafePtr() {
+ visit(&arg.X)
+ return
+ }
+
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ // TODO(mdempsky): For very large slices, it may be preferable
+ // to construct them at the go/defer statement instead.
+ list := arg.(*ir.CompLitExpr).List
+ for i, el := range list {
+ switch el := el.(type) {
+ case *ir.KeyExpr:
+ visit(&el.Value)
+ case *ir.StructKeyExpr:
+ visit(&el.Value)
+ default:
+ visit(&list[i])
+ }
+ }
+ return
+ }
+
+ argps = append(argps, argp)
+ }
+
+ visitList := func(list []ir.Node) {
+ for i := range list {
+ visit(&list[i])
+ }
+ }
+
+ switch call.Op() {
+ default:
+ base.Fatalf("unexpected call op: %v", call.Op())
+
+ case ir.OCALLFUNC:
+ call := call.(*ir.CallExpr)
+
+ // If the callee is a named function, link to the original callee.
+ if wrapped := ir.StaticCalleeName(call.Fun); wrapped != nil {
+ wrapperFn.WrappedFunc = wrapped.Func
+ }
+
+ visit(&call.Fun)
+ visitList(call.Args)
+
+ case ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ argps = append(argps, &call.Fun.(*ir.SelectorExpr).X) // must be first for OCHECKNIL; see below
+ visitList(call.Args)
+
+ case ir.OAPPEND, ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ call := call.(*ir.CallExpr)
+ visitList(call.Args)
+ visit(&call.RType)
+
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ visit(&call.X)
+ visit(&call.Y)
+ visit(&call.RType)
+
+ case ir.OCLEAR, ir.OCLOSE, ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ visit(&call.X)
+ }
+
+ if len(argps) != 0 {
+ // Found one or more operands that need to be evaluated upfront
+ // and spilled to temporary variables, which can be captured by
+ // the wrapper function.
+
+ stmtPos := base.Pos
+ callPos := base.Pos
+
+ as := ir.NewAssignListStmt(callPos, ir.OAS2, make([]ir.Node, len(argps)), make([]ir.Node, len(argps)))
+ for i, argp := range argps {
+ arg := *argp
+
+ pos := callPos
+ if ir.HasUniquePos(arg) {
+ pos = arg.Pos()
+ }
+
+ // tmp := arg
+ tmp := TempAt(pos, ir.CurFunc, arg.Type())
+ init.Append(Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
+ tmp.Defn = as
+ as.Lhs[i] = tmp
+ as.Rhs[i] = arg
+
+ // Rewrite original expression to use/capture tmp.
+ *argp = ir.NewClosureVar(pos, wrapperFn, tmp)
+ }
+ init.Append(Stmt(as))
+
+ // For "go/defer iface.M()", if iface is nil, we need to panic at
+ // the point of the go/defer statement.
+ if call.Op() == ir.OCALLINTER {
+ iface := as.Lhs[0]
+ init.Append(Stmt(ir.NewUnaryExpr(stmtPos, ir.OCHECKNIL, ir.NewUnaryExpr(iface.Pos(), ir.OITAB, iface))))
+ }
+ }
+
+ // Move call into the wrapper function, now that it's safe to
+ // evaluate there.
+ wrapperFn.Body = []ir.Node{call}
+
+ // Finally, construct a call to the wrapper.
+ return Call(call.Pos(), wrapperFn.OClosure, nil, false).(*ir.CallExpr)
+}
+
+// tcIf typechecks an OIF node.
+func tcIf(n *ir.IfStmt) ir.Node {
+ Stmts(n.Init())
+ n.Cond = Expr(n.Cond)
+ n.Cond = DefaultLit(n.Cond, nil)
+ if n.Cond != nil {
+ t := n.Cond.Type()
+ if t != nil && !t.IsBoolean() {
+ base.Errorf("non-bool %L used as if condition", n.Cond)
+ }
+ }
+ Stmts(n.Body)
+ Stmts(n.Else)
+ return n
+}
+
+// range
+func tcRange(n *ir.RangeStmt) {
+ n.X = Expr(n.X)
+
+ // delicate little dance. see tcAssignList
+ if n.Key != nil {
+ if !ir.DeclaredBy(n.Key, n) {
+ n.Key = AssignExpr(n.Key)
+ }
+ checkassign(n.Key)
+ }
+ if n.Value != nil {
+ if !ir.DeclaredBy(n.Value, n) {
+ n.Value = AssignExpr(n.Value)
+ }
+ checkassign(n.Value)
+ }
+
+ // second half of dance
+ n.SetTypecheck(1)
+ if n.Key != nil && n.Key.Typecheck() == 0 {
+ n.Key = AssignExpr(n.Key)
+ }
+ if n.Value != nil && n.Value.Typecheck() == 0 {
+ n.Value = AssignExpr(n.Value)
+ }
+
+ Stmts(n.Body)
+}
+
+// tcReturn typechecks an ORETURN node.
+func tcReturn(n *ir.ReturnStmt) ir.Node {
+ if ir.CurFunc == nil {
+ base.FatalfAt(n.Pos(), "return outside function")
+ }
+
+ typecheckargs(n)
+ if len(n.Results) != 0 {
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
+ }
+ return n
+}
+
+// select
+func tcSelect(sel *ir.SelectStmt) {
+ var def *ir.CommClause
+ lno := ir.SetPos(sel)
+ Stmts(sel.Init())
+ for _, ncase := range sel.Cases {
+ if ncase.Comm == nil {
+ // default
+ if def != nil {
+ base.ErrorfAt(ncase.Pos(), errors.DuplicateDefault, "multiple defaults in select (first at %v)", ir.Line(def))
+ } else {
+ def = ncase
+ }
+ } else {
+ n := Stmt(ncase.Comm)
+ ncase.Comm = n
+ oselrecv2 := func(dst, recv ir.Node, def bool) {
+ selrecv := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
+ selrecv.Def = def
+ selrecv.SetTypecheck(1)
+ selrecv.SetInit(n.Init())
+ ncase.Comm = selrecv
+ }
+ switch n.Op() {
+ default:
+ pos := n.Pos()
+ if n.Op() == ir.ONAME {
+ // We don't have the right position for ONAME nodes (see #15459 and
+ // others). Using ncase.Pos for now as it will provide the correct
+ // line number (assuming the expression follows the "case" keyword
+ // on the same line). This matches the approach before 1.10.
+ pos = ncase.Pos()
+ }
+ base.ErrorfAt(pos, errors.InvalidSelectCase, "select case must be receive, send or assign recv")
+
+ case ir.OAS:
+ // convert x = <-c into x, _ = <-c
+ // remove implicit conversions; the eventual assignment
+ // will reintroduce them.
+ n := n.(*ir.AssignStmt)
+ if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+ r := r.(*ir.ConvExpr)
+ if r.Implicit() {
+ n.Y = r.X
+ }
+ }
+ if n.Y.Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), errors.InvalidSelectCase, "select assignment must have receive on right hand side")
+ break
+ }
+ oselrecv2(n.X, n.Y, n.Def)
+
+ case ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ if n.Rhs[0].Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), errors.InvalidSelectCase, "select assignment must have receive on right hand side")
+ break
+ }
+ n.SetOp(ir.OSELRECV2)
+
+ case ir.ORECV:
+ // convert <-c into _, _ = <-c
+ n := n.(*ir.UnaryExpr)
+ oselrecv2(ir.BlankNode, n, false)
+
+ case ir.OSEND:
+ break
+ }
+ }
+
+ Stmts(ncase.Body)
+ }
+
+ base.Pos = lno
+}
+
+// tcSend typechecks an OSEND node.
+func tcSend(n *ir.SendStmt) ir.Node {
+ n.Chan = Expr(n.Chan)
+ n.Value = Expr(n.Value)
+ n.Chan = DefaultLit(n.Chan, nil)
+ t := n.Chan.Type()
+ if t == nil {
+ return n
+ }
+ if !t.IsChan() {
+ base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
+ return n
+ }
+
+ if !t.ChanDir().CanSend() {
+ base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
+ return n
+ }
+
+ n.Value = AssignConv(n.Value, t.Elem(), "send")
+ if n.Value.Type() == nil {
+ return n
+ }
+ return n
+}
+
+// tcSwitch typechecks a switch statement.
+func tcSwitch(n *ir.SwitchStmt) {
+ Stmts(n.Init())
+ if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
+ tcSwitchType(n)
+ } else {
+ tcSwitchExpr(n)
+ }
+}
+
+func tcSwitchExpr(n *ir.SwitchStmt) {
+ t := types.Types[types.TBOOL]
+ if n.Tag != nil {
+ n.Tag = Expr(n.Tag)
+ n.Tag = DefaultLit(n.Tag, nil)
+ t = n.Tag.Type()
+ }
+
+ var nilonly string
+ if t != nil {
+ switch {
+ case t.IsMap():
+ nilonly = "map"
+ case t.Kind() == types.TFUNC:
+ nilonly = "func"
+ case t.IsSlice():
+ nilonly = "slice"
+
+ case !types.IsComparable(t):
+ if t.IsStruct() {
+ base.ErrorfAt(n.Pos(), errors.InvalidExprSwitch, "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type)
+ } else {
+ base.ErrorfAt(n.Pos(), errors.InvalidExprSwitch, "cannot switch on %L", n.Tag)
+ }
+ t = nil
+ }
+ }
+
+ var defCase ir.Node
+ for _, ncase := range n.Cases {
+ ls := ncase.List
+ if len(ls) == 0 { // default:
+ if defCase != nil {
+ base.ErrorfAt(ncase.Pos(), errors.DuplicateDefault, "multiple defaults in switch (first at %v)", ir.Line(defCase))
+ } else {
+ defCase = ncase
+ }
+ }
+
+ for i := range ls {
+ ir.SetPos(ncase)
+ ls[i] = Expr(ls[i])
+ ls[i] = DefaultLit(ls[i], t)
+ n1 := ls[i]
+ if t == nil || n1.Type() == nil {
+ continue
+ }
+
+ if nilonly != "" && !ir.IsNil(n1) {
+ base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag)
+ } else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
+ base.ErrorfAt(ncase.Pos(), errors.UndefinedOp, "invalid case %L in switch (incomparable type)", n1)
+ } else {
+ op1, _ := assignOp(n1.Type(), t)
+ op2, _ := assignOp(t, n1.Type())
+ if op1 == ir.OXXX && op2 == ir.OXXX {
+ if n.Tag != nil {
+ base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
+ } else {
+ base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
+ }
+ }
+ }
+ }
+
+ Stmts(ncase.Body)
+ }
+}
+
+func tcSwitchType(n *ir.SwitchStmt) {
+ guard := n.Tag.(*ir.TypeSwitchGuard)
+ guard.X = Expr(guard.X)
+ t := guard.X.Type()
+ if t != nil && !t.IsInterface() {
+ base.ErrorfAt(n.Pos(), errors.InvalidTypeSwitch, "cannot type switch on non-interface value %L", guard.X)
+ t = nil
+ }
+
+ // We don't actually declare the type switch's guarded
+ // declaration itself. So if there are no cases, we won't
+ // notice that it went unused.
+ if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 {
+ base.ErrorfAt(v.Pos(), errors.UnusedVar, "%v declared but not used", v.Sym())
+ }
+
+ var defCase, nilCase ir.Node
+ var ts typeSet
+ for _, ncase := range n.Cases {
+ ls := ncase.List
+ if len(ls) == 0 { // default:
+ if defCase != nil {
+ base.ErrorfAt(ncase.Pos(), errors.DuplicateDefault, "multiple defaults in switch (first at %v)", ir.Line(defCase))
+ } else {
+ defCase = ncase
+ }
+ }
+
+ for i := range ls {
+ ls[i] = typecheck(ls[i], ctxExpr|ctxType)
+ n1 := ls[i]
+ if t == nil || n1.Type() == nil {
+ continue
+ }
+
+ if ir.IsNil(n1) { // case nil:
+ if nilCase != nil {
+ base.ErrorfAt(ncase.Pos(), errors.DuplicateCase, "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
+ } else {
+ nilCase = ncase
+ }
+ continue
+ }
+ if n1.Op() == ir.ODYNAMICTYPE {
+ continue
+ }
+ if n1.Op() != ir.OTYPE {
+ base.ErrorfAt(ncase.Pos(), errors.NotAType, "%L is not a type", n1)
+ continue
+ }
+ if !n1.Type().IsInterface() {
+ why := ImplementsExplain(n1.Type(), t)
+ if why != "" {
+ base.ErrorfAt(ncase.Pos(), errors.ImpossibleAssert, "impossible type switch case: %L cannot have dynamic type %v (%s)", guard.X, n1.Type(), why)
+ }
+ continue
+ }
+
+ ts.add(ncase.Pos(), n1.Type())
+ }
+
+ if ncase.Var != nil {
+ // Assign the clause variable's type.
+ vt := t
+ if len(ls) == 1 {
+ if ls[0].Op() == ir.OTYPE || ls[0].Op() == ir.ODYNAMICTYPE {
+ vt = ls[0].Type()
+ } else if !ir.IsNil(ls[0]) {
+ // Invalid single-type case;
+ // mark variable as broken.
+ vt = nil
+ }
+ }
+
+ nvar := ncase.Var
+ nvar.SetType(vt)
+ if vt != nil {
+ nvar = AssignExpr(nvar).(*ir.Name)
+ } else {
+ // Clause variable is broken; prevent typechecking.
+ nvar.SetTypecheck(1)
+ }
+ ncase.Var = nvar
+ }
+
+ Stmts(ncase.Body)
+ }
+}
+
+type typeSet struct {
+ m map[string]src.XPos
+}
+
+func (s *typeSet) add(pos src.XPos, typ *types.Type) {
+ if s.m == nil {
+ s.m = make(map[string]src.XPos)
+ }
+
+ ls := typ.LinkString()
+ if prev, ok := s.m[ls]; ok {
+ base.ErrorfAt(pos, errors.DuplicateCase, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev))
+ return
+ }
+ s.m[ls] = pos
+}
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
new file mode 100644
index 0000000..d64b0f0
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -0,0 +1,792 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+func AssignConv(n ir.Node, t *types.Type, context string) ir.Node {
+ return assignconvfn(n, t, func() string { return context })
+}
+
+// LookupNum returns types.LocalPkg.LookupNum(prefix, n).
+func LookupNum(prefix string, n int) *types.Sym {
+ return types.LocalPkg.LookupNum(prefix, n)
+}
+
+// Given funarg struct list, return list of fn args.
+func NewFuncParams(origs []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(origs))
+ for i, orig := range origs {
+ p := types.NewField(orig.Pos, orig.Sym, orig.Type)
+ p.SetIsDDD(orig.IsDDD())
+ res[i] = p
+ }
+ return res
+}
+
+// NodAddr returns a node representing &n at base.Pos.
+func NodAddr(n ir.Node) *ir.AddrExpr {
+ return NodAddrAt(base.Pos, n)
+}
+
+// NodAddrAt returns a node representing &n at position pos.
+func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
+ return ir.NewAddrExpr(pos, Expr(n))
+}
+
+// LinksymAddr returns a new expression that evaluates to the address
+// of lsym. typ specifies the type of the addressed memory.
+func LinksymAddr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *ir.AddrExpr {
+ n := ir.NewLinksymExpr(pos, lsym, typ)
+ return Expr(NodAddrAt(pos, n)).(*ir.AddrExpr)
+}
+
+func NodNil() ir.Node {
+ return ir.NewNilExpr(base.Pos, types.Types[types.TNIL])
+}
+
+// AddImplicitDots finds missing fields in obj.field that
+// will give the shortest unique addressing and
+// modifies the tree with missing field names.
+func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
+ n.X = typecheck(n.X, ctxType|ctxExpr)
+ t := n.X.Type()
+ if t == nil {
+ return n
+ }
+
+ if n.X.Op() == ir.OTYPE {
+ return n
+ }
+
+ s := n.Sel
+ if s == nil {
+ return n
+ }
+
+ switch path, ambig := dotpath(s, t, nil, false); {
+ case path != nil:
+ // rebuild elided dots
+ for c := len(path) - 1; c >= 0; c-- {
+ dot := ir.NewSelectorExpr(n.Pos(), ir.ODOT, n.X, path[c].field.Sym)
+ dot.SetImplicit(true)
+ dot.SetType(path[c].field.Type)
+ n.X = dot
+ }
+ case ambig:
+ base.Errorf("ambiguous selector %v", n)
+ n.X = nil
+ }
+
+ return n
+}
+
+// CalcMethods calculates all the methods (including embedding) of a non-interface
+// type t.
+func CalcMethods(t *types.Type) {
+ if t == nil || len(t.AllMethods()) != 0 {
+ return
+ }
+
+ // mark top-level method symbols
+ // so that expand1 doesn't consider them.
+ for _, f := range t.Methods() {
+ f.Sym.SetUniq(true)
+ }
+
+ // generate all reachable methods
+ slist = slist[:0]
+ expand1(t, true)
+
+ // check each method to be uniquely reachable
+ var ms []*types.Field
+ for i, sl := range slist {
+ slist[i].field = nil
+ sl.field.Sym.SetUniq(false)
+
+ var f *types.Field
+ path, _ := dotpath(sl.field.Sym, t, &f, false)
+ if path == nil {
+ continue
+ }
+
+ // dotpath may have dug out arbitrary fields, we only want methods.
+ if !f.IsMethod() {
+ continue
+ }
+
+ // add it to the base type method list
+ f = f.Copy()
+ f.Embedded = 1 // needs a trampoline
+ for _, d := range path {
+ if d.field.Type.IsPtr() {
+ f.Embedded = 2
+ break
+ }
+ }
+ ms = append(ms, f)
+ }
+
+ for _, f := range t.Methods() {
+ f.Sym.SetUniq(false)
+ }
+
+ ms = append(ms, t.Methods()...)
+ sort.Sort(types.MethodsByName(ms))
+ t.SetAllMethods(ms)
+}
+
+// adddot1 returns the number of fields or methods named s at depth d in Type t.
+// If exactly one exists, it will be returned in *save (if save is not nil),
+// and dotlist will contain the path of embedded fields traversed to find it,
+// in reverse order. If none exist, more will indicate whether t contains any
+// embedded fields at depth d, so callers can decide whether to retry at
+// a greater depth.
+func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
+ if t.Recur() {
+ return
+ }
+ t.SetRecur(true)
+ defer t.SetRecur(false)
+
+ var u *types.Type
+ d--
+ if d < 0 {
+ // We've reached our target depth. If t has any fields/methods
+ // named s, then we're done. Otherwise, we still need to check
+ // below for embedded fields.
+ c = lookdot0(s, t, save, ignorecase)
+ if c != 0 {
+ return c, false
+ }
+ }
+
+ u = t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+ if !u.IsStruct() && !u.IsInterface() {
+ return c, false
+ }
+
+ var fields []*types.Field
+ if u.IsStruct() {
+ fields = u.Fields()
+ } else {
+ fields = u.AllMethods()
+ }
+ for _, f := range fields {
+ if f.Embedded == 0 || f.Sym == nil {
+ continue
+ }
+ if d < 0 {
+ // Found an embedded field at target depth.
+ return c, true
+ }
+ a, more1 := adddot1(s, f.Type, d, save, ignorecase)
+ if a != 0 && c == 0 {
+ dotlist[d].field = f
+ }
+ c += a
+ if more1 {
+ more = true
+ }
+ }
+
+ return c, more
+}
+
+// dotlist is used by adddot1 to record the path of embedded fields
+// used to access a target field or method.
+// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
+var dotlist = make([]dlist, 10)
+
+// Convert node n for assignment to type t.
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
+ if n == nil || n.Type() == nil {
+ return n
+ }
+
+ if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
+ base.Errorf("use of untyped nil")
+ }
+
+ n = convlit1(n, t, false, context)
+ if n.Type() == nil {
+ base.Fatalf("cannot assign %v to %v", n, t)
+ }
+ if n.Type().IsUntyped() {
+ base.Fatalf("%L has untyped type", n)
+ }
+ if t.Kind() == types.TBLANK {
+ return n
+ }
+ if types.Identical(n.Type(), t) {
+ return n
+ }
+
+ op, why := assignOp(n.Type(), t)
+ if op == ir.OXXX {
+ base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
+ op = ir.OCONV
+ }
+
+ r := ir.NewConvExpr(base.Pos, op, t, n)
+ r.SetTypecheck(1)
+ r.SetImplicit(true)
+ return r
+}
+
+// Is type src assignment compatible to type dst?
+// If so, return op code to use in conversion.
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+func assignOp(src, dst *types.Type) (ir.Op, string) {
+ if src == dst {
+ return ir.OCONVNOP, ""
+ }
+ if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+ return ir.OXXX, ""
+ }
+
+ // 1. src type is identical to dst.
+ if types.Identical(src, dst) {
+ return ir.OCONVNOP, ""
+ }
+
+ // 2. src and dst have identical underlying types and
+ // a. either src or dst is not a named type, or
+ // b. both are empty interface types, or
+ // c. at least one is a gcshape type.
+ // For assignable but different non-empty interface types,
+ // we want to recompute the itab. Recomputing the itab ensures
+ // that itabs are unique (thus an interface with a compile-time
+ // type I has an itab with interface type I).
+ if types.Identical(src.Underlying(), dst.Underlying()) {
+ if src.IsEmptyInterface() {
+ // Conversion between two empty interfaces
+ // requires no code.
+ return ir.OCONVNOP, ""
+ }
+ if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
+ // Conversion between two types, at least one unnamed,
+ // needs no conversion. The exception is nonempty interfaces
+ // which need to have their itab updated.
+ return ir.OCONVNOP, ""
+ }
+ if src.IsShape() || dst.IsShape() {
+ // Conversion between a shape type and one of the types
+ // it represents also needs no conversion.
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 3. dst is an interface type and src implements dst.
+ if dst.IsInterface() && src.Kind() != types.TNIL {
+ if src.IsShape() {
+ // Shape types implement things they have already
+ // been typechecked to implement, even if they
+ // don't have the methods for them.
+ return ir.OCONVIFACE, ""
+ }
+ if src.HasShape() {
+ // Unified IR uses OCONVIFACE for converting all derived types
+ // to interface type, not just type arguments themselves.
+ return ir.OCONVIFACE, ""
+ }
+
+ why := ImplementsExplain(src, dst)
+ if why == "" {
+ return ir.OCONVIFACE, ""
+ }
+ return ir.OXXX, ":\n\t" + why
+ }
+
+ if isptrto(dst, types.TINTER) {
+ why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+ return ir.OXXX, why
+ }
+
+ if src.IsInterface() && dst.Kind() != types.TBLANK {
+ var why string
+ if Implements(dst, src) {
+ why = ": need type assertion"
+ }
+ return ir.OXXX, why
+ }
+
+ // 4. src is a bidirectional channel value, dst is a channel type,
+ // src and dst have identical element types, and
+ // either src or dst is not a named type.
+ if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
+ if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 5. src is the predeclared identifier nil and dst is a nillable type.
+ if src.Kind() == types.TNIL {
+ switch dst.Kind() {
+ case types.TPTR,
+ types.TFUNC,
+ types.TMAP,
+ types.TCHAN,
+ types.TINTER,
+ types.TSLICE:
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 6. rule about untyped constants - already converted by DefaultLit.
+
+ // 7. Any typed value can be assigned to the blank identifier.
+ if dst.Kind() == types.TBLANK {
+ return ir.OCONVNOP, ""
+ }
+
+ return ir.OXXX, ""
+}
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it'll be the empty string.
+// srcConstant indicates whether the value of type src is a constant.
+func convertOp(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
+ if src == dst {
+ return ir.OCONVNOP, ""
+ }
+ if src == nil || dst == nil {
+ return ir.OXXX, ""
+ }
+
+ // Conversions from regular to not-in-heap are not allowed
+ // (unless it's unsafe.Pointer). These are runtime-specific
+ // rules.
+ // (a) Disallow (*T) to (*U) where T is not-in-heap but U isn't.
+ if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+ return ir.OXXX, why
+ }
+ // (b) Disallow string to []T where T is not-in-heap.
+ if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+ return ir.OXXX, why
+ }
+
+ // 1. src can be assigned to dst.
+ op, why := assignOp(src, dst)
+ if op != ir.OXXX {
+ return op, why
+ }
+
+ // The rules for interfaces are no different in conversions
+ // than assignments. If interfaces are involved, stop now
+ // with the good message from assignop.
+ // Otherwise clear the error.
+ if src.IsInterface() || dst.IsInterface() {
+ return ir.OXXX, why
+ }
+
+ // 2. Ignoring struct tags, src and dst have identical underlying types.
+ if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
+ return ir.OCONVNOP, ""
+ }
+
+ // 3. src and dst are unnamed pointer types and, ignoring struct tags,
+ // their base types have identical underlying types.
+ if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
+ if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 4. src and dst are both integer or floating point types.
+ if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
+ if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+ return ir.OCONVNOP, ""
+ }
+ return ir.OCONV, ""
+ }
+
+ // 5. src and dst are both complex types.
+ if src.IsComplex() && dst.IsComplex() {
+ if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+ return ir.OCONVNOP, ""
+ }
+ return ir.OCONV, ""
+ }
+
+ // Special case for constant conversions: any numeric
+ // conversion is potentially okay. We'll validate further
+ // within evconst. See #38117.
+ if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
+ return ir.OCONV, ""
+ }
+
+ // 6. src is an integer or has type []byte or []rune
+ // and dst is a string type.
+ if src.IsInteger() && dst.IsString() {
+ return ir.ORUNESTR, ""
+ }
+
+ if src.IsSlice() && dst.IsString() {
+ if src.Elem().Kind() == types.ByteType.Kind() {
+ return ir.OBYTES2STR, ""
+ }
+ if src.Elem().Kind() == types.RuneType.Kind() {
+ return ir.ORUNES2STR, ""
+ }
+ }
+
+ // 7. src is a string and dst is []byte or []rune.
+ // String to slice.
+ if src.IsString() && dst.IsSlice() {
+ if dst.Elem().Kind() == types.ByteType.Kind() {
+ return ir.OSTR2BYTES, ""
+ }
+ if dst.Elem().Kind() == types.RuneType.Kind() {
+ return ir.OSTR2RUNES, ""
+ }
+ }
+
+ // 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+ if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
+ return ir.OCONVNOP, ""
+ }
+
+ // 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+ if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
+ return ir.OCONVNOP, ""
+ }
+
+ // 10. src is a slice and dst is an array or pointer-to-array.
+ // They must have same element type.
+ if src.IsSlice() {
+ if dst.IsArray() && types.Identical(src.Elem(), dst.Elem()) {
+ return ir.OSLICE2ARR, ""
+ }
+ if dst.IsPtr() && dst.Elem().IsArray() &&
+ types.Identical(src.Elem(), dst.Elem().Elem()) {
+ return ir.OSLICE2ARRPTR, ""
+ }
+ }
+
+ return ir.OXXX, ""
+}
+
+// Code to resolve elided DOTs in embedded types.
+
+// A dlist stores a pointer to a TFIELD Type embedded within
+// a TSTRUCT or TINTER Type.
+type dlist struct {
+ field *types.Field
+}
+
+// dotpath computes the unique shortest explicit selector path to fully qualify
+// a selection expression x.f, where x is of type t and f is the symbol s.
+// If no such path exists, dotpath returns nil.
+// If there are multiple shortest paths to the same depth, ambig is true.
+func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []dlist, ambig bool) {
+ // The embedding of types within structs imposes a tree structure onto
+ // types: structs parent the types they embed, and types parent their
+ // fields or methods. Our goal here is to find the shortest path to
+ // a field or method named s in the subtree rooted at t. To accomplish
+ // that, we iteratively perform depth-first searches of increasing depth
+ // until we either find the named field/method or exhaust the tree.
+ for d := 0; ; d++ {
+ if d > len(dotlist) {
+ dotlist = append(dotlist, dlist{})
+ }
+ if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
+ return dotlist[:d], false
+ } else if c > 1 {
+ return nil, true
+ } else if !more {
+ return nil, false
+ }
+ }
+}
+
+func expand0(t *types.Type) {
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ if u.IsInterface() {
+ for _, f := range u.AllMethods() {
+ if f.Sym.Uniq() {
+ continue
+ }
+ f.Sym.SetUniq(true)
+ slist = append(slist, symlink{field: f})
+ }
+
+ return
+ }
+
+ u = types.ReceiverBaseType(t)
+ if u != nil {
+ for _, f := range u.Methods() {
+ if f.Sym.Uniq() {
+ continue
+ }
+ f.Sym.SetUniq(true)
+ slist = append(slist, symlink{field: f})
+ }
+ }
+}
+
+func expand1(t *types.Type, top bool) {
+ if t.Recur() {
+ return
+ }
+ t.SetRecur(true)
+
+ if !top {
+ expand0(t)
+ }
+
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ if u.IsStruct() || u.IsInterface() {
+ var fields []*types.Field
+ if u.IsStruct() {
+ fields = u.Fields()
+ } else {
+ fields = u.AllMethods()
+ }
+ for _, f := range fields {
+ if f.Embedded == 0 {
+ continue
+ }
+ if f.Sym == nil {
+ continue
+ }
+ expand1(f.Type, false)
+ }
+ }
+
+ t.SetRecur(false)
+}
+
+func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) *types.Field {
+ if t == nil {
+ return nil
+ }
+
+ var m *types.Field
+ path, _ := dotpath(s, t, &m, ignorecase)
+ if path == nil {
+ return nil
+ }
+
+ if !m.IsMethod() {
+ return nil
+ }
+
+ return m
+}
+
+// Implements reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type.
+func Implements(t, iface *types.Type) bool {
+ var missing, have *types.Field
+ var ptr int
+ return implements(t, iface, &missing, &have, &ptr)
+}
+
+// ImplementsExplain reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type. If t does not implement
+// iface, a non-empty string is returned explaining why.
+func ImplementsExplain(t, iface *types.Type) string {
+ var missing, have *types.Field
+ var ptr int
+ if implements(t, iface, &missing, &have, &ptr) {
+ return ""
+ }
+
+ if isptrto(t, types.TINTER) {
+ return fmt.Sprintf("%v is pointer to interface, not interface", t)
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+ return fmt.Sprintf("%v does not implement %v (%v method is marked 'nointerface')", t, iface, missing.Sym)
+ } else if have != nil && have.Sym == missing.Sym {
+ return fmt.Sprintf("%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", t, iface, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ return fmt.Sprintf("%v does not implement %v (%v method has pointer receiver)", t, iface, missing.Sym)
+ } else if have != nil {
+ return fmt.Sprintf("%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", t, iface, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ }
+ return fmt.Sprintf("%v does not implement %v (missing %v method)", t, iface, missing.Sym)
+}
+
+// implements reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type. If implements returns
+// false, it stores a method of iface that is not implemented in *m. If the
+// method name matches but the type is wrong, it additionally stores the type
+// of the method (on t) in *samename.
+func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
+ t0 := t
+ if t == nil {
+ return false
+ }
+
+ if t.IsInterface() {
+ i := 0
+ tms := t.AllMethods()
+ for _, im := range iface.AllMethods() {
+ for i < len(tms) && tms[i].Sym != im.Sym {
+ i++
+ }
+ if i == len(tms) {
+ *m = im
+ *samename = nil
+ *ptr = 0
+ return false
+ }
+ tm := tms[i]
+ if !types.Identical(tm.Type, im.Type) {
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+ }
+
+ return true
+ }
+
+ t = types.ReceiverBaseType(t)
+ var tms []*types.Field
+ if t != nil {
+ CalcMethods(t)
+ tms = t.AllMethods()
+ }
+ i := 0
+ for _, im := range iface.AllMethods() {
+ for i < len(tms) && tms[i].Sym != im.Sym {
+ i++
+ }
+ if i == len(tms) {
+ *m = im
+ *samename = ifacelookdot(im.Sym, t, true)
+ *ptr = 0
+ return false
+ }
+ tm := tms[i]
+ if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+
+ // if pointer receiver in method,
+ // the method does not exist for value types.
+ if !types.IsMethodApplicable(t0, tm) {
+ if false && base.Flag.LowerR != 0 {
+ base.Errorf("interface pointer mismatch")
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 1
+ return false
+ }
+ }
+
+ return true
+}
+
+func isptrto(t *types.Type, et types.Kind) bool {
+ if t == nil {
+ return false
+ }
+ if !t.IsPtr() {
+ return false
+ }
+ t = t.Elem()
+ if t == nil {
+ return false
+ }
+ if t.Kind() != et {
+ return false
+ }
+ return true
+}
+
+// lookdot0 returns the number of fields or methods named s associated
+// with Type t. If exactly one exists, it will be returned in *save
+// (if save is not nil).
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ c := 0
+ if u.IsStruct() || u.IsInterface() {
+ var fields []*types.Field
+ if u.IsStruct() {
+ fields = u.Fields()
+ } else {
+ fields = u.AllMethods()
+ }
+ for _, f := range fields {
+ if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ u = t
+ if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
+ // If t is a defined pointer type, then x.m is shorthand for (*x).m.
+ u = t.Elem()
+ }
+ u = types.ReceiverBaseType(u)
+ if u != nil {
+ for _, f := range u.Methods() {
+ if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ return c
+}
+
+var slist []symlink
+
+// Code to help generate trampoline functions for methods on embedded
+// types. These are approx the same as the corresponding AddImplicitDots
+// routines except that they expect to be called with unique tasks and
+// they return the actual methods.
+
+type symlink struct {
+ field *types.Field
+}
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
new file mode 100644
index 0000000..a977b5e
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -0,0 +1,134 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// LookupRuntime returns a function or variable declared in
+// _builtin/runtime.go. If types_ is non-empty, successive occurrences
+// of the "any" placeholder type will be substituted.
+func LookupRuntime(name string, types_ ...*types.Type) *ir.Name {
+ s := ir.Pkgs.Runtime.Lookup(name)
+ if s == nil || s.Def == nil {
+ base.Fatalf("LookupRuntime: can't find runtime.%s", name)
+ }
+ n := s.Def.(*ir.Name)
+ if len(types_) != 0 {
+ n = substArgTypes(n, types_...)
+ }
+ return n
+}
+
+// SubstArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+ for _, t := range types_ {
+ types.CalcSize(t)
+ }
+ n := ir.NewNameAt(old.Pos(), old.Sym(), types.SubstAny(old.Type(), &types_))
+ n.Class = old.Class
+ n.Func = old.Func
+ if len(types_) > 0 {
+ base.Fatalf("SubstArgTypes: too many argument types")
+ }
+ return n
+}
+
+// AutoLabel generates a new Name node for use with
+// an automatically generated label.
+// prefix is a short mnemonic (e.g. ".s" for switch)
+// to help with debugging.
+// It should begin with "." to avoid conflicts with
+// user labels.
+func AutoLabel(prefix string) *types.Sym {
+ if prefix[0] != '.' {
+ base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
+ }
+ fn := ir.CurFunc
+ if ir.CurFunc == nil {
+ base.Fatalf("autolabel outside function")
+ }
+ n := fn.Label
+ fn.Label++
+ return LookupNum(prefix, int(n))
+}
+
+func Lookup(name string) *types.Sym {
+ return types.LocalPkg.Lookup(name)
+}
+
+// InitRuntime loads the definitions for the low-level runtime functions,
+// so that the compiler can generate calls to them,
+// but does not make them visible to user code.
+func InitRuntime() {
+ base.Timer.Start("fe", "loadsys")
+
+ typs := runtimeTypes()
+ for _, d := range &runtimeDecls {
+ sym := ir.Pkgs.Runtime.Lookup(d.name)
+ typ := typs[d.typ]
+ switch d.tag {
+ case funcTag:
+ importfunc(sym, typ)
+ case varTag:
+ importvar(sym, typ)
+ default:
+ base.Fatalf("unhandled declaration tag %v", d.tag)
+ }
+ }
+}
+
+// LookupRuntimeFunc looks up Go function name in package runtime. This function
+// must follow the internal calling convention.
+func LookupRuntimeFunc(name string) *obj.LSym {
+ return LookupRuntimeABI(name, obj.ABIInternal)
+}
+
+// LookupRuntimeVar looks up a variable (or assembly function) name in package
+// runtime. If this is a function, it may have a special calling
+// convention.
+func LookupRuntimeVar(name string) *obj.LSym {
+ return LookupRuntimeABI(name, obj.ABI0)
+}
+
+// LookupRuntimeABI looks up a name in package runtime using the given ABI.
+func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym {
+ return base.PkgLinksym("runtime", name, abi)
+}
+
+// InitCoverage loads the definitions for routines called
+// by code coverage instrumentation (similar to InitRuntime above).
+func InitCoverage() {
+ typs := coverageTypes()
+ for _, d := range &coverageDecls {
+ sym := ir.Pkgs.Coverage.Lookup(d.name)
+ typ := typs[d.typ]
+ switch d.tag {
+ case funcTag:
+ importfunc(sym, typ)
+ case varTag:
+ importvar(sym, typ)
+ default:
+ base.Fatalf("unhandled declaration tag %v", d.tag)
+ }
+ }
+}
+
+// LookupCoverage looks up the Go function 'name' in package
+// runtime/coverage. This function must follow the internal calling
+// convention.
+func LookupCoverage(name string) *ir.Name {
+ sym := ir.Pkgs.Coverage.Lookup(name)
+ if sym == nil {
+ base.Fatalf("LookupCoverage: can't find runtime/coverage.%s", name)
+ }
+ return sym.Def.(*ir.Name)
+}
diff --git a/src/cmd/compile/internal/typecheck/target.go b/src/cmd/compile/internal/typecheck/target.go
new file mode 100644
index 0000000..018614d
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/target.go
@@ -0,0 +1,12 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package typecheck
+
+import "cmd/compile/internal/ir"
+
+// Target is the package being compiled.
+var Target *ir.Package
diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go
new file mode 100644
index 0000000..37c3943
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/type.go
@@ -0,0 +1,5 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
new file mode 100644
index 0000000..b22e453
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -0,0 +1,1317 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
+func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) }
+func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
+
+func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
+func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
+
+func Call(pos src.XPos, callee ir.Node, args []ir.Node, dots bool) ir.Node {
+ call := ir.NewCallExpr(pos, ir.OCALL, callee, args)
+ call.IsDDD = dots
+ return typecheck(call, ctxStmt|ctxExpr)
+}
+
+func Callee(n ir.Node) ir.Node {
+ return typecheck(n, ctxExpr|ctxCallee)
+}
+
+var traceIndent []byte
+
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
+ indent := traceIndent
+
+ // guard against nil
+ var pos, op string
+ var tc uint8
+ if n != nil {
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
+ tc = n.Typecheck()
+ }
+
+ types.SkipSizeForTracing = true
+ defer func() { types.SkipSizeForTracing = false }()
+ fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
+ traceIndent = append(traceIndent, ". "...)
+
+ return func(np *ir.Node) {
+ traceIndent = traceIndent[:len(traceIndent)-2]
+
+ // if we have a result, use that
+ if np != nil {
+ n = *np
+ }
+
+ // guard against nil
+ // use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
+ var tc uint8
+ var typ *types.Type
+ if n != nil {
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
+ tc = n.Typecheck()
+ typ = n.Type()
+ }
+
+ types.SkipSizeForTracing = true
+ defer func() { types.SkipSizeForTracing = false }()
+ fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ)
+ }
+}
+
+const (
+ ctxStmt = 1 << iota // evaluated at statement level
+ ctxExpr // evaluated in value context
+ ctxType // evaluated in type context
+ ctxCallee // call-only expressions are ok
+ ctxMultiOK // multivalue function returns are ok
+ ctxAssign // assigning to expression
+)
+
+// type checks the whole tree of an expression.
+// calculates expression types.
+// evaluates compile time constants.
+// marks variables that escape the local frame.
+// rewrites n.Op to be more specific in some cases.
+
+func typecheckslice(l []ir.Node, top int) {
+ for i := range l {
+ l[i] = typecheck(l[i], top)
+ }
+}
+
+var _typekind = []string{
+ types.TINT: "int",
+ types.TUINT: "uint",
+ types.TINT8: "int8",
+ types.TUINT8: "uint8",
+ types.TINT16: "int16",
+ types.TUINT16: "uint16",
+ types.TINT32: "int32",
+ types.TUINT32: "uint32",
+ types.TINT64: "int64",
+ types.TUINT64: "uint64",
+ types.TUINTPTR: "uintptr",
+ types.TCOMPLEX64: "complex64",
+ types.TCOMPLEX128: "complex128",
+ types.TFLOAT32: "float32",
+ types.TFLOAT64: "float64",
+ types.TBOOL: "bool",
+ types.TSTRING: "string",
+ types.TPTR: "pointer",
+ types.TUNSAFEPTR: "unsafe.Pointer",
+ types.TSTRUCT: "struct",
+ types.TINTER: "interface",
+ types.TCHAN: "chan",
+ types.TMAP: "map",
+ types.TARRAY: "array",
+ types.TSLICE: "slice",
+ types.TFUNC: "func",
+ types.TNIL: "nil",
+ types.TIDEAL: "untyped number",
+}
+
+func typekind(t *types.Type) string {
+ if t.IsUntyped() {
+ return fmt.Sprintf("%v", t)
+ }
+ et := t.Kind()
+ if int(et) < len(_typekind) {
+ s := _typekind[et]
+ if s != "" {
+ return s
+ }
+ }
+ return fmt.Sprintf("etype=%d", et)
+}
+
+// typecheck type checks node n.
+// The result of typecheck MUST be assigned back to n, e.g.
+//
+// n.Left = typecheck(n.Left, top)
+func typecheck(n ir.Node, top int) (res ir.Node) {
+ if n == nil {
+ return nil
+ }
+
+ // only trace if there's work to do
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheck", n)(&res)
+ }
+
+ lno := ir.SetPos(n)
+ defer func() { base.Pos = lno }()
+
+ // Skip over parens.
+ for n.Op() == ir.OPAREN {
+ n = n.(*ir.ParenExpr).X
+ }
+
+ // Skip typecheck if already done.
+ // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+ if n.Typecheck() == 1 || n.Typecheck() == 3 {
+ switch n.Op() {
+ case ir.ONAME:
+ break
+
+ default:
+ return n
+ }
+ }
+
+ if n.Typecheck() == 2 {
+ base.FatalfAt(n.Pos(), "typechecking loop")
+ }
+
+ n.SetTypecheck(2)
+ n = typecheck1(n, top)
+ n.SetTypecheck(1)
+
+ t := n.Type()
+ if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
+ switch t.Kind() {
+ case types.TFUNC, // might have TANY; wait until it's called
+ types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
+ break
+
+ default:
+ types.CheckSize(t)
+ }
+ }
+
+ return n
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is almost equivalent to DefaultLit
+// but also accepts untyped numeric values representable as
+// value of type int (see also checkmake for comparison).
+// The result of indexlit MUST be assigned back to n, e.g.
+//
+// n.Left = indexlit(n.Left)
+func indexlit(n ir.Node) ir.Node {
+ if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ return DefaultLit(n, types.Types[types.TINT])
+ }
+ return n
+}
+
+// typecheck1 should ONLY be called from typecheck.
+func typecheck1(n ir.Node, top int) ir.Node {
+ switch n.Op() {
+ default:
+ ir.Dump("typecheck", n)
+ base.Fatalf("typecheck %v", n.Op())
+ panic("unreachable")
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.BuiltinOp != 0 {
+ if top&ctxCallee == 0 {
+ base.Errorf("use of builtin %v not in function call", n.Sym())
+ n.SetType(nil)
+ return n
+ }
+ return n
+ }
+ if top&ctxAssign == 0 {
+ // not a write to the variable
+ if ir.IsBlank(n) {
+ base.Errorf("cannot use _ as value")
+ n.SetType(nil)
+ return n
+ }
+ n.SetUsed(true)
+ }
+ return n
+
+ // type or expr
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return tcStar(n, top)
+
+ // x op= y
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ checkassign(n.X)
+ if n.IncDec && !okforarith[n.X.Type().Kind()] {
+ base.Errorf("invalid operation: %v (non-numeric type %v)", n, n.X.Type())
+ return n
+ }
+ switch n.AsOp {
+ case ir.OLSH, ir.ORSH:
+ n.X, n.Y, _ = tcShift(n, n.X, n.Y)
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+ n.X, n.Y, _ = tcArith(n, n.AsOp, n.X, n.Y)
+ default:
+ base.Fatalf("invalid assign op: %v", n.AsOp)
+ }
+ return n
+
+ // logical operators
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ // For "x == x && len(s)", it's better to report that "len(s)" (type int)
+ // can't be used with "&&" than to report that "x == x" (type untyped bool)
+ // can't be converted to int (see issue #41500).
+ if !n.X.Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type()))
+ n.SetType(nil)
+ return n
+ }
+ if !n.Y.Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type()))
+ n.SetType(nil)
+ return n
+ }
+ l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+ n.X, n.Y = l, r
+ n.SetType(t)
+ return n
+
+ // shift operators
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ l, r, t := tcShift(n, n.X, n.Y)
+ n.X, n.Y = l, r
+ n.SetType(t)
+ return n
+
+ // comparison operators
+ case ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.ONE:
+ n := n.(*ir.BinaryExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+ if t != nil {
+ n.X, n.Y = l, r
+ n.SetType(types.UntypedBool)
+ n.X, n.Y = defaultlit2(l, r, true)
+ }
+ return n
+
+ // binary operators
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+ n := n.(*ir.BinaryExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+ if t != nil && t.Kind() == types.TSTRING && n.Op() == ir.OADD {
+ // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ var add *ir.AddStringExpr
+ if l.Op() == ir.OADDSTR {
+ add = l.(*ir.AddStringExpr)
+ add.SetPos(n.Pos())
+ } else {
+ add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
+ }
+ if r.Op() == ir.OADDSTR {
+ r := r.(*ir.AddStringExpr)
+ add.List.Append(r.List.Take()...)
+ } else {
+ add.List.Append(r)
+ }
+ add.SetType(t)
+ return add
+ }
+ n.X, n.Y = l, r
+ n.SetType(t)
+ return n
+
+ case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
+ n := n.(*ir.UnaryExpr)
+ return tcUnaryArith(n)
+
+ // exprs
+ case ir.OCOMPLIT:
+ return tcCompLit(n.(*ir.CompLitExpr))
+
+ case ir.OXDOT, ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ return tcDot(n, top)
+
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
+ return tcDotType(n)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return tcIndex(n)
+
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ return tcRecv(n)
+
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ return tcSend(n)
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ return tcSliceHeader(n)
+
+ case ir.OSTRINGHEADER:
+ n := n.(*ir.StringHeaderExpr)
+ return tcStringHeader(n)
+
+ case ir.OMAKESLICECOPY:
+ n := n.(*ir.MakeExpr)
+ return tcMakeSliceCopy(n)
+
+ case ir.OSLICE, ir.OSLICE3:
+ n := n.(*ir.SliceExpr)
+ return tcSlice(n)
+
+ // call and call like
+ case ir.OCALL:
+ n := n.(*ir.CallExpr)
+ return tcCall(n, top)
+
+ case ir.OCAP, ir.OLEN:
+ n := n.(*ir.UnaryExpr)
+ return tcLenCap(n)
+
+ case ir.OMIN, ir.OMAX:
+ n := n.(*ir.CallExpr)
+ return tcMinMax(n)
+
+ case ir.OREAL, ir.OIMAG:
+ n := n.(*ir.UnaryExpr)
+ return tcRealImag(n)
+
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ return tcComplex(n)
+
+ case ir.OCLEAR:
+ n := n.(*ir.UnaryExpr)
+ return tcClear(n)
+
+ case ir.OCLOSE:
+ n := n.(*ir.UnaryExpr)
+ return tcClose(n)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ return tcDelete(n)
+
+ case ir.OAPPEND:
+ n := n.(*ir.CallExpr)
+ return tcAppend(n)
+
+ case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
+ return tcCopy(n)
+
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return tcConv(n)
+
+ case ir.OMAKE:
+ n := n.(*ir.CallExpr)
+ return tcMake(n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ return tcNew(n)
+
+ case ir.OPRINT, ir.OPRINTLN:
+ n := n.(*ir.CallExpr)
+ return tcPrint(n)
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ return tcPanic(n)
+
+ case ir.ORECOVER:
+ n := n.(*ir.CallExpr)
+ return tcRecover(n)
+
+ case ir.OUNSAFEADD:
+ n := n.(*ir.BinaryExpr)
+ return tcUnsafeAdd(n)
+
+ case ir.OUNSAFESLICE:
+ n := n.(*ir.BinaryExpr)
+ return tcUnsafeSlice(n)
+
+ case ir.OUNSAFESLICEDATA:
+ n := n.(*ir.UnaryExpr)
+ return tcUnsafeData(n)
+
+ case ir.OUNSAFESTRING:
+ n := n.(*ir.BinaryExpr)
+ return tcUnsafeString(n)
+
+ case ir.OUNSAFESTRINGDATA:
+ n := n.(*ir.UnaryExpr)
+ return tcUnsafeData(n)
+
+ case ir.OITAB:
+ n := n.(*ir.UnaryExpr)
+ return tcITab(n)
+
+ case ir.OIDATA:
+ // Whoever creates the OIDATA node must know a priori the concrete type at that moment,
+ // usually by just having checked the OITAB.
+ n := n.(*ir.UnaryExpr)
+ base.Fatalf("cannot typecheck interface data %v", n)
+ panic("unreachable")
+
+ case ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ return tcSPtr(n)
+
+ case ir.OCFUNC:
+ n := n.(*ir.UnaryExpr)
+ n.X = Expr(n.X)
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OGETCALLERPC, ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ if len(n.Args) != 0 {
+ base.FatalfAt(n.Pos(), "unexpected arguments: %v", n)
+ }
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ n.X = Expr(n.X)
+ return n
+
+ // statements
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ tcAssign(n)
+
+ // Code that creates temps does not bother to set defn, so do it here.
+ if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) {
+ n.X.Name().Defn = n
+ }
+ return n
+
+ case ir.OAS2:
+ tcAssignList(n.(*ir.AssignListStmt))
+ return n
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.OGOTO,
+ ir.OFALL:
+ return n
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ Stmts(n.List)
+ return n
+
+ case ir.OLABEL:
+ if n.Sym().IsBlank() {
+ // Empty identifier is valid but useless.
+ // Eliminate now to simplify life later.
+ // See issues 7538, 11589, 11593.
+ n = ir.NewBlockStmt(n.Pos(), nil)
+ }
+ return n
+
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ n.Call = typecheck(n.Call, ctxStmt|ctxExpr)
+ tcGoDefer(n)
+ return n
+
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ return tcFor(n)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ return tcIf(n)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ return tcReturn(n)
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ n.Call = typecheck(n.Call, ctxStmt|ctxExpr).(*ir.CallExpr)
+ return n
+
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ return tcCheckNil(n)
+
+ case ir.OSELECT:
+ tcSelect(n.(*ir.SelectStmt))
+ return n
+
+ case ir.OSWITCH:
+ tcSwitch(n.(*ir.SwitchStmt))
+ return n
+
+ case ir.ORANGE:
+ tcRange(n.(*ir.RangeStmt))
+ return n
+
+ case ir.OTYPESW:
+ n := n.(*ir.TypeSwitchGuard)
+ base.Fatalf("use of .(type) outside type switch")
+ return n
+
+ case ir.ODCLFUNC:
+ tcFunc(n.(*ir.Func))
+ return n
+ }
+
+ // No return n here!
+ // Individual cases can type-assert n, introducing a new one.
+ // Each must execute its own return n.
+}
+
+func typecheckargs(n ir.InitNode) {
+ var list []ir.Node
+ switch n := n.(type) {
+ default:
+ base.Fatalf("typecheckargs %+v", n.Op())
+ case *ir.CallExpr:
+ list = n.Args
+ if n.IsDDD {
+ Exprs(list)
+ return
+ }
+ case *ir.ReturnStmt:
+ list = n.Results
+ }
+ if len(list) != 1 {
+ Exprs(list)
+ return
+ }
+
+ typecheckslice(list, ctxExpr|ctxMultiOK)
+ t := list[0].Type()
+ if t == nil || !t.IsFuncArgStruct() {
+ return
+ }
+
+ // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+ RewriteMultiValueCall(n, list[0])
+}
+
+// RewriteNonNameCall replaces non-Name call expressions with temps,
+// rewriting f()(...) to t0 := f(); t0(...).
+func RewriteNonNameCall(n *ir.CallExpr) {
+ np := &n.Fun
+ if dot, ok := (*np).(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOTMETH || dot.Op() == ir.ODOTINTER || dot.Op() == ir.OMETHVALUE) {
+ np = &dot.X // peel away method selector
+ }
+
+ // Check for side effects in the callee expression.
+ // We explicitly special case new(T) though, because it doesn't have
+ // observable side effects, and keeping it in place allows better escape analysis.
+ if !ir.Any(*np, func(n ir.Node) bool { return n.Op() != ir.ONEW && callOrChan(n) }) {
+ return
+ }
+
+ tmp := TempAt(base.Pos, ir.CurFunc, (*np).Type())
+ as := ir.NewAssignStmt(base.Pos, tmp, *np)
+ as.PtrInit().Append(Stmt(ir.NewDecl(n.Pos(), ir.ODCL, tmp)))
+ *np = tmp
+
+ n.PtrInit().Append(Stmt(as))
+}
+
+// RewriteMultiValueCall rewrites multi-valued f() to use temporaries,
+// so the backend wouldn't need to worry about tuple-valued expressions.
+func RewriteMultiValueCall(n ir.InitNode, call ir.Node) {
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{call})
+ results := call.Type().Fields()
+ list := make([]ir.Node, len(results))
+ for i, result := range results {
+ tmp := TempAt(base.Pos, ir.CurFunc, result.Type)
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, tmp))
+ as.Lhs.Append(tmp)
+ list[i] = tmp
+ }
+
+ n.PtrInit().Append(Stmt(as))
+
+ switch n := n.(type) {
+ default:
+ base.Fatalf("rewriteMultiValueCall %+v", n.Op())
+ case *ir.CallExpr:
+ n.Args = list
+ case *ir.ReturnStmt:
+ n.Results = list
+ case *ir.AssignListStmt:
+ if n.Op() != ir.OAS2FUNC {
+ base.Fatalf("rewriteMultiValueCall: invalid op %v", n.Op())
+ }
+ as.SetOp(ir.OAS2FUNC)
+ n.SetOp(ir.OAS2)
+ n.Rhs = make([]ir.Node, len(list))
+ for i, tmp := range list {
+ n.Rhs[i] = AssignConv(tmp, n.Lhs[i].Type(), "assignment")
+ }
+ }
+}
+
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
+ t := r.Type()
+ if t == nil {
+ return false
+ }
+ if !t.IsInteger() {
+ base.Errorf("invalid slice index %v (type %v)", r, t)
+ return false
+ }
+
+ if r.Op() == ir.OLITERAL {
+ x := r.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid slice index %v (index must be non-negative)", r)
+ return false
+ } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+ return false
+ } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l)))
+ return false
+ } else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid slice index %v (index too large)", r)
+ return false
+ }
+ }
+
+ return true
+}
+
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
+ if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+ base.Errorf("invalid slice index: %v > %v", lo, hi)
+ return false
+ }
+
+ return true
+}
+
+// The result of implicitstar MUST be assigned back to n, e.g.
+//
+// n.Left = implicitstar(n.Left)
+func implicitstar(n ir.Node) ir.Node {
+ // insert implicit * if needed for fixed array
+ t := n.Type()
+ if t == nil || !t.IsPtr() {
+ return n
+ }
+ t = t.Elem()
+ if t == nil {
+ return n
+ }
+ if !t.IsArray() {
+ return n
+ }
+ star := ir.NewStarExpr(base.Pos, n)
+ star.SetImplicit(true)
+ return Expr(star)
+}
+
+func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
+ if len(n.Args) == 0 {
+ p := fmt.Sprintf(f, args...)
+ base.Errorf("missing argument to %s: %v", p, n)
+ return nil, false
+ }
+
+ if len(n.Args) > 1 {
+ p := fmt.Sprintf(f, args...)
+ base.Errorf("too many arguments to %s: %v", p, n)
+ return n.Args[0], false
+ }
+
+ return n.Args[0], true
+}
+
+func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
+ if len(n.Args) != 2 {
+ if len(n.Args) < 2 {
+ base.Errorf("not enough arguments in call to %v", n)
+ } else {
+ base.Errorf("too many arguments in call to %v", n)
+ }
+ return nil, nil, false
+ }
+ return n.Args[0], n.Args[1], true
+}
+
+// Lookdot1 looks up the specified method s in the list fs of methods, returning
+// the matching field or nil. If dostrcmp is 0, it matches the symbols. If
+// dostrcmp is 1, it matches by name exactly. If dostrcmp is 2, it matches names
+// with case folding.
+func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs []*types.Field, dostrcmp int) *types.Field {
+ var r *types.Field
+ for _, f := range fs {
+ if dostrcmp != 0 && f.Sym.Name == s.Name {
+ return f
+ }
+ if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
+ return f
+ }
+ if f.Sym != s {
+ continue
+ }
+ if r != nil {
+ if errnode != nil {
+ base.Errorf("ambiguous selector %v", errnode)
+ } else if t.IsPtr() {
+ base.Errorf("ambiguous selector (%v).%v", t, s)
+ } else {
+ base.Errorf("ambiguous selector %v.%v", t, s)
+ }
+ break
+ }
+
+ r = f
+ }
+
+ return r
+}
+
+// NewMethodExpr returns an OMETHEXPR node representing method
+// expression "recv.sym".
+func NewMethodExpr(pos src.XPos, recv *types.Type, sym *types.Sym) *ir.SelectorExpr {
+ // Compute the method set for recv.
+ var ms []*types.Field
+ if recv.IsInterface() {
+ ms = recv.AllMethods()
+ } else {
+ mt := types.ReceiverBaseType(recv)
+ if mt == nil {
+ base.FatalfAt(pos, "type %v has no receiver base type", recv)
+ }
+ CalcMethods(mt)
+ ms = mt.AllMethods()
+ }
+
+ m := Lookdot1(nil, sym, recv, ms, 0)
+ if m == nil {
+ base.FatalfAt(pos, "type %v has no method %v", recv, sym)
+ }
+
+ if !types.IsMethodApplicable(recv, m) {
+ base.FatalfAt(pos, "invalid method expression %v.%v (needs pointer receiver)", recv, sym)
+ }
+
+ n := ir.NewSelectorExpr(pos, ir.OMETHEXPR, ir.TypeNode(recv), sym)
+ n.Selection = m
+ n.SetType(NewMethodType(m.Type, recv))
+ n.SetTypecheck(1)
+ return n
+}
+
+func derefall(t *types.Type) *types.Type {
+ for t != nil && t.IsPtr() {
+ t = t.Elem()
+ }
+ return t
+}
+
+// Lookdot looks up field or method n.Sel in the type t and returns the matching
+// field. It transforms the op of node n to ODOTINTER or ODOTMETH, if appropriate.
+// It also may add a StarExpr node to n.X as needed for access to non-pointer
+// methods. If dostrcmp is 0, it matches the field/method with the exact symbol
+// as n.Sel (appropriate for exported fields). If dostrcmp is 1, it matches by name
+// exactly. If dostrcmp is 2, it matches names with case folding.
+func Lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
+ s := n.Sel
+
+ types.CalcSize(t)
+ var f1 *types.Field
+ if t.IsStruct() {
+ f1 = Lookdot1(n, s, t, t.Fields(), dostrcmp)
+ } else if t.IsInterface() {
+ f1 = Lookdot1(n, s, t, t.AllMethods(), dostrcmp)
+ }
+
+ var f2 *types.Field
+ if n.X.Type() == t || n.X.Type().Sym() == nil {
+ mt := types.ReceiverBaseType(t)
+ if mt != nil {
+ f2 = Lookdot1(n, s, mt, mt.Methods(), dostrcmp)
+ }
+ }
+
+ if f1 != nil {
+ if dostrcmp > 1 {
+ // Already in the process of diagnosing an error.
+ return f1
+ }
+ if f2 != nil {
+ base.Errorf("%v is both field and method", n.Sel)
+ }
+ if f1.Offset == types.BADWIDTH {
+ base.Fatalf("Lookdot badwidth t=%v, f1=%v@%p", t, f1, f1)
+ }
+ n.Selection = f1
+ n.SetType(f1.Type)
+ if t.IsInterface() {
+ if n.X.Type().IsPtr() {
+ star := ir.NewStarExpr(base.Pos, n.X)
+ star.SetImplicit(true)
+ n.X = Expr(star)
+ }
+
+ n.SetOp(ir.ODOTINTER)
+ }
+ return f1
+ }
+
+ if f2 != nil {
+ if dostrcmp > 1 {
+ // Already in the process of diagnosing an error.
+ return f2
+ }
+ orig := n.X
+ tt := n.X.Type()
+ types.CalcSize(tt)
+ rcvr := f2.Type.Recv().Type
+ if !types.Identical(rcvr, tt) {
+ if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
+ checklvalue(n.X, "call pointer method on")
+ addr := NodAddr(n.X)
+ addr.SetImplicit(true)
+ n.X = typecheck(addr, ctxType|ctxExpr)
+ } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
+ star := ir.NewStarExpr(base.Pos, n.X)
+ star.SetImplicit(true)
+ n.X = typecheck(star, ctxType|ctxExpr)
+ } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
+ base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X)
+ for tt.IsPtr() {
+ // Stop one level early for method with pointer receiver.
+ if rcvr.IsPtr() && !tt.Elem().IsPtr() {
+ break
+ }
+ star := ir.NewStarExpr(base.Pos, n.X)
+ star.SetImplicit(true)
+ n.X = typecheck(star, ctxType|ctxExpr)
+ tt = tt.Elem()
+ }
+ } else {
+ base.Fatalf("method mismatch: %v for %v", rcvr, tt)
+ }
+ }
+
+ // Check that we haven't implicitly dereferenced any defined pointer types.
+ for x := n.X; ; {
+ var inner ir.Node
+ implicit := false
+ switch x := x.(type) {
+ case *ir.AddrExpr:
+ inner, implicit = x.X, x.Implicit()
+ case *ir.SelectorExpr:
+ inner, implicit = x.X, x.Implicit()
+ case *ir.StarExpr:
+ inner, implicit = x.X, x.Implicit()
+ }
+ if !implicit {
+ break
+ }
+ if inner.Type().Sym() != nil && (x.Op() == ir.ODEREF || x.Op() == ir.ODOTPTR) {
+ // Found an implicit dereference of a defined pointer type.
+ // Restore n.X for better error message.
+ n.X = orig
+ return nil
+ }
+ x = inner
+ }
+
+ n.Selection = f2
+ n.SetType(f2.Type)
+ n.SetOp(ir.ODOTMETH)
+
+ return f2
+ }
+
+ return nil
+}
+
+func nokeys(l ir.Nodes) bool {
+ for _, n := range l {
+ if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
+ return false
+ }
+ }
+ return true
+}
+
+func hasddd(params []*types.Field) bool {
+ // TODO(mdempsky): Simply check the last param.
+ for _, tl := range params {
+ if tl.IsDDD() {
+ return true
+ }
+ }
+
+ return false
+}
+
+// typecheck assignment: type list = expression list
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, params []*types.Field, nl ir.Nodes, desc func() string) {
+ var t *types.Type
+ var i int
+
+ lno := base.Pos
+ defer func() { base.Pos = lno }()
+
+ var n ir.Node
+ if len(nl) == 1 {
+ n = nl[0]
+ }
+
+ n1 := len(params)
+ n2 := len(nl)
+ if !hasddd(params) {
+ if isddd {
+ goto invalidddd
+ }
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ } else {
+ if !isddd {
+ if n2 < n1-1 {
+ goto notenough
+ }
+ } else {
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ }
+ }
+
+ i = 0
+ for _, tl := range params {
+ t = tl.Type
+ if tl.IsDDD() {
+ if isddd {
+ if i >= len(nl) {
+ goto notenough
+ }
+ if len(nl)-i > 1 {
+ goto toomany
+ }
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t, desc)
+ }
+ return
+ }
+
+ // TODO(mdempsky): Make into ... call with implicit slice.
+ for ; i < len(nl); i++ {
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t.Elem(), desc)
+ }
+ }
+ return
+ }
+
+ if i >= len(nl) {
+ goto notenough
+ }
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t, desc)
+ }
+ i++
+ }
+
+ if i < len(nl) {
+ goto toomany
+ }
+
+invalidddd:
+ if isddd {
+ if call != nil {
+ base.Errorf("invalid use of ... in call to %v", call)
+ } else {
+ base.Errorf("invalid use of ... in %v", op)
+ }
+ }
+ return
+
+notenough:
+ if n == nil || n.Type() != nil {
+ base.Fatalf("not enough arguments to %v", op)
+ }
+ return
+
+toomany:
+ base.Fatalf("too many arguments to %v", op)
+}
+
+// type check composite.
+func fielddup(name string, hash map[string]bool) {
+ if hash[name] {
+ base.Errorf("duplicate field name in struct literal: %s", name)
+ return
+ }
+ hash[name] = true
+}
+
+// typecheckarraylit type-checks a sequence of slice/array literal elements.
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
+ // If there are key/value pairs, create a map to keep seen
+ // keys so we can check for duplicate indices.
+ var indices map[int64]bool
+ for _, elt := range elts {
+ if elt.Op() == ir.OKEY {
+ indices = make(map[int64]bool)
+ break
+ }
+ }
+
+ var key, length int64
+ for i, elt := range elts {
+ ir.SetPos(elt)
+ r := elts[i]
+ var kv *ir.KeyExpr
+ if elt.Op() == ir.OKEY {
+ elt := elt.(*ir.KeyExpr)
+ elt.Key = Expr(elt.Key)
+ key = IndexConst(elt.Key)
+ if key < 0 {
+ base.Fatalf("invalid index: %v", elt.Key)
+ }
+ kv = elt
+ r = elt.Value
+ }
+
+ r = Expr(r)
+ r = AssignConv(r, elemType, ctx)
+ if kv != nil {
+ kv.Value = r
+ } else {
+ elts[i] = r
+ }
+
+ if key >= 0 {
+ if indices != nil {
+ if indices[key] {
+ base.Errorf("duplicate index in %s: %d", ctx, key)
+ } else {
+ indices[key] = true
+ }
+ }
+
+ if bound >= 0 && key >= bound {
+ base.Errorf("array index %d out of bounds [0:%d]", key, bound)
+ bound = -1
+ }
+ }
+
+ key++
+ if key > length {
+ length = key
+ }
+ }
+
+ return length
+}
+
+// visible reports whether sym is exported or locally defined.
+func visible(sym *types.Sym) bool {
+ return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg)
+}
+
+// nonexported reports whether sym is an unexported field.
+func nonexported(sym *types.Sym) bool {
+ return sym != nil && !types.IsExported(sym.Name)
+}
+
+func checklvalue(n ir.Node, verb string) {
+ if !ir.IsAddressable(n) {
+ base.Errorf("cannot %s %v", verb, n)
+ }
+}
+
+func checkassign(n ir.Node) {
+ // have already complained about n being invalid
+ if n.Type() == nil {
+ if base.Errors() == 0 {
+ base.Fatalf("expected an error about %v", n)
+ }
+ return
+ }
+
+ if ir.IsAddressable(n) {
+ return
+ }
+ if n.Op() == ir.OINDEXMAP {
+ n := n.(*ir.IndexExpr)
+ n.Assigned = true
+ return
+ }
+
+ defer n.SetType(nil)
+
+ switch {
+ case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP:
+ base.Errorf("cannot assign to struct field %v in map", n)
+ case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR:
+ base.Errorf("cannot assign to %v (strings are immutable)", n)
+ case n.Op() == ir.OLITERAL && n.Sym() != nil && ir.IsConstNode(n):
+ base.Errorf("cannot assign to %v (declared const)", n)
+ default:
+ base.Errorf("cannot assign to %v", n)
+ }
+}
+
+func checkassignto(src *types.Type, dst ir.Node) {
+ // TODO(mdempsky): Handle all untyped types correctly.
+ if src == types.UntypedBool && dst.Type().IsBoolean() {
+ return
+ }
+
+ if op, why := assignOp(src, dst.Type()); op == ir.OXXX {
+ base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+ return
+ }
+}
+
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+//
+// n.Left = stringtoruneslit(n.Left)
+func stringtoruneslit(n *ir.ConvExpr) ir.Node {
+ if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
+ base.Fatalf("stringtoarraylit %v", n)
+ }
+
+ var l []ir.Node
+ i := 0
+ for _, r := range ir.StringVal(n.X) {
+ l = append(l, ir.NewKeyExpr(base.Pos, ir.NewInt(base.Pos, int64(i)), ir.NewInt(base.Pos, int64(r))))
+ i++
+ }
+
+ return Expr(ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, n.Type(), l))
+}
+
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
+ n := *np
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
+ return false
+ }
+
+ // Do range checks for constants before DefaultLit
+ // to avoid redundant "constant NNN overflows int" errors.
+ if n.Op() == ir.OLITERAL {
+ v := toint(n.Val())
+ if constant.Sign(v) < 0 {
+ base.Errorf("negative %s argument in make(%v)", arg, t)
+ return false
+ }
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("%s argument too large in make(%v)", arg, t)
+ return false
+ }
+ }
+
+ // DefaultLit is necessary for non-constants too: n might be 1.1<<k.
+ // TODO(gri) The length argument requirements for (array/slice) make
+ // are the same as for index expressions. Factor the code better;
+ // for instance, indexlit might be called here and incorporate some
+ // of the bounds checks done for make.
+ n = DefaultLit(n, types.Types[types.TINT])
+ *np = n
+
+ return true
+}
+
+// checkunsafesliceorstring is like checkmake but for unsafe.{Slice,String}.
+func checkunsafesliceorstring(op ir.Op, np *ir.Node) bool {
+ n := *np
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer len argument in %v - %v", op, n.Type())
+ return false
+ }
+
+ // Do range checks for constants before DefaultLit
+ // to avoid redundant "constant NNN overflows int" errors.
+ if n.Op() == ir.OLITERAL {
+ v := toint(n.Val())
+ if constant.Sign(v) < 0 {
+ base.Errorf("negative len argument in %v", op)
+ return false
+ }
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("len argument too large in %v", op)
+ return false
+ }
+ }
+
+ // DefaultLit is necessary for non-constants too: n might be 1.1<<k.
+ n = DefaultLit(n, types.Types[types.TINT])
+ *np = n
+
+ return true
+}
+
+func Conv(n ir.Node, t *types.Type) ir.Node {
+ if types.IdenticalStrict(n.Type(), t) {
+ return n
+ }
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(t)
+ n = Expr(n)
+ return n
+}
+
+// ConvNop converts node n to type t using the OCONVNOP op
+// and typechecks the result with ctxExpr.
+func ConvNop(n ir.Node, t *types.Type) ir.Node {
+ if types.IdenticalStrict(n.Type(), t) {
+ return n
+ }
+ n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+ n.SetType(t)
+ n = Expr(n)
+ return n
+}
diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go
new file mode 100644
index 0000000..4c4487c
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -0,0 +1,197 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+var (
+ okfor [ir.OEND][]bool
+)
+
+var (
+ okforeq [types.NTYPE]bool
+ okforadd [types.NTYPE]bool
+ okforand [types.NTYPE]bool
+ okfornone [types.NTYPE]bool
+ okforbool [types.NTYPE]bool
+ okforcap [types.NTYPE]bool
+ okforlen [types.NTYPE]bool
+ okforarith [types.NTYPE]bool
+)
+
+var builtinFuncs = [...]struct {
+ name string
+ op ir.Op
+}{
+ {"append", ir.OAPPEND},
+ {"cap", ir.OCAP},
+ {"clear", ir.OCLEAR},
+ {"close", ir.OCLOSE},
+ {"complex", ir.OCOMPLEX},
+ {"copy", ir.OCOPY},
+ {"delete", ir.ODELETE},
+ {"imag", ir.OIMAG},
+ {"len", ir.OLEN},
+ {"make", ir.OMAKE},
+ {"max", ir.OMAX},
+ {"min", ir.OMIN},
+ {"new", ir.ONEW},
+ {"panic", ir.OPANIC},
+ {"print", ir.OPRINT},
+ {"println", ir.OPRINTLN},
+ {"real", ir.OREAL},
+ {"recover", ir.ORECOVER},
+}
+
+var unsafeFuncs = [...]struct {
+ name string
+ op ir.Op
+}{
+ {"Add", ir.OUNSAFEADD},
+ {"Slice", ir.OUNSAFESLICE},
+ {"SliceData", ir.OUNSAFESLICEDATA},
+ {"String", ir.OUNSAFESTRING},
+ {"StringData", ir.OUNSAFESTRINGDATA},
+}
+
+// InitUniverse initializes the universe block.
+func InitUniverse() {
+ types.InitTypes(func(sym *types.Sym, typ *types.Type) types.Object {
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym)
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ sym.Def = n
+ return n
+ })
+
+ for _, s := range &builtinFuncs {
+ ir.NewBuiltin(types.BuiltinPkg.Lookup(s.name), s.op)
+ }
+
+ for _, s := range &unsafeFuncs {
+ ir.NewBuiltin(types.UnsafePkg.Lookup(s.name), s.op)
+ }
+
+ s := types.BuiltinPkg.Lookup("true")
+ s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(true))
+
+ s = types.BuiltinPkg.Lookup("false")
+ s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false))
+
+ s = Lookup("_")
+ types.BlankSym = s
+ ir.BlankNode = ir.NewNameAt(src.NoXPos, s, types.Types[types.TBLANK])
+ s.Def = ir.BlankNode
+
+ s = types.BuiltinPkg.Lookup("_")
+ s.Def = ir.NewNameAt(src.NoXPos, s, types.Types[types.TBLANK])
+
+ s = types.BuiltinPkg.Lookup("nil")
+ s.Def = NodNil()
+
+ // initialize okfor
+ for et := types.Kind(0); et < types.NTYPE; et++ {
+ if types.IsInt[et] || et == types.TIDEAL {
+ okforeq[et] = true
+ types.IsOrdered[et] = true
+ okforarith[et] = true
+ okforadd[et] = true
+ okforand[et] = true
+ ir.OKForConst[et] = true
+ types.IsSimple[et] = true
+ }
+
+ if types.IsFloat[et] {
+ okforeq[et] = true
+ types.IsOrdered[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ ir.OKForConst[et] = true
+ types.IsSimple[et] = true
+ }
+
+ if types.IsComplex[et] {
+ okforeq[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ ir.OKForConst[et] = true
+ types.IsSimple[et] = true
+ }
+ }
+
+ types.IsSimple[types.TBOOL] = true
+
+ okforadd[types.TSTRING] = true
+
+ okforbool[types.TBOOL] = true
+
+ okforcap[types.TARRAY] = true
+ okforcap[types.TCHAN] = true
+ okforcap[types.TSLICE] = true
+
+ ir.OKForConst[types.TBOOL] = true
+ ir.OKForConst[types.TSTRING] = true
+
+ okforlen[types.TARRAY] = true
+ okforlen[types.TCHAN] = true
+ okforlen[types.TMAP] = true
+ okforlen[types.TSLICE] = true
+ okforlen[types.TSTRING] = true
+
+ okforeq[types.TPTR] = true
+ okforeq[types.TUNSAFEPTR] = true
+ okforeq[types.TINTER] = true
+ okforeq[types.TCHAN] = true
+ okforeq[types.TSTRING] = true
+ okforeq[types.TBOOL] = true
+ okforeq[types.TMAP] = true // nil only; refined in typecheck
+ okforeq[types.TFUNC] = true // nil only; refined in typecheck
+ okforeq[types.TSLICE] = true // nil only; refined in typecheck
+ okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck
+ okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
+
+ types.IsOrdered[types.TSTRING] = true
+
+ for i := range okfor {
+ okfor[i] = okfornone[:]
+ }
+
+ // binary
+ okfor[ir.OADD] = okforadd[:]
+ okfor[ir.OAND] = okforand[:]
+ okfor[ir.OANDAND] = okforbool[:]
+ okfor[ir.OANDNOT] = okforand[:]
+ okfor[ir.ODIV] = okforarith[:]
+ okfor[ir.OEQ] = okforeq[:]
+ okfor[ir.OGE] = types.IsOrdered[:]
+ okfor[ir.OGT] = types.IsOrdered[:]
+ okfor[ir.OLE] = types.IsOrdered[:]
+ okfor[ir.OLT] = types.IsOrdered[:]
+ okfor[ir.OMOD] = okforand[:]
+ okfor[ir.OMUL] = okforarith[:]
+ okfor[ir.ONE] = okforeq[:]
+ okfor[ir.OOR] = okforand[:]
+ okfor[ir.OOROR] = okforbool[:]
+ okfor[ir.OSUB] = okforarith[:]
+ okfor[ir.OXOR] = okforand[:]
+ okfor[ir.OLSH] = okforand[:]
+ okfor[ir.ORSH] = okforand[:]
+
+ // unary
+ okfor[ir.OBITNOT] = okforand[:]
+ okfor[ir.ONEG] = okforarith[:]
+ okfor[ir.ONOT] = okforbool[:]
+ okfor[ir.OPLUS] = okforarith[:]
+
+ // special
+ okfor[ir.OCAP] = okforcap[:]
+ okfor[ir.OLEN] = okforlen[:]
+}
diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
new file mode 100644
index 0000000..d3b4462
--- /dev/null
+++ b/src/cmd/compile/internal/types/alg.go
@@ -0,0 +1,169 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "cmd/compile/internal/base"
+
+// AlgKind describes the kind of algorithms used for comparing and
+// hashing a Type.
+type AlgKind int
+
+//go:generate stringer -type AlgKind -trimprefix A alg.go
+
+const (
+ ANOEQ AlgKind = iota
+ AMEM0
+ AMEM8
+ AMEM16
+ AMEM32
+ AMEM64
+ AMEM128
+ ASTRING
+ AINTER
+ ANILINTER
+ AFLOAT32
+ AFLOAT64
+ ACPLX64
+ ACPLX128
+
+ // Type can be compared/hashed as regular memory.
+ AMEM AlgKind = 100
+
+ // Type needs special comparison/hashing functions.
+ ASPECIAL AlgKind = -1
+)
+
+// AlgType returns the AlgKind used for comparing and hashing Type t.
+// If it returns ANOEQ, it also returns the component type of t that
+// makes it incomparable.
+func AlgType(t *Type) (AlgKind, *Type) {
+ if t.Noalg() {
+ return ANOEQ, t
+ }
+
+ switch t.Kind() {
+ case TANY, TFORW:
+ // will be defined later.
+ return ANOEQ, t
+
+ case TINT8, TUINT8, TINT16, TUINT16,
+ TINT32, TUINT32, TINT64, TUINT64,
+ TINT, TUINT, TUINTPTR,
+ TBOOL, TPTR,
+ TCHAN, TUNSAFEPTR:
+ return AMEM, nil
+
+ case TFUNC, TMAP:
+ return ANOEQ, t
+
+ case TFLOAT32:
+ return AFLOAT32, nil
+
+ case TFLOAT64:
+ return AFLOAT64, nil
+
+ case TCOMPLEX64:
+ return ACPLX64, nil
+
+ case TCOMPLEX128:
+ return ACPLX128, nil
+
+ case TSTRING:
+ return ASTRING, nil
+
+ case TINTER:
+ if t.IsEmptyInterface() {
+ return ANILINTER, nil
+ }
+ return AINTER, nil
+
+ case TSLICE:
+ return ANOEQ, t
+
+ case TARRAY:
+ a, bad := AlgType(t.Elem())
+ switch a {
+ case AMEM:
+ return AMEM, nil
+ case ANOEQ:
+ return ANOEQ, bad
+ }
+
+ switch t.NumElem() {
+ case 0:
+ // We checked above that the element type is comparable.
+ return AMEM, nil
+ case 1:
+ // Single-element array is same as its lone element.
+ return a, nil
+ }
+
+ return ASPECIAL, nil
+
+ case TSTRUCT:
+ fields := t.Fields()
+
+ // One-field struct is same as that one field alone.
+ if len(fields) == 1 && !fields[0].Sym.IsBlank() {
+ return AlgType(fields[0].Type)
+ }
+
+ ret := AMEM
+ for i, f := range fields {
+ // All fields must be comparable.
+ a, bad := AlgType(f.Type)
+ if a == ANOEQ {
+ return ANOEQ, bad
+ }
+
+ // Blank fields, padded fields, fields with non-memory
+ // equality need special compare.
+ if a != AMEM || f.Sym.IsBlank() || IsPaddedField(t, i) {
+ ret = ASPECIAL
+ }
+ }
+
+ return ret, nil
+ }
+
+ base.Fatalf("AlgType: unexpected type %v", t)
+ return 0, nil
+}
+
+// TypeHasNoAlg reports whether t does not have any associated hash/eq
+// algorithms because t, or some component of t, is marked Noalg.
+func TypeHasNoAlg(t *Type) bool {
+ a, bad := AlgType(t)
+ return a == ANOEQ && bad.Noalg()
+}
+
+// IsComparable reports whether t is a comparable type.
+func IsComparable(t *Type) bool {
+ a, _ := AlgType(t)
+ return a != ANOEQ
+}
+
+// IncomparableField returns an incomparable Field of struct Type t, if any.
+func IncomparableField(t *Type) *Field {
+ for _, f := range t.Fields() {
+ if !IsComparable(f.Type) {
+ return f
+ }
+ }
+ return nil
+}
+
+// IsPaddedField reports whether the i'th field of struct type t is followed
+// by padding.
+func IsPaddedField(t *Type, i int) bool {
+ if !t.IsStruct() {
+ base.Fatalf("IsPaddedField called non-struct %v", t)
+ }
+ end := t.width
+ if i+1 < t.NumFields() {
+ end = t.Field(i + 1).Offset
+ }
+ return t.Field(i).End() != end
+}
diff --git a/src/cmd/compile/internal/types/algkind_string.go b/src/cmd/compile/internal/types/algkind_string.go
new file mode 100644
index 0000000..a1b518e
--- /dev/null
+++ b/src/cmd/compile/internal/types/algkind_string.go
@@ -0,0 +1,48 @@
+// Code generated by "stringer -type AlgKind -trimprefix A alg.go"; DO NOT EDIT.
+
+package types
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ANOEQ-0]
+ _ = x[AMEM0-1]
+ _ = x[AMEM8-2]
+ _ = x[AMEM16-3]
+ _ = x[AMEM32-4]
+ _ = x[AMEM64-5]
+ _ = x[AMEM128-6]
+ _ = x[ASTRING-7]
+ _ = x[AINTER-8]
+ _ = x[ANILINTER-9]
+ _ = x[AFLOAT32-10]
+ _ = x[AFLOAT64-11]
+ _ = x[ACPLX64-12]
+ _ = x[ACPLX128-13]
+ _ = x[AMEM-100]
+ _ = x[ASPECIAL - -1]
+}
+
+const (
+ _AlgKind_name_0 = "SPECIALNOEQMEM0MEM8MEM16MEM32MEM64MEM128STRINGINTERNILINTERFLOAT32FLOAT64CPLX64CPLX128"
+ _AlgKind_name_1 = "MEM"
+)
+
+var (
+ _AlgKind_index_0 = [...]uint8{0, 7, 11, 15, 19, 24, 29, 34, 40, 46, 51, 59, 66, 73, 79, 86}
+)
+
+func (i AlgKind) String() string {
+ switch {
+ case -1 <= i && i <= 13:
+ i -= -1
+ return _AlgKind_name_0[_AlgKind_index_0[i]:_AlgKind_index_0[i+1]]
+ case i == 100:
+ return _AlgKind_name_1
+ default:
+ return "AlgKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
new file mode 100644
index 0000000..c9b9853
--- /dev/null
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -0,0 +1,650 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/internal/notsha256"
+)
+
+// BuiltinPkg is a fake package that declares the universe block.
+var BuiltinPkg *Pkg
+
+// LocalPkg is the package being compiled.
+var LocalPkg *Pkg
+
+// UnsafePkg is package unsafe.
+var UnsafePkg *Pkg
+
+// BlankSym is the blank (_) symbol.
+var BlankSym *Sym
+
+// numImport tracks how often a package with a given name is imported.
+// It is used to provide a better error message (by using the package
+// path to disambiguate) if a package that appears multiple times with
+// the same name appears in an error message.
+var NumImport = make(map[string]int)
+
+// fmtMode represents the kind of printing being done.
+// The default is regular Go syntax (fmtGo).
+// fmtDebug is like fmtGo but for debugging dumps and prints the type kind too.
+// fmtTypeID and fmtTypeIDName are for generating various unique representations
+// of types used in hashes, the linker, and function/method instantiations.
+type fmtMode int
+
+const (
+ fmtGo fmtMode = iota
+ fmtDebug
+ fmtTypeID
+ fmtTypeIDName
+)
+
+// Sym
+
+// Format implements formatting for a Sym.
+// The valid formats are:
+//
+// %v Go syntax: Name for symbols in the local package, PkgName.Name for imported symbols.
+// %+v Debug syntax: always include PkgName. prefix even for local names.
+// %S Short syntax: Name only, no matter what.
+func (s *Sym) Format(f fmt.State, verb rune) {
+ mode := fmtGo
+ switch verb {
+ case 'v', 'S':
+ if verb == 'v' && f.Flag('+') {
+ mode = fmtDebug
+ }
+ fmt.Fprint(f, sconv(s, verb, mode))
+
+ default:
+ fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
+ }
+}
+
+func (s *Sym) String() string {
+ return sconv(s, 0, fmtGo)
+}
+
+// See #16897 for details about performance implications
+// before changing the implementation of sconv.
+func sconv(s *Sym, verb rune, mode fmtMode) string {
+ if verb == 'L' {
+ panic("linksymfmt")
+ }
+
+ if s == nil {
+ return "<S>"
+ }
+
+ q := pkgqual(s.Pkg, verb, mode)
+ if q == "" {
+ return s.Name
+ }
+
+ buf := fmtBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer fmtBufferPool.Put(buf)
+
+ buf.WriteString(q)
+ buf.WriteByte('.')
+ buf.WriteString(s.Name)
+ return InternString(buf.Bytes())
+}
+
+func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+ if verb == 'L' {
+ panic("linksymfmt")
+ }
+ if s == nil {
+ b.WriteString("<S>")
+ return
+ }
+
+ symfmt(b, s, verb, mode)
+}
+
+func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+ name := s.Name
+ if q := pkgqual(s.Pkg, verb, mode); q != "" {
+ b.WriteString(q)
+ b.WriteByte('.')
+ }
+ b.WriteString(name)
+}
+
+// pkgqual returns the qualifier that should be used for printing
+// symbols from the given package in the given mode.
+// If it returns the empty string, no qualification is needed.
+func pkgqual(pkg *Pkg, verb rune, mode fmtMode) string {
+ if pkg == nil {
+ return ""
+ }
+ if verb != 'S' {
+ switch mode {
+ case fmtGo: // This is for the user
+ if pkg == BuiltinPkg || pkg == LocalPkg {
+ return ""
+ }
+
+ // If the name was used by multiple packages, display the full path,
+ if pkg.Name != "" && NumImport[pkg.Name] > 1 {
+ return strconv.Quote(pkg.Path)
+ }
+ return pkg.Name
+
+ case fmtDebug:
+ return pkg.Name
+
+ case fmtTypeIDName:
+ // dcommontype, typehash
+ return pkg.Name
+
+ case fmtTypeID:
+ // (methodsym), typesym, weaksym
+ return pkg.Prefix
+ }
+ }
+
+ return ""
+}
+
+// Type
+
+var BasicTypeNames = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TBOOL: "bool",
+ TANY: "any",
+ TSTRING: "string",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+ TBLANK: "blank",
+}
+
+var fmtBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+// Format implements formatting for a Type.
+// The valid formats are:
+//
+// %v Go syntax
+// %+v Debug syntax: Go syntax with a KIND- prefix for all but builtins.
+// %L Go syntax for underlying type if t is named
+// %S short Go syntax: drop leading "func" in function type
+// %-S special case for method receiver symbol
+func (t *Type) Format(s fmt.State, verb rune) {
+ mode := fmtGo
+ switch verb {
+ case 'v', 'S', 'L':
+ if verb == 'v' && s.Flag('+') { // %+v is debug format
+ mode = fmtDebug
+ }
+ if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format
+ mode = fmtTypeID
+ }
+ fmt.Fprint(s, tconv(t, verb, mode))
+ default:
+ fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
+ }
+}
+
+// String returns the Go syntax for the type t.
+func (t *Type) String() string {
+ return tconv(t, 0, fmtGo)
+}
+
+// LinkString returns a string description of t, suitable for use in
+// link symbols.
+//
+// The description corresponds to type identity. That is, for any pair
+// of types t1 and t2, Identical(t1, t2) == (t1.LinkString() ==
+// t2.LinkString()) is true. Thus it's safe to use as a map key to
+// implement a type-identity-keyed map.
+func (t *Type) LinkString() string {
+ return tconv(t, 0, fmtTypeID)
+}
+
+// NameString generates a user-readable, mostly unique string
+// description of t. NameString always returns the same description
+// for identical types, even across compilation units.
+//
+// NameString qualifies identifiers by package name, so it has
+// collisions when different packages share the same names and
+// identifiers. It also does not distinguish function-scope defined
+// types from package-scoped defined types or from each other.
+func (t *Type) NameString() string {
+ return tconv(t, 0, fmtTypeIDName)
+}
+
+func tconv(t *Type, verb rune, mode fmtMode) string {
+ buf := fmtBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer fmtBufferPool.Put(buf)
+
+ tconv2(buf, t, verb, mode, nil)
+ return InternString(buf.Bytes())
+}
+
+// tconv2 writes a string representation of t to b.
+// flag and mode control exactly what is printed.
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
+// See #16897 before changing the implementation of tconv.
+func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type]int) {
+ if off, ok := visited[t]; ok {
+ // We've seen this type before, so we're trying to print it recursively.
+ // Print a reference to it instead.
+ fmt.Fprintf(b, "@%d", off)
+ return
+ }
+ if t == nil {
+ b.WriteString("<T>")
+ return
+ }
+ if t.Kind() == TSSA {
+ b.WriteString(t.extra.(string))
+ return
+ }
+ if t.Kind() == TTUPLE {
+ b.WriteString(t.FieldType(0).String())
+ b.WriteByte(',')
+ b.WriteString(t.FieldType(1).String())
+ return
+ }
+
+ if t.Kind() == TRESULTS {
+ tys := t.extra.(*Results).Types
+ for i, et := range tys {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ b.WriteString(et.String())
+ }
+ return
+ }
+
+ if t == AnyType || t == ByteType || t == RuneType {
+ // in %-T mode collapse predeclared aliases with their originals.
+ switch mode {
+ case fmtTypeIDName, fmtTypeID:
+ t = Types[t.Kind()]
+ default:
+ sconv2(b, t.Sym(), 'S', mode)
+ return
+ }
+ }
+ if t == ErrorType {
+ b.WriteString("error")
+ return
+ }
+
+ // Unless the 'L' flag was specified, if the type has a name, just print that name.
+ if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
+ // Default to 'v' if verb is invalid.
+ if verb != 'S' {
+ verb = 'v'
+ }
+
+ // In unified IR, function-scope defined types will have a ·N
+ // suffix embedded directly in their Name. Trim this off for
+ // non-fmtTypeID modes.
+ sym := t.Sym()
+ if mode != fmtTypeID {
+ base, _ := SplitVargenSuffix(sym.Name)
+ if len(base) < len(sym.Name) {
+ sym = &Sym{Pkg: sym.Pkg, Name: base}
+ }
+ }
+ sconv2(b, sym, verb, mode)
+ return
+ }
+
+ if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" {
+ var name string
+ switch t {
+ case UntypedBool:
+ name = "untyped bool"
+ case UntypedString:
+ name = "untyped string"
+ case UntypedInt:
+ name = "untyped int"
+ case UntypedRune:
+ name = "untyped rune"
+ case UntypedFloat:
+ name = "untyped float"
+ case UntypedComplex:
+ name = "untyped complex"
+ default:
+ name = BasicTypeNames[t.Kind()]
+ }
+ b.WriteString(name)
+ return
+ }
+
+ if mode == fmtDebug {
+ b.WriteString(t.Kind().String())
+ b.WriteByte('-')
+ tconv2(b, t, 'v', fmtGo, visited)
+ return
+ }
+
+ // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
+ // try to print it recursively.
+ // We record the offset in the result buffer where the type's text starts. This offset serves as a reference
+ // point for any later references to the same type.
+ // Note that we remove the type from the visited map as soon as the recursive call is done.
+ // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
+ // but I'd like to use the @ notation only when strictly necessary.)
+ if visited == nil {
+ visited = map[*Type]int{}
+ }
+ visited[t] = b.Len()
+ defer delete(visited, t)
+
+ switch t.Kind() {
+ case TPTR:
+ b.WriteByte('*')
+ switch mode {
+ case fmtTypeID, fmtTypeIDName:
+ if verb == 'S' {
+ tconv2(b, t.Elem(), 'S', mode, visited)
+ return
+ }
+ }
+ tconv2(b, t.Elem(), 'v', mode, visited)
+
+ case TARRAY:
+ b.WriteByte('[')
+ b.WriteString(strconv.FormatInt(t.NumElem(), 10))
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TSLICE:
+ b.WriteString("[]")
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TCHAN:
+ switch t.ChanDir() {
+ case Crecv:
+ b.WriteString("<-chan ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ case Csend:
+ b.WriteString("chan<- ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ default:
+ b.WriteString("chan ")
+ if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == Crecv {
+ b.WriteByte('(')
+ tconv2(b, t.Elem(), 0, mode, visited)
+ b.WriteByte(')')
+ } else {
+ tconv2(b, t.Elem(), 0, mode, visited)
+ }
+ }
+
+ case TMAP:
+ b.WriteString("map[")
+ tconv2(b, t.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TINTER:
+ if t.IsEmptyInterface() {
+ b.WriteString("interface {}")
+ break
+ }
+ b.WriteString("interface {")
+ for i, f := range t.AllMethods() {
+ if i != 0 {
+ b.WriteByte(';')
+ }
+ b.WriteByte(' ')
+ switch {
+ case f.Sym == nil:
+ // Check first that a symbol is defined for this type.
+ // Wrong interface definitions may have types lacking a symbol.
+ break
+ case IsExported(f.Sym.Name):
+ sconv2(b, f.Sym, 'S', mode)
+ default:
+ if mode != fmtTypeIDName {
+ mode = fmtTypeID
+ }
+ sconv2(b, f.Sym, 'v', mode)
+ }
+ tconv2(b, f.Type, 'S', mode, visited)
+ }
+ if len(t.AllMethods()) != 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteByte('}')
+
+ case TFUNC:
+ if verb == 'S' {
+ // no leading func
+ } else {
+ if t.Recv() != nil {
+ b.WriteString("method")
+ formatParams(b, t.Recvs(), mode, visited)
+ b.WriteByte(' ')
+ }
+ b.WriteString("func")
+ }
+ formatParams(b, t.Params(), mode, visited)
+
+ switch t.NumResults() {
+ case 0:
+ // nothing to do
+
+ case 1:
+ b.WriteByte(' ')
+ tconv2(b, t.Result(0).Type, 0, mode, visited) // struct->field->field's type
+
+ default:
+ b.WriteByte(' ')
+ formatParams(b, t.Results(), mode, visited)
+ }
+
+ case TSTRUCT:
+ if m := t.StructType().Map; m != nil {
+ mt := m.MapType()
+ // Format the bucket struct for map[x]y as map.bucket[x]y.
+ // This avoids a recursive print that generates very long names.
+ switch t {
+ case mt.Bucket:
+ b.WriteString("map.bucket[")
+ default:
+ base.Fatalf("unknown internal map type")
+ }
+ tconv2(b, m.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, m.Elem(), 0, mode, visited)
+ break
+ }
+
+ b.WriteString("struct {")
+ for i, f := range t.Fields() {
+ if i != 0 {
+ b.WriteByte(';')
+ }
+ b.WriteByte(' ')
+ fldconv(b, f, 'L', mode, visited, false)
+ }
+ if t.NumFields() != 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteByte('}')
+
+ case TFORW:
+ b.WriteString("undefined")
+ if t.Sym() != nil {
+ b.WriteByte(' ')
+ sconv2(b, t.Sym(), 'v', mode)
+ }
+
+ case TUNSAFEPTR:
+ b.WriteString("unsafe.Pointer")
+
+ case Txxx:
+ b.WriteString("Txxx")
+
+ default:
+ // Don't know how to handle - fall back to detailed prints
+ b.WriteString(t.Kind().String())
+ b.WriteString(" <")
+ sconv2(b, t.Sym(), 'v', mode)
+ b.WriteString(">")
+
+ }
+}
+
+func formatParams(b *bytes.Buffer, params []*Field, mode fmtMode, visited map[*Type]int) {
+ b.WriteByte('(')
+ fieldVerb := 'v'
+ switch mode {
+ case fmtTypeID, fmtTypeIDName, fmtGo:
+ // no argument names on function signature, and no "noescape"/"nosplit" tags
+ fieldVerb = 'S'
+ }
+ for i, param := range params {
+ if i != 0 {
+ b.WriteString(", ")
+ }
+ fldconv(b, param, fieldVerb, mode, visited, true)
+ }
+ b.WriteByte(')')
+}
+
+func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, isParam bool) {
+ if f == nil {
+ b.WriteString("<T>")
+ return
+ }
+
+ var name string
+ nameSep := " "
+ if verb != 'S' {
+ s := f.Sym
+
+ // Using type aliases and embedded fields, it's possible to
+ // construct types that can't be directly represented as a
+ // type literal. For example, given "type Int = int" (#50190),
+ // it would be incorrect to format "struct{ Int }" as either
+ // "struct{ int }" or "struct{ Int int }", because those each
+ // represent other, distinct types.
+ //
+ // So for the purpose of LinkString (i.e., fmtTypeID), we use
+ // the non-standard syntax "struct{ Int = int }" to represent
+ // embedded fields that have been renamed through the use of
+ // type aliases.
+ if f.Embedded != 0 {
+ if mode == fmtTypeID {
+ nameSep = " = "
+
+ // Compute tsym, the symbol that would normally be used as
+ // the field name when embedding f.Type.
+ // TODO(mdempsky): Check for other occurrences of this logic
+ // and deduplicate.
+ typ := f.Type
+ if typ.IsPtr() {
+ base.Assertf(typ.Sym() == nil, "embedded pointer type has name: %L", typ)
+ typ = typ.Elem()
+ }
+ tsym := typ.Sym()
+
+ // If the field name matches the embedded type's name, then
+ // suppress printing of the field name. For example, format
+ // "struct{ T }" as simply that instead of "struct{ T = T }".
+ if tsym != nil && (s == tsym || IsExported(tsym.Name) && s.Name == tsym.Name) {
+ s = nil
+ }
+ } else {
+ // Suppress the field name for embedded fields for
+ // non-LinkString formats, to match historical behavior.
+ // TODO(mdempsky): Re-evaluate this.
+ s = nil
+ }
+ }
+
+ if s != nil {
+ if isParam {
+ name = fmt.Sprint(f.Nname)
+ } else if verb == 'L' {
+ name = s.Name
+ if !IsExported(name) && mode != fmtTypeIDName {
+ name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg)
+ }
+ } else {
+ name = sconv(s, 0, mode)
+ }
+ }
+ }
+
+ if name != "" {
+ b.WriteString(name)
+ b.WriteString(nameSep)
+ }
+
+ if f.IsDDD() {
+ var et *Type
+ if f.Type != nil {
+ et = f.Type.Elem()
+ }
+ b.WriteString("...")
+ tconv2(b, et, 0, mode, visited)
+ } else {
+ tconv2(b, f.Type, 0, mode, visited)
+ }
+
+ if verb != 'S' && !isParam && f.Note != "" {
+ b.WriteString(" ")
+ b.WriteString(strconv.Quote(f.Note))
+ }
+}
+
+// SplitVargenSuffix returns name split into a base string and a ·N
+// suffix, if any.
+func SplitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
+
+// TypeHash computes a hash value for type t to use in type switch statements.
+func TypeHash(t *Type) uint32 {
+ p := t.LinkString()
+
+ // Using SHA256 is overkill, but reduces accidental collisions.
+ h := notsha256.Sum256([]byte(p))
+ return binary.LittleEndian.Uint32(h[:4])
+}
diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
new file mode 100644
index 0000000..ac08a49
--- /dev/null
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -0,0 +1,88 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "internal/goversion"
+ "internal/lazyregexp"
+ "log"
+ "strconv"
+
+ "cmd/compile/internal/base"
+)
+
+// A lang is a language version broken into major and minor numbers.
+type lang struct {
+ major, minor int
+}
+
+// langWant is the desired language version set by the -lang flag.
+// If the -lang flag is not set, this is the zero value, meaning that
+// any language version is supported.
+var langWant lang
+
+// AllowsGoVersion reports whether local package is allowed
+// to use Go version major.minor.
+func AllowsGoVersion(major, minor int) bool {
+ if langWant.major == 0 && langWant.minor == 0 {
+ return true
+ }
+ return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
+}
+
+// ParseLangFlag verifies that the -lang flag holds a valid value, and
+// exits if not. It initializes data used by AllowsGoVersion.
+func ParseLangFlag() {
+ if base.Flag.Lang == "" {
+ return
+ }
+
+ var err error
+ langWant, err = parseLang(base.Flag.Lang)
+ if err != nil {
+ log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
+ }
+
+ if def := currentLang(); base.Flag.Lang != def {
+ defVers, err := parseLang(def)
+ if err != nil {
+ log.Fatalf("internal error parsing default lang %q: %v", def, err)
+ }
+ if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
+ log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
+ }
+ }
+}
+
+// parseLang parses a -lang option into a langVer.
+func parseLang(s string) (lang, error) {
+ if s == "go1" { // cmd/go's new spelling of "go1.0" (#65528)
+ s = "go1.0"
+ }
+
+ matches := goVersionRE.FindStringSubmatch(s)
+ if matches == nil {
+ return lang{}, fmt.Errorf(`should be something like "go1.12"`)
+ }
+ major, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return lang{}, err
+ }
+ minor, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return lang{}, err
+ }
+ return lang{major: major, minor: minor}, nil
+}
+
+// currentLang returns the current language version.
+func currentLang() string {
+ return fmt.Sprintf("go1.%d", goversion.Version)
+}
+
+// goVersionRE is a regular expression that matches the valid
+// arguments to the -lang flag.
+var goVersionRE = lazyregexp.New(`^go([1-9]\d*)\.(0|[1-9]\d*)$`)
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
new file mode 100644
index 0000000..fa28c03
--- /dev/null
+++ b/src/cmd/compile/internal/types/identity.go
@@ -0,0 +1,157 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+const (
+ identIgnoreTags = 1 << iota
+ identStrict
+)
+
+// Identical reports whether t1 and t2 are identical types, following the spec rules.
+// Receiver parameter types are ignored. Named (defined) types are only equal if they
+// are pointer-equal - i.e. there must be a unique types.Type for each specific named
+// type. Also, a type containing a shape type is considered identical to another type
+// (shape or not) if their underlying types are the same, or they are both pointers.
+func Identical(t1, t2 *Type) bool {
+ return identical(t1, t2, 0, nil)
+}
+
+// IdenticalIgnoreTags is like Identical, but it ignores struct tags
+// for struct identity.
+func IdenticalIgnoreTags(t1, t2 *Type) bool {
+ return identical(t1, t2, identIgnoreTags, nil)
+}
+
+// IdenticalStrict is like Identical, but matches types exactly, without the
+// exception for shapes.
+func IdenticalStrict(t1, t2 *Type) bool {
+ return identical(t1, t2, identStrict, nil)
+}
+
+type typePair struct {
+ t1 *Type
+ t2 *Type
+}
+
+func identical(t1, t2 *Type, flags int, assumedEqual map[typePair]struct{}) bool {
+ if t1 == t2 {
+ return true
+ }
+ if t1 == nil || t2 == nil || t1.kind != t2.kind {
+ return false
+ }
+ if t1.obj != nil || t2.obj != nil {
+ if flags&identStrict == 0 && (t1.HasShape() || t2.HasShape()) {
+ switch t1.kind {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64, TBOOL, TSTRING, TPTR, TUNSAFEPTR:
+ return true
+ }
+ // fall through to unnamed type comparison for complex types.
+ goto cont
+ }
+ // Special case: we keep byte/uint8 and rune/int32
+ // separate for error messages. Treat them as equal.
+ switch t1.kind {
+ case TUINT8:
+ return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType)
+ case TINT32:
+ return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType)
+ case TINTER:
+ // Make sure named any type matches any unnamed empty interface
+ // (but not a shape type, if identStrict).
+ isUnnamedEface := func(t *Type) bool { return t.IsEmptyInterface() && t.Sym() == nil }
+ if flags&identStrict != 0 {
+ return t1 == AnyType && isUnnamedEface(t2) && !t2.HasShape() || t2 == AnyType && isUnnamedEface(t1) && !t1.HasShape()
+ }
+ return t1 == AnyType && isUnnamedEface(t2) || t2 == AnyType && isUnnamedEface(t1)
+ default:
+ return false
+ }
+ }
+cont:
+
+ // Any cyclic type must go through a named type, and if one is
+ // named, it is only identical to the other if they are the
+ // same pointer (t1 == t2), so there's no chance of chasing
+ // cycles ad infinitum, so no need for a depth counter.
+ if assumedEqual == nil {
+ assumedEqual = make(map[typePair]struct{})
+ } else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
+ return true
+ }
+ assumedEqual[typePair{t1, t2}] = struct{}{}
+
+ switch t1.kind {
+ case TIDEAL:
+ // Historically, cmd/compile used a single "untyped
+ // number" type, so all untyped number types were
+ // identical. Match this behavior.
+ // TODO(mdempsky): Revisit this.
+ return true
+
+ case TINTER:
+ if len(t1.AllMethods()) != len(t2.AllMethods()) {
+ return false
+ }
+ for i, f1 := range t1.AllMethods() {
+ f2 := t2.AllMethods()[i]
+ if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, flags, assumedEqual) {
+ return false
+ }
+ }
+ return true
+
+ case TSTRUCT:
+ if t1.NumFields() != t2.NumFields() {
+ return false
+ }
+ for i, f1 := range t1.Fields() {
+ f2 := t2.Field(i)
+ if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, flags, assumedEqual) {
+ return false
+ }
+ if (flags&identIgnoreTags) == 0 && f1.Note != f2.Note {
+ return false
+ }
+ }
+ return true
+
+ case TFUNC:
+ // Check parameters and result parameters for type equality.
+ // We intentionally ignore receiver parameters for type
+ // equality, because they're never relevant.
+ if t1.NumParams() != t2.NumParams() ||
+ t1.NumResults() != t2.NumResults() ||
+ t1.IsVariadic() != t2.IsVariadic() {
+ return false
+ }
+
+ fs1 := t1.ParamsResults()
+ fs2 := t2.ParamsResults()
+ for i, f1 := range fs1 {
+ if !identical(f1.Type, fs2[i].Type, flags, assumedEqual) {
+ return false
+ }
+ }
+ return true
+
+ case TARRAY:
+ if t1.NumElem() != t2.NumElem() {
+ return false
+ }
+
+ case TCHAN:
+ if t1.ChanDir() != t2.ChanDir() {
+ return false
+ }
+
+ case TMAP:
+ if !identical(t1.Key(), t2.Key(), flags, assumedEqual) {
+ return false
+ }
+ }
+
+ return identical(t1.Elem(), t2.Elem(), flags, assumedEqual)
+}
diff --git a/src/cmd/compile/internal/types/kind_string.go b/src/cmd/compile/internal/types/kind_string.go
new file mode 100644
index 0000000..1e1e846
--- /dev/null
+++ b/src/cmd/compile/internal/types/kind_string.go
@@ -0,0 +1,60 @@
+// Code generated by "stringer -type Kind -trimprefix T type.go"; DO NOT EDIT.
+
+package types
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Txxx-0]
+ _ = x[TINT8-1]
+ _ = x[TUINT8-2]
+ _ = x[TINT16-3]
+ _ = x[TUINT16-4]
+ _ = x[TINT32-5]
+ _ = x[TUINT32-6]
+ _ = x[TINT64-7]
+ _ = x[TUINT64-8]
+ _ = x[TINT-9]
+ _ = x[TUINT-10]
+ _ = x[TUINTPTR-11]
+ _ = x[TCOMPLEX64-12]
+ _ = x[TCOMPLEX128-13]
+ _ = x[TFLOAT32-14]
+ _ = x[TFLOAT64-15]
+ _ = x[TBOOL-16]
+ _ = x[TPTR-17]
+ _ = x[TFUNC-18]
+ _ = x[TSLICE-19]
+ _ = x[TARRAY-20]
+ _ = x[TSTRUCT-21]
+ _ = x[TCHAN-22]
+ _ = x[TMAP-23]
+ _ = x[TINTER-24]
+ _ = x[TFORW-25]
+ _ = x[TANY-26]
+ _ = x[TSTRING-27]
+ _ = x[TUNSAFEPTR-28]
+ _ = x[TIDEAL-29]
+ _ = x[TNIL-30]
+ _ = x[TBLANK-31]
+ _ = x[TFUNCARGS-32]
+ _ = x[TCHANARGS-33]
+ _ = x[TSSA-34]
+ _ = x[TTUPLE-35]
+ _ = x[TRESULTS-36]
+ _ = x[NTYPE-37]
+}
+
+const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+
+var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202}
+
+func (i Kind) String() string {
+ if i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
new file mode 100644
index 0000000..c6ce788
--- /dev/null
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -0,0 +1,131 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "fmt"
+ "strconv"
+ "sync"
+)
+
+// pkgMap maps a package path to a package.
+var pkgMap = make(map[string]*Pkg)
+
+type Pkg struct {
+ Path string // string literal used in import statement, e.g. "runtime/internal/sys"
+ Name string // package name, e.g. "sys"
+ Prefix string // escaped path for use in symbol table
+ Syms map[string]*Sym
+ Pathsym *obj.LSym
+
+ Direct bool // imported directly
+}
+
+// NewPkg returns a new Pkg for the given package path and name.
+// Unless name is the empty string, if the package exists already,
+// the existing package name and the provided name must match.
+func NewPkg(path, name string) *Pkg {
+ if p := pkgMap[path]; p != nil {
+ if name != "" && p.Name != name {
+ panic(fmt.Sprintf("conflicting package names %s and %s for path %q", p.Name, name, path))
+ }
+ return p
+ }
+
+ p := new(Pkg)
+ p.Path = path
+ p.Name = name
+ if path == "go.shape" {
+ // Don't escape "go.shape", since it's not needed (it's a builtin
+ // package), and we don't want escape codes showing up in shape type
+ // names, which also appear in names of function/method
+ // instantiations.
+ p.Prefix = path
+ } else {
+ p.Prefix = objabi.PathToPrefix(path)
+ }
+ p.Syms = make(map[string]*Sym)
+ pkgMap[path] = p
+
+ return p
+}
+
+func PkgMap() map[string]*Pkg {
+ return pkgMap
+}
+
+var nopkg = &Pkg{
+ Syms: make(map[string]*Sym),
+}
+
+func (pkg *Pkg) Lookup(name string) *Sym {
+ s, _ := pkg.LookupOK(name)
+ return s
+}
+
+// LookupOK looks up name in pkg and reports whether it previously existed.
+func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
+ // TODO(gri) remove this check in favor of specialized lookup
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[name]; s != nil {
+ return s, true
+ }
+
+ s = &Sym{
+ Name: name,
+ Pkg: pkg,
+ }
+ pkg.Syms[name] = s
+ return s, false
+}
+
+func (pkg *Pkg) LookupBytes(name []byte) *Sym {
+ // TODO(gri) remove this check in favor of specialized lookup
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[string(name)]; s != nil {
+ return s
+ }
+ str := InternString(name)
+ return pkg.Lookup(str)
+}
+
+// LookupNum looks up the symbol starting with prefix and ending with
+// the decimal n. If prefix is too long, LookupNum panics.
+func (pkg *Pkg) LookupNum(prefix string, n int) *Sym {
+ var buf [20]byte // plenty long enough for all current users
+ copy(buf[:], prefix)
+ b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
+ return pkg.LookupBytes(b)
+}
+
+// Selector looks up a selector identifier.
+func (pkg *Pkg) Selector(name string) *Sym {
+ if IsExported(name) {
+ pkg = LocalPkg
+ }
+ return pkg.Lookup(name)
+}
+
+var (
+ internedStringsmu sync.Mutex // protects internedStrings
+ internedStrings = map[string]string{}
+)
+
+func InternString(b []byte) string {
+ internedStringsmu.Lock()
+ s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
+ if !ok {
+ s = string(b)
+ internedStrings[s] = s
+ }
+ internedStringsmu.Unlock()
+ return s
+}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
new file mode 100644
index 0000000..6ba2b91
--- /dev/null
+++ b/src/cmd/compile/internal/types/size.go
@@ -0,0 +1,638 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "math"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+ "internal/types/errors"
+)
+
+var PtrSize int
+
+var RegSize int
+
+// Slices in the runtime are represented by three components:
+//
+// type slice struct {
+// ptr unsafe.Pointer
+// len int
+// cap int
+// }
+//
+// Strings in the runtime are represented by two components:
+//
+// type string struct {
+// ptr unsafe.Pointer
+// len int
+// }
+//
+// These variables are the offsets of fields and sizes of these structs.
+var (
+ SlicePtrOffset int64
+ SliceLenOffset int64
+ SliceCapOffset int64
+
+ SliceSize int64
+ StringSize int64
+)
+
+var SkipSizeForTracing bool
+
+// typePos returns the position associated with t.
+// This is where t was declared or where it appeared as a type expression.
+func typePos(t *Type) src.XPos {
+ if pos := t.Pos(); pos.IsKnown() {
+ return pos
+ }
+ base.Fatalf("bad type: %v", t)
+ panic("unreachable")
+}
+
+// MaxWidth is the maximum size of a value on the target architecture.
+var MaxWidth int64
+
+// CalcSizeDisabled indicates whether it is safe
+// to calculate Types' widths and alignments. See CalcSize.
+var CalcSizeDisabled bool
+
+// machine size and rounding alignment is dictated around
+// the size of a pointer, set in gc.Main (see ../gc/main.go).
+var defercalc int
+
+// RoundUp rounds o to a multiple of r, r is a power of 2.
+func RoundUp(o int64, r int64) int64 {
+ if r < 1 || r > 8 || r&(r-1) != 0 {
+ base.Fatalf("Round %d", r)
+ }
+ return (o + r - 1) &^ (r - 1)
+}
+
+// expandiface computes the method set for interface type t by
+// expanding embedded interfaces.
+func expandiface(t *Type) {
+ seen := make(map[*Sym]*Field)
+ var methods []*Field
+
+ addMethod := func(m *Field, explicit bool) {
+ switch prev := seen[m.Sym]; {
+ case prev == nil:
+ seen[m.Sym] = m
+ case !explicit && Identical(m.Type, prev.Type):
+ return
+ default:
+ base.ErrorfAt(m.Pos, errors.DuplicateDecl, "duplicate method %s", m.Sym.Name)
+ }
+ methods = append(methods, m)
+ }
+
+ {
+ methods := t.Methods()
+ sort.SliceStable(methods, func(i, j int) bool {
+ mi, mj := methods[i], methods[j]
+
+ // Sort embedded types by type name (if any).
+ if mi.Sym == nil && mj.Sym == nil {
+ return mi.Type.Sym().Less(mj.Type.Sym())
+ }
+
+ // Sort methods before embedded types.
+ if mi.Sym == nil || mj.Sym == nil {
+ return mi.Sym != nil
+ }
+
+ // Sort methods by symbol name.
+ return mi.Sym.Less(mj.Sym)
+ })
+ }
+
+ for _, m := range t.Methods() {
+ if m.Sym == nil {
+ continue
+ }
+
+ CheckSize(m.Type)
+ addMethod(m, true)
+ }
+
+ for _, m := range t.Methods() {
+ if m.Sym != nil || m.Type == nil {
+ continue
+ }
+
+ // In 1.18, embedded types can be anything. In Go 1.17, we disallow
+ // embedding anything other than interfaces. This requirement was caught
+ // by types2 already, so allow non-interface here.
+ if !m.Type.IsInterface() {
+ continue
+ }
+
+ // Embedded interface: duplicate all methods
+ // and add to t's method set.
+ for _, t1 := range m.Type.AllMethods() {
+ f := NewField(m.Pos, t1.Sym, t1.Type)
+ addMethod(f, false)
+
+ // Clear position after typechecking, for consistency with types2.
+ f.Pos = src.NoXPos
+ }
+
+ // Clear position after typechecking, for consistency with types2.
+ m.Pos = src.NoXPos
+ }
+
+ sort.Sort(MethodsByName(methods))
+
+ if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
+ base.ErrorfAt(typePos(t), 0, "interface too large")
+ }
+ for i, m := range methods {
+ m.Offset = int64(i) * int64(PtrSize)
+ }
+
+ t.SetAllMethods(methods)
+}
+
+// calcStructOffset computes the offsets of a sequence of fields,
+// starting at the given offset. It returns the resulting offset and
+// maximum field alignment.
+func calcStructOffset(t *Type, fields []*Field, offset int64) int64 {
+ for _, f := range fields {
+ CalcSize(f.Type)
+ offset = RoundUp(offset, int64(f.Type.align))
+
+ if t.IsStruct() { // param offsets depend on ABI
+ f.Offset = offset
+
+ // If type T contains a field F marked as not-in-heap,
+ // then T must also be a not-in-heap type. Otherwise,
+ // you could heap allocate T and then get a pointer F,
+ // which would be a heap pointer to a not-in-heap type.
+ if f.Type.NotInHeap() {
+ t.SetNotInHeap(true)
+ }
+ }
+
+ offset += f.Type.width
+
+ maxwidth := MaxWidth
+ // On 32-bit systems, reflect tables impose an additional constraint
+ // that each field start offset must fit in 31 bits.
+ if maxwidth < 1<<32 {
+ maxwidth = 1<<31 - 1
+ }
+ if offset >= maxwidth {
+ base.ErrorfAt(typePos(t), 0, "type %L too large", t)
+ offset = 8 // small but nonzero
+ }
+ }
+
+ return offset
+}
+
+func isAtomicStdPkg(p *Pkg) bool {
+ if p.Prefix == `""` {
+ panic("bad package prefix")
+ }
+ return p.Prefix == "sync/atomic" || p.Prefix == "runtime/internal/atomic"
+}
+
+// CalcSize calculates and stores the size and alignment for t.
+// If CalcSizeDisabled is set, and the size/alignment
+// have not already been calculated, it calls Fatal.
+// This is used to prevent data races in the back end.
+func CalcSize(t *Type) {
+ // Calling CalcSize when typecheck tracing enabled is not safe.
+ // See issue #33658.
+ if base.EnableTrace && SkipSizeForTracing {
+ return
+ }
+ if PtrSize == 0 {
+ // Assume this is a test.
+ return
+ }
+
+ if t == nil {
+ return
+ }
+
+ if t.width == -2 {
+ t.width = 0
+ t.align = 1
+ base.Fatalf("invalid recursive type %v", t)
+ return
+ }
+
+ if t.widthCalculated() {
+ return
+ }
+
+ if CalcSizeDisabled {
+ base.Fatalf("width not calculated: %v", t)
+ }
+
+ // defer CheckSize calls until after we're done
+ DeferCheckSize()
+
+ lno := base.Pos
+ if pos := t.Pos(); pos.IsKnown() {
+ base.Pos = pos
+ }
+
+ t.width = -2
+ t.align = 0 // 0 means use t.Width, below
+
+ et := t.Kind()
+ switch et {
+ case TFUNC, TCHAN, TMAP, TSTRING:
+ break
+
+ // SimType == 0 during bootstrap
+ default:
+ if SimType[t.Kind()] != 0 {
+ et = SimType[t.Kind()]
+ }
+ }
+
+ var w int64
+ switch et {
+ default:
+ base.Fatalf("CalcSize: unknown type: %v", t)
+
+ // compiler-specific stuff
+ case TINT8, TUINT8, TBOOL:
+ // bool is int8
+ w = 1
+ t.intRegs = 1
+
+ case TINT16, TUINT16:
+ w = 2
+ t.intRegs = 1
+
+ case TINT32, TUINT32:
+ w = 4
+ t.intRegs = 1
+
+ case TINT64, TUINT64:
+ w = 8
+ t.align = uint8(RegSize)
+ t.intRegs = uint8(8 / RegSize)
+
+ case TFLOAT32:
+ w = 4
+ t.floatRegs = 1
+
+ case TFLOAT64:
+ w = 8
+ t.align = uint8(RegSize)
+ t.floatRegs = 1
+
+ case TCOMPLEX64:
+ w = 8
+ t.align = 4
+ t.floatRegs = 2
+
+ case TCOMPLEX128:
+ w = 16
+ t.align = uint8(RegSize)
+ t.floatRegs = 2
+
+ case TPTR:
+ w = int64(PtrSize)
+ t.intRegs = 1
+ CheckSize(t.Elem())
+
+ case TUNSAFEPTR:
+ w = int64(PtrSize)
+ t.intRegs = 1
+
+ case TINTER: // implemented as 2 pointers
+ w = 2 * int64(PtrSize)
+ t.align = uint8(PtrSize)
+ t.intRegs = 2
+ expandiface(t)
+
+ case TCHAN: // implemented as pointer
+ w = int64(PtrSize)
+ t.intRegs = 1
+
+ CheckSize(t.Elem())
+
+ // Make fake type to trigger channel element size check after
+ // any top-level recursive type has been completed.
+ t1 := NewChanArgs(t)
+ CheckSize(t1)
+
+ case TCHANARGS:
+ t1 := t.ChanArgs()
+ CalcSize(t1) // just in case
+ // Make sure size of t1.Elem() is calculated at this point. We can
+ // use CalcSize() here rather than CheckSize(), because the top-level
+ // (possibly recursive) type will have been calculated before the fake
+ // chanargs is handled.
+ CalcSize(t1.Elem())
+ if t1.Elem().width >= 1<<16 {
+ base.Errorf("channel element type too large (>64kB)")
+ }
+ w = 1 // anything will do
+
+ case TMAP: // implemented as pointer
+ w = int64(PtrSize)
+ t.intRegs = 1
+ CheckSize(t.Elem())
+ CheckSize(t.Key())
+
+ case TFORW: // should have been filled in
+ base.Fatalf("invalid recursive type %v", t)
+
+ case TANY: // not a real type; should be replaced before use.
+ base.Fatalf("CalcSize any")
+
+ case TSTRING:
+ if StringSize == 0 {
+ base.Fatalf("early CalcSize string")
+ }
+ w = StringSize
+ t.align = uint8(PtrSize)
+ t.intRegs = 2
+
+ case TARRAY:
+ if t.Elem() == nil {
+ break
+ }
+
+ CalcSize(t.Elem())
+ t.SetNotInHeap(t.Elem().NotInHeap())
+ if t.Elem().width != 0 {
+ cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().width)
+ if uint64(t.NumElem()) > cap {
+ base.Errorf("type %L larger than address space", t)
+ }
+ }
+ w = t.NumElem() * t.Elem().width
+ t.align = t.Elem().align
+
+ // ABIInternal only allows "trivial" arrays (i.e., length 0 or 1)
+ // to be passed by register.
+ switch t.NumElem() {
+ case 0:
+ t.intRegs = 0
+ t.floatRegs = 0
+ case 1:
+ t.intRegs = t.Elem().intRegs
+ t.floatRegs = t.Elem().floatRegs
+ default:
+ t.intRegs = math.MaxUint8
+ t.floatRegs = math.MaxUint8
+ }
+
+ case TSLICE:
+ if t.Elem() == nil {
+ break
+ }
+ w = SliceSize
+ CheckSize(t.Elem())
+ t.align = uint8(PtrSize)
+ t.intRegs = 3
+
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ base.Fatalf("CalcSize fn struct %v", t)
+ }
+ CalcStructSize(t)
+ w = t.width
+
+ // make fake type to check later to
+ // trigger function argument computation.
+ case TFUNC:
+ t1 := NewFuncArgs(t)
+ CheckSize(t1)
+ w = int64(PtrSize) // width of func type is pointer
+ t.intRegs = 1
+
+ // function is 3 cated structures;
+ // compute their widths as side-effect.
+ case TFUNCARGS:
+ t1 := t.FuncArgs()
+ // TODO(mdempsky): Should package abi be responsible for computing argwid?
+ w = calcStructOffset(t1, t1.Recvs(), 0)
+ w = calcStructOffset(t1, t1.Params(), w)
+ w = RoundUp(w, int64(RegSize))
+ w = calcStructOffset(t1, t1.Results(), w)
+ w = RoundUp(w, int64(RegSize))
+ t1.extra.(*Func).Argwid = w
+ t.align = 1
+ }
+
+ if PtrSize == 4 && w != int64(int32(w)) {
+ base.Errorf("type %v too large", t)
+ }
+
+ t.width = w
+ if t.align == 0 {
+ if w == 0 || w > 8 || w&(w-1) != 0 {
+ base.Fatalf("invalid alignment for %v", t)
+ }
+ t.align = uint8(w)
+ }
+
+ base.Pos = lno
+
+ ResumeCheckSize()
+}
+
+// CalcStructSize calculates the size of t,
+// filling in t.width, t.align, t.intRegs, and t.floatRegs,
+// even if size calculation is otherwise disabled.
+func CalcStructSize(t *Type) {
+ var maxAlign uint8 = 1
+
+ // Recognize special types. This logic is duplicated in go/types and
+ // cmd/compile/internal/types2.
+ if sym := t.Sym(); sym != nil {
+ switch {
+ case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg):
+ maxAlign = 8
+ case sym.Pkg.Path == "runtime/internal/sys" && sym.Name == "nih":
+ t.SetNotInHeap(true)
+ }
+ }
+
+ fields := t.Fields()
+ size := calcStructOffset(t, fields, 0)
+
+ // For non-zero-sized structs which end in a zero-sized field, we
+ // add an extra byte of padding to the type. This padding ensures
+ // that taking the address of a zero-sized field can't manufacture a
+ // pointer to the next object in the heap. See issue 9401.
+ if size > 0 && fields[len(fields)-1].Type.width == 0 {
+ size++
+ }
+
+ var intRegs, floatRegs uint64
+ for _, field := range fields {
+ typ := field.Type
+
+ // The alignment of a struct type is the maximum alignment of its
+ // field types.
+ if align := typ.align; align > maxAlign {
+ maxAlign = align
+ }
+
+ // Each field needs its own registers.
+ // We sum in uint64 to avoid possible overflows.
+ intRegs += uint64(typ.intRegs)
+ floatRegs += uint64(typ.floatRegs)
+ }
+
+ // Final size includes trailing padding.
+ size = RoundUp(size, int64(maxAlign))
+
+ if intRegs > math.MaxUint8 || floatRegs > math.MaxUint8 {
+ intRegs = math.MaxUint8
+ floatRegs = math.MaxUint8
+ }
+
+ t.width = size
+ t.align = maxAlign
+ t.intRegs = uint8(intRegs)
+ t.floatRegs = uint8(floatRegs)
+}
+
+func (t *Type) widthCalculated() bool {
+ return t.align > 0
+}
+
+// when a type's width should be known, we call CheckSize
+// to compute it. during a declaration like
+//
+// type T *struct { next T }
+//
+// it is necessary to defer the calculation of the struct width
+// until after T has been initialized to be a pointer to that struct.
+// similarly, during import processing structs may be used
+// before their definition. in those situations, calling
+// DeferCheckSize() stops width calculations until
+// ResumeCheckSize() is called, at which point all the
+// CalcSizes that were deferred are executed.
+// CalcSize should only be called when the type's size
+// is needed immediately. CheckSize makes sure the
+// size is evaluated eventually.
+
+var deferredTypeStack []*Type
+
+func CheckSize(t *Type) {
+ if t == nil {
+ return
+ }
+
+ // function arg structs should not be checked
+ // outside of the enclosing function.
+ if t.IsFuncArgStruct() {
+ base.Fatalf("CheckSize %v", t)
+ }
+
+ if defercalc == 0 {
+ CalcSize(t)
+ return
+ }
+
+ // if type has not yet been pushed on deferredTypeStack yet, do it now
+ if !t.Deferwidth() {
+ t.SetDeferwidth(true)
+ deferredTypeStack = append(deferredTypeStack, t)
+ }
+}
+
+func DeferCheckSize() {
+ defercalc++
+}
+
+func ResumeCheckSize() {
+ if defercalc == 1 {
+ for len(deferredTypeStack) > 0 {
+ t := deferredTypeStack[len(deferredTypeStack)-1]
+ deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
+ t.SetDeferwidth(false)
+ CalcSize(t)
+ }
+ }
+
+ defercalc--
+}
+
+// PtrDataSize returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+//
+// PtrDataSize is only defined for actual Go types. It's an error to
+// use it on compiler-internal types (e.g., TSSA, TRESULTS).
+func PtrDataSize(t *Type) int64 {
+ switch t.Kind() {
+ case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+ TUINT32, TINT64, TUINT64, TINT, TUINT,
+ TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+ return 0
+
+ case TPTR:
+ if t.Elem().NotInHeap() {
+ return 0
+ }
+ return int64(PtrSize)
+
+ case TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+ return int64(PtrSize)
+
+ case TSTRING:
+ // struct { byte *str; intgo len; }
+ return int64(PtrSize)
+
+ case TINTER:
+ // struct { Itab *tab; void *data; } or
+ // struct { Type *type; void *data; }
+ // Note: see comment in typebits.Set
+ return 2 * int64(PtrSize)
+
+ case TSLICE:
+ if t.Elem().NotInHeap() {
+ return 0
+ }
+ // struct { byte *array; uintgo len; uintgo cap; }
+ return int64(PtrSize)
+
+ case TARRAY:
+ if t.NumElem() == 0 {
+ return 0
+ }
+ // t.NumElem() > 0
+ size := PtrDataSize(t.Elem())
+ if size == 0 {
+ return 0
+ }
+ return (t.NumElem()-1)*t.Elem().Size() + size
+
+ case TSTRUCT:
+ // Find the last field that has pointers, if any.
+ fs := t.Fields()
+ for i := len(fs) - 1; i >= 0; i-- {
+ if size := PtrDataSize(fs[i].Type); size > 0 {
+ return fs[i].Offset + size
+ }
+ }
+ return 0
+
+ case TSSA:
+ if t != TypeInt128 {
+ base.Fatalf("PtrDataSize: unexpected ssa type %v", t)
+ }
+ return 0
+
+ default:
+ base.Fatalf("PtrDataSize: unexpected type, %v", t)
+ return 0
+ }
+}
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
new file mode 100644
index 0000000..8a6f241
--- /dev/null
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -0,0 +1,48 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the size of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Sym{}, 32, 64},
+ {Type{}, 56, 96},
+ {Map{}, 12, 24},
+ {Forward{}, 20, 32},
+ {Func{}, 32, 56},
+ {Struct{}, 12, 24},
+ {Interface{}, 0, 0},
+ {Chan{}, 8, 16},
+ {Array{}, 12, 16},
+ {FuncArgs{}, 4, 8},
+ {ChanArgs{}, 4, 8},
+ {Ptr{}, 4, 8},
+ {Slice{}, 4, 8},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types/sort.go b/src/cmd/compile/internal/types/sort.go
new file mode 100644
index 0000000..765c070
--- /dev/null
+++ b/src/cmd/compile/internal/types/sort.go
@@ -0,0 +1,19 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// MethodsByName sorts methods by name.
+type MethodsByName []*Field
+
+func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
+
+// EmbeddedsByName sorts embedded types by name.
+type EmbeddedsByName []*Field
+
+func (x EmbeddedsByName) Len() int { return len(x) }
+func (x EmbeddedsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x EmbeddedsByName) Less(i, j int) bool { return x[i].Type.Sym().Less(x[j].Type.Sym()) }
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
new file mode 100644
index 0000000..67fa6bb
--- /dev/null
+++ b/src/cmd/compile/internal/types/sym.go
@@ -0,0 +1,138 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/obj"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Sym represents an object name in a segmented (pkg, name) namespace.
+// Most commonly, this is a Go identifier naming an object declared within a package,
+// but Syms are also used to name internal synthesized objects.
+//
+// As an exception, field and method names that are exported use the Sym
+// associated with localpkg instead of the package that declared them. This
+// allows using Sym pointer equality to test for Go identifier uniqueness when
+// handling selector expressions.
+//
+// Ideally, Sym should be used for representing Go language constructs,
+// while cmd/internal/obj.LSym is used for representing emitted artifacts.
+//
+// NOTE: In practice, things can be messier than the description above
+// for various reasons (historical, convenience).
+type Sym struct {
+ Linkname string // link name
+
+ Pkg *Pkg
+ Name string // object name
+
+ // The unique ONAME, OTYPE, OPACK, or OLITERAL node that this symbol is
+ // bound to within the current scope. (Most parts of the compiler should
+ // prefer passing the Node directly, rather than relying on this field.)
+ //
+ // Deprecated: New code should avoid depending on Sym.Def. Add
+ // mdempsky@ as a reviewer for any CLs involving Sym.Def.
+ Def Object
+
+ flags bitset8
+}
+
+const (
+ symOnExportList = 1 << iota // added to exportlist (no need to add again)
+ symUniq
+ symSiggen // type symbol has been generated
+ symAsm // on asmlist, for writing to -asmhdr
+ symFunc // function symbol
+)
+
+func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 }
+func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
+func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
+func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
+func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 }
+
+func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) }
+func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
+func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
+func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
+func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) }
+
+func (sym *Sym) IsBlank() bool {
+ return sym != nil && sym.Name == "_"
+}
+
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, reflectdata.TypeLinksym(t) instead
+// of reflectdata.TypeSym(t).Linksym().
+func (sym *Sym) Linksym() *obj.LSym {
+ abi := obj.ABI0
+ if sym.Func() {
+ abi = obj.ABIInternal
+ }
+ return sym.LinksymABI(abi)
+}
+
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, (*ir.Name).LinksymABI(abi) instead
+// of (*ir.Name).Sym().LinksymABI(abi).
+func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym {
+ if sym == nil {
+ base.Fatalf("nil symbol")
+ }
+ if sym.Linkname != "" {
+ return base.Linkname(sym.Linkname, abi)
+ }
+ return base.PkgLinksym(sym.Pkg.Prefix, sym.Name, abi)
+}
+
+// Less reports whether symbol a is ordered before symbol b.
+//
+// Symbols are ordered exported before non-exported, then by name, and
+// finally (for non-exported symbols) by package path.
+func (a *Sym) Less(b *Sym) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported symbols before non-exported.
+ ea := IsExported(a.Name)
+ eb := IsExported(b.Name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package
+ // height and path.
+ if a.Name != b.Name {
+ return a.Name < b.Name
+ }
+ if !ea {
+ return a.Pkg.Path < b.Pkg.Path
+ }
+ return false
+}
+
+// IsExported reports whether name is an exported Go symbol (that is,
+// whether it begins with an upper-case letter).
+func IsExported(name string) bool {
+ if r := name[0]; r < utf8.RuneSelf {
+ return 'A' <= r && r <= 'Z'
+ }
+ r, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(r)
+}
diff --git a/src/cmd/compile/internal/types/sym_test.go b/src/cmd/compile/internal/types/sym_test.go
new file mode 100644
index 0000000..94efd42
--- /dev/null
+++ b/src/cmd/compile/internal/types/sym_test.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "cmd/compile/internal/types"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestSymLess(t *testing.T) {
+ var (
+ local = types.NewPkg("", "")
+ abc = types.NewPkg("abc", "")
+ uvw = types.NewPkg("uvw", "")
+ xyz = types.NewPkg("xyz", "")
+ gr = types.NewPkg("gr", "")
+ )
+
+ data := []*types.Sym{
+ abc.Lookup("b"),
+ local.Lookup("B"),
+ local.Lookup("C"),
+ uvw.Lookup("c"),
+ local.Lookup("C"),
+ gr.Lookup("φ"),
+ local.Lookup("Φ"),
+ xyz.Lookup("b"),
+ abc.Lookup("a"),
+ local.Lookup("B"),
+ }
+ want := []*types.Sym{
+ local.Lookup("B"),
+ local.Lookup("B"),
+ local.Lookup("C"),
+ local.Lookup("C"),
+ local.Lookup("Φ"),
+ abc.Lookup("a"),
+ abc.Lookup("b"),
+ xyz.Lookup("b"),
+ uvw.Lookup("c"),
+ gr.Lookup("φ"),
+ }
+ if len(data) != len(want) {
+ t.Fatal("want and data must match")
+ }
+ if reflect.DeepEqual(data, want) {
+ t.Fatal("data must be shuffled")
+ }
+ sort.Slice(data, func(i, j int) bool { return data[i].Less(data[j]) })
+ if !reflect.DeepEqual(data, want) {
+ t.Logf("want: %#v", want)
+ t.Logf("data: %#v", data)
+ t.Errorf("sorting failed")
+ }
+}
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
new file mode 100644
index 0000000..2777b4f
--- /dev/null
+++ b/src/cmd/compile/internal/types/type.go
@@ -0,0 +1,1983 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+ "internal/types/errors"
+ "sync"
+)
+
+// Object represents an ir.Node, but without needing to import cmd/compile/internal/ir,
+// which would cause an import cycle. The uses in other packages must type assert
+// values of type Object to ir.Node or a more specific type.
+type Object interface {
+ Pos() src.XPos
+ Sym() *Sym
+ Type() *Type
+}
+
+//go:generate stringer -type Kind -trimprefix T type.go
+
+// Kind describes a kind of type.
+type Kind uint8
+
+const (
+ Txxx Kind = iota
+
+ TINT8
+ TUINT8
+ TINT16
+ TUINT16
+ TINT32
+ TUINT32
+ TINT64
+ TUINT64
+ TINT
+ TUINT
+ TUINTPTR
+
+ TCOMPLEX64
+ TCOMPLEX128
+
+ TFLOAT32
+ TFLOAT64
+
+ TBOOL
+
+ TPTR
+ TFUNC
+ TSLICE
+ TARRAY
+ TSTRUCT
+ TCHAN
+ TMAP
+ TINTER
+ TFORW
+ TANY
+ TSTRING
+ TUNSAFEPTR
+
+ // pseudo-types for literals
+ TIDEAL // untyped numeric constants
+ TNIL
+ TBLANK
+
+ // pseudo-types used temporarily only during frame layout (CalcSize())
+ TFUNCARGS
+ TCHANARGS
+
+ // SSA backend types
+ TSSA // internal types used by SSA backend (flags, memory, etc.)
+ TTUPLE // a pair of types, used by SSA backend
+ TRESULTS // multiple types; the result of calling a function or method, with a memory at the end.
+
+ NTYPE
+)
+
+// ChanDir is whether a channel can send, receive, or both.
+type ChanDir uint8
+
+func (c ChanDir) CanRecv() bool { return c&Crecv != 0 }
+func (c ChanDir) CanSend() bool { return c&Csend != 0 }
+
+const (
+ // types of channel
+ // must match ../../../../reflect/type.go:/ChanDir
+ Crecv ChanDir = 1 << 0
+ Csend ChanDir = 1 << 1
+ Cboth ChanDir = Crecv | Csend
+)
+
+// Types stores pointers to predeclared named types.
+//
+// It also stores pointers to several special types:
+// - Types[TANY] is the placeholder "any" type recognized by SubstArgTypes.
+// - Types[TBLANK] represents the blank variable's type.
+// - Types[TINTER] is the canonical "interface{}" type.
+// - Types[TNIL] represents the predeclared "nil" value's type.
+// - Types[TUNSAFEPTR] is package unsafe's Pointer type.
+var Types [NTYPE]*Type
+
+var (
+ // Predeclared alias types. These are actually created as distinct
+ // defined types for better error messages, but are then specially
+ // treated as identical to their respective underlying types.
+ AnyType *Type
+ ByteType *Type
+ RuneType *Type
+
+ // Predeclared error interface type.
+ ErrorType *Type
+ // Predeclared comparable interface type.
+ ComparableType *Type
+
+ // Types to represent untyped string and boolean constants.
+ UntypedString = newType(TSTRING)
+ UntypedBool = newType(TBOOL)
+
+ // Types to represent untyped numeric constants.
+ UntypedInt = newType(TIDEAL)
+ UntypedRune = newType(TIDEAL)
+ UntypedFloat = newType(TIDEAL)
+ UntypedComplex = newType(TIDEAL)
+)
+
+// UntypedTypes maps from a constant.Kind to its untyped Type
+// representation.
+var UntypedTypes = [...]*Type{
+ constant.Bool: UntypedBool,
+ constant.String: UntypedString,
+ constant.Int: UntypedInt,
+ constant.Float: UntypedFloat,
+ constant.Complex: UntypedComplex,
+}
+
+// DefaultKinds maps from a constant.Kind to its default Kind.
+var DefaultKinds = [...]Kind{
+ constant.Bool: TBOOL,
+ constant.String: TSTRING,
+ constant.Int: TINT,
+ constant.Float: TFLOAT64,
+ constant.Complex: TCOMPLEX128,
+}
+
+// A Type represents a Go type.
+//
+// There may be multiple unnamed types with identical structure. However, there must
+// be a unique Type object for each unique named (defined) type. After noding, a
+// package-level type can be looked up by building its unique symbol sym (sym =
+// package.Lookup(name)) and checking sym.Def. If sym.Def is non-nil, the type
+// already exists at package scope and is available at sym.Def.(*ir.Name).Type().
+// Local types (which may have the same name as a package-level type) are
+// distinguished by their vargen, which is embedded in their symbol name.
+type Type struct {
+ // extra contains extra etype-specific fields.
+ // As an optimization, those etype-specific structs which contain exactly
+ // one pointer-shaped field are stored as values rather than pointers when possible.
+ //
+ // TMAP: *Map
+ // TFORW: *Forward
+ // TFUNC: *Func
+ // TSTRUCT: *Struct
+ // TINTER: *Interface
+ // TFUNCARGS: FuncArgs
+ // TCHANARGS: ChanArgs
+ // TCHAN: *Chan
+ // TPTR: Ptr
+ // TARRAY: *Array
+ // TSLICE: Slice
+ // TSSA: string
+ extra interface{}
+
+ // width is the width of this Type in bytes.
+ width int64 // valid if Align > 0
+
+ // list of base methods (excluding embedding)
+ methods fields
+ // list of all methods (including embedding)
+ allMethods fields
+
+ // canonical OTYPE node for a named type (should be an ir.Name node with same sym)
+ obj Object
+ // the underlying type (type literal or predeclared type) for a defined type
+ underlying *Type
+
+ // Cache of composite types, with this type being the element type.
+ cache struct {
+ ptr *Type // *T, or nil
+ slice *Type // []T, or nil
+ }
+
+ kind Kind // kind of type
+ align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
+
+ intRegs, floatRegs uint8 // registers needed for ABIInternal
+
+ flags bitset8
+
+ // For defined (named) generic types, a pointer to the list of type params
+ // (in order) of this type that need to be instantiated. For instantiated
+ // generic types, this is the targs used to instantiate them. These targs
+ // may be typeparams (for re-instantiated types such as Value[T2]) or
+ // concrete types (for fully instantiated types such as Value[int]).
+ // rparams is only set for named types that are generic or are fully
+ // instantiated from a generic type, and is otherwise set to nil.
+ // TODO(danscales): choose a better name.
+ rparams *[]*Type
+}
+
+// Registers returns the number of integer and floating-point
+// registers required to represent a parameter of this type under the
+// ABIInternal calling conventions.
+//
+// If t must be passed by memory, Registers returns (math.MaxUint8,
+// math.MaxUint8).
+func (t *Type) Registers() (uint8, uint8) {
+ CalcSize(t)
+ return t.intRegs, t.floatRegs
+}
+
+func (*Type) CanBeAnSSAAux() {}
+
+const (
+ typeNotInHeap = 1 << iota // type cannot be heap allocated
+ typeNoalg // suppress hash and eq algorithm generation
+ typeDeferwidth // width computation has been deferred and type is on deferredTypeStack
+ typeRecur
+ typeIsShape // represents a set of closely related types, for generics
+ typeHasShape // there is a shape somewhere in the type
+)
+
+func (t *Type) NotInHeap() bool { return t.flags&typeNotInHeap != 0 }
+func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 }
+func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 }
+func (t *Type) Recur() bool { return t.flags&typeRecur != 0 }
+func (t *Type) IsShape() bool { return t.flags&typeIsShape != 0 }
+func (t *Type) HasShape() bool { return t.flags&typeHasShape != 0 }
+
+func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) }
+func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) }
+func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
+func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
+
+// Should always do SetHasShape(true) when doing SetIsShape(true).
+func (t *Type) SetIsShape(b bool) { t.flags.set(typeIsShape, b) }
+func (t *Type) SetHasShape(b bool) { t.flags.set(typeHasShape, b) }
+
+// Kind returns the kind of type t.
+func (t *Type) Kind() Kind { return t.kind }
+
+// Sym returns the name of type t.
+func (t *Type) Sym() *Sym {
+ if t.obj != nil {
+ return t.obj.Sym()
+ }
+ return nil
+}
+
+// Underlying returns the underlying type of type t.
+func (t *Type) Underlying() *Type { return t.underlying }
+
+// Pos returns a position associated with t, if any.
+// This should only be used for diagnostics.
+func (t *Type) Pos() src.XPos {
+ if t.obj != nil {
+ return t.obj.Pos()
+ }
+ return src.NoXPos
+}
+
+func (t *Type) RParams() []*Type {
+ if t.rparams == nil {
+ return nil
+ }
+ return *t.rparams
+}
+
+func (t *Type) SetRParams(rparams []*Type) {
+ if len(rparams) == 0 {
+ base.Fatalf("Setting nil or zero-length rparams")
+ }
+ t.rparams = &rparams
+ // HasShape should be set if any type argument is or has a shape type.
+ for _, rparam := range rparams {
+ if rparam.HasShape() {
+ t.SetHasShape(true)
+ break
+ }
+ }
+}
+
+// IsFullyInstantiated reports whether t is a fully instantiated generic type; i.e. an
+// instantiated generic type where all type arguments are non-generic or fully
+// instantiated generic types.
+func (t *Type) IsFullyInstantiated() bool {
+ return len(t.RParams()) > 0
+}
+
+// Map contains Type fields specific to maps.
+type Map struct {
+ Key *Type // Key type
+ Elem *Type // Val (elem) type
+
+ Bucket *Type // internal struct type representing a hash bucket
+}
+
+// MapType returns t's extra map-specific fields.
+func (t *Type) MapType() *Map {
+ t.wantEtype(TMAP)
+ return t.extra.(*Map)
+}
+
+// Forward contains Type fields specific to forward types.
+type Forward struct {
+ Copyto []*Type // where to copy the eventual value to
+ Embedlineno src.XPos // first use of this type as an embedded type
+}
+
+// forwardType returns t's extra forward-type-specific fields.
+func (t *Type) forwardType() *Forward {
+ t.wantEtype(TFORW)
+ return t.extra.(*Forward)
+}
+
+// Func contains Type fields specific to func types.
+type Func struct {
+ allParams []*Field // slice of all parameters, in receiver/params/results order
+
+ startParams int // index of the start of the (regular) parameters section
+ startResults int // index of the start of the results section
+
+ resultsTuple *Type // struct-like type representing multi-value results
+
+ // Argwid is the total width of the function receiver, params, and results.
+ // It gets calculated via a temporary TFUNCARGS type.
+ // Note that TFUNC's Width is Widthptr.
+ Argwid int64
+}
+
+func (ft *Func) recvs() []*Field { return ft.allParams[:ft.startParams] }
+func (ft *Func) params() []*Field { return ft.allParams[ft.startParams:ft.startResults] }
+func (ft *Func) results() []*Field { return ft.allParams[ft.startResults:] }
+func (ft *Func) recvParams() []*Field { return ft.allParams[:ft.startResults] }
+func (ft *Func) paramsResults() []*Field { return ft.allParams[ft.startParams:] }
+
+// funcType returns t's extra func-specific fields.
+func (t *Type) funcType() *Func {
+ t.wantEtype(TFUNC)
+ return t.extra.(*Func)
+}
+
+// StructType contains Type fields specific to struct types.
+type Struct struct {
+ fields fields
+
+ // Maps have three associated internal structs (see struct MapType).
+ // Map links such structs back to their map type.
+ Map *Type
+
+ ParamTuple bool // whether this struct is actually a tuple of signature parameters
+}
+
+// StructType returns t's extra struct-specific fields.
+func (t *Type) StructType() *Struct {
+ t.wantEtype(TSTRUCT)
+ return t.extra.(*Struct)
+}
+
+// Interface contains Type fields specific to interface types.
+type Interface struct {
+}
+
+// Ptr contains Type fields specific to pointer types.
+type Ptr struct {
+ Elem *Type // element type
+}
+
+// ChanArgs contains Type fields specific to TCHANARGS types.
+type ChanArgs struct {
+ T *Type // reference to a chan type whose elements need a width check
+}
+
+// // FuncArgs contains Type fields specific to TFUNCARGS types.
+type FuncArgs struct {
+ T *Type // reference to a func type whose elements need a width check
+}
+
+// Chan contains Type fields specific to channel types.
+type Chan struct {
+ Elem *Type // element type
+ Dir ChanDir // channel direction
+}
+
+// chanType returns t's extra channel-specific fields.
+func (t *Type) chanType() *Chan {
+ t.wantEtype(TCHAN)
+ return t.extra.(*Chan)
+}
+
+type Tuple struct {
+ first *Type
+ second *Type
+ // Any tuple with a memory type must put that memory type second.
+}
+
+// Results are the output from calls that will be late-expanded.
+type Results struct {
+ Types []*Type // Last element is memory output from call.
+}
+
+// Array contains Type fields specific to array types.
+type Array struct {
+ Elem *Type // element type
+ Bound int64 // number of elements; <0 if unknown yet
+}
+
+// Slice contains Type fields specific to slice types.
+type Slice struct {
+ Elem *Type // element type
+}
+
+// A Field is a (Sym, Type) pairing along with some other information, and,
+// depending on the context, is used to represent:
+// - a field in a struct
+// - a method in an interface or associated with a named type
+// - a function parameter
+type Field struct {
+ flags bitset8
+
+ Embedded uint8 // embedded field
+
+ Pos src.XPos
+
+ // Name of field/method/parameter. Can be nil for interface fields embedded
+ // in interfaces and unnamed parameters.
+ Sym *Sym
+ Type *Type // field type
+ Note string // literal string annotation
+
+ // For fields that represent function parameters, Nname points to the
+ // associated ONAME Node. For fields that represent methods, Nname points to
+ // the function name node.
+ Nname Object
+
+ // Offset in bytes of this field or method within its enclosing struct
+ // or interface Type. For parameters, this is BADWIDTH.
+ Offset int64
+}
+
+const (
+ fieldIsDDD = 1 << iota // field is ... argument
+ fieldNointerface
+)
+
+func (f *Field) IsDDD() bool { return f.flags&fieldIsDDD != 0 }
+func (f *Field) Nointerface() bool { return f.flags&fieldNointerface != 0 }
+
+func (f *Field) SetIsDDD(b bool) { f.flags.set(fieldIsDDD, b) }
+func (f *Field) SetNointerface(b bool) { f.flags.set(fieldNointerface, b) }
+
+// End returns the offset of the first byte immediately after this field.
+func (f *Field) End() int64 {
+ return f.Offset + f.Type.width
+}
+
+// IsMethod reports whether f represents a method rather than a struct field.
+func (f *Field) IsMethod() bool {
+ return f.Type.kind == TFUNC && f.Type.Recv() != nil
+}
+
+// fields is a pointer to a slice of *Field.
+// This saves space in Types that do not have fields or methods
+// compared to a simple slice of *Field.
+type fields struct {
+ s *[]*Field
+}
+
+// Slice returns the entries in f as a slice.
+// Changes to the slice entries will be reflected in f.
+func (f *fields) Slice() []*Field {
+ if f.s == nil {
+ return nil
+ }
+ return *f.s
+}
+
+// Set sets f to a slice.
+// This takes ownership of the slice.
+func (f *fields) Set(s []*Field) {
+ if len(s) == 0 {
+ f.s = nil
+ } else {
+ // Copy s and take address of t rather than s to avoid
+ // allocation in the case where len(s) == 0.
+ t := s
+ f.s = &t
+ }
+}
+
+// newType returns a new Type of the specified kind.
+func newType(et Kind) *Type {
+ t := &Type{
+ kind: et,
+ width: BADWIDTH,
+ }
+ t.underlying = t
+ // TODO(josharian): lazily initialize some of these?
+ switch t.kind {
+ case TMAP:
+ t.extra = new(Map)
+ case TFORW:
+ t.extra = new(Forward)
+ case TFUNC:
+ t.extra = new(Func)
+ case TSTRUCT:
+ t.extra = new(Struct)
+ case TINTER:
+ t.extra = new(Interface)
+ case TPTR:
+ t.extra = Ptr{}
+ case TCHANARGS:
+ t.extra = ChanArgs{}
+ case TFUNCARGS:
+ t.extra = FuncArgs{}
+ case TCHAN:
+ t.extra = new(Chan)
+ case TTUPLE:
+ t.extra = new(Tuple)
+ case TRESULTS:
+ t.extra = new(Results)
+ }
+ return t
+}
+
+// NewArray returns a new fixed-length array Type.
+func NewArray(elem *Type, bound int64) *Type {
+ if bound < 0 {
+ base.Fatalf("NewArray: invalid bound %v", bound)
+ }
+ t := newType(TARRAY)
+ t.extra = &Array{Elem: elem, Bound: bound}
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewSlice returns the slice Type with element type elem.
+func NewSlice(elem *Type) *Type {
+ if t := elem.cache.slice; t != nil {
+ if t.Elem() != elem {
+ base.Fatalf("elem mismatch")
+ }
+ if elem.HasShape() != t.HasShape() {
+ base.Fatalf("Incorrect HasShape flag for cached slice type")
+ }
+ return t
+ }
+
+ t := newType(TSLICE)
+ t.extra = Slice{Elem: elem}
+ elem.cache.slice = t
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewChan returns a new chan Type with direction dir.
+func NewChan(elem *Type, dir ChanDir) *Type {
+ t := newType(TCHAN)
+ ct := t.chanType()
+ ct.Elem = elem
+ ct.Dir = dir
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+func NewTuple(t1, t2 *Type) *Type {
+ t := newType(TTUPLE)
+ t.extra.(*Tuple).first = t1
+ t.extra.(*Tuple).second = t2
+ if t1.HasShape() || t2.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+func newResults(types []*Type) *Type {
+ t := newType(TRESULTS)
+ t.extra.(*Results).Types = types
+ return t
+}
+
+func NewResults(types []*Type) *Type {
+ if len(types) == 1 && types[0] == TypeMem {
+ return TypeResultMem
+ }
+ return newResults(types)
+}
+
+func newSSA(name string) *Type {
+ t := newType(TSSA)
+ t.extra = name
+ return t
+}
+
+// NewMap returns a new map Type with key type k and element (aka value) type v.
+func NewMap(k, v *Type) *Type {
+ t := newType(TMAP)
+ mt := t.MapType()
+ mt.Key = k
+ mt.Elem = v
+ if k.HasShape() || v.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewPtrCacheEnabled controls whether *T Types are cached in T.
+// Caching is disabled just before starting the backend.
+// This allows the backend to run concurrently.
+var NewPtrCacheEnabled = true
+
+// NewPtr returns the pointer type pointing to t.
+func NewPtr(elem *Type) *Type {
+ if elem == nil {
+ base.Fatalf("NewPtr: pointer to elem Type is nil")
+ }
+
+ if t := elem.cache.ptr; t != nil {
+ if t.Elem() != elem {
+ base.Fatalf("NewPtr: elem mismatch")
+ }
+ if elem.HasShape() != t.HasShape() {
+ base.Fatalf("Incorrect HasShape flag for cached pointer type")
+ }
+ return t
+ }
+
+ t := newType(TPTR)
+ t.extra = Ptr{Elem: elem}
+ t.width = int64(PtrSize)
+ t.align = uint8(PtrSize)
+ t.intRegs = 1
+ if NewPtrCacheEnabled {
+ elem.cache.ptr = t
+ }
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewChanArgs returns a new TCHANARGS type for channel type c.
+func NewChanArgs(c *Type) *Type {
+ t := newType(TCHANARGS)
+ t.extra = ChanArgs{T: c}
+ return t
+}
+
+// NewFuncArgs returns a new TFUNCARGS type for func type f.
+func NewFuncArgs(f *Type) *Type {
+ t := newType(TFUNCARGS)
+ t.extra = FuncArgs{T: f}
+ return t
+}
+
+func NewField(pos src.XPos, sym *Sym, typ *Type) *Field {
+ f := &Field{
+ Pos: pos,
+ Sym: sym,
+ Type: typ,
+ Offset: BADWIDTH,
+ }
+ if typ == nil {
+ base.Fatalf("typ is nil")
+ }
+ return f
+}
+
+// SubstAny walks t, replacing instances of "any" with successive
+// elements removed from types. It returns the substituted type.
+func SubstAny(t *Type, types *[]*Type) *Type {
+ if t == nil {
+ return nil
+ }
+
+ switch t.kind {
+ default:
+ // Leave the type unchanged.
+
+ case TANY:
+ if len(*types) == 0 {
+ base.Fatalf("SubstArgTypes: not enough argument types")
+ }
+ t = (*types)[0]
+ *types = (*types)[1:]
+
+ case TPTR:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra = Ptr{Elem: elem}
+ }
+
+ case TARRAY:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra.(*Array).Elem = elem
+ }
+
+ case TSLICE:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra = Slice{Elem: elem}
+ }
+
+ case TCHAN:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra.(*Chan).Elem = elem
+ }
+
+ case TMAP:
+ key := SubstAny(t.Key(), types)
+ elem := SubstAny(t.Elem(), types)
+ if key != t.Key() || elem != t.Elem() {
+ t = t.copy()
+ t.extra.(*Map).Key = key
+ t.extra.(*Map).Elem = elem
+ }
+
+ case TFUNC:
+ ft := t.funcType()
+ allParams := substFields(ft.allParams, types)
+
+ t = t.copy()
+ ft = t.funcType()
+ ft.allParams = allParams
+
+ rt := ft.resultsTuple
+ rt = rt.copy()
+ ft.resultsTuple = rt
+ rt.setFields(t.Results())
+
+ case TSTRUCT:
+ // Make a copy of all fields, including ones whose type does not change.
+ // This prevents aliasing across functions, which can lead to later
+ // fields getting their Offset incorrectly overwritten.
+ nfs := substFields(t.Fields(), types)
+ t = t.copy()
+ t.setFields(nfs)
+ }
+
+ return t
+}
+
+func substFields(fields []*Field, types *[]*Type) []*Field {
+ nfs := make([]*Field, len(fields))
+ for i, f := range fields {
+ nft := SubstAny(f.Type, types)
+ nfs[i] = f.Copy()
+ nfs[i].Type = nft
+ }
+ return nfs
+}
+
+// copy returns a shallow copy of the Type.
+func (t *Type) copy() *Type {
+ if t == nil {
+ return nil
+ }
+ nt := *t
+ // copy any *T Extra fields, to avoid aliasing
+ switch t.kind {
+ case TMAP:
+ x := *t.extra.(*Map)
+ nt.extra = &x
+ case TFORW:
+ x := *t.extra.(*Forward)
+ nt.extra = &x
+ case TFUNC:
+ x := *t.extra.(*Func)
+ nt.extra = &x
+ case TSTRUCT:
+ x := *t.extra.(*Struct)
+ nt.extra = &x
+ case TINTER:
+ x := *t.extra.(*Interface)
+ nt.extra = &x
+ case TCHAN:
+ x := *t.extra.(*Chan)
+ nt.extra = &x
+ case TARRAY:
+ x := *t.extra.(*Array)
+ nt.extra = &x
+ case TTUPLE, TSSA, TRESULTS:
+ base.Fatalf("ssa types cannot be copied")
+ }
+ // TODO(mdempsky): Find out why this is necessary and explain.
+ if t.underlying == t {
+ nt.underlying = &nt
+ }
+ return &nt
+}
+
+func (f *Field) Copy() *Field {
+ nf := *f
+ return &nf
+}
+
+func (t *Type) wantEtype(et Kind) {
+ if t.kind != et {
+ base.Fatalf("want %v, but have %v", et, t)
+ }
+}
+
+// ResultTuple returns the result type of signature type t as a tuple.
+// This can be used as the type of multi-valued call expressions.
+func (t *Type) ResultsTuple() *Type { return t.funcType().resultsTuple }
+
+// Recvs returns a slice of receiver parameters of signature type t.
+// The returned slice always has length 0 or 1.
+func (t *Type) Recvs() []*Field { return t.funcType().recvs() }
+
+// Params returns a slice of regular parameters of signature type t.
+func (t *Type) Params() []*Field { return t.funcType().params() }
+
+// Results returns a slice of result parameters of signature type t.
+func (t *Type) Results() []*Field { return t.funcType().results() }
+
+// RecvsParamsResults returns a slice containing all of the
+// signature's parameters in receiver (if any), (normal) parameters,
+// and then results.
+func (t *Type) RecvParamsResults() []*Field { return t.funcType().allParams }
+
+// RecvParams returns a slice containing the signature's receiver (if
+// any) followed by its (normal) parameters.
+func (t *Type) RecvParams() []*Field { return t.funcType().recvParams() }
+
+// ParamsResults returns a slice containing the signature's (normal)
+// parameters followed by its results.
+func (t *Type) ParamsResults() []*Field { return t.funcType().paramsResults() }
+
+func (t *Type) NumRecvs() int { return len(t.Recvs()) }
+func (t *Type) NumParams() int { return len(t.Params()) }
+func (t *Type) NumResults() int { return len(t.Results()) }
+
+// IsVariadic reports whether function type t is variadic.
+func (t *Type) IsVariadic() bool {
+ n := t.NumParams()
+ return n > 0 && t.Param(n-1).IsDDD()
+}
+
+// Recv returns the receiver of function type t, if any.
+func (t *Type) Recv() *Field {
+ if s := t.Recvs(); len(s) == 1 {
+ return s[0]
+ }
+ return nil
+}
+
+// Param returns the i'th parameter of signature type t.
+func (t *Type) Param(i int) *Field { return t.Params()[i] }
+
+// Result returns the i'th result of signature type t.
+func (t *Type) Result(i int) *Field { return t.Results()[i] }
+
+// Key returns the key type of map type t.
+func (t *Type) Key() *Type {
+ t.wantEtype(TMAP)
+ return t.extra.(*Map).Key
+}
+
+// Elem returns the type of elements of t.
+// Usable with pointers, channels, arrays, slices, and maps.
+func (t *Type) Elem() *Type {
+ switch t.kind {
+ case TPTR:
+ return t.extra.(Ptr).Elem
+ case TARRAY:
+ return t.extra.(*Array).Elem
+ case TSLICE:
+ return t.extra.(Slice).Elem
+ case TCHAN:
+ return t.extra.(*Chan).Elem
+ case TMAP:
+ return t.extra.(*Map).Elem
+ }
+ base.Fatalf("Type.Elem %s", t.kind)
+ return nil
+}
+
+// ChanArgs returns the channel type for TCHANARGS type t.
+func (t *Type) ChanArgs() *Type {
+ t.wantEtype(TCHANARGS)
+ return t.extra.(ChanArgs).T
+}
+
+// FuncArgs returns the func type for TFUNCARGS type t.
+func (t *Type) FuncArgs() *Type {
+ t.wantEtype(TFUNCARGS)
+ return t.extra.(FuncArgs).T
+}
+
+// IsFuncArgStruct reports whether t is a struct representing function parameters or results.
+func (t *Type) IsFuncArgStruct() bool {
+ return t.kind == TSTRUCT && t.extra.(*Struct).ParamTuple
+}
+
+// Methods returns a pointer to the base methods (excluding embedding) for type t.
+// These can either be concrete methods (for non-interface types) or interface
+// methods (for interface types).
+func (t *Type) Methods() []*Field {
+ return t.methods.Slice()
+}
+
+// AllMethods returns a pointer to all the methods (including embedding) for type t.
+// For an interface type, this is the set of methods that are typically iterated
+// over. For non-interface types, AllMethods() only returns a valid result after
+// CalcMethods() has been called at least once.
+func (t *Type) AllMethods() []*Field {
+ if t.kind == TINTER {
+ // Calculate the full method set of an interface type on the fly
+ // now, if not done yet.
+ CalcSize(t)
+ }
+ return t.allMethods.Slice()
+}
+
+// SetMethods sets the direct method set for type t (i.e., *not*
+// including promoted methods from embedded types).
+func (t *Type) SetMethods(fs []*Field) {
+ t.methods.Set(fs)
+}
+
+// SetAllMethods sets the set of all methods for type t (i.e.,
+// including promoted methods from embedded types).
+func (t *Type) SetAllMethods(fs []*Field) {
+ t.allMethods.Set(fs)
+}
+
+// fields returns the fields of struct type t.
+func (t *Type) fields() *fields {
+ t.wantEtype(TSTRUCT)
+ return &t.extra.(*Struct).fields
+}
+
+// Field returns the i'th field of struct type t.
+func (t *Type) Field(i int) *Field { return t.Fields()[i] }
+
+// Fields returns a slice of containing all fields of
+// a struct type t.
+func (t *Type) Fields() []*Field { return t.fields().Slice() }
+
+// setFields sets struct type t's fields to fields.
+func (t *Type) setFields(fields []*Field) {
+ // If we've calculated the width of t before,
+ // then some other type such as a function signature
+ // might now have the wrong type.
+ // Rather than try to track and invalidate those,
+ // enforce that SetFields cannot be called once
+ // t's width has been calculated.
+ if t.widthCalculated() {
+ base.Fatalf("SetFields of %v: width previously calculated", t)
+ }
+ t.wantEtype(TSTRUCT)
+ t.fields().Set(fields)
+}
+
+// SetInterface sets the base methods of an interface type t.
+func (t *Type) SetInterface(methods []*Field) {
+ t.wantEtype(TINTER)
+ t.methods.Set(methods)
+}
+
+// ArgWidth returns the total aligned argument size for a function.
+// It includes the receiver, parameters, and results.
+func (t *Type) ArgWidth() int64 {
+ t.wantEtype(TFUNC)
+ return t.extra.(*Func).Argwid
+}
+
+func (t *Type) Size() int64 {
+ if t.kind == TSSA {
+ if t == TypeInt128 {
+ return 16
+ }
+ return 0
+ }
+ CalcSize(t)
+ return t.width
+}
+
+func (t *Type) Alignment() int64 {
+ CalcSize(t)
+ return int64(t.align)
+}
+
+func (t *Type) SimpleString() string {
+ return t.kind.String()
+}
+
+// Cmp is a comparison between values a and b.
+//
+// -1 if a < b
+// 0 if a == b
+// 1 if a > b
+type Cmp int8
+
+const (
+ CMPlt = Cmp(-1)
+ CMPeq = Cmp(0)
+ CMPgt = Cmp(1)
+)
+
+// Compare compares types for purposes of the SSA back
+// end, returning a Cmp (one of CMPlt, CMPeq, CMPgt).
+// The answers are correct for an optimizer
+// or code generator, but not necessarily typechecking.
+// The order chosen is arbitrary, only consistency and division
+// into equivalence classes (Types that compare CMPeq) matters.
+func (t *Type) Compare(x *Type) Cmp {
+ if x == t {
+ return CMPeq
+ }
+ return t.cmp(x)
+}
+
+func cmpForNe(x bool) Cmp {
+ if x {
+ return CMPlt
+ }
+ return CMPgt
+}
+
+func (r *Sym) cmpsym(s *Sym) Cmp {
+ if r == s {
+ return CMPeq
+ }
+ if r == nil {
+ return CMPlt
+ }
+ if s == nil {
+ return CMPgt
+ }
+ // Fast sort, not pretty sort
+ if len(r.Name) != len(s.Name) {
+ return cmpForNe(len(r.Name) < len(s.Name))
+ }
+ if r.Pkg != s.Pkg {
+ if len(r.Pkg.Prefix) != len(s.Pkg.Prefix) {
+ return cmpForNe(len(r.Pkg.Prefix) < len(s.Pkg.Prefix))
+ }
+ if r.Pkg.Prefix != s.Pkg.Prefix {
+ return cmpForNe(r.Pkg.Prefix < s.Pkg.Prefix)
+ }
+ }
+ if r.Name != s.Name {
+ return cmpForNe(r.Name < s.Name)
+ }
+ return CMPeq
+}
+
+// cmp compares two *Types t and x, returning CMPlt,
+// CMPeq, CMPgt as t<x, t==x, t>x, for an arbitrary
+// and optimizer-centric notion of comparison.
+// TODO(josharian): make this safe for recursive interface types
+// and use in signatlist sorting. See issue 19869.
+func (t *Type) cmp(x *Type) Cmp {
+ // This follows the structure of function identical in identity.go
+ // with two exceptions.
+ // 1. Symbols are compared more carefully because a <,=,> result is desired.
+ // 2. Maps are treated specially to avoid endless recursion -- maps
+ // contain an internal data type not expressible in Go source code.
+ if t == x {
+ return CMPeq
+ }
+ if t == nil {
+ return CMPlt
+ }
+ if x == nil {
+ return CMPgt
+ }
+
+ if t.kind != x.kind {
+ return cmpForNe(t.kind < x.kind)
+ }
+
+ if t.obj != nil || x.obj != nil {
+ // Special case: we keep byte and uint8 separate
+ // for error messages. Treat them as equal.
+ switch t.kind {
+ case TUINT8:
+ if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) {
+ return CMPeq
+ }
+
+ case TINT32:
+ if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) {
+ return CMPeq
+ }
+
+ case TINTER:
+ // Make sure named any type matches any empty interface.
+ if t == AnyType && x.IsEmptyInterface() || x == AnyType && t.IsEmptyInterface() {
+ return CMPeq
+ }
+ }
+ }
+
+ if c := t.Sym().cmpsym(x.Sym()); c != CMPeq {
+ return c
+ }
+
+ if x.obj != nil {
+ return CMPeq
+ }
+ // both syms nil, look at structure below.
+
+ switch t.kind {
+ case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
+ TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
+ return CMPeq
+
+ case TSSA:
+ tname := t.extra.(string)
+ xname := x.extra.(string)
+ // desire fast sorting, not pretty sorting.
+ if len(tname) == len(xname) {
+ if tname == xname {
+ return CMPeq
+ }
+ if tname < xname {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ if len(tname) > len(xname) {
+ return CMPgt
+ }
+ return CMPlt
+
+ case TTUPLE:
+ xtup := x.extra.(*Tuple)
+ ttup := t.extra.(*Tuple)
+ if c := ttup.first.Compare(xtup.first); c != CMPeq {
+ return c
+ }
+ return ttup.second.Compare(xtup.second)
+
+ case TRESULTS:
+ xResults := x.extra.(*Results)
+ tResults := t.extra.(*Results)
+ xl, tl := len(xResults.Types), len(tResults.Types)
+ if tl != xl {
+ if tl < xl {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ for i := 0; i < tl; i++ {
+ if c := tResults.Types[i].Compare(xResults.Types[i]); c != CMPeq {
+ return c
+ }
+ }
+ return CMPeq
+
+ case TMAP:
+ if c := t.Key().cmp(x.Key()); c != CMPeq {
+ return c
+ }
+ return t.Elem().cmp(x.Elem())
+
+ case TPTR, TSLICE:
+ // No special cases for these, they are handled
+ // by the general code after the switch.
+
+ case TSTRUCT:
+ if t.StructType().Map == nil {
+ if x.StructType().Map != nil {
+ return CMPlt // nil < non-nil
+ }
+ // to the fallthrough
+ } else if x.StructType().Map == nil {
+ return CMPgt // nil > non-nil
+ } else if t.StructType().Map.MapType().Bucket == t {
+ // Both have non-nil Map
+ // Special case for Maps which include a recursive type where the recursion is not broken with a named type
+ if x.StructType().Map.MapType().Bucket != x {
+ return CMPlt // bucket maps are least
+ }
+ return t.StructType().Map.cmp(x.StructType().Map)
+ } else if x.StructType().Map.MapType().Bucket == x {
+ return CMPgt // bucket maps are least
+ } // If t != t.Map.Bucket, fall through to general case
+
+ tfs := t.Fields()
+ xfs := x.Fields()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ t1, x1 := tfs[i], xfs[i]
+ if t1.Embedded != x1.Embedded {
+ return cmpForNe(t1.Embedded < x1.Embedded)
+ }
+ if t1.Note != x1.Note {
+ return cmpForNe(t1.Note < x1.Note)
+ }
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
+ return c
+ }
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ return CMPeq
+
+ case TINTER:
+ tfs := t.AllMethods()
+ xfs := x.AllMethods()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ t1, x1 := tfs[i], xfs[i]
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
+ return c
+ }
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ return CMPeq
+
+ case TFUNC:
+ if tn, xn := t.NumRecvs(), x.NumRecvs(); tn != xn {
+ return cmpForNe(tn < xn)
+ }
+ if tn, xn := t.NumParams(), x.NumParams(); tn != xn {
+ return cmpForNe(tn < xn)
+ }
+ if tn, xn := t.NumResults(), x.NumResults(); tn != xn {
+ return cmpForNe(tn < xn)
+ }
+ if tv, xv := t.IsVariadic(), x.IsVariadic(); tv != xv {
+ return cmpForNe(!tv)
+ }
+
+ tfs := t.RecvParamsResults()
+ xfs := x.RecvParamsResults()
+ for i, tf := range tfs {
+ if c := tf.Type.cmp(xfs[i].Type); c != CMPeq {
+ return c
+ }
+ }
+ return CMPeq
+
+ case TARRAY:
+ if t.NumElem() != x.NumElem() {
+ return cmpForNe(t.NumElem() < x.NumElem())
+ }
+
+ case TCHAN:
+ if t.ChanDir() != x.ChanDir() {
+ return cmpForNe(t.ChanDir() < x.ChanDir())
+ }
+
+ default:
+ e := fmt.Sprintf("Do not know how to compare %v with %v", t, x)
+ panic(e)
+ }
+
+ // Common element type comparison for TARRAY, TCHAN, TPTR, and TSLICE.
+ return t.Elem().cmp(x.Elem())
+}
+
+// IsKind reports whether t is a Type of the specified kind.
+func (t *Type) IsKind(et Kind) bool {
+ return t != nil && t.kind == et
+}
+
+func (t *Type) IsBoolean() bool {
+ return t.kind == TBOOL
+}
+
+var unsignedEType = [...]Kind{
+ TINT8: TUINT8,
+ TUINT8: TUINT8,
+ TINT16: TUINT16,
+ TUINT16: TUINT16,
+ TINT32: TUINT32,
+ TUINT32: TUINT32,
+ TINT64: TUINT64,
+ TUINT64: TUINT64,
+ TINT: TUINT,
+ TUINT: TUINT,
+ TUINTPTR: TUINTPTR,
+}
+
+// ToUnsigned returns the unsigned equivalent of integer type t.
+func (t *Type) ToUnsigned() *Type {
+ if !t.IsInteger() {
+ base.Fatalf("unsignedType(%v)", t)
+ }
+ return Types[unsignedEType[t.kind]]
+}
+
+func (t *Type) IsInteger() bool {
+ switch t.kind {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
+ return true
+ }
+ return t == UntypedInt || t == UntypedRune
+}
+
+func (t *Type) IsSigned() bool {
+ switch t.kind {
+ case TINT8, TINT16, TINT32, TINT64, TINT:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsUnsigned() bool {
+ switch t.kind {
+ case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsFloat() bool {
+ return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat
+}
+
+func (t *Type) IsComplex() bool {
+ return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex
+}
+
+// IsPtr reports whether t is a regular Go pointer type.
+// This does not include unsafe.Pointer.
+func (t *Type) IsPtr() bool {
+ return t.kind == TPTR
+}
+
+// IsPtrElem reports whether t is the element of a pointer (to t).
+func (t *Type) IsPtrElem() bool {
+ return t.cache.ptr != nil
+}
+
+// IsUnsafePtr reports whether t is an unsafe pointer.
+func (t *Type) IsUnsafePtr() bool {
+ return t.kind == TUNSAFEPTR
+}
+
+// IsUintptr reports whether t is a uintptr.
+func (t *Type) IsUintptr() bool {
+ return t.kind == TUINTPTR
+}
+
+// IsPtrShaped reports whether t is represented by a single machine pointer.
+// In addition to regular Go pointer types, this includes map, channel, and
+// function types and unsafe.Pointer. It does not include array or struct types
+// that consist of a single pointer shaped type.
+// TODO(mdempsky): Should it? See golang.org/issue/15028.
+func (t *Type) IsPtrShaped() bool {
+ return t.kind == TPTR || t.kind == TUNSAFEPTR ||
+ t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC
+}
+
+// HasNil reports whether the set of values determined by t includes nil.
+func (t *Type) HasNil() bool {
+ switch t.kind {
+ case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsString() bool {
+ return t.kind == TSTRING
+}
+
+func (t *Type) IsMap() bool {
+ return t.kind == TMAP
+}
+
+func (t *Type) IsChan() bool {
+ return t.kind == TCHAN
+}
+
+func (t *Type) IsSlice() bool {
+ return t.kind == TSLICE
+}
+
+func (t *Type) IsArray() bool {
+ return t.kind == TARRAY
+}
+
+func (t *Type) IsStruct() bool {
+ return t.kind == TSTRUCT
+}
+
+func (t *Type) IsInterface() bool {
+ return t.kind == TINTER
+}
+
+// IsEmptyInterface reports whether t is an empty interface type.
+func (t *Type) IsEmptyInterface() bool {
+ return t.IsInterface() && len(t.AllMethods()) == 0
+}
+
+// IsScalar reports whether 't' is a scalar Go type, e.g.
+// bool/int/float/complex. Note that struct and array types consisting
+// of a single scalar element are not considered scalar, likewise
+// pointer types are also not considered scalar.
+func (t *Type) IsScalar() bool {
+ switch t.kind {
+ case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+ TUINT32, TINT64, TUINT64, TINT, TUINT,
+ TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+ return true
+ }
+ return false
+}
+
+func (t *Type) PtrTo() *Type {
+ return NewPtr(t)
+}
+
+func (t *Type) NumFields() int {
+ if t.kind == TRESULTS {
+ return len(t.extra.(*Results).Types)
+ }
+ return len(t.Fields())
+}
+func (t *Type) FieldType(i int) *Type {
+ if t.kind == TTUPLE {
+ switch i {
+ case 0:
+ return t.extra.(*Tuple).first
+ case 1:
+ return t.extra.(*Tuple).second
+ default:
+ panic("bad tuple index")
+ }
+ }
+ if t.kind == TRESULTS {
+ return t.extra.(*Results).Types[i]
+ }
+ return t.Field(i).Type
+}
+func (t *Type) FieldOff(i int) int64 {
+ return t.Field(i).Offset
+}
+func (t *Type) FieldName(i int) string {
+ return t.Field(i).Sym.Name
+}
+
+// OffsetOf reports the offset of the field of a struct.
+// The field is looked up by name.
+func (t *Type) OffsetOf(name string) int64 {
+ if t.kind != TSTRUCT {
+ base.Fatalf("can't call OffsetOf on non-struct %v", t)
+ }
+ for _, f := range t.Fields() {
+ if f.Sym.Name == name {
+ return f.Offset
+ }
+ }
+ base.Fatalf("couldn't find field %s in %v", name, t)
+ return -1
+}
+
+func (t *Type) NumElem() int64 {
+ t.wantEtype(TARRAY)
+ return t.extra.(*Array).Bound
+}
+
+type componentsIncludeBlankFields bool
+
+const (
+ IgnoreBlankFields componentsIncludeBlankFields = false
+ CountBlankFields componentsIncludeBlankFields = true
+)
+
+// NumComponents returns the number of primitive elements that compose t.
+// Struct and array types are flattened for the purpose of counting.
+// All other types (including string, slice, and interface types) count as one element.
+// If countBlank is IgnoreBlankFields, then blank struct fields
+// (and their comprised elements) are excluded from the count.
+// struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty.
+func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
+ switch t.kind {
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ base.Fatalf("NumComponents func arg struct")
+ }
+ var n int64
+ for _, f := range t.Fields() {
+ if countBlank == IgnoreBlankFields && f.Sym.IsBlank() {
+ continue
+ }
+ n += f.Type.NumComponents(countBlank)
+ }
+ return n
+ case TARRAY:
+ return t.NumElem() * t.Elem().NumComponents(countBlank)
+ }
+ return 1
+}
+
+// SoleComponent returns the only primitive component in t,
+// if there is exactly one. Otherwise, it returns nil.
+// Components are counted as in NumComponents, including blank fields.
+// Keep in sync with cmd/compile/internal/walk/convert.go:soleComponent.
+func (t *Type) SoleComponent() *Type {
+ switch t.kind {
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ base.Fatalf("SoleComponent func arg struct")
+ }
+ if t.NumFields() != 1 {
+ return nil
+ }
+ return t.Field(0).Type.SoleComponent()
+ case TARRAY:
+ if t.NumElem() != 1 {
+ return nil
+ }
+ return t.Elem().SoleComponent()
+ }
+ return t
+}
+
+// ChanDir returns the direction of a channel type t.
+// The direction will be one of Crecv, Csend, or Cboth.
+func (t *Type) ChanDir() ChanDir {
+ t.wantEtype(TCHAN)
+ return t.extra.(*Chan).Dir
+}
+
+func (t *Type) IsMemory() bool {
+ if t == TypeMem || t.kind == TTUPLE && t.extra.(*Tuple).second == TypeMem {
+ return true
+ }
+ if t.kind == TRESULTS {
+ if types := t.extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem {
+ return true
+ }
+ }
+ return false
+}
+func (t *Type) IsFlags() bool { return t == TypeFlags }
+func (t *Type) IsVoid() bool { return t == TypeVoid }
+func (t *Type) IsTuple() bool { return t.kind == TTUPLE }
+func (t *Type) IsResults() bool { return t.kind == TRESULTS }
+
+// IsUntyped reports whether t is an untyped type.
+func (t *Type) IsUntyped() bool {
+ if t == nil {
+ return false
+ }
+ if t == UntypedString || t == UntypedBool {
+ return true
+ }
+ switch t.kind {
+ case TNIL, TIDEAL:
+ return true
+ }
+ return false
+}
+
+// HasPointers reports whether t contains a heap pointer.
+// Note that this function ignores pointers to not-in-heap types.
+func (t *Type) HasPointers() bool {
+ return PtrDataSize(t) > 0
+}
+
+var recvType *Type
+
+// FakeRecvType returns the singleton type used for interface method receivers.
+func FakeRecvType() *Type {
+ if recvType == nil {
+ recvType = NewPtr(newType(TSTRUCT))
+ }
+ return recvType
+}
+
+func FakeRecv() *Field {
+ return NewField(base.AutogeneratedPos, nil, FakeRecvType())
+}
+
+var (
+ // TSSA types. HasPointers assumes these are pointer-free.
+ TypeInvalid = newSSA("invalid")
+ TypeMem = newSSA("mem")
+ TypeFlags = newSSA("flags")
+ TypeVoid = newSSA("void")
+ TypeInt128 = newSSA("int128")
+ TypeResultMem = newResults([]*Type{TypeMem})
+)
+
+func init() {
+ TypeInt128.width = 16
+ TypeInt128.align = 8
+}
+
+// NewNamed returns a new named type for the given type name. obj should be an
+// ir.Name. The new type is incomplete (marked as TFORW kind), and the underlying
+// type should be set later via SetUnderlying(). References to the type are
+// maintained until the type is filled in, so those references can be updated when
+// the type is complete.
+func NewNamed(obj Object) *Type {
+ t := newType(TFORW)
+ t.obj = obj
+ if obj.Sym().Pkg == ShapePkg {
+ t.SetIsShape(true)
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// Obj returns the canonical type name node for a named type t, nil for an unnamed type.
+func (t *Type) Obj() Object {
+ return t.obj
+}
+
+// SetUnderlying sets the underlying type of an incomplete type (i.e. type whose kind
+// is currently TFORW). SetUnderlying automatically updates any types that were waiting
+// for this type to be completed.
+func (t *Type) SetUnderlying(underlying *Type) {
+ if underlying.kind == TFORW {
+ // This type isn't computed yet; when it is, update n.
+ underlying.forwardType().Copyto = append(underlying.forwardType().Copyto, t)
+ return
+ }
+
+ ft := t.forwardType()
+
+ // TODO(mdempsky): Fix Type rekinding.
+ t.kind = underlying.kind
+ t.extra = underlying.extra
+ t.width = underlying.width
+ t.align = underlying.align
+ t.intRegs = underlying.intRegs
+ t.floatRegs = underlying.floatRegs
+ t.underlying = underlying.underlying
+
+ if underlying.NotInHeap() {
+ t.SetNotInHeap(true)
+ }
+ if underlying.HasShape() {
+ t.SetHasShape(true)
+ }
+
+ // spec: "The declared type does not inherit any methods bound
+ // to the existing type, but the method set of an interface
+ // type [...] remains unchanged."
+ if t.IsInterface() {
+ t.methods = underlying.methods
+ t.allMethods = underlying.allMethods
+ }
+
+ // Update types waiting on this type.
+ for _, w := range ft.Copyto {
+ w.SetUnderlying(t)
+ }
+
+ // Double-check use of type as embedded type.
+ if ft.Embedlineno.IsKnown() {
+ if t.IsPtr() || t.IsUnsafePtr() {
+ base.ErrorfAt(ft.Embedlineno, errors.InvalidPtrEmbed, "embedded type cannot be a pointer")
+ }
+ }
+}
+
+func fieldsHasShape(fields []*Field) bool {
+ for _, f := range fields {
+ if f.Type != nil && f.Type.HasShape() {
+ return true
+ }
+ }
+ return false
+}
+
+// newBasic returns a new basic type of the given kind.
+func newBasic(kind Kind, obj Object) *Type {
+ t := newType(kind)
+ t.obj = obj
+ return t
+}
+
+// NewInterface returns a new interface for the given methods and
+// embedded types. Embedded types are specified as fields with no Sym.
+func NewInterface(methods []*Field) *Type {
+ t := newType(TINTER)
+ t.SetInterface(methods)
+ for _, f := range methods {
+ // f.Type could be nil for a broken interface declaration
+ if f.Type != nil && f.Type.HasShape() {
+ t.SetHasShape(true)
+ break
+ }
+ }
+ return t
+}
+
+// NewSignature returns a new function type for the given receiver,
+// parameters, and results, any of which may be nil.
+func NewSignature(recv *Field, params, results []*Field) *Type {
+ startParams := 0
+ if recv != nil {
+ startParams = 1
+ }
+ startResults := startParams + len(params)
+
+ allParams := make([]*Field, startResults+len(results))
+ if recv != nil {
+ allParams[0] = recv
+ }
+ copy(allParams[startParams:], params)
+ copy(allParams[startResults:], results)
+
+ t := newType(TFUNC)
+ ft := t.funcType()
+
+ funargs := func(fields []*Field) *Type {
+ s := NewStruct(fields)
+ s.StructType().ParamTuple = true
+ return s
+ }
+
+ ft.allParams = allParams
+ ft.startParams = startParams
+ ft.startResults = startResults
+
+ ft.resultsTuple = funargs(allParams[startResults:])
+
+ if fieldsHasShape(allParams) {
+ t.SetHasShape(true)
+ }
+
+ return t
+}
+
+// NewStruct returns a new struct with the given fields.
+func NewStruct(fields []*Field) *Type {
+ t := newType(TSTRUCT)
+ t.setFields(fields)
+ if fieldsHasShape(fields) {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+var (
+ IsInt [NTYPE]bool
+ IsFloat [NTYPE]bool
+ IsComplex [NTYPE]bool
+ IsSimple [NTYPE]bool
+)
+
+var IsOrdered [NTYPE]bool
+
+// IsReflexive reports whether t has a reflexive equality operator.
+// That is, if x==x for all x of type t.
+func IsReflexive(t *Type) bool {
+ switch t.Kind() {
+ case TBOOL,
+ TINT,
+ TUINT,
+ TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TUINTPTR,
+ TPTR,
+ TUNSAFEPTR,
+ TSTRING,
+ TCHAN:
+ return true
+
+ case TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128,
+ TINTER:
+ return false
+
+ case TARRAY:
+ return IsReflexive(t.Elem())
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields() {
+ if !IsReflexive(t1.Type) {
+ return false
+ }
+ }
+ return true
+
+ default:
+ base.Fatalf("bad type for map key: %v", t)
+ return false
+ }
+}
+
+// Can this type be stored directly in an interface word?
+// Yes, if the representation is a single pointer.
+func IsDirectIface(t *Type) bool {
+ switch t.Kind() {
+ case TPTR:
+ // Pointers to notinheap types must be stored indirectly. See issue 42076.
+ return !t.Elem().NotInHeap()
+ case TCHAN,
+ TMAP,
+ TFUNC,
+ TUNSAFEPTR:
+ return true
+
+ case TARRAY:
+ // Array of 1 direct iface type can be direct.
+ return t.NumElem() == 1 && IsDirectIface(t.Elem())
+
+ case TSTRUCT:
+ // Struct with 1 field of direct iface type can be direct.
+ return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type)
+ }
+
+ return false
+}
+
+// IsInterfaceMethod reports whether (field) m is
+// an interface method. Such methods have the
+// special receiver type types.FakeRecvType().
+func IsInterfaceMethod(f *Type) bool {
+ return f.Recv().Type == FakeRecvType()
+}
+
+// IsMethodApplicable reports whether method m can be called on a
+// value of type t. This is necessary because we compute a single
+// method set for both T and *T, but some *T methods are not
+// applicable to T receivers.
+func IsMethodApplicable(t *Type, m *Field) bool {
+ return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2
+}
+
+// RuntimeSymName returns the name of s if it's in package "runtime"; otherwise
+// it returns "".
+func RuntimeSymName(s *Sym) string {
+ if s.Pkg.Path == "runtime" {
+ return s.Name
+ }
+ return ""
+}
+
+// ReflectSymName returns the name of s if it's in package "reflect"; otherwise
+// it returns "".
+func ReflectSymName(s *Sym) string {
+ if s.Pkg.Path == "reflect" {
+ return s.Name
+ }
+ return ""
+}
+
+// IsNoInstrumentPkg reports whether p is a package that
+// should not be instrumented.
+func IsNoInstrumentPkg(p *Pkg) bool {
+ return objabi.LookupPkgSpecial(p.Path).NoInstrument
+}
+
+// IsNoRacePkg reports whether p is a package that
+// should not be race instrumented.
+func IsNoRacePkg(p *Pkg) bool {
+ return objabi.LookupPkgSpecial(p.Path).NoRaceFunc
+}
+
+// ReceiverBaseType returns the underlying type, if any,
+// that owns methods with receiver parameter t.
+// The result is either a named type or an anonymous struct.
+func ReceiverBaseType(t *Type) *Type {
+ if t == nil {
+ return nil
+ }
+
+ // Strip away pointer if it's there.
+ if t.IsPtr() {
+ if t.Sym() != nil {
+ return nil
+ }
+ t = t.Elem()
+ if t == nil {
+ return nil
+ }
+ }
+
+ // Must be a named type or anonymous struct.
+ if t.Sym() == nil && !t.IsStruct() {
+ return nil
+ }
+
+ // Check types.
+ if IsSimple[t.Kind()] {
+ return t
+ }
+ switch t.Kind() {
+ case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+ return t
+ }
+ return nil
+}
+
+func FloatForComplex(t *Type) *Type {
+ switch t.Kind() {
+ case TCOMPLEX64:
+ return Types[TFLOAT32]
+ case TCOMPLEX128:
+ return Types[TFLOAT64]
+ }
+ base.Fatalf("unexpected type: %v", t)
+ return nil
+}
+
+func ComplexForFloat(t *Type) *Type {
+ switch t.Kind() {
+ case TFLOAT32:
+ return Types[TCOMPLEX64]
+ case TFLOAT64:
+ return Types[TCOMPLEX128]
+ }
+ base.Fatalf("unexpected type: %v", t)
+ return nil
+}
+
+func TypeSym(t *Type) *Sym {
+ return TypeSymLookup(TypeSymName(t))
+}
+
+func TypeSymLookup(name string) *Sym {
+ typepkgmu.Lock()
+ s := typepkg.Lookup(name)
+ typepkgmu.Unlock()
+ return s
+}
+
+func TypeSymName(t *Type) string {
+ name := t.LinkString()
+ // Use a separate symbol name for Noalg types for #17752.
+ if TypeHasNoAlg(t) {
+ name = "noalg." + name
+ }
+ return name
+}
+
+// Fake package for runtime type info (headers)
+// Don't access directly, use typeLookup below.
+var (
+ typepkgmu sync.Mutex // protects typepkg lookups
+ typepkg = NewPkg("type", "type")
+)
+
+var SimType [NTYPE]Kind
+
+// Fake package for shape types (see typecheck.Shapify()).
+var ShapePkg = NewPkg("go.shape", "go.shape")
diff --git a/src/cmd/compile/internal/types/type_test.go b/src/cmd/compile/internal/types/type_test.go
new file mode 100644
index 0000000..1fd05b3
--- /dev/null
+++ b/src/cmd/compile/internal/types/type_test.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "testing"
+)
+
+func TestSSACompare(t *testing.T) {
+ a := []*Type{
+ TypeInvalid,
+ TypeMem,
+ TypeFlags,
+ TypeVoid,
+ TypeInt128,
+ }
+ for _, x := range a {
+ for _, y := range a {
+ c := x.Compare(y)
+ if x == y && c != CMPeq || x != y && c == CMPeq {
+ t.Errorf("%s compare %s == %d\n", x.extra, y.extra, c)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types/universe.go b/src/cmd/compile/internal/types/universe.go
new file mode 100644
index 0000000..d1800f2
--- /dev/null
+++ b/src/cmd/compile/internal/types/universe.go
@@ -0,0 +1,154 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
+
+var basicTypes = [...]struct {
+ name string
+ etype Kind
+}{
+ {"int8", TINT8},
+ {"int16", TINT16},
+ {"int32", TINT32},
+ {"int64", TINT64},
+ {"uint8", TUINT8},
+ {"uint16", TUINT16},
+ {"uint32", TUINT32},
+ {"uint64", TUINT64},
+ {"float32", TFLOAT32},
+ {"float64", TFLOAT64},
+ {"complex64", TCOMPLEX64},
+ {"complex128", TCOMPLEX128},
+ {"bool", TBOOL},
+ {"string", TSTRING},
+}
+
+var typedefs = [...]struct {
+ name string
+ etype Kind
+ sameas32 Kind
+ sameas64 Kind
+}{
+ {"int", TINT, TINT32, TINT64},
+ {"uint", TUINT, TUINT32, TUINT64},
+ {"uintptr", TUINTPTR, TUINT32, TUINT64},
+}
+
+func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) {
+ if PtrSize == 0 {
+ base.Fatalf("InitTypes called before PtrSize was set")
+ }
+
+ SlicePtrOffset = 0
+ SliceLenOffset = RoundUp(SlicePtrOffset+int64(PtrSize), int64(PtrSize))
+ SliceCapOffset = RoundUp(SliceLenOffset+int64(PtrSize), int64(PtrSize))
+ SliceSize = RoundUp(SliceCapOffset+int64(PtrSize), int64(PtrSize))
+
+ // string is same as slice wo the cap
+ StringSize = RoundUp(SliceLenOffset+int64(PtrSize), int64(PtrSize))
+
+ for et := Kind(0); et < NTYPE; et++ {
+ SimType[et] = et
+ }
+
+ Types[TANY] = newType(TANY) // note: an old placeholder type, NOT the new builtin 'any' alias for interface{}
+ Types[TINTER] = NewInterface(nil)
+ CheckSize(Types[TINTER])
+
+ defBasic := func(kind Kind, pkg *Pkg, name string) *Type {
+ typ := newType(kind)
+ obj := defTypeName(pkg.Lookup(name), typ)
+ typ.obj = obj
+ if kind != TANY {
+ CheckSize(typ)
+ }
+ return typ
+ }
+
+ for _, s := range &basicTypes {
+ Types[s.etype] = defBasic(s.etype, BuiltinPkg, s.name)
+ }
+
+ for _, s := range &typedefs {
+ sameas := s.sameas32
+ if PtrSize == 8 {
+ sameas = s.sameas64
+ }
+ SimType[s.etype] = sameas
+
+ Types[s.etype] = defBasic(s.etype, BuiltinPkg, s.name)
+ }
+
+ // We create separate byte and rune types for better error messages
+ // rather than just creating type alias *Sym's for the uint8 and
+ // int32 Hence, (bytetype|runtype).Sym.isAlias() is false.
+ // TODO(gri) Should we get rid of this special case (at the cost
+ // of less informative error messages involving bytes and runes)?
+ // NOTE(rsc): No, the error message quality is important.
+ // (Alternatively, we could introduce an OTALIAS node representing
+ // type aliases, albeit at the cost of having to deal with it everywhere).
+ ByteType = defBasic(TUINT8, BuiltinPkg, "byte")
+ RuneType = defBasic(TINT32, BuiltinPkg, "rune")
+
+ // error type
+ DeferCheckSize()
+ ErrorType = defBasic(TFORW, BuiltinPkg, "error")
+ ErrorType.SetUnderlying(makeErrorInterface())
+ ResumeCheckSize()
+
+ // comparable type (interface)
+ DeferCheckSize()
+ ComparableType = defBasic(TFORW, BuiltinPkg, "comparable")
+ ComparableType.SetUnderlying(makeComparableInterface())
+ ResumeCheckSize()
+
+ // any type (interface)
+ DeferCheckSize()
+ AnyType = defBasic(TFORW, BuiltinPkg, "any")
+ AnyType.SetUnderlying(NewInterface(nil))
+ ResumeCheckSize()
+
+ Types[TUNSAFEPTR] = defBasic(TUNSAFEPTR, UnsafePkg, "Pointer")
+
+ Types[TBLANK] = newType(TBLANK)
+ Types[TNIL] = newType(TNIL)
+
+ // simple aliases
+ SimType[TMAP] = TPTR
+ SimType[TCHAN] = TPTR
+ SimType[TFUNC] = TPTR
+ SimType[TUNSAFEPTR] = TPTR
+
+ for et := TINT8; et <= TUINT64; et++ {
+ IsInt[et] = true
+ }
+ IsInt[TINT] = true
+ IsInt[TUINT] = true
+ IsInt[TUINTPTR] = true
+
+ IsFloat[TFLOAT32] = true
+ IsFloat[TFLOAT64] = true
+
+ IsComplex[TCOMPLEX64] = true
+ IsComplex[TCOMPLEX128] = true
+}
+
+func makeErrorInterface() *Type {
+ sig := NewSignature(FakeRecv(), nil, []*Field{
+ NewField(src.NoXPos, nil, Types[TSTRING]),
+ })
+ method := NewField(src.NoXPos, LocalPkg.Lookup("Error"), sig)
+ return NewInterface([]*Field{method})
+}
+
+// makeComparableInterface makes the predefined "comparable" interface in the
+// built-in package. It has a unique name, but no methods.
+func makeComparableInterface() *Type {
+ return NewInterface(nil)
+}
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
new file mode 100644
index 0000000..f9f629c
--- /dev/null
+++ b/src/cmd/compile/internal/types/utils.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+const BADWIDTH = -1000000000
+
+type bitset8 uint8
+
+func (f *bitset8) set(mask uint8, b bool) {
+ if b {
+ *(*uint8)(f) |= mask
+ } else {
+ *(*uint8)(f) &^= mask
+ }
+}
diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go
new file mode 100644
index 0000000..06dfba1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/alias.go
@@ -0,0 +1,88 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "fmt"
+
+// An Alias represents an alias type.
+// Whether or not Alias types are created is controlled by the
+// gotypesalias setting with the GODEBUG environment variable.
+// For gotypesalias=1, alias declarations produce an Alias type.
+// Otherwise, the alias information is only in the type name,
+// which points directly to the actual (aliased) type.
+type Alias struct {
+ obj *TypeName // corresponding declared alias object
+ fromRHS Type // RHS of type alias declaration; may be an alias
+ actual Type // actual (aliased) type; never an alias
+}
+
+// NewAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func NewAlias(obj *TypeName, rhs Type) *Alias {
+ alias := (*Checker)(nil).newAlias(obj, rhs)
+ // Ensure that alias.actual is set (#65455).
+ unalias(alias)
+ return alias
+}
+
+func (a *Alias) Obj() *TypeName { return a.obj }
+func (a *Alias) Underlying() Type { return unalias(a).Underlying() }
+func (a *Alias) String() string { return TypeString(a, nil) }
+
+// Type accessors
+
+// Unalias returns t if it is not an alias type;
+// otherwise it follows t's alias chain until it
+// reaches a non-alias type which is then returned.
+// Consequently, the result is never an alias type.
+func Unalias(t Type) Type {
+ if a0, _ := t.(*Alias); a0 != nil {
+ return unalias(a0)
+ }
+ return t
+}
+
+func unalias(a0 *Alias) Type {
+ if a0.actual != nil {
+ return a0.actual
+ }
+ var t Type
+ for a := a0; a != nil; a, _ = t.(*Alias) {
+ t = a.fromRHS
+ }
+ if t == nil {
+ panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name))
+ }
+ a0.actual = t
+ return t
+}
+
+// asNamed returns t as *Named if that is t's
+// actual type. It returns nil otherwise.
+func asNamed(t Type) *Named {
+ n, _ := Unalias(t).(*Named)
+ return n
+}
+
+// newAlias creates a new Alias type with the given type name and rhs.
+// rhs must not be nil.
+func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias {
+ assert(rhs != nil)
+ a := &Alias{obj, rhs, nil}
+ if obj.typ == nil {
+ obj.typ = a
+ }
+
+ // Ensure that a.actual is set at the end of type checking.
+ if check != nil {
+ check.needsCleanup(a)
+ }
+
+ return a
+}
+
+func (a *Alias) cleanup() {
+ Unalias(a)
+}
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
new file mode 100644
index 0000000..bb02d91
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api.go
@@ -0,0 +1,471 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package types declares the data types and implements
+// the algorithms for type-checking of Go packages. Use
+// Config.Check to invoke the type checker for a package.
+// Alternatively, create a new type checker with NewChecker
+// and invoke it incrementally by calling Checker.Files.
+//
+// Type-checking consists of several interdependent phases:
+//
+// Name resolution maps each identifier (syntax.Name) in the program to the
+// language object (Object) it denotes.
+// Use Info.{Defs,Uses,Implicits} for the results of name resolution.
+//
+// Constant folding computes the exact constant value (constant.Value)
+// for every expression (syntax.Expr) that is a compile-time constant.
+// Use Info.Types[expr].Value for the results of constant folding.
+//
+// Type inference computes the type (Type) of every expression (syntax.Expr)
+// and checks for compliance with the language specification.
+// Use Info.Types[expr].Type for the results of type inference.
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ . "internal/types/errors"
+ "strings"
+)
+
+// An Error describes a type-checking error; it implements the error interface.
+// A "soft" error is an error that still permits a valid interpretation of a
+// package (such as "unused variable"); "hard" errors may lead to unpredictable
+// behavior if ignored.
+type Error struct {
+ Pos syntax.Pos // error position
+ Msg string // default error message, user-friendly
+ Full string // full error message, for debugging (may contain internal details)
+ Soft bool // if set, error is "soft"
+ Code Code // error code
+}
+
+// Error returns an error string formatted as follows:
+// filename:line:column: message
+func (err Error) Error() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Msg)
+}
+
+// FullError returns an error string like Error, buy it may contain
+// type-checker internal details such as subscript indices for type
+// parameters and more. Useful for debugging.
+func (err Error) FullError() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Full)
+}
+
+// An ArgumentError holds an error associated with an argument index.
+type ArgumentError struct {
+ Index int
+ Err error
+}
+
+func (e *ArgumentError) Error() string { return e.Err.Error() }
+func (e *ArgumentError) Unwrap() error { return e.Err }
+
+// An Importer resolves import paths to Packages.
+//
+// CAUTION: This interface does not support the import of locally
+// vendored packages. See https://golang.org/s/go15vendor.
+// If possible, external implementations should implement ImporterFrom.
+type Importer interface {
+ // Import returns the imported package for the given import path.
+ // The semantics is like for ImporterFrom.ImportFrom except that
+ // dir and mode are ignored (since they are not present).
+ Import(path string) (*Package, error)
+}
+
+// ImportMode is reserved for future use.
+type ImportMode int
+
+// An ImporterFrom resolves import paths to packages; it
+// supports vendoring per https://golang.org/s/go15vendor.
+// Use go/importer to obtain an ImporterFrom implementation.
+type ImporterFrom interface {
+ // Importer is present for backward-compatibility. Calling
+ // Import(path) is the same as calling ImportFrom(path, "", 0);
+ // i.e., locally vendored packages may not be found.
+ // The types package does not call Import if an ImporterFrom
+ // is present.
+ Importer
+
+ // ImportFrom returns the imported package for the given import
+ // path when imported by a package file located in dir.
+ // If the import failed, besides returning an error, ImportFrom
+ // is encouraged to cache and return a package anyway, if one
+ // was created. This will reduce package inconsistencies and
+ // follow-on type checker errors due to the missing package.
+ // The mode value must be 0; it is reserved for future use.
+ // Two calls to ImportFrom with the same path and dir must
+ // return the same package.
+ ImportFrom(path, dir string, mode ImportMode) (*Package, error)
+}
+
+// A Config specifies the configuration for type checking.
+// The zero value for Config is a ready-to-use default configuration.
+type Config struct {
+ // Context is the context used for resolving global identifiers. If nil, the
+ // type checker will initialize this field with a newly created context.
+ Context *Context
+
+ // GoVersion describes the accepted Go language version. The string must
+ // start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or
+ // "go1.21.0") or it must be empty; an empty string disables Go language
+ // version checks. If the format is invalid, invoking the type checker will
+ // result in an error.
+ GoVersion string
+
+ // If IgnoreFuncBodies is set, function bodies are not
+ // type-checked.
+ IgnoreFuncBodies bool
+
+ // If FakeImportC is set, `import "C"` (for packages requiring Cgo)
+ // declares an empty "C" package and errors are omitted for qualified
+ // identifiers referring to package C (which won't find an object).
+ // This feature is intended for the standard library cmd/api tool.
+ //
+ // Caution: Effects may be unpredictable due to follow-on errors.
+ // Do not use casually!
+ FakeImportC bool
+
+ // If IgnoreBranchErrors is set, branch/label errors are ignored.
+ IgnoreBranchErrors bool
+
+ // If go115UsesCgo is set, the type checker expects the
+ // _cgo_gotypes.go file generated by running cmd/cgo to be
+ // provided as a package source file. Qualified identifiers
+ // referring to package C will be resolved to cgo-provided
+ // declarations within _cgo_gotypes.go.
+ //
+ // It is an error to set both FakeImportC and go115UsesCgo.
+ go115UsesCgo bool
+
+ // If Trace is set, a debug trace is printed to stdout.
+ Trace bool
+
+ // If Error != nil, it is called with each error found
+ // during type checking; err has dynamic type Error.
+ // Secondary errors (for instance, to enumerate all types
+ // involved in an invalid recursive type declaration) have
+ // error strings that start with a '\t' character.
+ // If Error == nil, type-checking stops with the first
+ // error found.
+ Error func(err error)
+
+ // An importer is used to import packages referred to from
+ // import declarations.
+ // If the installed importer implements ImporterFrom, the type
+ // checker calls ImportFrom instead of Import.
+ // The type checker reports an error if an importer is needed
+ // but none was installed.
+ Importer Importer
+
+ // If Sizes != nil, it provides the sizing functions for package unsafe.
+ // Otherwise SizesFor("gc", "amd64") is used instead.
+ Sizes Sizes
+
+ // If DisableUnusedImportCheck is set, packages are not checked
+ // for unused imports.
+ DisableUnusedImportCheck bool
+
+ // If a non-empty ErrorURL format string is provided, it is used
+ // to format an error URL link that is appended to the first line
+ // of an error message. ErrorURL must be a format string containing
+ // exactly one "%s" format, e.g. "[go.dev/e/%s]".
+ ErrorURL string
+}
+
+func srcimporter_setUsesCgo(conf *Config) {
+ conf.go115UsesCgo = true
+}
+
+// Info holds result type information for a type-checked package.
+// Only the information for which a map is provided is collected.
+// If the package has type errors, the collected information may
+// be incomplete.
+type Info struct {
+ // Types maps expressions to their types, and for constant
+ // expressions, also their values. Invalid expressions are
+ // omitted.
+ //
+ // For (possibly parenthesized) identifiers denoting built-in
+ // functions, the recorded signatures are call-site specific:
+ // if the call result is not a constant, the recorded type is
+ // an argument-specific signature. Otherwise, the recorded type
+ // is invalid.
+ //
+ // The Types map does not record the type of every identifier,
+ // only those that appear where an arbitrary expression is
+ // permitted. For instance, the identifier f in a selector
+ // expression x.f is found only in the Selections map, the
+ // identifier z in a variable declaration 'var z int' is found
+ // only in the Defs map, and identifiers denoting packages in
+ // qualified identifiers are collected in the Uses map.
+ Types map[syntax.Expr]TypeAndValue
+
+ // If StoreTypesInSyntax is set, type information identical to
+ // that which would be put in the Types map, will be set in
+ // syntax.Expr.TypeAndValue (independently of whether Types
+ // is nil or not).
+ StoreTypesInSyntax bool
+
+ // Instances maps identifiers denoting generic types or functions to their
+ // type arguments and instantiated type.
+ //
+ // For example, Instances will map the identifier for 'T' in the type
+ // instantiation T[int, string] to the type arguments [int, string] and
+ // resulting instantiated *Named type. Given a generic function
+ // func F[A any](A), Instances will map the identifier for 'F' in the call
+ // expression F(int(1)) to the inferred type arguments [int], and resulting
+ // instantiated *Signature.
+ //
+ // Invariant: Instantiating Uses[id].Type() with Instances[id].TypeArgs
+ // results in an equivalent of Instances[id].Type.
+ Instances map[*syntax.Name]Instance
+
+ // Defs maps identifiers to the objects they define (including
+ // package names, dots "." of dot-imports, and blank "_" identifiers).
+ // For identifiers that do not denote objects (e.g., the package name
+ // in package clauses, or symbolic variables t in t := x.(type) of
+ // type switch headers), the corresponding objects are nil.
+ //
+ // For an embedded field, Defs returns the field *Var it defines.
+ //
+ // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos()
+ Defs map[*syntax.Name]Object
+
+ // Uses maps identifiers to the objects they denote.
+ //
+ // For an embedded field, Uses returns the *TypeName it denotes.
+ //
+ // Invariant: Uses[id].Pos() != id.Pos()
+ Uses map[*syntax.Name]Object
+
+ // Implicits maps nodes to their implicitly declared objects, if any.
+ // The following node and object types may appear:
+ //
+ // node declared object
+ //
+ // *syntax.ImportDecl *PkgName for imports without renames
+ // *syntax.CaseClause type-specific *Var for each type switch case clause (incl. default)
+ // *syntax.Field anonymous parameter *Var (incl. unnamed results)
+ //
+ Implicits map[syntax.Node]Object
+
+ // Selections maps selector expressions (excluding qualified identifiers)
+ // to their corresponding selections.
+ Selections map[*syntax.SelectorExpr]*Selection
+
+ // Scopes maps syntax.Nodes to the scopes they define. Package scopes are not
+ // associated with a specific node but with all files belonging to a package.
+ // Thus, the package scope can be found in the type-checked Package object.
+ // Scopes nest, with the Universe scope being the outermost scope, enclosing
+ // the package scope, which contains (one or more) files scopes, which enclose
+ // function scopes which in turn enclose statement and function literal scopes.
+ // Note that even though package-level functions are declared in the package
+ // scope, the function scopes are embedded in the file scope of the file
+ // containing the function declaration.
+ //
+ // The Scope of a function contains the declarations of any
+ // type parameters, parameters, and named results, plus any
+ // local declarations in the body block.
+ // It is coextensive with the complete extent of the
+ // function's syntax ([*ast.FuncDecl] or [*ast.FuncLit]).
+ // The Scopes mapping does not contain an entry for the
+ // function body ([*ast.BlockStmt]); the function's scope is
+ // associated with the [*ast.FuncType].
+ //
+ // The following node types may appear in Scopes:
+ //
+ // *syntax.File
+ // *syntax.FuncType
+ // *syntax.TypeDecl
+ // *syntax.BlockStmt
+ // *syntax.IfStmt
+ // *syntax.SwitchStmt
+ // *syntax.CaseClause
+ // *syntax.CommClause
+ // *syntax.ForStmt
+ //
+ Scopes map[syntax.Node]*Scope
+
+ // InitOrder is the list of package-level initializers in the order in which
+ // they must be executed. Initializers referring to variables related by an
+ // initialization dependency appear in topological order, the others appear
+ // in source order. Variables without an initialization expression do not
+ // appear in this list.
+ InitOrder []*Initializer
+
+ // FileVersions maps a file to its Go version string.
+ // If the file doesn't specify a version, the reported
+ // string is Config.GoVersion.
+ // Version strings begin with “go”, like “go1.21”, and
+ // are suitable for use with the [go/version] package.
+ FileVersions map[*syntax.PosBase]string
+}
+
+func (info *Info) recordTypes() bool {
+ return info.Types != nil || info.StoreTypesInSyntax
+}
+
+// TypeOf returns the type of expression e, or nil if not found.
+// Precondition 1: the Types map is populated or StoreTypesInSynax is set.
+// Precondition 2: Uses and Defs maps are populated.
+func (info *Info) TypeOf(e syntax.Expr) Type {
+ if info.Types != nil {
+ if t, ok := info.Types[e]; ok {
+ return t.Type
+ }
+ } else if info.StoreTypesInSyntax {
+ if tv := e.GetTypeInfo(); tv.Type != nil {
+ return tv.Type
+ }
+ }
+
+ if id, _ := e.(*syntax.Name); id != nil {
+ if obj := info.ObjectOf(id); obj != nil {
+ return obj.Type()
+ }
+ }
+ return nil
+}
+
+// ObjectOf returns the object denoted by the specified id,
+// or nil if not found.
+//
+// If id is an embedded struct field, ObjectOf returns the field (*Var)
+// it defines, not the type (*TypeName) it uses.
+//
+// Precondition: the Uses and Defs maps are populated.
+func (info *Info) ObjectOf(id *syntax.Name) Object {
+ if obj := info.Defs[id]; obj != nil {
+ return obj
+ }
+ return info.Uses[id]
+}
+
+// PkgNameOf returns the local package name defined by the import,
+// or nil if not found.
+//
+// For dot-imports, the package name is ".".
+//
+// Precondition: the Defs and Implicts maps are populated.
+func (info *Info) PkgNameOf(imp *syntax.ImportDecl) *PkgName {
+ var obj Object
+ if imp.LocalPkgName != nil {
+ obj = info.Defs[imp.LocalPkgName]
+ } else {
+ obj = info.Implicits[imp]
+ }
+ pkgname, _ := obj.(*PkgName)
+ return pkgname
+}
+
+// TypeAndValue reports the type and value (for constants)
+// of the corresponding expression.
+type TypeAndValue struct {
+ mode operandMode
+ Type Type
+ Value constant.Value
+}
+
+// IsVoid reports whether the corresponding expression
+// is a function call without results.
+func (tv TypeAndValue) IsVoid() bool {
+ return tv.mode == novalue
+}
+
+// IsType reports whether the corresponding expression specifies a type.
+func (tv TypeAndValue) IsType() bool {
+ return tv.mode == typexpr
+}
+
+// IsBuiltin reports whether the corresponding expression denotes
+// a (possibly parenthesized) built-in function.
+func (tv TypeAndValue) IsBuiltin() bool {
+ return tv.mode == builtin
+}
+
+// IsValue reports whether the corresponding expression is a value.
+// Builtins are not considered values. Constant values have a non-
+// nil Value.
+func (tv TypeAndValue) IsValue() bool {
+ switch tv.mode {
+ case constant_, variable, mapindex, value, nilvalue, commaok, commaerr:
+ return true
+ }
+ return false
+}
+
+// IsNil reports whether the corresponding expression denotes the
+// predeclared value nil. Depending on context, it may have been
+// given a type different from UntypedNil.
+func (tv TypeAndValue) IsNil() bool {
+ return tv.mode == nilvalue
+}
+
+// Addressable reports whether the corresponding expression
+// is addressable (https://golang.org/ref/spec#Address_operators).
+func (tv TypeAndValue) Addressable() bool {
+ return tv.mode == variable
+}
+
+// Assignable reports whether the corresponding expression
+// is assignable to (provided a value of the right type).
+func (tv TypeAndValue) Assignable() bool {
+ return tv.mode == variable || tv.mode == mapindex
+}
+
+// HasOk reports whether the corresponding expression may be
+// used on the rhs of a comma-ok assignment.
+func (tv TypeAndValue) HasOk() bool {
+ return tv.mode == commaok || tv.mode == mapindex
+}
+
+// Instance reports the type arguments and instantiated type for type and
+// function instantiations. For type instantiations, Type will be of dynamic
+// type *Named. For function instantiations, Type will be of dynamic type
+// *Signature.
+type Instance struct {
+ TypeArgs *TypeList
+ Type Type
+}
+
+// An Initializer describes a package-level variable, or a list of variables in case
+// of a multi-valued initialization expression, and the corresponding initialization
+// expression.
+type Initializer struct {
+ Lhs []*Var // var Lhs = Rhs
+ Rhs syntax.Expr
+}
+
+func (init *Initializer) String() string {
+ var buf strings.Builder
+ for i, lhs := range init.Lhs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(lhs.Name())
+ }
+ buf.WriteString(" = ")
+ syntax.Fprint(&buf, init.Rhs, syntax.ShortForm)
+ return buf.String()
+}
+
+// Check type-checks a package and returns the resulting package object and
+// the first error if any. Additionally, if info != nil, Check populates each
+// of the non-nil maps in the Info struct.
+//
+// The package is marked as complete if no errors occurred, otherwise it is
+// incomplete. See Config.Error for controlling behavior in the presence of
+// errors.
+//
+// The package is specified by a list of *syntax.Files and corresponding
+// file set, and the package path the package is identified with.
+// The clean path must not be empty or dot (".").
+func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Package, error) {
+ pkg := NewPackage(path, "")
+ return pkg, NewChecker(conf, pkg, info).Files(files)
+}
diff --git a/src/cmd/compile/internal/types2/api_predicates.go b/src/cmd/compile/internal/types2/api_predicates.go
new file mode 100644
index 0000000..480f711
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api_predicates.go
@@ -0,0 +1,84 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements exported type predicates.
+
+package types2
+
+// AssertableTo reports whether a value of type V can be asserted to have type T.
+//
+// The behavior of AssertableTo is unspecified in three cases:
+// - if T is Typ[Invalid]
+// - if V is a generalized interface; i.e., an interface that may only be used
+// as a type constraint in Go code
+// - if T is an uninstantiated generic type
+func AssertableTo(V *Interface, T Type) bool {
+ // Checker.newAssertableTo suppresses errors for invalid types, so we need special
+ // handling here.
+ if !isValid(T.Underlying()) {
+ return false
+ }
+ return (*Checker)(nil).newAssertableTo(nopos, V, T, nil)
+}
+
+// AssignableTo reports whether a value of type V is assignable to a variable
+// of type T.
+//
+// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func AssignableTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
+ return ok
+}
+
+// ConvertibleTo reports whether a value of type V is convertible to a value of
+// type T.
+//
+// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an
+// uninstantiated generic type.
+func ConvertibleTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
+}
+
+// Implements reports whether type V implements interface T.
+//
+// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Implements(V Type, T *Interface) bool {
+ if T.Empty() {
+ // All types (even Typ[Invalid]) implement the empty interface.
+ return true
+ }
+ // Checker.implements suppresses errors for invalid types, so we need special
+ // handling here.
+ if !isValid(V.Underlying()) {
+ return false
+ }
+ return (*Checker)(nil).implements(nopos, V, T, false, nil)
+}
+
+// Satisfies reports whether type V satisfies the constraint T.
+//
+// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated
+// generic type.
+func Satisfies(V Type, T *Interface) bool {
+ return (*Checker)(nil).implements(nopos, V, T, true, nil)
+}
+
+// Identical reports whether x and y are identical types.
+// Receivers of [Signature] types are ignored.
+func Identical(x, y Type) bool {
+ var c comparer
+ return c.identical(x, y, nil)
+}
+
+// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
+// Receivers of [Signature] types are ignored.
+func IdenticalIgnoreTags(x, y Type) bool {
+ var c comparer
+ c.ignoreTags = true
+ return c.identical(x, y, nil)
+}
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
new file mode 100644
index 0000000..bacba71
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -0,0 +1,2939 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+ "internal/goversion"
+ "internal/testenv"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+// nopos indicates an unknown position
+var nopos syntax.Pos
+
+func mustParse(src string) *syntax.File {
+ f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ panic(err) // so we don't need to pass *testing.T
+ }
+ return f
+}
+
+func typecheck(src string, conf *Config, info *Info) (*Package, error) {
+ f := mustParse(src)
+ if conf == nil {
+ conf = &Config{
+ Error: func(err error) {}, // collect all errors
+ Importer: defaultImporter(),
+ }
+ }
+ return conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+}
+
+func mustTypecheck(src string, conf *Config, info *Info) *Package {
+ pkg, err := typecheck(src, conf, info)
+ if err != nil {
+ panic(err) // so we don't need to pass *testing.T
+ }
+ return pkg
+}
+
+// pkgName extracts the package name from src, which must contain a package header.
+func pkgName(src string) string {
+ const kw = "package "
+ if i := strings.Index(src, kw); i >= 0 {
+ after := src[i+len(kw):]
+ n := len(after)
+ if i := strings.IndexAny(after, "\n\t ;/"); i >= 0 {
+ n = i
+ }
+ return after[:n]
+ }
+ panic("missing package header: " + src)
+}
+
+func TestValuesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ expr string // constant expression
+ typ string // constant type
+ val string // constant value
+ }{
+ {`package a0; const _ = false`, `false`, `untyped bool`, `false`},
+ {`package a1; const _ = 0`, `0`, `untyped int`, `0`},
+ {`package a2; const _ = 'A'`, `'A'`, `untyped rune`, `65`},
+ {`package a3; const _ = 0.`, `0.`, `untyped float`, `0`},
+ {`package a4; const _ = 0i`, `0i`, `untyped complex`, `(0 + 0i)`},
+ {`package a5; const _ = "foo"`, `"foo"`, `untyped string`, `"foo"`},
+
+ {`package b0; var _ = false`, `false`, `bool`, `false`},
+ {`package b1; var _ = 0`, `0`, `int`, `0`},
+ {`package b2; var _ = 'A'`, `'A'`, `rune`, `65`},
+ {`package b3; var _ = 0.`, `0.`, `float64`, `0`},
+ {`package b4; var _ = 0i`, `0i`, `complex128`, `(0 + 0i)`},
+ {`package b5; var _ = "foo"`, `"foo"`, `string`, `"foo"`},
+
+ {`package c0a; var _ = bool(false)`, `false`, `bool`, `false`},
+ {`package c0b; var _ = bool(false)`, `bool(false)`, `bool`, `false`},
+ {`package c0c; type T bool; var _ = T(false)`, `T(false)`, `c0c.T`, `false`},
+
+ {`package c1a; var _ = int(0)`, `0`, `int`, `0`},
+ {`package c1b; var _ = int(0)`, `int(0)`, `int`, `0`},
+ {`package c1c; type T int; var _ = T(0)`, `T(0)`, `c1c.T`, `0`},
+
+ {`package c2a; var _ = rune('A')`, `'A'`, `rune`, `65`},
+ {`package c2b; var _ = rune('A')`, `rune('A')`, `rune`, `65`},
+ {`package c2c; type T rune; var _ = T('A')`, `T('A')`, `c2c.T`, `65`},
+
+ {`package c3a; var _ = float32(0.)`, `0.`, `float32`, `0`},
+ {`package c3b; var _ = float32(0.)`, `float32(0.)`, `float32`, `0`},
+ {`package c3c; type T float32; var _ = T(0.)`, `T(0.)`, `c3c.T`, `0`},
+
+ {`package c4a; var _ = complex64(0i)`, `0i`, `complex64`, `(0 + 0i)`},
+ {`package c4b; var _ = complex64(0i)`, `complex64(0i)`, `complex64`, `(0 + 0i)`},
+ {`package c4c; type T complex64; var _ = T(0i)`, `T(0i)`, `c4c.T`, `(0 + 0i)`},
+
+ {`package c5a; var _ = string("foo")`, `"foo"`, `string`, `"foo"`},
+ {`package c5b; var _ = string("foo")`, `string("foo")`, `string`, `"foo"`},
+ {`package c5c; type T string; var _ = T("foo")`, `T("foo")`, `c5c.T`, `"foo"`},
+ {`package c5d; var _ = string(65)`, `65`, `untyped int`, `65`},
+ {`package c5e; var _ = string('A')`, `'A'`, `untyped rune`, `65`},
+ {`package c5f; type T string; var _ = T('A')`, `'A'`, `untyped rune`, `65`},
+
+ {`package d0; var _ = []byte("foo")`, `"foo"`, `string`, `"foo"`},
+ {`package d1; var _ = []byte(string("foo"))`, `"foo"`, `string`, `"foo"`},
+ {`package d2; var _ = []byte(string("foo"))`, `string("foo")`, `string`, `"foo"`},
+ {`package d3; type T []byte; var _ = T("foo")`, `"foo"`, `string`, `"foo"`},
+
+ {`package e0; const _ = float32( 1e-200)`, `float32(1e-200)`, `float32`, `0`},
+ {`package e1; const _ = float32(-1e-200)`, `float32(-1e-200)`, `float32`, `0`},
+ {`package e2; const _ = float64( 1e-2000)`, `float64(1e-2000)`, `float64`, `0`},
+ {`package e3; const _ = float64(-1e-2000)`, `float64(-1e-2000)`, `float64`, `0`},
+ {`package e4; const _ = complex64( 1e-200)`, `complex64(1e-200)`, `complex64`, `(0 + 0i)`},
+ {`package e5; const _ = complex64(-1e-200)`, `complex64(-1e-200)`, `complex64`, `(0 + 0i)`},
+ {`package e6; const _ = complex128( 1e-2000)`, `complex128(1e-2000)`, `complex128`, `(0 + 0i)`},
+ {`package e7; const _ = complex128(-1e-2000)`, `complex128(-1e-2000)`, `complex128`, `(0 + 0i)`},
+
+ {`package f0 ; var _ float32 = 1e-200`, `1e-200`, `float32`, `0`},
+ {`package f1 ; var _ float32 = -1e-200`, `-1e-200`, `float32`, `0`},
+ {`package f2a; var _ float64 = 1e-2000`, `1e-2000`, `float64`, `0`},
+ {`package f3a; var _ float64 = -1e-2000`, `-1e-2000`, `float64`, `0`},
+ {`package f2b; var _ = 1e-2000`, `1e-2000`, `float64`, `0`},
+ {`package f3b; var _ = -1e-2000`, `-1e-2000`, `float64`, `0`},
+ {`package f4 ; var _ complex64 = 1e-200 `, `1e-200`, `complex64`, `(0 + 0i)`},
+ {`package f5 ; var _ complex64 = -1e-200 `, `-1e-200`, `complex64`, `(0 + 0i)`},
+ {`package f6a; var _ complex128 = 1e-2000i`, `1e-2000i`, `complex128`, `(0 + 0i)`},
+ {`package f7a; var _ complex128 = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`},
+ {`package f6b; var _ = 1e-2000i`, `1e-2000i`, `complex128`, `(0 + 0i)`},
+ {`package f7b; var _ = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`},
+
+ {`package g0; const (a = len([iota]int{}); b; c); const _ = c`, `c`, `int`, `2`}, // go.dev/issue/22341
+ {`package g1; var(j int32; s int; n = 1.0<<s == j)`, `1.0`, `int32`, `1`}, // go.dev/issue/48422
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ }
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // look for expression
+ var expr syntax.Expr
+ for e := range info.Types {
+ if syntax.String(e) == test.expr {
+ expr = e
+ break
+ }
+ }
+ if expr == nil {
+ t.Errorf("package %s: no expression found for %s", name, test.expr)
+ continue
+ }
+ tv := info.Types[expr]
+
+ // check that type is correct
+ if got := tv.Type.String(); got != test.typ {
+ t.Errorf("package %s: got type %s; want %s", name, got, test.typ)
+ continue
+ }
+
+ // if we have a constant, check that value is correct
+ if tv.Value != nil {
+ if got := tv.Value.ExactString(); got != test.val {
+ t.Errorf("package %s: got value %s; want %s", name, got, test.val)
+ }
+ } else {
+ if test.val != "" {
+ t.Errorf("package %s: no constant found; want %s", name, test.val)
+ }
+ }
+ }
+}
+
+func TestTypesInfo(t *testing.T) {
+ // Test sources that are not expected to typecheck must start with the broken prefix.
+ const brokenPkg = "package broken_"
+
+ var tests = []struct {
+ src string
+ expr string // expression
+ typ string // value type
+ }{
+ // single-valued expressions of untyped constants
+ {`package b0; var x interface{} = false`, `false`, `bool`},
+ {`package b1; var x interface{} = 0`, `0`, `int`},
+ {`package b2; var x interface{} = 0.`, `0.`, `float64`},
+ {`package b3; var x interface{} = 0i`, `0i`, `complex128`},
+ {`package b4; var x interface{} = "foo"`, `"foo"`, `string`},
+
+ // uses of nil
+ {`package n0; var _ *int = nil`, `nil`, `*int`},
+ {`package n1; var _ func() = nil`, `nil`, `func()`},
+ {`package n2; var _ []byte = nil`, `nil`, `[]byte`},
+ {`package n3; var _ map[int]int = nil`, `nil`, `map[int]int`},
+ {`package n4; var _ chan int = nil`, `nil`, `chan int`},
+ {`package n5a; var _ interface{} = (*int)(nil)`, `nil`, `*int`},
+ {`package n5b; var _ interface{m()} = nil`, `nil`, `interface{m()}`},
+ {`package n6; import "unsafe"; var _ unsafe.Pointer = nil`, `nil`, `unsafe.Pointer`},
+
+ {`package n10; var (x *int; _ = x == nil)`, `nil`, `*int`},
+ {`package n11; var (x func(); _ = x == nil)`, `nil`, `func()`},
+ {`package n12; var (x []byte; _ = x == nil)`, `nil`, `[]byte`},
+ {`package n13; var (x map[int]int; _ = x == nil)`, `nil`, `map[int]int`},
+ {`package n14; var (x chan int; _ = x == nil)`, `nil`, `chan int`},
+ {`package n15a; var (x interface{}; _ = x == (*int)(nil))`, `nil`, `*int`},
+ {`package n15b; var (x interface{m()}; _ = x == nil)`, `nil`, `interface{m()}`},
+ {`package n15; import "unsafe"; var (x unsafe.Pointer; _ = x == nil)`, `nil`, `unsafe.Pointer`},
+
+ {`package n20; var _ = (*int)(nil)`, `nil`, `*int`},
+ {`package n21; var _ = (func())(nil)`, `nil`, `func()`},
+ {`package n22; var _ = ([]byte)(nil)`, `nil`, `[]byte`},
+ {`package n23; var _ = (map[int]int)(nil)`, `nil`, `map[int]int`},
+ {`package n24; var _ = (chan int)(nil)`, `nil`, `chan int`},
+ {`package n25a; var _ = (interface{})((*int)(nil))`, `nil`, `*int`},
+ {`package n25b; var _ = (interface{m()})(nil)`, `nil`, `interface{m()}`},
+ {`package n26; import "unsafe"; var _ = unsafe.Pointer(nil)`, `nil`, `unsafe.Pointer`},
+
+ {`package n30; func f(*int) { f(nil) }`, `nil`, `*int`},
+ {`package n31; func f(func()) { f(nil) }`, `nil`, `func()`},
+ {`package n32; func f([]byte) { f(nil) }`, `nil`, `[]byte`},
+ {`package n33; func f(map[int]int) { f(nil) }`, `nil`, `map[int]int`},
+ {`package n34; func f(chan int) { f(nil) }`, `nil`, `chan int`},
+ {`package n35a; func f(interface{}) { f((*int)(nil)) }`, `nil`, `*int`},
+ {`package n35b; func f(interface{m()}) { f(nil) }`, `nil`, `interface{m()}`},
+ {`package n35; import "unsafe"; func f(unsafe.Pointer) { f(nil) }`, `nil`, `unsafe.Pointer`},
+
+ // comma-ok expressions
+ {`package p0; var x interface{}; var _, _ = x.(int)`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package p1; var x interface{}; func _() { _, _ = x.(int) }`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package p2a; type mybool bool; var m map[string]complex128; var b mybool; func _() { _, b = m["foo"] }`,
+ `m["foo"]`,
+ `(complex128, p2a.mybool)`,
+ },
+ {`package p2b; var m map[string]complex128; var b bool; func _() { _, b = m["foo"] }`,
+ `m["foo"]`,
+ `(complex128, bool)`,
+ },
+ {`package p3; var c chan string; var _, _ = <-c`,
+ `<-c`,
+ `(string, bool)`,
+ },
+
+ // go.dev/issue/6796
+ {`package issue6796_a; var x interface{}; var _, _ = (x.(int))`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package issue6796_b; var c chan string; var _, _ = (<-c)`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+ {`package issue6796_c; var c chan string; var _, _ = (<-c)`,
+ `<-c`,
+ `(string, bool)`,
+ },
+ {`package issue6796_d; var c chan string; var _, _ = ((<-c))`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+ {`package issue6796_e; func f(c chan string) { _, _ = ((<-c)) }`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+
+ // go.dev/issue/7060
+ {`package issue7060_a; var ( m map[int]string; x, ok = m[0] )`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_b; var ( m map[int]string; x, ok interface{} = m[0] )`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_c; func f(x interface{}, ok bool, m map[int]string) { x, ok = m[0] }`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_d; var ( ch chan string; x, ok = <-ch )`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ {`package issue7060_e; var ( ch chan string; x, ok interface{} = <-ch )`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ {`package issue7060_f; func f(x interface{}, ok bool, ch chan string) { x, ok = <-ch }`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+
+ // go.dev/issue/28277
+ {`package issue28277_a; func f(...int)`,
+ `...int`,
+ `[]int`,
+ },
+ {`package issue28277_b; func f(a, b int, c ...[]struct{})`,
+ `...[]struct{}`,
+ `[][]struct{}`,
+ },
+
+ // go.dev/issue/47243
+ {`package issue47243_a; var x int32; var _ = x << 3`, `3`, `untyped int`},
+ {`package issue47243_b; var x int32; var _ = x << 3.`, `3.`, `untyped float`},
+ {`package issue47243_c; var x int32; var _ = 1 << x`, `1 << x`, `int`},
+ {`package issue47243_d; var x int32; var _ = 1 << x`, `1`, `int`},
+ {`package issue47243_e; var x int32; var _ = 1 << 2`, `1`, `untyped int`},
+ {`package issue47243_f; var x int32; var _ = 1 << 2`, `2`, `untyped int`},
+ {`package issue47243_g; var x int32; var _ = int(1) << 2`, `2`, `untyped int`},
+ {`package issue47243_h; var x int32; var _ = 1 << (2 << x)`, `1`, `int`},
+ {`package issue47243_i; var x int32; var _ = 1 << (2 << x)`, `(2 << x)`, `untyped int`},
+ {`package issue47243_j; var x int32; var _ = 1 << (2 << x)`, `2`, `untyped int`},
+
+ // tests for broken code that doesn't type-check
+ {brokenPkg + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`},
+ {brokenPkg + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`},
+ {brokenPkg + `x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a, f: b,}}`, `b`, `string`},
+ {brokenPkg + `x3; var x = panic("");`, `panic`, `func(interface{})`},
+ {`package x4; func _() { panic("") }`, `panic`, `func(interface{})`},
+ {brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`},
+
+ // parameterized functions
+ {`package p0; func f[T any](T) {}; var _ = f[int]`, `f`, `func[T any](T)`},
+ {`package p1; func f[T any](T) {}; var _ = f[int]`, `f[int]`, `func(int)`},
+ {`package p2; func f[T any](T) {}; func _() { f(42) }`, `f`, `func(int)`},
+ {`package p3; func f[T any](T) {}; func _() { f[int](42) }`, `f[int]`, `func(int)`},
+ {`package p4; func f[T any](T) {}; func _() { f[int](42) }`, `f`, `func[T any](T)`},
+ {`package p5; func f[T any](T) {}; func _() { f(42) }`, `f(42)`, `()`},
+
+ // type parameters
+ {`package t0; type t[] int; var _ t`, `t`, `t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
+ {`package t1; type t[P any] int; var _ t[int]`, `t`, `t1.t[P any]`},
+ {`package t2; type t[P interface{}] int; var _ t[int]`, `t`, `t2.t[P interface{}]`},
+ {`package t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `t3.t[P, Q interface{}]`},
+ {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P, Q interface{m()}]`},
+
+ // instantiated types must be sanitized
+ {`package g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `g0.t[int]`},
+
+ // go.dev/issue/45096
+ {`package issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `T`},
+
+ // go.dev/issue/47895
+ {`package p; import "unsafe"; type S struct { f int }; var s S; var _ = unsafe.Offsetof(s.f)`, `s.f`, `int`},
+
+ // go.dev/issue/50093
+ {`package u0a; func _[_ interface{int}]() {}`, `int`, `int`},
+ {`package u1a; func _[_ interface{~int}]() {}`, `~int`, `~int`},
+ {`package u2a; func _[_ interface{int | string}]() {}`, `int | string`, `int | string`},
+ {`package u3a; func _[_ interface{int | string | ~bool}]() {}`, `int | string | ~bool`, `int | string | ~bool`},
+ {`package u3a; func _[_ interface{int | string | ~bool}]() {}`, `int | string`, `int | string`},
+ {`package u3a; func _[_ interface{int | string | ~bool}]() {}`, `~bool`, `~bool`},
+ {`package u3a; func _[_ interface{int | string | ~float64|~bool}]() {}`, `int | string | ~float64`, `int | string | ~float64`},
+
+ {`package u0b; func _[_ int]() {}`, `int`, `int`},
+ {`package u1b; func _[_ ~int]() {}`, `~int`, `~int`},
+ {`package u2b; func _[_ int | string]() {}`, `int | string`, `int | string`},
+ {`package u3b; func _[_ int | string | ~bool]() {}`, `int | string | ~bool`, `int | string | ~bool`},
+ {`package u3b; func _[_ int | string | ~bool]() {}`, `int | string`, `int | string`},
+ {`package u3b; func _[_ int | string | ~bool]() {}`, `~bool`, `~bool`},
+ {`package u3b; func _[_ int | string | ~float64|~bool]() {}`, `int | string | ~float64`, `int | string | ~float64`},
+
+ {`package u0c; type _ interface{int}`, `int`, `int`},
+ {`package u1c; type _ interface{~int}`, `~int`, `~int`},
+ {`package u2c; type _ interface{int | string}`, `int | string`, `int | string`},
+ {`package u3c; type _ interface{int | string | ~bool}`, `int | string | ~bool`, `int | string | ~bool`},
+ {`package u3c; type _ interface{int | string | ~bool}`, `int | string`, `int | string`},
+ {`package u3c; type _ interface{int | string | ~bool}`, `~bool`, `~bool`},
+ {`package u3c; type _ interface{int | string | ~float64|~bool}`, `int | string | ~float64`, `int | string | ~float64`},
+
+ // reverse type inference
+ {`package r1; var _ func(int) = g; func g[P any](P) {}`, `g`, `func(int)`},
+ {`package r2; var _ func(int) = g[int]; func g[P any](P) {}`, `g`, `func[P any](P)`}, // go.dev/issues/60212
+ {`package r3; var _ func(int) = g[int]; func g[P any](P) {}`, `g[int]`, `func(int)`},
+ {`package r4; var _ func(int, string) = g; func g[P, Q any](P, Q) {}`, `g`, `func(int, string)`},
+ {`package r5; var _ func(int, string) = g[int]; func g[P, Q any](P, Q) {}`, `g`, `func[P, Q any](P, Q)`}, // go.dev/issues/60212
+ {`package r6; var _ func(int, string) = g[int]; func g[P, Q any](P, Q) {}`, `g[int]`, `func(int, string)`},
+
+ {`package s1; func _() { f(g) }; func f(func(int)) {}; func g[P any](P) {}`, `g`, `func(int)`},
+ {`package s2; func _() { f(g[int]) }; func f(func(int)) {}; func g[P any](P) {}`, `g`, `func[P any](P)`}, // go.dev/issues/60212
+ {`package s3; func _() { f(g[int]) }; func f(func(int)) {}; func g[P any](P) {}`, `g[int]`, `func(int)`},
+ {`package s4; func _() { f(g) }; func f(func(int, string)) {}; func g[P, Q any](P, Q) {}`, `g`, `func(int, string)`},
+ {`package s5; func _() { f(g[int]) }; func f(func(int, string)) {}; func g[P, Q any](P, Q) {}`, `g`, `func[P, Q any](P, Q)`}, // go.dev/issues/60212
+ {`package s6; func _() { f(g[int]) }; func f(func(int, string)) {}; func g[P, Q any](P, Q) {}`, `g[int]`, `func(int, string)`},
+
+ {`package s7; func _() { f(g, h) }; func f[P any](func(int, P), func(P, string)) {}; func g[P any](P, P) {}; func h[P, Q any](P, Q) {}`, `g`, `func(int, int)`},
+ {`package s8; func _() { f(g, h) }; func f[P any](func(int, P), func(P, string)) {}; func g[P any](P, P) {}; func h[P, Q any](P, Q) {}`, `h`, `func(int, string)`},
+ {`package s9; func _() { f(g, h[int]) }; func f[P any](func(int, P), func(P, string)) {}; func g[P any](P, P) {}; func h[P, Q any](P, Q) {}`, `h`, `func[P, Q any](P, Q)`}, // go.dev/issues/60212
+ {`package s10; func _() { f(g, h[int]) }; func f[P any](func(int, P), func(P, string)) {}; func g[P any](P, P) {}; func h[P, Q any](P, Q) {}`, `h[int]`, `func(int, string)`},
+ }
+
+ for _, test := range tests {
+ info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
+ var name string
+ if strings.HasPrefix(test.src, brokenPkg) {
+ pkg, err := typecheck(test.src, nil, &info)
+ if err == nil {
+ t.Errorf("package %s: expected to fail but passed", pkg.Name())
+ continue
+ }
+ if pkg != nil {
+ name = pkg.Name()
+ }
+ } else {
+ name = mustTypecheck(test.src, nil, &info).Name()
+ }
+
+ // look for expression type
+ var typ Type
+ for e, tv := range info.Types {
+ if syntax.String(e) == test.expr {
+ typ = tv.Type
+ break
+ }
+ }
+ if typ == nil {
+ t.Errorf("package %s: no type found for %s", name, test.expr)
+ continue
+ }
+
+ // check that type is correct
+ if got := typ.String(); got != test.typ {
+ t.Errorf("package %s: expr = %s: got %s; want %s", name, test.expr, got, test.typ)
+ }
+ }
+}
+
+func TestInstanceInfo(t *testing.T) {
+ const lib = `package lib
+
+func F[P any](P) {}
+
+type T[P any] []P
+`
+
+ type testInst struct {
+ name string
+ targs []string
+ typ string
+ }
+
+ var tests = []struct {
+ src string
+ instances []testInst // recorded instances in source order
+ }{
+ {`package p0; func f[T any](T) {}; func _() { f(42) }`,
+ []testInst{{`f`, []string{`int`}, `func(int)`}},
+ },
+ {`package p1; func f[T any](T) T { panic(0) }; func _() { f('@') }`,
+ []testInst{{`f`, []string{`rune`}, `func(rune) rune`}},
+ },
+ {`package p2; func f[T any](...T) T { panic(0) }; func _() { f(0i) }`,
+ []testInst{{`f`, []string{`complex128`}, `func(...complex128) complex128`}},
+ },
+ {`package p3; func f[A, B, C any](A, *B, []C) {}; func _() { f(1.2, new(string), []byte{}) }`,
+ []testInst{{`f`, []string{`float64`, `string`, `byte`}, `func(float64, *string, []byte)`}},
+ },
+ {`package p4; func f[A, B any](A, *B, ...[]B) {}; func _() { f(1.2, new(byte)) }`,
+ []testInst{{`f`, []string{`float64`, `byte`}, `func(float64, *byte, ...[]byte)`}},
+ },
+
+ {`package s1; func f[T any, P interface{*T}](x T) {}; func _(x string) { f(x) }`,
+ []testInst{{`f`, []string{`string`, `*string`}, `func(x string)`}},
+ },
+ {`package s2; func f[T any, P interface{*T}](x []T) {}; func _(x []int) { f(x) }`,
+ []testInst{{`f`, []string{`int`, `*int`}, `func(x []int)`}},
+ },
+ {`package s3; type C[T any] interface{chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`f`, []string{`int`, `chan<- int`}, `func(x []int)`},
+ },
+ },
+ {`package s4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`C`, []string{`[]*P`}, `interface{chan<- []*P}`},
+ {`f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func(x []int)`},
+ },
+ },
+
+ {`package t1; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = f[string] }`,
+ []testInst{{`f`, []string{`string`, `*string`}, `func() string`}},
+ },
+ {`package t2; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
+ []testInst{{`f`, []string{`string`, `*string`}, `func() string`}},
+ },
+ {`package t3; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`C`, []string{`[]*P`}, `interface{chan<- []*P}`},
+ {`f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func() []int`},
+ },
+ },
+ {`package t4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = (f[int]) }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`C`, []string{`[]*P`}, `interface{chan<- []*P}`},
+ {`f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func() []int`},
+ },
+ },
+ {`package i0; import "lib"; func _() { lib.F(42) }`,
+ []testInst{{`F`, []string{`int`}, `func(int)`}},
+ },
+
+ {`package duplfunc0; func f[T any](T) {}; func _() { f(42); f("foo"); f[int](3) }`,
+ []testInst{
+ {`f`, []string{`int`}, `func(int)`},
+ {`f`, []string{`string`}, `func(string)`},
+ {`f`, []string{`int`}, `func(int)`},
+ },
+ },
+ {`package duplfunc1; import "lib"; func _() { lib.F(42); lib.F("foo"); lib.F(3) }`,
+ []testInst{
+ {`F`, []string{`int`}, `func(int)`},
+ {`F`, []string{`string`}, `func(string)`},
+ {`F`, []string{`int`}, `func(int)`},
+ },
+ },
+
+ {`package type0; type T[P interface{~int}] struct{ x P }; var _ T[int]`,
+ []testInst{{`T`, []string{`int`}, `struct{x int}`}},
+ },
+ {`package type1; type T[P interface{~int}] struct{ x P }; var _ (T[int])`,
+ []testInst{{`T`, []string{`int`}, `struct{x int}`}},
+ },
+ {`package type2; type T[P interface{~int}] struct{ x P }; var _ T[(int)]`,
+ []testInst{{`T`, []string{`int`}, `struct{x int}`}},
+ },
+ {`package type3; type T[P1 interface{~[]P2}, P2 any] struct{ x P1; y P2 }; var _ T[[]int, int]`,
+ []testInst{{`T`, []string{`[]int`, `int`}, `struct{x []int; y int}`}},
+ },
+ {`package type4; import "lib"; var _ lib.T[int]`,
+ []testInst{{`T`, []string{`int`}, `[]int`}},
+ },
+
+ {`package dupltype0; type T[P interface{~int}] struct{ x P }; var x T[int]; var y T[int]`,
+ []testInst{
+ {`T`, []string{`int`}, `struct{x int}`},
+ {`T`, []string{`int`}, `struct{x int}`},
+ },
+ },
+ {`package dupltype1; type T[P ~int] struct{ x P }; func (r *T[Q]) add(z T[Q]) { r.x += z.x }`,
+ []testInst{
+ {`T`, []string{`Q`}, `struct{x Q}`},
+ {`T`, []string{`Q`}, `struct{x Q}`},
+ },
+ },
+ {`package dupltype1; import "lib"; var x lib.T[int]; var y lib.T[int]; var z lib.T[string]`,
+ []testInst{
+ {`T`, []string{`int`}, `[]int`},
+ {`T`, []string{`int`}, `[]int`},
+ {`T`, []string{`string`}, `[]string`},
+ },
+ },
+ {`package issue51803; func foo[T any](T) {}; func _() { foo[int]( /* leave arg away on purpose */ ) }`,
+ []testInst{{`foo`, []string{`int`}, `func(int)`}},
+ },
+
+ // reverse type inference
+ {`package reverse1a; var f func(int) = g; func g[P any](P) {}`,
+ []testInst{{`g`, []string{`int`}, `func(int)`}},
+ },
+ {`package reverse1b; func f(func(int)) {}; func g[P any](P) {}; func _() { f(g) }`,
+ []testInst{{`g`, []string{`int`}, `func(int)`}},
+ },
+ {`package reverse2a; var f func(int, string) = g; func g[P, Q any](P, Q) {}`,
+ []testInst{{`g`, []string{`int`, `string`}, `func(int, string)`}},
+ },
+ {`package reverse2b; func f(func(int, string)) {}; func g[P, Q any](P, Q) {}; func _() { f(g) }`,
+ []testInst{{`g`, []string{`int`, `string`}, `func(int, string)`}},
+ },
+ {`package reverse2c; func f(func(int, string)) {}; func g[P, Q any](P, Q) {}; func _() { f(g[int]) }`,
+ []testInst{{`g`, []string{`int`, `string`}, `func(int, string)`}},
+ },
+ // reverse3a not possible (cannot assign to generic function outside of argument passing)
+ {`package reverse3b; func f[R any](func(int) R) {}; func g[P any](P) string { return "" }; func _() { f(g) }`,
+ []testInst{
+ {`f`, []string{`string`}, `func(func(int) string)`},
+ {`g`, []string{`int`}, `func(int) string`},
+ },
+ },
+ {`package reverse4a; var _, _ func([]int, *float32) = g, h; func g[P, Q any]([]P, *Q) {}; func h[R any]([]R, *float32) {}`,
+ []testInst{
+ {`g`, []string{`int`, `float32`}, `func([]int, *float32)`},
+ {`h`, []string{`int`}, `func([]int, *float32)`},
+ },
+ },
+ {`package reverse4b; func f(_, _ func([]int, *float32)) {}; func g[P, Q any]([]P, *Q) {}; func h[R any]([]R, *float32) {}; func _() { f(g, h) }`,
+ []testInst{
+ {`g`, []string{`int`, `float32`}, `func([]int, *float32)`},
+ {`h`, []string{`int`}, `func([]int, *float32)`},
+ },
+ },
+ {`package issue59956; func f(func(int), func(string), func(bool)) {}; func g[P any](P) {}; func _() { f(g, g, g) }`,
+ []testInst{
+ {`g`, []string{`int`}, `func(int)`},
+ {`g`, []string{`string`}, `func(string)`},
+ {`g`, []string{`bool`}, `func(bool)`},
+ },
+ },
+ }
+
+ for _, test := range tests {
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ instMap := make(map[*syntax.Name]Instance)
+ useMap := make(map[*syntax.Name]Object)
+ makePkg := func(src string) *Package {
+ pkg, err := typecheck(src, &conf, &Info{Instances: instMap, Uses: useMap})
+ // allow error for issue51803
+ if err != nil && (pkg == nil || pkg.Name() != "issue51803") {
+ t.Fatal(err)
+ }
+ imports[pkg.Name()] = pkg
+ return pkg
+ }
+ makePkg(lib)
+ pkg := makePkg(test.src)
+
+ t.Run(pkg.Name(), func(t *testing.T) {
+ // Sort instances in source order for stability.
+ instances := sortedInstances(instMap)
+ if got, want := len(instances), len(test.instances); got != want {
+ t.Fatalf("got %d instances, want %d", got, want)
+ }
+
+ // Pairwise compare with the expected instances.
+ for ii, inst := range instances {
+ var targs []Type
+ for i := 0; i < inst.Inst.TypeArgs.Len(); i++ {
+ targs = append(targs, inst.Inst.TypeArgs.At(i))
+ }
+ typ := inst.Inst.Type
+
+ testInst := test.instances[ii]
+ if got := inst.Name.Value; got != testInst.name {
+ t.Fatalf("got name %s, want %s", got, testInst.name)
+ }
+
+ if len(targs) != len(testInst.targs) {
+ t.Fatalf("got %d type arguments; want %d", len(targs), len(testInst.targs))
+ }
+ for i, targ := range targs {
+ if got := targ.String(); got != testInst.targs[i] {
+ t.Errorf("type argument %d: got %s; want %s", i, got, testInst.targs[i])
+ }
+ }
+ if got := typ.Underlying().String(); got != testInst.typ {
+ t.Errorf("package %s: got %s; want %s", pkg.Name(), got, testInst.typ)
+ }
+
+ // Verify the invariant that re-instantiating the corresponding generic
+ // type with TypeArgs results in an identical instance.
+ ptype := useMap[inst.Name].Type()
+ lister, _ := ptype.(interface{ TypeParams() *TypeParamList })
+ if lister == nil || lister.TypeParams().Len() == 0 {
+ t.Fatalf("info.Types[%v] = %v, want parameterized type", inst.Name, ptype)
+ }
+ inst2, err := Instantiate(nil, ptype, targs, true)
+ if err != nil {
+ t.Errorf("Instantiate(%v, %v) failed: %v", ptype, targs, err)
+ }
+ if !Identical(inst.Inst.Type, inst2) {
+ t.Errorf("%v and %v are not identical", inst.Inst.Type, inst2)
+ }
+ }
+ })
+ }
+}
+
+type recordedInstance struct {
+ Name *syntax.Name
+ Inst Instance
+}
+
+func sortedInstances(m map[*syntax.Name]Instance) (instances []recordedInstance) {
+ for id, inst := range m {
+ instances = append(instances, recordedInstance{id, inst})
+ }
+ sort.Slice(instances, func(i, j int) bool {
+ return CmpPos(instances[i].Name.Pos(), instances[j].Name.Pos()) < 0
+ })
+ return instances
+}
+
+func TestDefsInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ obj string
+ want string
+ }{
+ {`package p0; const x = 42`, `x`, `const p0.x untyped int`},
+ {`package p1; const x int = 42`, `x`, `const p1.x int`},
+ {`package p2; var x int`, `x`, `var p2.x int`},
+ {`package p3; type x int`, `x`, `type p3.x int`},
+ {`package p4; func f()`, `f`, `func p4.f()`},
+ {`package p5; func f() int { x, _ := 1, 2; return x }`, `_`, `var _ int`},
+
+ // Tests using generics.
+ {`package g0; type x[T any] int`, `x`, `type g0.x[T any] int`},
+ {`package g1; func f[T any]() {}`, `f`, `func g1.f[T any]()`},
+ {`package g2; type x[T any] int; func (*x[_]) m() {}`, `m`, `func (*g2.x[_]).m()`},
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // find object
+ var def Object
+ for id, obj := range info.Defs {
+ if id.Value == test.obj {
+ def = obj
+ break
+ }
+ }
+ if def == nil {
+ t.Errorf("package %s: %s not found", name, test.obj)
+ continue
+ }
+
+ if got := def.String(); got != test.want {
+ t.Errorf("package %s: got %s; want %s", name, got, test.want)
+ }
+ }
+}
+
+func TestUsesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ obj string
+ want string
+ }{
+ {`package p0; func _() { _ = x }; const x = 42`, `x`, `const p0.x untyped int`},
+ {`package p1; func _() { _ = x }; const x int = 42`, `x`, `const p1.x int`},
+ {`package p2; func _() { _ = x }; var x int`, `x`, `var p2.x int`},
+ {`package p3; func _() { type _ x }; type x int`, `x`, `type p3.x int`},
+ {`package p4; func _() { _ = f }; func f()`, `f`, `func p4.f()`},
+
+ // Tests using generics.
+ {`package g0; func _[T any]() { _ = x }; const x = 42`, `x`, `const g0.x untyped int`},
+ {`package g1; func _[T any](x T) { }`, `T`, `type parameter T any`},
+ {`package g2; type N[A any] int; var _ N[int]`, `N`, `type g2.N[A any] int`},
+ {`package g3; type N[A any] int; func (N[_]) m() {}`, `N`, `type g3.N[A any] int`},
+
+ // Uses of fields are instantiated.
+ {`package s1; type N[A any] struct{ a A }; var f = N[int]{}.a`, `a`, `field a int`},
+ {`package s1; type N[A any] struct{ a A }; func (r N[B]) m(b B) { r.a = b }`, `a`, `field a B`},
+
+ // Uses of methods are uses of the instantiated method.
+ {`package m0; type N[A any] int; func (r N[B]) m() { r.n() }; func (N[C]) n() {}`, `n`, `func (m0.N[B]).n()`},
+ {`package m1; type N[A any] int; func (r N[B]) m() { }; var f = N[int].m`, `m`, `func (m1.N[int]).m()`},
+ {`package m2; func _[A any](v interface{ m() A }) { v.m() }`, `m`, `func (interface).m() A`},
+ {`package m3; func f[A any]() interface{ m() A } { return nil }; var _ = f[int]().m()`, `m`, `func (interface).m() int`},
+ {`package m4; type T[A any] func() interface{ m() A }; var x T[int]; var y = x().m`, `m`, `func (interface).m() int`},
+ {`package m5; type T[A any] interface{ m() A }; func _[B any](t T[B]) { t.m() }`, `m`, `func (m5.T[B]).m() B`},
+ {`package m6; type T[A any] interface{ m() }; func _[B any](t T[B]) { t.m() }`, `m`, `func (m6.T[B]).m()`},
+ {`package m7; type T[A any] interface{ m() A }; func _(t T[int]) { t.m() }`, `m`, `func (m7.T[int]).m() int`},
+ {`package m8; type T[A any] interface{ m() }; func _(t T[int]) { t.m() }`, `m`, `func (m8.T[int]).m()`},
+ {`package m9; type T[A any] interface{ m() }; func _(t T[int]) { _ = t.m }`, `m`, `func (m9.T[int]).m()`},
+ {
+ `package m10; type E[A any] interface{ m() }; type T[B any] interface{ E[B]; n() }; func _(t T[int]) { t.m() }`,
+ `m`,
+ `func (m10.E[int]).m()`,
+ },
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Uses: make(map[*syntax.Name]Object),
+ }
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // find object
+ var use Object
+ for id, obj := range info.Uses {
+ if id.Value == test.obj {
+ if use != nil {
+ panic(fmt.Sprintf("multiple uses of %q", id.Value))
+ }
+ use = obj
+ }
+ }
+ if use == nil {
+ t.Errorf("package %s: %s not found", name, test.obj)
+ continue
+ }
+
+ if got := use.String(); got != test.want {
+ t.Errorf("package %s: got %s; want %s", name, got, test.want)
+ }
+ }
+}
+
+func TestGenericMethodInfo(t *testing.T) {
+ src := `package p
+
+type N[A any] int
+
+func (r N[B]) m() { r.m(); r.n() }
+
+func (r *N[C]) n() { }
+`
+ f := mustParse(src)
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ }
+ var conf Config
+ pkg, err := conf.Check("p", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ N := pkg.Scope().Lookup("N").Type().(*Named)
+
+ // Find the generic methods stored on N.
+ gm, gn := N.Method(0), N.Method(1)
+ if gm.Name() == "n" {
+ gm, gn = gn, gm
+ }
+
+ // Collect objects from info.
+ var dm, dn *Func // the declared methods
+ var dmm, dmn *Func // the methods used in the body of m
+ for _, decl := range f.DeclList {
+ fdecl, ok := decl.(*syntax.FuncDecl)
+ if !ok {
+ continue
+ }
+ def := info.Defs[fdecl.Name].(*Func)
+ switch fdecl.Name.Value {
+ case "m":
+ dm = def
+ syntax.Inspect(fdecl.Body, func(n syntax.Node) bool {
+ if call, ok := n.(*syntax.CallExpr); ok {
+ sel := call.Fun.(*syntax.SelectorExpr)
+ use := info.Uses[sel.Sel].(*Func)
+ selection := info.Selections[sel]
+ if selection.Kind() != MethodVal {
+ t.Errorf("Selection kind = %v, want %v", selection.Kind(), MethodVal)
+ }
+ if selection.Obj() != use {
+ t.Errorf("info.Selections contains %v, want %v", selection.Obj(), use)
+ }
+ switch sel.Sel.Value {
+ case "m":
+ dmm = use
+ case "n":
+ dmn = use
+ }
+ }
+ return true
+ })
+ case "n":
+ dn = def
+ }
+ }
+
+ if gm != dm {
+ t.Errorf(`N.Method(...) returns %v for "m", but Info.Defs has %v`, gm, dm)
+ }
+ if gn != dn {
+ t.Errorf(`N.Method(...) returns %v for "m", but Info.Defs has %v`, gm, dm)
+ }
+ if dmm != dm {
+ t.Errorf(`Inside "m", r.m uses %v, want the defined func %v`, dmm, dm)
+ }
+ if dmn == dn {
+ t.Errorf(`Inside "m", r.n uses %v, want a func distinct from %v`, dmm, dm)
+ }
+}
+
+func TestImplicitsInfo(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests = []struct {
+ src string
+ want string
+ }{
+ {`package p2; import . "fmt"; var _ = Println`, ""}, // no Implicits entry
+ {`package p0; import local "fmt"; var _ = local.Println`, ""}, // no Implicits entry
+ {`package p1; import "fmt"; var _ = fmt.Println`, "importSpec: package fmt"},
+
+ {`package p3; func f(x interface{}) { switch x.(type) { case int: } }`, ""}, // no Implicits entry
+ {`package p4; func f(x interface{}) { switch t := x.(type) { case int: _ = t } }`, "caseClause: var t int"},
+ {`package p5; func f(x interface{}) { switch t := x.(type) { case int, uint: _ = t } }`, "caseClause: var t interface{}"},
+ {`package p6; func f(x interface{}) { switch t := x.(type) { default: _ = t } }`, "caseClause: var t interface{}"},
+
+ {`package p7; func f(x int) {}`, ""}, // no Implicits entry
+ {`package p8; func f(int) {}`, "field: var int"},
+ {`package p9; func f() (complex64) { return 0 }`, "field: var complex64"},
+ {`package p10; type T struct{}; func (*T) f() {}`, "field: var *p10.T"},
+
+ // Tests using generics.
+ {`package f0; func f[T any](x int) {}`, ""}, // no Implicits entry
+ {`package f1; func f[T any](int) {}`, "field: var int"},
+ {`package f2; func f[T any](T) {}`, "field: var T"},
+ {`package f3; func f[T any]() (complex64) { return 0 }`, "field: var complex64"},
+ {`package f4; func f[T any](t T) (T) { return t }`, "field: var T"},
+ {`package t0; type T[A any] struct{}; func (*T[_]) f() {}`, "field: var *t0.T[_]"},
+ {`package t1; type T[A any] struct{}; func _(x interface{}) { switch t := x.(type) { case T[int]: _ = t } }`, "caseClause: var t t1.T[int]"},
+ {`package t2; type T[A any] struct{}; func _[P any](x interface{}) { switch t := x.(type) { case T[P]: _ = t } }`, "caseClause: var t t2.T[P]"},
+ {`package t3; func _[P any](x interface{}) { switch t := x.(type) { case P: _ = t } }`, "caseClause: var t P"},
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Implicits: make(map[syntax.Node]Object),
+ }
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // the test cases expect at most one Implicits entry
+ if len(info.Implicits) > 1 {
+ t.Errorf("package %s: %d Implicits entries found", name, len(info.Implicits))
+ continue
+ }
+
+ // extract Implicits entry, if any
+ var got string
+ for n, obj := range info.Implicits {
+ switch x := n.(type) {
+ case *syntax.ImportDecl:
+ got = "importSpec"
+ case *syntax.CaseClause:
+ got = "caseClause"
+ case *syntax.Field:
+ got = "field"
+ default:
+ t.Fatalf("package %s: unexpected %T", name, x)
+ }
+ got += ": " + obj.String()
+ }
+
+ // verify entry
+ if got != test.want {
+ t.Errorf("package %s: got %q; want %q", name, got, test.want)
+ }
+ }
+}
+
+func TestPkgNameOf(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ const src = `
+package p
+
+import (
+ . "os"
+ _ "io"
+ "math"
+ "path/filepath"
+ snort "sort"
+)
+
+// avoid imported and not used errors
+var (
+ _ = Open // os.Open
+ _ = math.Sin
+ _ = filepath.Abs
+ _ = snort.Ints
+)
+`
+
+ var tests = []struct {
+ path string // path string enclosed in "'s
+ want string
+ }{
+ {`"os"`, "."},
+ {`"io"`, "_"},
+ {`"math"`, "math"},
+ {`"path/filepath"`, "filepath"},
+ {`"sort"`, "snort"},
+ }
+
+ f := mustParse(src)
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ Implicits: make(map[syntax.Node]Object),
+ }
+ var conf Config
+ conf.Importer = defaultImporter()
+ _, err := conf.Check("p", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // map import paths to importDecl
+ imports := make(map[string]*syntax.ImportDecl)
+ for _, d := range f.DeclList {
+ if imp, _ := d.(*syntax.ImportDecl); imp != nil {
+ imports[imp.Path.Value] = imp
+ }
+ }
+
+ for _, test := range tests {
+ imp := imports[test.path]
+ if imp == nil {
+ t.Fatalf("invalid test case: import path %s not found", test.path)
+ }
+ got := info.PkgNameOf(imp)
+ if got == nil {
+ t.Fatalf("import %s: package name not found", test.path)
+ }
+ if got.Name() != test.want {
+ t.Errorf("import %s: got %s; want %s", test.path, got.Name(), test.want)
+ }
+ }
+
+ // test non-existing importDecl
+ if got := info.PkgNameOf(new(syntax.ImportDecl)); got != nil {
+ t.Errorf("got %s for non-existing import declaration", got.Name())
+ }
+}
+
+func predString(tv TypeAndValue) string {
+ var buf strings.Builder
+ pred := func(b bool, s string) {
+ if b {
+ if buf.Len() > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(s)
+ }
+ }
+
+ pred(tv.IsVoid(), "void")
+ pred(tv.IsType(), "type")
+ pred(tv.IsBuiltin(), "builtin")
+ pred(tv.IsValue() && tv.Value != nil, "const")
+ pred(tv.IsValue() && tv.Value == nil, "value")
+ pred(tv.IsNil(), "nil")
+ pred(tv.Addressable(), "addressable")
+ pred(tv.Assignable(), "assignable")
+ pred(tv.HasOk(), "hasOk")
+
+ if buf.Len() == 0 {
+ return "invalid"
+ }
+ return buf.String()
+}
+
+func TestPredicatesInfo(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests = []struct {
+ src string
+ expr string
+ pred string
+ }{
+ // void
+ {`package n0; func f() { f() }`, `f()`, `void`},
+
+ // types
+ {`package t0; type _ int`, `int`, `type`},
+ {`package t1; type _ []int`, `[]int`, `type`},
+ {`package t2; type _ func()`, `func()`, `type`},
+ {`package t3; type _ func(int)`, `int`, `type`},
+ {`package t3; type _ func(...int)`, `...int`, `type`},
+
+ // built-ins
+ {`package b0; var _ = len("")`, `len`, `builtin`},
+ {`package b1; var _ = (len)("")`, `(len)`, `builtin`},
+
+ // constants
+ {`package c0; var _ = 42`, `42`, `const`},
+ {`package c1; var _ = "foo" + "bar"`, `"foo" + "bar"`, `const`},
+ {`package c2; const (i = 1i; _ = i)`, `i`, `const`},
+
+ // values
+ {`package v0; var (a, b int; _ = a + b)`, `a + b`, `value`},
+ {`package v1; var _ = &[]int{1}`, `[]int{…}`, `value`},
+ {`package v2; var _ = func(){}`, `func() {}`, `value`},
+ {`package v4; func f() { _ = f }`, `f`, `value`},
+ {`package v3; var _ *int = nil`, `nil`, `value, nil`},
+ {`package v3; var _ *int = (nil)`, `(nil)`, `value, nil`},
+
+ // addressable (and thus assignable) operands
+ {`package a0; var (x int; _ = x)`, `x`, `value, addressable, assignable`},
+ {`package a1; var (p *int; _ = *p)`, `*p`, `value, addressable, assignable`},
+ {`package a2; var (s []int; _ = s[0])`, `s[0]`, `value, addressable, assignable`},
+ {`package a3; var (s struct{f int}; _ = s.f)`, `s.f`, `value, addressable, assignable`},
+ {`package a4; var (a [10]int; _ = a[0])`, `a[0]`, `value, addressable, assignable`},
+ {`package a5; func _(x int) { _ = x }`, `x`, `value, addressable, assignable`},
+ {`package a6; func _()(x int) { _ = x; return }`, `x`, `value, addressable, assignable`},
+ {`package a7; type T int; func (x T) _() { _ = x }`, `x`, `value, addressable, assignable`},
+ // composite literals are not addressable
+
+ // assignable but not addressable values
+ {`package s0; var (m map[int]int; _ = m[0])`, `m[0]`, `value, assignable, hasOk`},
+ {`package s1; var (m map[int]int; _, _ = m[0])`, `m[0]`, `value, assignable, hasOk`},
+
+ // hasOk expressions
+ {`package k0; var (ch chan int; _ = <-ch)`, `<-ch`, `value, hasOk`},
+ {`package k1; var (ch chan int; _, _ = <-ch)`, `<-ch`, `value, hasOk`},
+
+ // missing entries
+ // - package names are collected in the Uses map
+ // - identifiers being declared are collected in the Defs map
+ {`package m0; import "os"; func _() { _ = os.Stdout }`, `os`, `<missing>`},
+ {`package m1; import p "os"; func _() { _ = p.Stdout }`, `p`, `<missing>`},
+ {`package m2; const c = 0`, `c`, `<missing>`},
+ {`package m3; type T int`, `T`, `<missing>`},
+ {`package m4; var v int`, `v`, `<missing>`},
+ {`package m5; func f() {}`, `f`, `<missing>`},
+ {`package m6; func _(x int) {}`, `x`, `<missing>`},
+ {`package m6; func _()(x int) { return }`, `x`, `<missing>`},
+ {`package m6; type T int; func (x T) _() {}`, `x`, `<missing>`},
+ }
+
+ for _, test := range tests {
+ info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // look for expression predicates
+ got := "<missing>"
+ for e, tv := range info.Types {
+ //println(name, syntax.String(e))
+ if syntax.String(e) == test.expr {
+ got = predString(tv)
+ break
+ }
+ }
+
+ if got != test.pred {
+ t.Errorf("package %s: got %s; want %s", name, got, test.pred)
+ }
+ }
+}
+
+func TestScopesInfo(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests = []struct {
+ src string
+ scopes []string // list of scope descriptors of the form kind:varlist
+ }{
+ {`package p0`, []string{
+ "file:",
+ }},
+ {`package p1; import ( "fmt"; m "math"; _ "os" ); var ( _ = fmt.Println; _ = m.Pi )`, []string{
+ "file:fmt m",
+ }},
+ {`package p2; func _() {}`, []string{
+ "file:", "func:",
+ }},
+ {`package p3; func _(x, y int) {}`, []string{
+ "file:", "func:x y",
+ }},
+ {`package p4; func _(x, y int) { x, z := 1, 2; _ = z }`, []string{
+ "file:", "func:x y z", // redeclaration of x
+ }},
+ {`package p5; func _(x, y int) (u, _ int) { return }`, []string{
+ "file:", "func:u x y",
+ }},
+ {`package p6; func _() { { var x int; _ = x } }`, []string{
+ "file:", "func:", "block:x",
+ }},
+ {`package p7; func _() { if true {} }`, []string{
+ "file:", "func:", "if:", "block:",
+ }},
+ {`package p8; func _() { if x := 0; x < 0 { y := x; _ = y } }`, []string{
+ "file:", "func:", "if:x", "block:y",
+ }},
+ {`package p9; func _() { switch x := 0; x {} }`, []string{
+ "file:", "func:", "switch:x",
+ }},
+ {`package p10; func _() { switch x := 0; x { case 1: y := x; _ = y; default: }}`, []string{
+ "file:", "func:", "switch:x", "case:y", "case:",
+ }},
+ {`package p11; func _(t interface{}) { switch t.(type) {} }`, []string{
+ "file:", "func:t", "switch:",
+ }},
+ {`package p12; func _(t interface{}) { switch t := t; t.(type) {} }`, []string{
+ "file:", "func:t", "switch:t",
+ }},
+ {`package p13; func _(t interface{}) { switch x := t.(type) { case int: _ = x } }`, []string{
+ "file:", "func:t", "switch:", "case:x", // x implicitly declared
+ }},
+ {`package p14; func _() { select{} }`, []string{
+ "file:", "func:",
+ }},
+ {`package p15; func _(c chan int) { select{ case <-c: } }`, []string{
+ "file:", "func:c", "comm:",
+ }},
+ {`package p16; func _(c chan int) { select{ case i := <-c: x := i; _ = x} }`, []string{
+ "file:", "func:c", "comm:i x",
+ }},
+ {`package p17; func _() { for{} }`, []string{
+ "file:", "func:", "for:", "block:",
+ }},
+ {`package p18; func _(n int) { for i := 0; i < n; i++ { _ = i } }`, []string{
+ "file:", "func:n", "for:i", "block:",
+ }},
+ {`package p19; func _(a []int) { for i := range a { _ = i} }`, []string{
+ "file:", "func:a", "for:i", "block:",
+ }},
+ {`package p20; var s int; func _(a []int) { for i, x := range a { s += x; _ = i } }`, []string{
+ "file:", "func:a", "for:i x", "block:",
+ }},
+ }
+
+ for _, test := range tests {
+ info := Info{Scopes: make(map[syntax.Node]*Scope)}
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // number of scopes must match
+ if len(info.Scopes) != len(test.scopes) {
+ t.Errorf("package %s: got %d scopes; want %d", name, len(info.Scopes), len(test.scopes))
+ }
+
+ // scope descriptions must match
+ for node, scope := range info.Scopes {
+ var kind string
+ switch node.(type) {
+ case *syntax.File:
+ kind = "file"
+ case *syntax.FuncType:
+ kind = "func"
+ case *syntax.BlockStmt:
+ kind = "block"
+ case *syntax.IfStmt:
+ kind = "if"
+ case *syntax.SwitchStmt:
+ kind = "switch"
+ case *syntax.SelectStmt:
+ kind = "select"
+ case *syntax.CaseClause:
+ kind = "case"
+ case *syntax.CommClause:
+ kind = "comm"
+ case *syntax.ForStmt:
+ kind = "for"
+ default:
+ kind = fmt.Sprintf("%T", node)
+ }
+
+ // look for matching scope description
+ desc := kind + ":" + strings.Join(scope.Names(), " ")
+ found := false
+ for _, d := range test.scopes {
+ if desc == d {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("package %s: no matching scope found for %s", name, desc)
+ }
+ }
+ }
+}
+
+func TestInitOrderInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ inits []string
+ }{
+ {`package p0; var (x = 1; y = x)`, []string{
+ "x = 1", "y = x",
+ }},
+ {`package p1; var (a = 1; b = 2; c = 3)`, []string{
+ "a = 1", "b = 2", "c = 3",
+ }},
+ {`package p2; var (a, b, c = 1, 2, 3)`, []string{
+ "a = 1", "b = 2", "c = 3",
+ }},
+ {`package p3; var _ = f(); func f() int { return 1 }`, []string{
+ "_ = f()", // blank var
+ }},
+ {`package p4; var (a = 0; x = y; y = z; z = 0)`, []string{
+ "a = 0", "z = 0", "y = z", "x = y",
+ }},
+ {`package p5; var (a, _ = m[0]; m map[int]string)`, []string{
+ "a, _ = m[0]", // blank var
+ }},
+ {`package p6; var a, b = f(); func f() (_, _ int) { return z, z }; var z = 0`, []string{
+ "z = 0", "a, b = f()",
+ }},
+ {`package p7; var (a = func() int { return b }(); b = 1)`, []string{
+ "b = 1", "a = func() int {…}()",
+ }},
+ {`package p8; var (a, b = func() (_, _ int) { return c, c }(); c = 1)`, []string{
+ "c = 1", "a, b = func() (_, _ int) {…}()",
+ }},
+ {`package p9; type T struct{}; func (T) m() int { _ = y; return 0 }; var x, y = T.m, 1`, []string{
+ "y = 1", "x = T.m",
+ }},
+ {`package p10; var (d = c + b; a = 0; b = 0; c = 0)`, []string{
+ "a = 0", "b = 0", "c = 0", "d = c + b",
+ }},
+ {`package p11; var (a = e + c; b = d + c; c = 0; d = 0; e = 0)`, []string{
+ "c = 0", "d = 0", "b = d + c", "e = 0", "a = e + c",
+ }},
+ // emit an initializer for n:1 initializations only once (not for each node
+ // on the lhs which may appear in different order in the dependency graph)
+ {`package p12; var (a = x; b = 0; x, y = m[0]; m map[int]int)`, []string{
+ "b = 0", "x, y = m[0]", "a = x",
+ }},
+ // test case from spec section on package initialization
+ {`package p12
+
+ var (
+ a = c + b
+ b = f()
+ c = f()
+ d = 3
+ )
+
+ func f() int {
+ d++
+ return d
+ }`, []string{
+ "d = 3", "b = f()", "c = f()", "a = c + b",
+ }},
+ // test case for go.dev/issue/7131
+ {`package main
+
+ var counter int
+ func next() int { counter++; return counter }
+
+ var _ = makeOrder()
+ func makeOrder() []int { return []int{f, b, d, e, c, a} }
+
+ var a = next()
+ var b, c = next(), next()
+ var d, e, f = next(), next(), next()
+ `, []string{
+ "a = next()", "b = next()", "c = next()", "d = next()", "e = next()", "f = next()", "_ = makeOrder()",
+ }},
+ // test case for go.dev/issue/10709
+ {`package p13
+
+ var (
+ v = t.m()
+ t = makeT(0)
+ )
+
+ type T struct{}
+
+ func (T) m() int { return 0 }
+
+ func makeT(n int) T {
+ if n > 0 {
+ return makeT(n-1)
+ }
+ return T{}
+ }`, []string{
+ "t = makeT(0)", "v = t.m()",
+ }},
+ // test case for go.dev/issue/10709: same as test before, but variable decls swapped
+ {`package p14
+
+ var (
+ t = makeT(0)
+ v = t.m()
+ )
+
+ type T struct{}
+
+ func (T) m() int { return 0 }
+
+ func makeT(n int) T {
+ if n > 0 {
+ return makeT(n-1)
+ }
+ return T{}
+ }`, []string{
+ "t = makeT(0)", "v = t.m()",
+ }},
+ // another candidate possibly causing problems with go.dev/issue/10709
+ {`package p15
+
+ var y1 = f1()
+
+ func f1() int { return g1() }
+ func g1() int { f1(); return x1 }
+
+ var x1 = 0
+
+ var y2 = f2()
+
+ func f2() int { return g2() }
+ func g2() int { return x2 }
+
+ var x2 = 0`, []string{
+ "x1 = 0", "y1 = f1()", "x2 = 0", "y2 = f2()",
+ }},
+ }
+
+ for _, test := range tests {
+ info := Info{}
+ name := mustTypecheck(test.src, nil, &info).Name()
+
+ // number of initializers must match
+ if len(info.InitOrder) != len(test.inits) {
+ t.Errorf("package %s: got %d initializers; want %d", name, len(info.InitOrder), len(test.inits))
+ continue
+ }
+
+ // initializers must match
+ for i, want := range test.inits {
+ got := info.InitOrder[i].String()
+ if got != want {
+ t.Errorf("package %s, init %d: got %s; want %s", name, i, got, want)
+ continue
+ }
+ }
+ }
+}
+
+func TestMultiFileInitOrder(t *testing.T) {
+ fileA := mustParse(`package main; var a = 1`)
+ fileB := mustParse(`package main; var b = 2`)
+
+ // The initialization order must not depend on the parse
+ // order of the files, only on the presentation order to
+ // the type-checker.
+ for _, test := range []struct {
+ files []*syntax.File
+ want string
+ }{
+ {[]*syntax.File{fileA, fileB}, "[a = 1 b = 2]"},
+ {[]*syntax.File{fileB, fileA}, "[b = 2 a = 1]"},
+ } {
+ var info Info
+ if _, err := new(Config).Check("main", test.files, &info); err != nil {
+ t.Fatal(err)
+ }
+ if got := fmt.Sprint(info.InitOrder); got != test.want {
+ t.Fatalf("got %s; want %s", got, test.want)
+ }
+ }
+}
+
+func TestFiles(t *testing.T) {
+ var sources = []string{
+ "package p; type T struct{}; func (T) m1() {}",
+ "package p; func (T) m2() {}; var x interface{ m1(); m2() } = T{}",
+ "package p; func (T) m3() {}; var y interface{ m1(); m2(); m3() } = T{}",
+ "package p",
+ }
+
+ var conf Config
+ pkg := NewPackage("p", "p")
+ var info Info
+ check := NewChecker(&conf, pkg, &info)
+
+ for _, src := range sources {
+ if err := check.Files([]*syntax.File{mustParse(src)}); err != nil {
+ t.Error(err)
+ }
+ }
+
+ // check InitOrder is [x y]
+ var vars []string
+ for _, init := range info.InitOrder {
+ for _, v := range init.Lhs {
+ vars = append(vars, v.Name())
+ }
+ }
+ if got, want := fmt.Sprint(vars), "[x y]"; got != want {
+ t.Errorf("InitOrder == %s, want %s", got, want)
+ }
+}
+
+type testImporter map[string]*Package
+
+func (m testImporter) Import(path string) (*Package, error) {
+ if pkg := m[path]; pkg != nil {
+ return pkg, nil
+ }
+ return nil, fmt.Errorf("package %q not found", path)
+}
+
+func TestSelection(t *testing.T) {
+ selections := make(map[*syntax.SelectorExpr]*Selection)
+
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ makePkg := func(path, src string) {
+ pkg := mustTypecheck(src, &conf, &Info{Selections: selections})
+ imports[path] = pkg
+ }
+
+ const libSrc = `
+package lib
+type T float64
+const C T = 3
+var V T
+func F() {}
+func (T) M() {}
+`
+ const mainSrc = `
+package main
+import "lib"
+
+type A struct {
+ *B
+ C
+}
+
+type B struct {
+ b int
+}
+
+func (B) f(int)
+
+type C struct {
+ c int
+}
+
+type G[P any] struct {
+ p P
+}
+
+func (G[P]) m(P) {}
+
+var Inst G[int]
+
+func (C) g()
+func (*C) h()
+
+func main() {
+ // qualified identifiers
+ var _ lib.T
+ _ = lib.C
+ _ = lib.F
+ _ = lib.V
+ _ = lib.T.M
+
+ // fields
+ _ = A{}.B
+ _ = new(A).B
+
+ _ = A{}.C
+ _ = new(A).C
+
+ _ = A{}.b
+ _ = new(A).b
+
+ _ = A{}.c
+ _ = new(A).c
+
+ _ = Inst.p
+ _ = G[string]{}.p
+
+ // methods
+ _ = A{}.f
+ _ = new(A).f
+ _ = A{}.g
+ _ = new(A).g
+ _ = new(A).h
+
+ _ = B{}.f
+ _ = new(B).f
+
+ _ = C{}.g
+ _ = new(C).g
+ _ = new(C).h
+ _ = Inst.m
+
+ // method expressions
+ _ = A.f
+ _ = (*A).f
+ _ = B.f
+ _ = (*B).f
+ _ = G[string].m
+}`
+
+ wantOut := map[string][2]string{
+ "lib.T.M": {"method expr (lib.T) M(lib.T)", ".[0]"},
+
+ "A{}.B": {"field (main.A) B *main.B", ".[0]"},
+ "new(A).B": {"field (*main.A) B *main.B", "->[0]"},
+ "A{}.C": {"field (main.A) C main.C", ".[1]"},
+ "new(A).C": {"field (*main.A) C main.C", "->[1]"},
+ "A{}.b": {"field (main.A) b int", "->[0 0]"},
+ "new(A).b": {"field (*main.A) b int", "->[0 0]"},
+ "A{}.c": {"field (main.A) c int", ".[1 0]"},
+ "new(A).c": {"field (*main.A) c int", "->[1 0]"},
+ "Inst.p": {"field (main.G[int]) p int", ".[0]"},
+
+ "A{}.f": {"method (main.A) f(int)", "->[0 0]"},
+ "new(A).f": {"method (*main.A) f(int)", "->[0 0]"},
+ "A{}.g": {"method (main.A) g()", ".[1 0]"},
+ "new(A).g": {"method (*main.A) g()", "->[1 0]"},
+ "new(A).h": {"method (*main.A) h()", "->[1 1]"}, // TODO(gri) should this report .[1 1] ?
+ "B{}.f": {"method (main.B) f(int)", ".[0]"},
+ "new(B).f": {"method (*main.B) f(int)", "->[0]"},
+ "C{}.g": {"method (main.C) g()", ".[0]"},
+ "new(C).g": {"method (*main.C) g()", "->[0]"},
+ "new(C).h": {"method (*main.C) h()", "->[1]"}, // TODO(gri) should this report .[1] ?
+ "Inst.m": {"method (main.G[int]) m(int)", ".[0]"},
+
+ "A.f": {"method expr (main.A) f(main.A, int)", "->[0 0]"},
+ "(*A).f": {"method expr (*main.A) f(*main.A, int)", "->[0 0]"},
+ "B.f": {"method expr (main.B) f(main.B, int)", ".[0]"},
+ "(*B).f": {"method expr (*main.B) f(*main.B, int)", "->[0]"},
+ "G[string].m": {"method expr (main.G[string]) m(main.G[string], string)", ".[0]"},
+ "G[string]{}.p": {"field (main.G[string]) p string", ".[0]"},
+ }
+
+ makePkg("lib", libSrc)
+ makePkg("main", mainSrc)
+
+ for e, sel := range selections {
+ _ = sel.String() // assertion: must not panic
+
+ start := indexFor(mainSrc, syntax.StartPos(e))
+ end := indexFor(mainSrc, syntax.EndPos(e))
+ segment := mainSrc[start:end] // (all SelectorExprs are in main, not lib)
+
+ direct := "."
+ if sel.Indirect() {
+ direct = "->"
+ }
+ got := [2]string{
+ sel.String(),
+ fmt.Sprintf("%s%v", direct, sel.Index()),
+ }
+ want := wantOut[segment]
+ if want != got {
+ t.Errorf("%s: got %q; want %q", segment, got, want)
+ }
+ delete(wantOut, segment)
+
+ // We must explicitly assert properties of the
+ // Signature's receiver since it doesn't participate
+ // in Identical() or String().
+ sig, _ := sel.Type().(*Signature)
+ if sel.Kind() == MethodVal {
+ got := sig.Recv().Type()
+ want := sel.Recv()
+ if !Identical(got, want) {
+ t.Errorf("%s: Recv() = %s, want %s", segment, got, want)
+ }
+ } else if sig != nil && sig.Recv() != nil {
+ t.Errorf("%s: signature has receiver %s", sig, sig.Recv().Type())
+ }
+ }
+ // Assert that all wantOut entries were used exactly once.
+ for segment := range wantOut {
+ t.Errorf("no syntax.Selection found with syntax %q", segment)
+ }
+}
+
+// indexFor returns the index into s corresponding to the position pos.
+func indexFor(s string, pos syntax.Pos) int {
+ i, line := 0, 1 // string index and corresponding line
+ target := int(pos.Line())
+ for line < target && i < len(s) {
+ if s[i] == '\n' {
+ line++
+ }
+ i++
+ }
+ return i + int(pos.Col()-1) // columns are 1-based
+}
+
+func TestIssue8518(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{
+ Error: func(err error) { t.Log(err) }, // don't exit after first error
+ Importer: imports,
+ }
+ makePkg := func(path, src string) {
+ imports[path], _ = conf.Check(path, []*syntax.File{mustParse(src)}, nil) // errors logged via conf.Error
+ }
+
+ const libSrc = `
+package a
+import "missing"
+const C1 = foo
+const C2 = missing.C
+`
+
+ const mainSrc = `
+package main
+import "a"
+var _ = a.C1
+var _ = a.C2
+`
+
+ makePkg("a", libSrc)
+ makePkg("main", mainSrc) // don't crash when type-checking this package
+}
+
+func TestIssue59603(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{
+ Error: func(err error) { t.Log(err) }, // don't exit after first error
+ Importer: imports,
+ }
+ makePkg := func(path, src string) {
+ imports[path], _ = conf.Check(path, []*syntax.File{mustParse(src)}, nil) // errors logged via conf.Error
+ }
+
+ const libSrc = `
+package a
+const C = foo
+`
+
+ const mainSrc = `
+package main
+import "a"
+const _ = a.C
+`
+
+ makePkg("a", libSrc)
+ makePkg("main", mainSrc) // don't crash when type-checking this package
+}
+
+func TestLookupFieldOrMethodOnNil(t *testing.T) {
+ // LookupFieldOrMethod on a nil type is expected to produce a run-time panic.
+ defer func() {
+ const want = "LookupFieldOrMethod on nil type"
+ p := recover()
+ if s, ok := p.(string); !ok || s != want {
+ t.Fatalf("got %v, want %s", p, want)
+ }
+ }()
+ LookupFieldOrMethod(nil, false, nil, "")
+}
+
+func TestLookupFieldOrMethod(t *testing.T) {
+ // Test cases assume a lookup of the form a.f or x.f, where a stands for an
+ // addressable value, and x for a non-addressable value (even though a variable
+ // for ease of test case writing).
+ var tests = []struct {
+ src string
+ found bool
+ index []int
+ indirect bool
+ }{
+ // field lookups
+ {"var x T; type T struct{}", false, nil, false},
+ {"var x T; type T struct{ f int }", true, []int{0}, false},
+ {"var x T; type T struct{ a, b, f, c int }", true, []int{2}, false},
+
+ // field lookups on a generic type
+ {"var x T[int]; type T[P any] struct{}", false, nil, false},
+ {"var x T[int]; type T[P any] struct{ f P }", true, []int{0}, false},
+ {"var x T[int]; type T[P any] struct{ a, b, f, c P }", true, []int{2}, false},
+
+ // method lookups
+ {"var a T; type T struct{}; func (T) f() {}", true, []int{0}, false},
+ {"var a *T; type T struct{}; func (T) f() {}", true, []int{0}, true},
+ {"var a T; type T struct{}; func (*T) f() {}", true, []int{0}, false},
+ {"var a *T; type T struct{}; func (*T) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false?
+
+ // method lookups on a generic type
+ {"var a T[int]; type T[P any] struct{}; func (T[P]) f() {}", true, []int{0}, false},
+ {"var a *T[int]; type T[P any] struct{}; func (T[P]) f() {}", true, []int{0}, true},
+ {"var a T[int]; type T[P any] struct{}; func (*T[P]) f() {}", true, []int{0}, false},
+ {"var a *T[int]; type T[P any] struct{}; func (*T[P]) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false?
+
+ // collisions
+ {"type ( E1 struct{ f int }; E2 struct{ f int }; x struct{ E1; *E2 })", false, []int{1, 0}, false},
+ {"type ( E1 struct{ f int }; E2 struct{}; x struct{ E1; *E2 }); func (E2) f() {}", false, []int{1, 0}, false},
+
+ // collisions on a generic type
+ {"type ( E1[P any] struct{ f P }; E2[P any] struct{ f P }; x struct{ E1[int]; *E2[int] })", false, []int{1, 0}, false},
+ {"type ( E1[P any] struct{ f P }; E2[P any] struct{}; x struct{ E1[int]; *E2[int] }); func (E2[P]) f() {}", false, []int{1, 0}, false},
+
+ // outside methodset
+ // (*T).f method exists, but value of type T is not addressable
+ {"var x T; type T struct{}; func (*T) f() {}", false, nil, true},
+
+ // outside method set of a generic type
+ {"var x T[int]; type T[P any] struct{}; func (*T[P]) f() {}", false, nil, true},
+
+ // recursive generic types; see go.dev/issue/52715
+ {"var a T[int]; type ( T[P any] struct { *N[P] }; N[P any] struct { *T[P] } ); func (N[P]) f() {}", true, []int{0, 0}, true},
+ {"var a T[int]; type ( T[P any] struct { *N[P] }; N[P any] struct { *T[P] } ); func (T[P]) f() {}", true, []int{0}, false},
+ }
+
+ for _, test := range tests {
+ pkg := mustTypecheck("package p;"+test.src, nil, nil)
+
+ obj := pkg.Scope().Lookup("a")
+ if obj == nil {
+ if obj = pkg.Scope().Lookup("x"); obj == nil {
+ t.Errorf("%s: incorrect test case - no object a or x", test.src)
+ continue
+ }
+ }
+
+ f, index, indirect := LookupFieldOrMethod(obj.Type(), obj.Name() == "a", pkg, "f")
+ if (f != nil) != test.found {
+ if f == nil {
+ t.Errorf("%s: got no object; want one", test.src)
+ } else {
+ t.Errorf("%s: got object = %v; want none", test.src, f)
+ }
+ }
+ if !sameSlice(index, test.index) {
+ t.Errorf("%s: got index = %v; want %v", test.src, index, test.index)
+ }
+ if indirect != test.indirect {
+ t.Errorf("%s: got indirect = %v; want %v", test.src, indirect, test.indirect)
+ }
+ }
+}
+
+// Test for go.dev/issue/52715
+func TestLookupFieldOrMethod_RecursiveGeneric(t *testing.T) {
+ const src = `
+package pkg
+
+type Tree[T any] struct {
+ *Node[T]
+}
+
+func (*Tree[R]) N(r R) R { return r }
+
+type Node[T any] struct {
+ *Tree[T]
+}
+
+type Instance = *Tree[int]
+`
+
+ f := mustParse(src)
+ pkg := NewPackage("pkg", f.PkgName.Value)
+ if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil {
+ panic(err)
+ }
+
+ T := pkg.Scope().Lookup("Instance").Type()
+ _, _, _ = LookupFieldOrMethod(T, false, pkg, "M") // verify that LookupFieldOrMethod terminates
+}
+
+func sameSlice(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, x := range a {
+ if x != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// TestScopeLookupParent ensures that (*Scope).LookupParent returns
+// the correct result at various positions within the source.
+func TestScopeLookupParent(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ var info Info
+ makePkg := func(path, src string) {
+ var err error
+ imports[path], err = conf.Check(path, []*syntax.File{mustParse(src)}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ makePkg("lib", "package lib; var X int")
+ // Each /*name=kind:line*/ comment makes the test look up the
+ // name at that point and checks that it resolves to a decl of
+ // the specified kind and line number. "undef" means undefined.
+ mainSrc := `
+/*lib=pkgname:5*/ /*X=var:1*/ /*Pi=const:8*/ /*T=typename:9*/ /*Y=var:10*/ /*F=func:12*/
+package main
+
+import "lib"
+import . "lib"
+
+const Pi = 3.1415
+type T struct{}
+var Y, _ = lib.X, X
+
+func F[T *U, U any](param1, param2 int) /*param1=undef*/ (res1 /*res1=undef*/, res2 int) /*param1=var:12*/ /*res1=var:12*/ /*U=typename:12*/ {
+ const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/
+ type /*t=undef*/ t /*t=typename:14*/ *t
+ print(Y) /*Y=var:10*/
+ x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y
+ var F = /*F=func:12*/ F[*int, int] /*F=var:17*/ ; _ = F
+
+ var a []int
+ for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
+
+ var i interface{}
+ switch y := i.(type) { /*y=undef*/
+ case /*y=undef*/ int /*y=var:23*/ :
+ case float32, /*y=undef*/ float64 /*y=var:23*/ :
+ default /*y=var:23*/:
+ println(y)
+ }
+ /*y=undef*/
+
+ switch int := i.(type) {
+ case /*int=typename:0*/ int /*int=var:31*/ :
+ println(int)
+ default /*int=var:31*/ :
+ }
+
+ _ = param1
+ _ = res1
+ return
+}
+/*main=undef*/
+`
+
+ info.Uses = make(map[*syntax.Name]Object)
+ makePkg("main", mainSrc)
+ mainScope := imports["main"].Scope()
+
+ rx := regexp.MustCompile(`^/\*(\w*)=([\w:]*)\*/$`)
+
+ base := syntax.NewFileBase("main")
+ syntax.CommentsDo(strings.NewReader(mainSrc), func(line, col uint, text string) {
+ pos := syntax.MakePos(base, line, col)
+
+ // Syntax errors are not comments.
+ if text[0] != '/' {
+ t.Errorf("%s: %s", pos, text)
+ return
+ }
+
+ // Parse the assertion in the comment.
+ m := rx.FindStringSubmatch(text)
+ if m == nil {
+ t.Errorf("%s: bad comment: %s", pos, text)
+ return
+ }
+ name, want := m[1], m[2]
+
+ // Look up the name in the innermost enclosing scope.
+ inner := mainScope.Innermost(pos)
+ if inner == nil {
+ t.Errorf("%s: at %s: can't find innermost scope", pos, text)
+ return
+ }
+ got := "undef"
+ if _, obj := inner.LookupParent(name, pos); obj != nil {
+ kind := strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types2."))
+ got = fmt.Sprintf("%s:%d", kind, obj.Pos().Line())
+ }
+ if got != want {
+ t.Errorf("%s: at %s: %s resolved to %s, want %s", pos, text, name, got, want)
+ }
+ })
+
+ // Check that for each referring identifier,
+ // a lookup of its name on the innermost
+ // enclosing scope returns the correct object.
+
+ for id, wantObj := range info.Uses {
+ inner := mainScope.Innermost(id.Pos())
+ if inner == nil {
+ t.Errorf("%s: can't find innermost scope enclosing %q", id.Pos(), id.Value)
+ continue
+ }
+
+ // Exclude selectors and qualified identifiers---lexical
+ // refs only. (Ideally, we'd see if the AST parent is a
+ // SelectorExpr, but that requires PathEnclosingInterval
+ // from golang.org/x/tools/go/ast/astutil.)
+ if id.Value == "X" {
+ continue
+ }
+
+ _, gotObj := inner.LookupParent(id.Value, id.Pos())
+ if gotObj != wantObj {
+ // Print the scope tree of mainScope in case of error.
+ var printScopeTree func(indent string, s *Scope)
+ printScopeTree = func(indent string, s *Scope) {
+ t.Logf("%sscope %s %v-%v = %v",
+ indent,
+ ScopeComment(s),
+ s.Pos(),
+ s.End(),
+ s.Names())
+ for i := range s.NumChildren() {
+ printScopeTree(indent+" ", s.Child(i))
+ }
+ }
+ printScopeTree("", mainScope)
+
+ t.Errorf("%s: Scope(%s).LookupParent(%s@%v) got %v, want %v [scopePos=%v]",
+ id.Pos(),
+ ScopeComment(inner),
+ id.Value,
+ id.Pos(),
+ gotObj,
+ wantObj,
+ ObjectScopePos(wantObj))
+ continue
+ }
+ }
+}
+
+// newDefined creates a new defined type named T with the given underlying type.
+func newDefined(underlying Type) *Named {
+ tname := NewTypeName(nopos, nil, "T", nil)
+ return NewNamed(tname, underlying, nil)
+}
+
+func TestConvertibleTo(t *testing.T) {
+ for _, test := range []struct {
+ v, t Type
+ want bool
+ }{
+ {Typ[Int], Typ[Int], true},
+ {Typ[Int], Typ[Float32], true},
+ {Typ[Int], Typ[String], true},
+ {newDefined(Typ[Int]), Typ[Int], true},
+ {newDefined(new(Struct)), new(Struct), true},
+ {newDefined(Typ[Int]), new(Struct), false},
+ {Typ[UntypedInt], Typ[Int], true},
+ {NewSlice(Typ[Int]), NewArray(Typ[Int], 10), true},
+ {NewSlice(Typ[Int]), NewArray(Typ[Uint], 10), false},
+ {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Int], 10)), true},
+ {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Uint], 10)), false},
+ // Untyped string values are not permitted by the spec, so the behavior below is undefined.
+ {Typ[UntypedString], Typ[String], true},
+ } {
+ if got := ConvertibleTo(test.v, test.t); got != test.want {
+ t.Errorf("ConvertibleTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want)
+ }
+ }
+}
+
+func TestAssignableTo(t *testing.T) {
+ for _, test := range []struct {
+ v, t Type
+ want bool
+ }{
+ {Typ[Int], Typ[Int], true},
+ {Typ[Int], Typ[Float32], false},
+ {newDefined(Typ[Int]), Typ[Int], false},
+ {newDefined(new(Struct)), new(Struct), true},
+ {Typ[UntypedBool], Typ[Bool], true},
+ {Typ[UntypedString], Typ[Bool], false},
+ // Neither untyped string nor untyped numeric assignments arise during
+ // normal type checking, so the below behavior is technically undefined by
+ // the spec.
+ {Typ[UntypedString], Typ[String], true},
+ {Typ[UntypedInt], Typ[Int], true},
+ } {
+ if got := AssignableTo(test.v, test.t); got != test.want {
+ t.Errorf("AssignableTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want)
+ }
+ }
+}
+
+func TestIdentical(t *testing.T) {
+ // For each test, we compare the types of objects X and Y in the source.
+ tests := []struct {
+ src string
+ want bool
+ }{
+ // Basic types.
+ {"var X int; var Y int", true},
+ {"var X int; var Y string", false},
+
+ // TODO: add more tests for complex types.
+
+ // Named types.
+ {"type X int; type Y int", false},
+
+ // Aliases.
+ {"type X = int; type Y = int", true},
+
+ // Functions.
+ {`func X(int) string { return "" }; func Y(int) string { return "" }`, true},
+ {`func X() string { return "" }; func Y(int) string { return "" }`, false},
+ {`func X(int) string { return "" }; func Y(int) {}`, false},
+
+ // Generic functions. Type parameters should be considered identical modulo
+ // renaming. See also go.dev/issue/49722.
+ {`func X[P ~int](){}; func Y[Q ~int]() {}`, true},
+ {`func X[P1 any, P2 ~*P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, true},
+ {`func X[P1 any, P2 ~[]P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, false},
+ {`func X[P ~int](P){}; func Y[Q ~int](Q) {}`, true},
+ {`func X[P ~string](P){}; func Y[Q ~int](Q) {}`, false},
+ {`func X[P ~int]([]P){}; func Y[Q ~int]([]Q) {}`, true},
+ }
+
+ for _, test := range tests {
+ pkg := mustTypecheck("package p;"+test.src, nil, nil)
+ X := pkg.Scope().Lookup("X")
+ Y := pkg.Scope().Lookup("Y")
+ if X == nil || Y == nil {
+ t.Fatal("test must declare both X and Y")
+ }
+ if got := Identical(X.Type(), Y.Type()); got != test.want {
+ t.Errorf("Identical(%s, %s) = %t, want %t", X.Type(), Y.Type(), got, test.want)
+ }
+ }
+}
+
+func TestIdentical_issue15173(t *testing.T) {
+ // Identical should allow nil arguments and be symmetric.
+ for _, test := range []struct {
+ x, y Type
+ want bool
+ }{
+ {Typ[Int], Typ[Int], true},
+ {Typ[Int], nil, false},
+ {nil, Typ[Int], false},
+ {nil, nil, true},
+ } {
+ if got := Identical(test.x, test.y); got != test.want {
+ t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got)
+ }
+ }
+}
+
+func TestIdenticalUnions(t *testing.T) {
+ tname := NewTypeName(nopos, nil, "myInt", nil)
+ myInt := NewNamed(tname, Typ[Int], nil)
+ tmap := map[string]*Term{
+ "int": NewTerm(false, Typ[Int]),
+ "~int": NewTerm(true, Typ[Int]),
+ "string": NewTerm(false, Typ[String]),
+ "~string": NewTerm(true, Typ[String]),
+ "myInt": NewTerm(false, myInt),
+ }
+ makeUnion := func(s string) *Union {
+ parts := strings.Split(s, "|")
+ var terms []*Term
+ for _, p := range parts {
+ term := tmap[p]
+ if term == nil {
+ t.Fatalf("missing term %q", p)
+ }
+ terms = append(terms, term)
+ }
+ return NewUnion(terms)
+ }
+ for _, test := range []struct {
+ x, y string
+ want bool
+ }{
+ // These tests are just sanity checks. The tests for type sets and
+ // interfaces provide much more test coverage.
+ {"int|~int", "~int", true},
+ {"myInt|~int", "~int", true},
+ {"int|string", "string|int", true},
+ {"int|int|string", "string|int", true},
+ {"myInt|string", "int|string", false},
+ } {
+ x := makeUnion(test.x)
+ y := makeUnion(test.y)
+ if got := Identical(x, y); got != test.want {
+ t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got)
+ }
+ }
+}
+
+func TestIssue61737(t *testing.T) {
+ // This test verifies that it is possible to construct invalid interfaces
+ // containing duplicate methods using the go/types API.
+ //
+ // It must be possible for importers to construct such invalid interfaces.
+ // Previously, this panicked.
+
+ sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false)
+ sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false)
+
+ methods := []*Func{
+ NewFunc(nopos, nil, "M", sig1),
+ NewFunc(nopos, nil, "M", sig2),
+ }
+
+ embeddedMethods := []*Func{
+ NewFunc(nopos, nil, "M", sig2),
+ }
+ embedded := NewInterfaceType(embeddedMethods, nil)
+ iface := NewInterfaceType(methods, []Type{embedded})
+ iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly
+}
+
+func TestNewAlias_Issue65455(t *testing.T) {
+ obj := NewTypeName(nopos, nil, "A", nil)
+ alias := NewAlias(obj, Typ[Int])
+ alias.Underlying() // must not panic
+}
+
+func TestIssue15305(t *testing.T) {
+ const src = "package p; func f() int16; var _ = f(undef)"
+ f := mustParse(src)
+ conf := Config{
+ Error: func(err error) {}, // allow errors
+ }
+ info := &Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ }
+ conf.Check("p", []*syntax.File{f}, info) // ignore result
+ for e, tv := range info.Types {
+ if _, ok := e.(*syntax.CallExpr); ok {
+ if tv.Type != Typ[Int16] {
+ t.Errorf("CallExpr has type %v, want int16", tv.Type)
+ }
+ return
+ }
+ }
+ t.Errorf("CallExpr has no type")
+}
+
+// TestCompositeLitTypes verifies that Info.Types registers the correct
+// types for composite literal expressions and composite literal type
+// expressions.
+func TestCompositeLitTypes(t *testing.T) {
+ for i, test := range []struct {
+ lit, typ string
+ }{
+ {`[16]byte{}`, `[16]byte`},
+ {`[...]byte{}`, `[0]byte`}, // test for go.dev/issue/14092
+ {`[...]int{1, 2, 3}`, `[3]int`}, // test for go.dev/issue/14092
+ {`[...]int{90: 0, 98: 1, 2}`, `[100]int`}, // test for go.dev/issue/14092
+ {`[]int{}`, `[]int`},
+ {`map[string]bool{"foo": true}`, `map[string]bool`},
+ {`struct{}{}`, `struct{}`},
+ {`struct{x, y int; z complex128}{}`, `struct{x int; y int; z complex128}`},
+ } {
+ f := mustParse(fmt.Sprintf("package p%d; var _ = %s", i, test.lit))
+ types := make(map[syntax.Expr]TypeAndValue)
+ if _, err := new(Config).Check("p", []*syntax.File{f}, &Info{Types: types}); err != nil {
+ t.Fatalf("%s: %v", test.lit, err)
+ }
+
+ cmptype := func(x syntax.Expr, want string) {
+ tv, ok := types[x]
+ if !ok {
+ t.Errorf("%s: no Types entry found", test.lit)
+ return
+ }
+ if tv.Type == nil {
+ t.Errorf("%s: type is nil", test.lit)
+ return
+ }
+ if got := tv.Type.String(); got != want {
+ t.Errorf("%s: got %v, want %s", test.lit, got, want)
+ }
+ }
+
+ // test type of composite literal expression
+ rhs := f.DeclList[0].(*syntax.VarDecl).Values
+ cmptype(rhs, test.typ)
+
+ // test type of composite literal type expression
+ cmptype(rhs.(*syntax.CompositeLit).Type, test.typ)
+ }
+}
+
+// TestObjectParents verifies that objects have parent scopes or not
+// as specified by the Object interface.
+func TestObjectParents(t *testing.T) {
+ const src = `
+package p
+
+const C = 0
+
+type T1 struct {
+ a, b int
+ T2
+}
+
+type T2 interface {
+ im1()
+ im2()
+}
+
+func (T1) m1() {}
+func (*T1) m2() {}
+
+func f(x int) { y := x; print(y) }
+`
+
+ f := mustParse(src)
+
+ info := &Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ if _, err := new(Config).Check("p", []*syntax.File{f}, info); err != nil {
+ t.Fatal(err)
+ }
+
+ for ident, obj := range info.Defs {
+ if obj == nil {
+ // only package names and implicit vars have a nil object
+ // (in this test we only need to handle the package name)
+ if ident.Value != "p" {
+ t.Errorf("%v has nil object", ident)
+ }
+ continue
+ }
+
+ // struct fields, type-associated and interface methods
+ // have no parent scope
+ wantParent := true
+ switch obj := obj.(type) {
+ case *Var:
+ if obj.IsField() {
+ wantParent = false
+ }
+ case *Func:
+ if obj.Type().(*Signature).Recv() != nil { // method
+ wantParent = false
+ }
+ }
+
+ gotParent := obj.Parent() != nil
+ switch {
+ case gotParent && !wantParent:
+ t.Errorf("%v: want no parent, got %s", ident, obj.Parent())
+ case !gotParent && wantParent:
+ t.Errorf("%v: no parent found", ident)
+ }
+ }
+}
+
+// TestFailedImport tests that we don't get follow-on errors
+// elsewhere in a package due to failing to import a package.
+func TestFailedImport(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ const src = `
+package p
+
+import foo "go/types/thisdirectorymustnotexistotherwisethistestmayfail/foo" // should only see an error here
+
+const c = foo.C
+type T = foo.T
+var v T = c
+func f(x T) T { return foo.F(x) }
+`
+ f := mustParse(src)
+ files := []*syntax.File{f}
+
+ // type-check using all possible importers
+ for _, compiler := range []string{"gc", "gccgo", "source"} {
+ errcount := 0
+ conf := Config{
+ Error: func(err error) {
+ // we should only see the import error
+ if errcount > 0 || !strings.Contains(err.Error(), "could not import") {
+ t.Errorf("for %s importer, got unexpected error: %v", compiler, err)
+ }
+ errcount++
+ },
+ //Importer: importer.For(compiler, nil),
+ }
+
+ info := &Info{
+ Uses: make(map[*syntax.Name]Object),
+ }
+ pkg, _ := conf.Check("p", files, info)
+ if pkg == nil {
+ t.Errorf("for %s importer, type-checking failed to return a package", compiler)
+ continue
+ }
+
+ imports := pkg.Imports()
+ if len(imports) != 1 {
+ t.Errorf("for %s importer, got %d imports, want 1", compiler, len(imports))
+ continue
+ }
+ imp := imports[0]
+ if imp.Name() != "foo" {
+ t.Errorf(`for %s importer, got %q, want "foo"`, compiler, imp.Name())
+ continue
+ }
+
+ // verify that all uses of foo refer to the imported package foo (imp)
+ for ident, obj := range info.Uses {
+ if ident.Value == "foo" {
+ if obj, ok := obj.(*PkgName); ok {
+ if obj.Imported() != imp {
+ t.Errorf("%s resolved to %v; want %v", ident.Value, obj.Imported(), imp)
+ }
+ } else {
+ t.Errorf("%s resolved to %v; want package name", ident.Value, obj)
+ }
+ }
+ }
+ }
+}
+
+func TestInstantiate(t *testing.T) {
+ // eventually we like more tests but this is a start
+ const src = "package p; type T[P any] *T[P]"
+ pkg := mustTypecheck(src, nil, nil)
+
+ // type T should have one type parameter
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+ if n := T.TypeParams().Len(); n != 1 {
+ t.Fatalf("expected 1 type parameter; found %d", n)
+ }
+
+ // instantiation should succeed (no endless recursion)
+ // even with a nil *Checker
+ res, err := Instantiate(nil, T, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // instantiated type should point to itself
+ if p := res.Underlying().(*Pointer).Elem(); p != res {
+ t.Fatalf("unexpected result type: %s points to %s", res, p)
+ }
+}
+
+func TestInstantiateConcurrent(t *testing.T) {
+ const src = `package p
+
+type I[P any] interface {
+ m(P)
+ n() P
+}
+
+type J = I[int]
+
+type Nested[P any] *interface{b(P)}
+
+type K = Nested[string]
+`
+ pkg := mustTypecheck(src, nil, nil)
+
+ insts := []*Interface{
+ pkg.Scope().Lookup("J").Type().Underlying().(*Interface),
+ pkg.Scope().Lookup("K").Type().Underlying().(*Pointer).Elem().(*Interface),
+ }
+
+ // Use the interface instances concurrently.
+ for _, inst := range insts {
+ var (
+ counts [2]int // method counts
+ methods [2][]string // method strings
+ )
+ var wg sync.WaitGroup
+ for i := 0; i < 2; i++ {
+ i := i
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ counts[i] = inst.NumMethods()
+ for mi := 0; mi < counts[i]; mi++ {
+ methods[i] = append(methods[i], inst.Method(mi).String())
+ }
+ }()
+ }
+ wg.Wait()
+
+ if counts[0] != counts[1] {
+ t.Errorf("mismatching method counts for %s: %d vs %d", inst, counts[0], counts[1])
+ continue
+ }
+ for i := 0; i < counts[0]; i++ {
+ if m0, m1 := methods[0][i], methods[1][i]; m0 != m1 {
+ t.Errorf("mismatching methods for %s: %s vs %s", inst, m0, m1)
+ }
+ }
+ }
+}
+
+func TestInstantiateErrors(t *testing.T) {
+ tests := []struct {
+ src string // by convention, T must be the type being instantiated
+ targs []Type
+ wantAt int // -1 indicates no error
+ }{
+ {"type T[P interface{~string}] int", []Type{Typ[Int]}, 0},
+ {"type T[P1 interface{int}, P2 interface{~string}] int", []Type{Typ[Int], Typ[Int]}, 1},
+ {"type T[P1 any, P2 interface{~[]P1}] int", []Type{Typ[Int], NewSlice(Typ[String])}, 1},
+ {"type T[P1 interface{~[]P2}, P2 any] int", []Type{NewSlice(Typ[String]), Typ[Int]}, 0},
+ }
+
+ for _, test := range tests {
+ src := "package p; " + test.src
+ pkg := mustTypecheck(src, nil, nil)
+
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+
+ _, err := Instantiate(nil, T, test.targs, true)
+ if err == nil {
+ t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs)
+ }
+
+ var argErr *ArgumentError
+ if !errors.As(err, &argErr) {
+ t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs)
+ }
+
+ if argErr.Index != test.wantAt {
+ t.Errorf("Instantiate(%v, %v): error at index %d, want index %d", T, test.targs, argErr.Index, test.wantAt)
+ }
+ }
+}
+
+func TestArgumentErrorUnwrapping(t *testing.T) {
+ var err error = &ArgumentError{
+ Index: 1,
+ Err: Error{Msg: "test"},
+ }
+ var e Error
+ if !errors.As(err, &e) {
+ t.Fatalf("error %v does not wrap types.Error", err)
+ }
+ if e.Msg != "test" {
+ t.Errorf("e.Msg = %q, want %q", e.Msg, "test")
+ }
+}
+
+func TestInstanceIdentity(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ makePkg := func(src string) {
+ f := mustParse(src)
+ name := f.PkgName.Value
+ pkg, err := conf.Check(name, []*syntax.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ imports[name] = pkg
+ }
+ makePkg(`package lib; type T[P any] struct{}`)
+ makePkg(`package a; import "lib"; var A lib.T[int]`)
+ makePkg(`package b; import "lib"; var B lib.T[int]`)
+ a := imports["a"].Scope().Lookup("A")
+ b := imports["b"].Scope().Lookup("B")
+ if !Identical(a.Type(), b.Type()) {
+ t.Errorf("mismatching types: a.A: %s, b.B: %s", a.Type(), b.Type())
+ }
+}
+
+// TestInstantiatedObjects verifies properties of instantiated objects.
+func TestInstantiatedObjects(t *testing.T) {
+ const src = `
+package p
+
+type T[P any] struct {
+ field P
+}
+
+func (recv *T[Q]) concreteMethod(mParam Q) (mResult Q) { return }
+
+type FT[P any] func(ftParam P) (ftResult P)
+
+func F[P any](fParam P) (fResult P){ return }
+
+type I[P any] interface {
+ interfaceMethod(P)
+}
+
+type R[P any] T[P]
+
+func (R[P]) m() {} // having a method triggers expansion of R
+
+var (
+ t T[int]
+ ft FT[int]
+ f = F[int]
+ i I[int]
+)
+
+func fn() {
+ var r R[int]
+ _ = r
+}
+`
+ info := &Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ f := mustParse(src)
+ conf := Config{}
+ pkg, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ lookup := func(name string) Type { return pkg.Scope().Lookup(name).Type() }
+ fnScope := pkg.Scope().Lookup("fn").(*Func).Scope()
+
+ tests := []struct {
+ name string
+ obj Object
+ }{
+ // Struct fields
+ {"field", lookup("t").Underlying().(*Struct).Field(0)},
+ {"field", fnScope.Lookup("r").Type().Underlying().(*Struct).Field(0)},
+
+ // Methods and method fields
+ {"concreteMethod", lookup("t").(*Named).Method(0)},
+ {"recv", lookup("t").(*Named).Method(0).Type().(*Signature).Recv()},
+ {"mParam", lookup("t").(*Named).Method(0).Type().(*Signature).Params().At(0)},
+ {"mResult", lookup("t").(*Named).Method(0).Type().(*Signature).Results().At(0)},
+
+ // Interface methods
+ {"interfaceMethod", lookup("i").Underlying().(*Interface).Method(0)},
+
+ // Function type fields
+ {"ftParam", lookup("ft").Underlying().(*Signature).Params().At(0)},
+ {"ftResult", lookup("ft").Underlying().(*Signature).Results().At(0)},
+
+ // Function fields
+ {"fParam", lookup("f").(*Signature).Params().At(0)},
+ {"fResult", lookup("f").(*Signature).Results().At(0)},
+ }
+
+ // Collect all identifiers by name.
+ idents := make(map[string][]*syntax.Name)
+ syntax.Inspect(f, func(n syntax.Node) bool {
+ if id, ok := n.(*syntax.Name); ok {
+ idents[id.Value] = append(idents[id.Value], id)
+ }
+ return true
+ })
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.name, func(t *testing.T) {
+ if got := len(idents[test.name]); got != 1 {
+ t.Fatalf("found %d identifiers named %s, want 1", got, test.name)
+ }
+ ident := idents[test.name][0]
+ def := info.Defs[ident]
+ if def == test.obj {
+ t.Fatalf("info.Defs[%s] contains the test object", test.name)
+ }
+ if orig := originObject(test.obj); def != orig {
+ t.Errorf("info.Defs[%s] does not match obj.Origin()", test.name)
+ }
+ if def.Pkg() != test.obj.Pkg() {
+ t.Errorf("Pkg() = %v, want %v", def.Pkg(), test.obj.Pkg())
+ }
+ if def.Name() != test.obj.Name() {
+ t.Errorf("Name() = %v, want %v", def.Name(), test.obj.Name())
+ }
+ if def.Pos() != test.obj.Pos() {
+ t.Errorf("Pos() = %v, want %v", def.Pos(), test.obj.Pos())
+ }
+ if def.Parent() != test.obj.Parent() {
+ t.Fatalf("Parent() = %v, want %v", def.Parent(), test.obj.Parent())
+ }
+ if def.Exported() != test.obj.Exported() {
+ t.Fatalf("Exported() = %v, want %v", def.Exported(), test.obj.Exported())
+ }
+ if def.Id() != test.obj.Id() {
+ t.Fatalf("Id() = %v, want %v", def.Id(), test.obj.Id())
+ }
+ // String and Type are expected to differ.
+ })
+ }
+}
+
+func originObject(obj Object) Object {
+ switch obj := obj.(type) {
+ case *Var:
+ return obj.Origin()
+ case *Func:
+ return obj.Origin()
+ }
+ return obj
+}
+
+func TestImplements(t *testing.T) {
+ const src = `
+package p
+
+type EmptyIface interface{}
+
+type I interface {
+ m()
+}
+
+type C interface {
+ m()
+ ~int
+}
+
+type Integer interface{
+ int8 | int16 | int32 | int64
+}
+
+type EmptyTypeSet interface{
+ Integer
+ ~string
+}
+
+type N1 int
+func (N1) m() {}
+
+type N2 int
+func (*N2) m() {}
+
+type N3 int
+func (N3) m(int) {}
+
+type N4 string
+func (N4) m()
+
+type Bad Bad // invalid type
+`
+
+ f := mustParse(src)
+ conf := Config{Error: func(error) {}}
+ pkg, _ := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
+
+ lookup := func(tname string) Type { return pkg.Scope().Lookup(tname).Type() }
+ var (
+ EmptyIface = lookup("EmptyIface").Underlying().(*Interface)
+ I = lookup("I").(*Named)
+ II = I.Underlying().(*Interface)
+ C = lookup("C").(*Named)
+ CI = C.Underlying().(*Interface)
+ Integer = lookup("Integer").Underlying().(*Interface)
+ EmptyTypeSet = lookup("EmptyTypeSet").Underlying().(*Interface)
+ N1 = lookup("N1")
+ N1p = NewPointer(N1)
+ N2 = lookup("N2")
+ N2p = NewPointer(N2)
+ N3 = lookup("N3")
+ N4 = lookup("N4")
+ Bad = lookup("Bad")
+ )
+
+ tests := []struct {
+ V Type
+ T *Interface
+ want bool
+ }{
+ {I, II, true},
+ {I, CI, false},
+ {C, II, true},
+ {C, CI, true},
+ {Typ[Int8], Integer, true},
+ {Typ[Int64], Integer, true},
+ {Typ[String], Integer, false},
+ {EmptyTypeSet, II, true},
+ {EmptyTypeSet, EmptyTypeSet, true},
+ {Typ[Int], EmptyTypeSet, false},
+ {N1, II, true},
+ {N1, CI, true},
+ {N1p, II, true},
+ {N1p, CI, false},
+ {N2, II, false},
+ {N2, CI, false},
+ {N2p, II, true},
+ {N2p, CI, false},
+ {N3, II, false},
+ {N3, CI, false},
+ {N4, II, true},
+ {N4, CI, false},
+ {Bad, II, false},
+ {Bad, CI, false},
+ {Bad, EmptyIface, true},
+ }
+
+ for _, test := range tests {
+ if got := Implements(test.V, test.T); got != test.want {
+ t.Errorf("Implements(%s, %s) = %t, want %t", test.V, test.T, got, test.want)
+ }
+
+ // The type assertion x.(T) is valid if T is an interface or if T implements the type of x.
+ // The assertion is never valid if T is a bad type.
+ V := test.T
+ T := test.V
+ want := false
+ if _, ok := T.Underlying().(*Interface); (ok || Implements(T, V)) && T != Bad {
+ want = true
+ }
+ if got := AssertableTo(V, T); got != want {
+ t.Errorf("AssertableTo(%s, %s) = %t, want %t", V, T, got, want)
+ }
+ }
+}
+
+func TestMissingMethodAlternative(t *testing.T) {
+ const src = `
+package p
+type T interface {
+ m()
+}
+
+type V0 struct{}
+func (V0) m() {}
+
+type V1 struct{}
+
+type V2 struct{}
+func (V2) m() int
+
+type V3 struct{}
+func (*V3) m()
+
+type V4 struct{}
+func (V4) M()
+`
+
+ pkg := mustTypecheck(src, nil, nil)
+
+ T := pkg.Scope().Lookup("T").Type().Underlying().(*Interface)
+ lookup := func(name string) (*Func, bool) {
+ return MissingMethod(pkg.Scope().Lookup(name).Type(), T, true)
+ }
+
+ // V0 has method m with correct signature. Should not report wrongType.
+ method, wrongType := lookup("V0")
+ if method != nil || wrongType {
+ t.Fatalf("V0: got method = %v, wrongType = %v", method, wrongType)
+ }
+
+ checkMissingMethod := func(tname string, reportWrongType bool) {
+ method, wrongType := lookup(tname)
+ if method == nil || method.Name() != "m" || wrongType != reportWrongType {
+ t.Fatalf("%s: got method = %v, wrongType = %v", tname, method, wrongType)
+ }
+ }
+
+ // V1 has no method m. Should not report wrongType.
+ checkMissingMethod("V1", false)
+
+ // V2 has method m with wrong signature type (ignoring receiver). Should report wrongType.
+ checkMissingMethod("V2", true)
+
+ // V3 has no method m but it exists on *V3. Should report wrongType.
+ checkMissingMethod("V3", true)
+
+ // V4 has no method m but has M. Should not report wrongType.
+ checkMissingMethod("V4", false)
+}
+
+func TestErrorURL(t *testing.T) {
+ conf := Config{ErrorURL: " [go.dev/e/%s]"}
+
+ // test case for a one-line error
+ const src1 = `
+package p
+var _ T
+`
+ _, err := typecheck(src1, &conf, nil)
+ if err == nil || !strings.HasSuffix(err.Error(), " [go.dev/e/UndeclaredName]") {
+ t.Errorf("src1: unexpected error: got %v", err)
+ }
+
+ // test case for a multi-line error
+ const src2 = `
+package p
+func f() int { return 0 }
+var _ = f(1, 2)
+`
+ _, err = typecheck(src2, &conf, nil)
+ if err == nil || !strings.Contains(err.Error(), " [go.dev/e/WrongArgCount]\n") {
+ t.Errorf("src1: unexpected error: got %v", err)
+ }
+}
+
+func TestModuleVersion(t *testing.T) {
+ // version go1.dd must be able to typecheck go1.dd.0, go1.dd.1, etc.
+ goversion := fmt.Sprintf("go1.%d", goversion.Version)
+ for _, v := range []string{
+ goversion,
+ goversion + ".0",
+ goversion + ".1",
+ goversion + ".rc",
+ } {
+ conf := Config{GoVersion: v}
+ pkg := mustTypecheck("package p", &conf, nil)
+ if pkg.GoVersion() != conf.GoVersion {
+ t.Errorf("got %s; want %s", pkg.GoVersion(), conf.GoVersion)
+ }
+ }
+}
+
+func TestFileVersions(t *testing.T) {
+ for _, test := range []struct {
+ goVersion string
+ fileVersion string
+ wantVersion string
+ }{
+ {"", "", ""}, // no versions specified
+ {"go1.19", "", "go1.19"}, // module version specified
+ {"", "go1.20", ""}, // file upgrade ignored
+ {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted
+ {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted
+ {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21)
+
+ // versions containing release numbers
+ // (file versions containing release numbers are considered invalid)
+ {"go1.19.0", "", "go1.19.0"}, // no file version specified
+ {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored
+ {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored
+ {"go1.20.1", "go1.21", "go1.21"}, // file upgrade permitted
+ {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted
+ {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version)
+ {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21)
+ } {
+ var src string
+ if test.fileVersion != "" {
+ src = "//go:build " + test.fileVersion + "\n"
+ }
+ src += "package p"
+
+ conf := Config{GoVersion: test.goVersion}
+ versions := make(map[*syntax.PosBase]string)
+ var info Info
+ info.FileVersions = versions
+ mustTypecheck(src, &conf, &info)
+
+ n := 0
+ for _, v := range info.FileVersions {
+ want := test.wantVersion
+ if v != want {
+ t.Errorf("%q: unexpected file version: got %v, want %v", src, v, want)
+ }
+ n++
+ }
+ if n != 1 {
+ t.Errorf("%q: incorrect number of map entries: got %d", src, n)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/array.go b/src/cmd/compile/internal/types2/array.go
new file mode 100644
index 0000000..502d49b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/array.go
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// An Array represents an array type.
+type Array struct {
+ len int64
+ elem Type
+}
+
+// NewArray returns a new array type for the given element type and length.
+// A negative length indicates an unknown length.
+func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} }
+
+// Len returns the length of array a.
+// A negative result indicates an unknown length.
+func (a *Array) Len() int64 { return a.len }
+
+// Elem returns element type of array a.
+func (a *Array) Elem() Type { return a.elem }
+
+func (a *Array) Underlying() Type { return a }
+func (a *Array) String() string { return TypeString(a, nil) }
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
new file mode 100644
index 0000000..8abafdb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -0,0 +1,575 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements initialization and assignment checks.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ . "internal/types/errors"
+ "strings"
+)
+
+// assignment reports whether x can be assigned to a variable of type T,
+// if necessary by attempting to convert untyped values to the appropriate
+// type. context describes the context in which the assignment takes place.
+// Use T == nil to indicate assignment to an untyped blank identifier.
+// If the assignment check fails, x.mode is set to invalid.
+func (check *Checker) assignment(x *operand, T Type, context string) {
+ check.singleValue(x)
+
+ switch x.mode {
+ case invalid:
+ return // error reported before
+ case constant_, variable, mapindex, value, nilvalue, commaok, commaerr:
+ // ok
+ default:
+ // we may get here because of other problems (go.dev/issue/39634, crash 12)
+ // TODO(gri) do we need a new "generic" error code here?
+ check.errorf(x, IncompatibleAssign, "cannot assign %s to %s in %s", x, T, context)
+ x.mode = invalid
+ return
+ }
+
+ if isUntyped(x.typ) {
+ target := T
+ // spec: "If an untyped constant is assigned to a variable of interface
+ // type or the blank identifier, the constant is first converted to type
+ // bool, rune, int, float64, complex128 or string respectively, depending
+ // on whether the value is a boolean, rune, integer, floating-point,
+ // complex, or string constant."
+ if x.isNil() {
+ if T == nil {
+ check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context)
+ x.mode = invalid
+ return
+ }
+ } else if T == nil || isNonTypeParamInterface(T) {
+ target = Default(x.typ)
+ }
+ newType, val, code := check.implicitTypeAndValue(x, target)
+ if code != 0 {
+ msg := check.sprintf("cannot use %s as %s value in %s", x, target, context)
+ switch code {
+ case TruncatedFloat:
+ msg += " (truncated)"
+ case NumericOverflow:
+ msg += " (overflows)"
+ default:
+ code = IncompatibleAssign
+ }
+ check.error(x, code, msg)
+ x.mode = invalid
+ return
+ }
+ if val != nil {
+ x.val = val
+ check.updateExprVal(x.expr, val)
+ }
+ if newType != x.typ {
+ x.typ = newType
+ check.updateExprType(x.expr, newType, false)
+ }
+ }
+ // x.typ is typed
+
+ // A generic (non-instantiated) function value cannot be assigned to a variable.
+ if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
+ check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context)
+ x.mode = invalid
+ return
+ }
+
+ // spec: "If a left-hand side is the blank identifier, any typed or
+ // non-constant value except for the predeclared identifier nil may
+ // be assigned to it."
+ if T == nil {
+ return
+ }
+
+ cause := ""
+ if ok, code := x.assignableTo(check, T, &cause); !ok {
+ if cause != "" {
+ check.errorf(x, code, "cannot use %s as %s value in %s: %s", x, T, context, cause)
+ } else {
+ check.errorf(x, code, "cannot use %s as %s value in %s", x, T, context)
+ }
+ x.mode = invalid
+ }
+}
+
+func (check *Checker) initConst(lhs *Const, x *operand) {
+ if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return
+ }
+
+ // rhs must be a constant
+ if x.mode != constant_ {
+ check.errorf(x, InvalidConstInit, "%s is not constant", x)
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return
+ }
+ assert(isConstType(x.typ))
+
+ // If the lhs doesn't have a type yet, use the type of x.
+ if lhs.typ == nil {
+ lhs.typ = x.typ
+ }
+
+ check.assignment(x, lhs.typ, "constant declaration")
+ if x.mode == invalid {
+ return
+ }
+
+ lhs.val = x.val
+}
+
+// initVar checks the initialization lhs = x in a variable declaration.
+// If lhs doesn't have a type yet, it is given the type of x,
+// or Typ[Invalid] in case of an error.
+// If the initialization check fails, x.mode is set to invalid.
+func (check *Checker) initVar(lhs *Var, x *operand, context string) {
+ if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) {
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ x.mode = invalid
+ return
+ }
+
+ // If lhs doesn't have a type yet, use the type of x.
+ if lhs.typ == nil {
+ typ := x.typ
+ if isUntyped(typ) {
+ // convert untyped types to default types
+ if typ == Typ[UntypedNil] {
+ check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context)
+ lhs.typ = Typ[Invalid]
+ x.mode = invalid
+ return
+ }
+ typ = Default(typ)
+ }
+ lhs.typ = typ
+ }
+
+ check.assignment(x, lhs.typ, context)
+}
+
+// lhsVar checks a lhs variable in an assignment and returns its type.
+// lhsVar takes care of not counting a lhs identifier as a "use" of
+// that identifier. The result is nil if it is the blank identifier,
+// and Typ[Invalid] if it is an invalid lhs expression.
+func (check *Checker) lhsVar(lhs syntax.Expr) Type {
+ // Determine if the lhs is a (possibly parenthesized) identifier.
+ ident, _ := syntax.Unparen(lhs).(*syntax.Name)
+
+ // Don't evaluate lhs if it is the blank identifier.
+ if ident != nil && ident.Value == "_" {
+ check.recordDef(ident, nil)
+ return nil
+ }
+
+ // If the lhs is an identifier denoting a variable v, this reference
+ // is not a 'use' of v. Remember current value of v.used and restore
+ // after evaluating the lhs via check.expr.
+ var v *Var
+ var v_used bool
+ if ident != nil {
+ if obj := check.lookup(ident.Value); obj != nil {
+ // It's ok to mark non-local variables, but ignore variables
+ // from other packages to avoid potential race conditions with
+ // dot-imported variables.
+ if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg {
+ v = w
+ v_used = v.used
+ }
+ }
+ }
+
+ var x operand
+ check.expr(nil, &x, lhs)
+
+ if v != nil {
+ v.used = v_used // restore v.used
+ }
+
+ if x.mode == invalid || !isValid(x.typ) {
+ return Typ[Invalid]
+ }
+
+ // spec: "Each left-hand side operand must be addressable, a map index
+ // expression, or the blank identifier. Operands may be parenthesized."
+ switch x.mode {
+ case invalid:
+ return Typ[Invalid]
+ case variable, mapindex:
+ // ok
+ default:
+ if sel, ok := x.expr.(*syntax.SelectorExpr); ok {
+ var op operand
+ check.expr(nil, &op, sel.X)
+ if op.mode == mapindex {
+ check.errorf(&x, UnaddressableFieldAssign, "cannot assign to struct field %s in map", syntax.String(x.expr))
+ return Typ[Invalid]
+ }
+ }
+ check.errorf(&x, UnassignableOperand, "cannot assign to %s (neither addressable nor a map index expression)", x.expr)
+ return Typ[Invalid]
+ }
+
+ return x.typ
+}
+
+// assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil).
+// If x != nil, it must be the evaluation of rhs (and rhs will be ignored).
+// If the assignment check fails and x != nil, x.mode is set to invalid.
+func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string) {
+ T := check.lhsVar(lhs) // nil if lhs is _
+ if !isValid(T) {
+ if x != nil {
+ x.mode = invalid
+ } else {
+ check.use(rhs)
+ }
+ return
+ }
+
+ if x == nil {
+ var target *target
+ // avoid calling syntax.String if not needed
+ if T != nil {
+ if _, ok := under(T).(*Signature); ok {
+ target = newTarget(T, syntax.String(lhs))
+ }
+ }
+ x = new(operand)
+ check.expr(target, x, rhs)
+ }
+
+ if T == nil && context == "assignment" {
+ context = "assignment to _ identifier"
+ }
+ check.assignment(x, T, context)
+}
+
+// operandTypes returns the list of types for the given operands.
+func operandTypes(list []*operand) (res []Type) {
+ for _, x := range list {
+ res = append(res, x.typ)
+ }
+ return res
+}
+
+// varTypes returns the list of types for the given variables.
+func varTypes(list []*Var) (res []Type) {
+ for _, x := range list {
+ res = append(res, x.typ)
+ }
+ return res
+}
+
+// typesSummary returns a string of the form "(t1, t2, ...)" where the
+// ti's are user-friendly string representations for the given types.
+// If variadic is set and the last type is a slice, its string is of
+// the form "...E" where E is the slice's element type.
+func (check *Checker) typesSummary(list []Type, variadic bool) string {
+ var res []string
+ for i, t := range list {
+ var s string
+ switch {
+ case t == nil:
+ fallthrough // should not happen but be cautious
+ case !isValid(t):
+ s = "unknown type"
+ case isUntyped(t):
+ if isNumeric(t) {
+ // Do not imply a specific type requirement:
+ // "have number, want float64" is better than
+ // "have untyped int, want float64" or
+ // "have int, want float64".
+ s = "number"
+ } else {
+ // If we don't have a number, omit the "untyped" qualifier
+ // for compactness.
+ s = strings.Replace(t.(*Basic).name, "untyped ", "", -1)
+ }
+ case variadic && i == len(list)-1:
+ s = check.sprintf("...%s", t.(*Slice).elem)
+ }
+ if s == "" {
+ s = check.sprintf("%s", t)
+ }
+ res = append(res, s)
+ }
+ return "(" + strings.Join(res, ", ") + ")"
+}
+
+func measure(x int, unit string) string {
+ if x != 1 {
+ unit += "s"
+ }
+ return fmt.Sprintf("%d %s", x, unit)
+}
+
+func (check *Checker) assignError(rhs []syntax.Expr, l, r int) {
+ vars := measure(l, "variable")
+ vals := measure(r, "value")
+ rhs0 := rhs[0]
+
+ if len(rhs) == 1 {
+ if call, _ := syntax.Unparen(rhs0).(*syntax.CallExpr); call != nil {
+ check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals)
+ return
+ }
+ }
+ check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s", vars, vals)
+}
+
+func (check *Checker) returnError(at poser, lhs []*Var, rhs []*operand) {
+ l, r := len(lhs), len(rhs)
+ qualifier := "not enough"
+ if r > l {
+ at = rhs[l] // report at first extra value
+ qualifier = "too many"
+ } else if r > 0 {
+ at = rhs[r-1] // report at last value
+ }
+ var err error_
+ err.code = WrongResultCount
+ err.errorf(at, "%s return values", qualifier)
+ err.errorf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false))
+ err.errorf(nopos, "want %s", check.typesSummary(varTypes(lhs), false))
+ check.report(&err)
+}
+
+// initVars type-checks assignments of initialization expressions orig_rhs
+// to variables lhs.
+// If returnStmt is non-nil, initVars type-checks the implicit assignment
+// of result expressions orig_rhs to function result parameters lhs.
+func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt syntax.Stmt) {
+ context := "assignment"
+ if returnStmt != nil {
+ context = "return statement"
+ }
+
+ l, r := len(lhs), len(orig_rhs)
+
+ // If l == 1 and the rhs is a single call, for a better
+ // error message don't handle it as n:n mapping below.
+ isCall := false
+ if r == 1 {
+ _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr)
+ }
+
+ // If we have a n:n mapping from lhs variable to rhs expression,
+ // each value can be assigned to its corresponding variable.
+ if l == r && !isCall {
+ var x operand
+ for i, lhs := range lhs {
+ desc := lhs.name
+ if returnStmt != nil && desc == "" {
+ desc = "result variable"
+ }
+ check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i])
+ check.initVar(lhs, &x, context)
+ }
+ return
+ }
+
+ // If we don't have an n:n mapping, the rhs must be a single expression
+ // resulting in 2 or more values; otherwise we have an assignment mismatch.
+ if r != 1 {
+ // Only report a mismatch error if there are no other errors on the rhs.
+ if check.use(orig_rhs...) {
+ if returnStmt != nil {
+ rhs := check.exprList(orig_rhs)
+ check.returnError(returnStmt, lhs, rhs)
+ } else {
+ check.assignError(orig_rhs, l, r)
+ }
+ }
+ // ensure that LHS variables have a type
+ for _, v := range lhs {
+ if v.typ == nil {
+ v.typ = Typ[Invalid]
+ }
+ }
+ return
+ }
+
+ rhs, commaOk := check.multiExpr(orig_rhs[0], l == 2 && returnStmt == nil)
+ r = len(rhs)
+ if l == r {
+ for i, lhs := range lhs {
+ check.initVar(lhs, rhs[i], context)
+ }
+ // Only record comma-ok expression if both initializations succeeded
+ // (go.dev/issue/59371).
+ if commaOk && rhs[0].mode != invalid && rhs[1].mode != invalid {
+ check.recordCommaOkTypes(orig_rhs[0], rhs)
+ }
+ return
+ }
+
+ // In all other cases we have an assignment mismatch.
+ // Only report a mismatch error if there are no other errors on the rhs.
+ if rhs[0].mode != invalid {
+ if returnStmt != nil {
+ check.returnError(returnStmt, lhs, rhs)
+ } else {
+ check.assignError(orig_rhs, l, r)
+ }
+ }
+ // ensure that LHS variables have a type
+ for _, v := range lhs {
+ if v.typ == nil {
+ v.typ = Typ[Invalid]
+ }
+ }
+ // orig_rhs[0] was already evaluated
+}
+
+// assignVars type-checks assignments of expressions orig_rhs to variables lhs.
+func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) {
+ l, r := len(lhs), len(orig_rhs)
+
+ // If l == 1 and the rhs is a single call, for a better
+ // error message don't handle it as n:n mapping below.
+ isCall := false
+ if r == 1 {
+ _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr)
+ }
+
+ // If we have a n:n mapping from lhs variable to rhs expression,
+ // each value can be assigned to its corresponding variable.
+ if l == r && !isCall {
+ for i, lhs := range lhs {
+ check.assignVar(lhs, orig_rhs[i], nil, "assignment")
+ }
+ return
+ }
+
+ // If we don't have an n:n mapping, the rhs must be a single expression
+ // resulting in 2 or more values; otherwise we have an assignment mismatch.
+ if r != 1 {
+ // Only report a mismatch error if there are no other errors on the lhs or rhs.
+ okLHS := check.useLHS(lhs...)
+ okRHS := check.use(orig_rhs...)
+ if okLHS && okRHS {
+ check.assignError(orig_rhs, l, r)
+ }
+ return
+ }
+
+ rhs, commaOk := check.multiExpr(orig_rhs[0], l == 2)
+ r = len(rhs)
+ if l == r {
+ for i, lhs := range lhs {
+ check.assignVar(lhs, nil, rhs[i], "assignment")
+ }
+ // Only record comma-ok expression if both assignments succeeded
+ // (go.dev/issue/59371).
+ if commaOk && rhs[0].mode != invalid && rhs[1].mode != invalid {
+ check.recordCommaOkTypes(orig_rhs[0], rhs)
+ }
+ return
+ }
+
+ // In all other cases we have an assignment mismatch.
+ // Only report a mismatch error if there are no other errors on the rhs.
+ if rhs[0].mode != invalid {
+ check.assignError(orig_rhs, l, r)
+ }
+ check.useLHS(lhs...)
+ // orig_rhs[0] was already evaluated
+}
+
+func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) {
+ top := len(check.delayed)
+ scope := check.scope
+
+ // collect lhs variables
+ seen := make(map[string]bool, len(lhs))
+ lhsVars := make([]*Var, len(lhs))
+ newVars := make([]*Var, 0, len(lhs))
+ hasErr := false
+ for i, lhs := range lhs {
+ ident, _ := lhs.(*syntax.Name)
+ if ident == nil {
+ check.useLHS(lhs)
+ check.errorf(lhs, BadDecl, "non-name %s on left side of :=", lhs)
+ hasErr = true
+ continue
+ }
+
+ name := ident.Value
+ if name != "_" {
+ if seen[name] {
+ check.errorf(lhs, RepeatedDecl, "%s repeated on left side of :=", lhs)
+ hasErr = true
+ continue
+ }
+ seen[name] = true
+ }
+
+ // Use the correct obj if the ident is redeclared. The
+ // variable's scope starts after the declaration; so we
+ // must use Scope.Lookup here and call Scope.Insert
+ // (via check.declare) later.
+ if alt := scope.Lookup(name); alt != nil {
+ check.recordUse(ident, alt)
+ // redeclared object must be a variable
+ if obj, _ := alt.(*Var); obj != nil {
+ lhsVars[i] = obj
+ } else {
+ check.errorf(lhs, UnassignableOperand, "cannot assign to %s", lhs)
+ hasErr = true
+ }
+ continue
+ }
+
+ // declare new variable
+ obj := NewVar(ident.Pos(), check.pkg, name, nil)
+ lhsVars[i] = obj
+ if name != "_" {
+ newVars = append(newVars, obj)
+ }
+ check.recordDef(ident, obj)
+ }
+
+ // create dummy variables where the lhs is invalid
+ for i, obj := range lhsVars {
+ if obj == nil {
+ lhsVars[i] = NewVar(lhs[i].Pos(), check.pkg, "_", nil)
+ }
+ }
+
+ check.initVars(lhsVars, rhs, nil)
+
+ // process function literals in rhs expressions before scope changes
+ check.processDelayed(top)
+
+ if len(newVars) == 0 && !hasErr {
+ check.softErrorf(pos, NoNewVar, "no new variables on left side of :=")
+ return
+ }
+
+ // declare new variables
+ // spec: "The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl
+ // for short variable declarations) and ends at the end of the innermost
+ // containing block."
+ scopePos := syntax.EndPos(rhs[len(rhs)-1])
+ for _, obj := range newVars {
+ check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called
+ }
+}
diff --git a/src/cmd/compile/internal/types2/basic.go b/src/cmd/compile/internal/types2/basic.go
new file mode 100644
index 0000000..2fd973c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/basic.go
@@ -0,0 +1,82 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// BasicKind describes the kind of basic type.
+type BasicKind int
+
+const (
+ Invalid BasicKind = iota // type is invalid
+
+ // predeclared types
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ String
+ UnsafePointer
+
+ // types for untyped values
+ UntypedBool
+ UntypedInt
+ UntypedRune
+ UntypedFloat
+ UntypedComplex
+ UntypedString
+ UntypedNil
+
+ // aliases
+ Byte = Uint8
+ Rune = Int32
+)
+
+// BasicInfo is a set of flags describing properties of a basic type.
+type BasicInfo int
+
+// Properties of basic types.
+const (
+ IsBoolean BasicInfo = 1 << iota
+ IsInteger
+ IsUnsigned
+ IsFloat
+ IsComplex
+ IsString
+ IsUntyped
+
+ IsOrdered = IsInteger | IsFloat | IsString
+ IsNumeric = IsInteger | IsFloat | IsComplex
+ IsConstType = IsBoolean | IsNumeric | IsString
+)
+
+// A Basic represents a basic type.
+type Basic struct {
+ kind BasicKind
+ info BasicInfo
+ name string
+}
+
+// Kind returns the kind of basic type b.
+func (b *Basic) Kind() BasicKind { return b.kind }
+
+// Info returns information about properties of basic type b.
+func (b *Basic) Info() BasicInfo { return b.info }
+
+// Name returns the name of basic type b.
+func (b *Basic) Name() string { return b.name }
+
+func (b *Basic) Underlying() Type { return b }
+func (b *Basic) String() string { return TypeString(b, nil) }
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
new file mode 100644
index 0000000..60f6d7f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -0,0 +1,1047 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of builtin function calls.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+ "go/token"
+ . "internal/types/errors"
+)
+
+// builtin type-checks a call to the built-in specified by id and
+// reports whether the call is valid, with *x holding the result;
+// but x.expr is not set. If the call is invalid, the result is
+// false, and *x is undefined.
+func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (_ bool) {
+ argList := call.ArgList
+
+ // append is the only built-in that permits the use of ... for the last argument
+ bin := predeclaredFuncs[id]
+ if call.HasDots && id != _Append {
+ //check.errorf(call.Ellipsis, invalidOp + "invalid use of ... with built-in %s", bin.name)
+ check.errorf(call,
+ InvalidDotDotDot,
+ invalidOp+"invalid use of ... with built-in %s", bin.name)
+ check.use(argList...)
+ return
+ }
+
+ // For len(x) and cap(x) we need to know if x contains any function calls or
+ // receive operations. Save/restore current setting and set hasCallOrRecv to
+ // false for the evaluation of x so that we can check it afterwards.
+ // Note: We must do this _before_ calling exprList because exprList evaluates
+ // all arguments.
+ if id == _Len || id == _Cap {
+ defer func(b bool) {
+ check.hasCallOrRecv = b
+ }(check.hasCallOrRecv)
+ check.hasCallOrRecv = false
+ }
+
+ // Evaluate arguments for built-ins that use ordinary (value) arguments.
+ // For built-ins with special argument handling (make, new, etc.),
+ // evaluation is done by the respective built-in code.
+ var args []*operand // not valid for _Make, _New, _Offsetof, _Trace
+ var nargs int
+ switch id {
+ default:
+ // check all arguments
+ args = check.exprList(argList)
+ nargs = len(args)
+ for _, a := range args {
+ if a.mode == invalid {
+ return
+ }
+ }
+ // first argument is always in x
+ if nargs > 0 {
+ *x = *args[0]
+ }
+ case _Make, _New, _Offsetof, _Trace:
+ // arguments require special handling
+ nargs = len(argList)
+ }
+
+ // check argument count
+ {
+ msg := ""
+ if nargs < bin.nargs {
+ msg = "not enough"
+ } else if !bin.variadic && nargs > bin.nargs {
+ msg = "too many"
+ }
+ if msg != "" {
+ check.errorf(call, WrongArgCount, invalidOp+"%s arguments for %v (expected %d, found %d)", msg, call, bin.nargs, nargs)
+ return
+ }
+ }
+
+ switch id {
+ case _Append:
+ // append(s S, x ...T) S, where T is the element type of S
+ // spec: "The variadic function append appends zero or more values x to s of type
+ // S, which must be a slice type, and returns the resulting slice, also of type S.
+ // The values x are passed to a parameter of type ...T where T is the element type
+ // of S and the respective parameter passing rules apply."
+ S := x.typ
+ var T Type
+ if s, _ := coreType(S).(*Slice); s != nil {
+ T = s.elem
+ } else {
+ var cause string
+ switch {
+ case x.isNil():
+ cause = "have untyped nil"
+ case isTypeParam(S):
+ if u := coreType(S); u != nil {
+ cause = check.sprintf("%s has core type %s", x, u)
+ } else {
+ cause = check.sprintf("%s has no core type", x)
+ }
+ default:
+ cause = check.sprintf("have %s", x)
+ }
+ // don't use invalidArg prefix here as it would repeat "argument" in the error message
+ check.errorf(x, InvalidAppend, "first argument to append must be a slice; %s", cause)
+ return
+ }
+
+ // spec: "As a special case, append also accepts a first argument assignable
+ // to type []byte with a second argument of string type followed by ... .
+ // This form appends the bytes of the string.
+ if nargs == 2 && call.HasDots {
+ if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok {
+ y := args[1]
+ if t := coreString(y.typ); t != nil && isString(t) {
+ if check.recordTypes() {
+ sig := makeSig(S, S, y.typ)
+ sig.variadic = true
+ check.recordBuiltinType(call.Fun, sig)
+ }
+ x.mode = value
+ x.typ = S
+ break
+ }
+ }
+ }
+
+ // check general case by creating custom signature
+ sig := makeSig(S, S, NewSlice(T)) // []T required for variadic signature
+ sig.variadic = true
+ check.arguments(call, sig, nil, nil, args, nil, nil) // discard result (we know the result type)
+ // ok to continue even if check.arguments reported errors
+
+ x.mode = value
+ x.typ = S
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, sig)
+ }
+
+ case _Cap, _Len:
+ // cap(x)
+ // len(x)
+ mode := invalid
+ var val constant.Value
+ switch t := arrayPtrDeref(under(x.typ)).(type) {
+ case *Basic:
+ if isString(t) && id == _Len {
+ if x.mode == constant_ {
+ mode = constant_
+ val = constant.MakeInt64(int64(len(constant.StringVal(x.val))))
+ } else {
+ mode = value
+ }
+ }
+
+ case *Array:
+ mode = value
+ // spec: "The expressions len(s) and cap(s) are constants
+ // if the type of s is an array or pointer to an array and
+ // the expression s does not contain channel receives or
+ // function calls; in this case s is not evaluated."
+ if !check.hasCallOrRecv {
+ mode = constant_
+ if t.len >= 0 {
+ val = constant.MakeInt64(t.len)
+ } else {
+ val = constant.MakeUnknown()
+ }
+ }
+
+ case *Slice, *Chan:
+ mode = value
+
+ case *Map:
+ if id == _Len {
+ mode = value
+ }
+
+ case *Interface:
+ if !isTypeParam(x.typ) {
+ break
+ }
+ if t.typeSet().underIs(func(t Type) bool {
+ switch t := arrayPtrDeref(t).(type) {
+ case *Basic:
+ if isString(t) && id == _Len {
+ return true
+ }
+ case *Array, *Slice, *Chan:
+ return true
+ case *Map:
+ if id == _Len {
+ return true
+ }
+ }
+ return false
+ }) {
+ mode = value
+ }
+ }
+
+ if mode == invalid {
+ // avoid error if underlying type is invalid
+ if isValid(under(x.typ)) {
+ code := InvalidCap
+ if id == _Len {
+ code = InvalidLen
+ }
+ check.errorf(x, code, invalidArg+"%s for %s", x, bin.name)
+ }
+ return
+ }
+
+ // record the signature before changing x.typ
+ if check.recordTypes() && mode != constant_ {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ))
+ }
+
+ x.mode = mode
+ x.typ = Typ[Int]
+ x.val = val
+
+ case _Clear:
+ // clear(m)
+ check.verifyVersionf(call.Fun, go1_21, "clear")
+
+ if !underIs(x.typ, func(u Type) bool {
+ switch u.(type) {
+ case *Map, *Slice:
+ return true
+ }
+ check.errorf(x, InvalidClear, invalidArg+"cannot clear %s: argument must be (or constrained by) map or slice", x)
+ return false
+ }) {
+ return
+ }
+
+ x.mode = novalue
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(nil, x.typ))
+ }
+
+ case _Close:
+ // close(c)
+ if !underIs(x.typ, func(u Type) bool {
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(x, InvalidClose, invalidOp+"cannot close non-channel %s", x)
+ return false
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(x, InvalidClose, invalidOp+"cannot close receive-only channel %s", x)
+ return false
+ }
+ return true
+ }) {
+ return
+ }
+ x.mode = novalue
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(nil, x.typ))
+ }
+
+ case _Complex:
+ // complex(x, y floatT) complexT
+ y := args[1]
+
+ // convert or check untyped arguments
+ d := 0
+ if isUntyped(x.typ) {
+ d |= 1
+ }
+ if isUntyped(y.typ) {
+ d |= 2
+ }
+ switch d {
+ case 0:
+ // x and y are typed => nothing to do
+ case 1:
+ // only x is untyped => convert to type of y
+ check.convertUntyped(x, y.typ)
+ case 2:
+ // only y is untyped => convert to type of x
+ check.convertUntyped(y, x.typ)
+ case 3:
+ // x and y are untyped =>
+ // 1) if both are constants, convert them to untyped
+ // floating-point numbers if possible,
+ // 2) if one of them is not constant (possible because
+ // it contains a shift that is yet untyped), convert
+ // both of them to float64 since they must have the
+ // same type to succeed (this will result in an error
+ // because shifts of floats are not permitted)
+ if x.mode == constant_ && y.mode == constant_ {
+ toFloat := func(x *operand) {
+ if isNumeric(x.typ) && constant.Sign(constant.Imag(x.val)) == 0 {
+ x.typ = Typ[UntypedFloat]
+ }
+ }
+ toFloat(x)
+ toFloat(y)
+ } else {
+ check.convertUntyped(x, Typ[Float64])
+ check.convertUntyped(y, Typ[Float64])
+ // x and y should be invalid now, but be conservative
+ // and check below
+ }
+ }
+ if x.mode == invalid || y.mode == invalid {
+ return
+ }
+
+ // both argument types must be identical
+ if !Identical(x.typ, y.typ) {
+ check.errorf(x, InvalidComplex, invalidOp+"%v (mismatched types %s and %s)", call, x.typ, y.typ)
+ return
+ }
+
+ // the argument types must be of floating-point type
+ // (applyTypeFunc never calls f with a type parameter)
+ f := func(typ Type) Type {
+ assert(!isTypeParam(typ))
+ if t, _ := under(typ).(*Basic); t != nil {
+ switch t.kind {
+ case Float32:
+ return Typ[Complex64]
+ case Float64:
+ return Typ[Complex128]
+ case UntypedFloat:
+ return Typ[UntypedComplex]
+ }
+ }
+ return nil
+ }
+ resTyp := check.applyTypeFunc(f, x, id)
+ if resTyp == nil {
+ check.errorf(x, InvalidComplex, invalidArg+"arguments have type %s, expected floating-point", x.typ)
+ return
+ }
+
+ // if both arguments are constants, the result is a constant
+ if x.mode == constant_ && y.mode == constant_ {
+ x.val = constant.BinaryOp(constant.ToFloat(x.val), token.ADD, constant.MakeImag(constant.ToFloat(y.val)))
+ } else {
+ x.mode = value
+ }
+
+ if check.recordTypes() && x.mode != constant_ {
+ check.recordBuiltinType(call.Fun, makeSig(resTyp, x.typ, x.typ))
+ }
+
+ x.typ = resTyp
+
+ case _Copy:
+ // copy(x, y []T) int
+ dst, _ := coreType(x.typ).(*Slice)
+
+ y := args[1]
+ src0 := coreString(y.typ)
+ if src0 != nil && isString(src0) {
+ src0 = NewSlice(universeByte)
+ }
+ src, _ := src0.(*Slice)
+
+ if dst == nil || src == nil {
+ check.errorf(x, InvalidCopy, invalidArg+"copy expects slice arguments; found %s and %s", x, y)
+ return
+ }
+
+ if !Identical(dst.elem, src.elem) {
+ check.errorf(x, InvalidCopy, invalidArg+"arguments to copy %s and %s have different element types %s and %s", x, y, dst.elem, src.elem)
+ return
+ }
+
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ, y.typ))
+ }
+ x.mode = value
+ x.typ = Typ[Int]
+
+ case _Delete:
+ // delete(map_, key)
+ // map_ must be a map type or a type parameter describing map types.
+ // The key cannot be a type parameter for now.
+ map_ := x.typ
+ var key Type
+ if !underIs(map_, func(u Type) bool {
+ map_, _ := u.(*Map)
+ if map_ == nil {
+ check.errorf(x, InvalidDelete, invalidArg+"%s is not a map", x)
+ return false
+ }
+ if key != nil && !Identical(map_.key, key) {
+ check.errorf(x, InvalidDelete, invalidArg+"maps of %s must have identical key types", x)
+ return false
+ }
+ key = map_.key
+ return true
+ }) {
+ return
+ }
+
+ *x = *args[1] // key
+ check.assignment(x, key, "argument to delete")
+ if x.mode == invalid {
+ return
+ }
+
+ x.mode = novalue
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(nil, map_, key))
+ }
+
+ case _Imag, _Real:
+ // imag(complexT) floatT
+ // real(complexT) floatT
+
+ // convert or check untyped argument
+ if isUntyped(x.typ) {
+ if x.mode == constant_ {
+ // an untyped constant number can always be considered
+ // as a complex constant
+ if isNumeric(x.typ) {
+ x.typ = Typ[UntypedComplex]
+ }
+ } else {
+ // an untyped non-constant argument may appear if
+ // it contains a (yet untyped non-constant) shift
+ // expression: convert it to complex128 which will
+ // result in an error (shift of complex value)
+ check.convertUntyped(x, Typ[Complex128])
+ // x should be invalid now, but be conservative and check
+ if x.mode == invalid {
+ return
+ }
+ }
+ }
+
+ // the argument must be of complex type
+ // (applyTypeFunc never calls f with a type parameter)
+ f := func(typ Type) Type {
+ assert(!isTypeParam(typ))
+ if t, _ := under(typ).(*Basic); t != nil {
+ switch t.kind {
+ case Complex64:
+ return Typ[Float32]
+ case Complex128:
+ return Typ[Float64]
+ case UntypedComplex:
+ return Typ[UntypedFloat]
+ }
+ }
+ return nil
+ }
+ resTyp := check.applyTypeFunc(f, x, id)
+ if resTyp == nil {
+ code := InvalidImag
+ if id == _Real {
+ code = InvalidReal
+ }
+ check.errorf(x, code, invalidArg+"argument has type %s, expected complex type", x.typ)
+ return
+ }
+
+ // if the argument is a constant, the result is a constant
+ if x.mode == constant_ {
+ if id == _Real {
+ x.val = constant.Real(x.val)
+ } else {
+ x.val = constant.Imag(x.val)
+ }
+ } else {
+ x.mode = value
+ }
+
+ if check.recordTypes() && x.mode != constant_ {
+ check.recordBuiltinType(call.Fun, makeSig(resTyp, x.typ))
+ }
+
+ x.typ = resTyp
+
+ case _Make:
+ // make(T, n)
+ // make(T, n, m)
+ // (no argument evaluated yet)
+ arg0 := argList[0]
+ T := check.varType(arg0)
+ if !isValid(T) {
+ return
+ }
+
+ var min int // minimum number of arguments
+ switch coreType(T).(type) {
+ case *Slice:
+ min = 2
+ case *Map, *Chan:
+ min = 1
+ case nil:
+ check.errorf(arg0, InvalidMake, invalidArg+"cannot make %s: no core type", arg0)
+ return
+ default:
+ check.errorf(arg0, InvalidMake, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0)
+ return
+ }
+ if nargs < min || min+1 < nargs {
+ check.errorf(call, WrongArgCount, invalidOp+"%v expects %d or %d arguments; found %d", call, min, min+1, nargs)
+ return
+ }
+
+ types := []Type{T}
+ var sizes []int64 // constant integer arguments, if any
+ for _, arg := range argList[1:] {
+ typ, size := check.index(arg, -1) // ok to continue with typ == Typ[Invalid]
+ types = append(types, typ)
+ if size >= 0 {
+ sizes = append(sizes, size)
+ }
+ }
+ if len(sizes) == 2 && sizes[0] > sizes[1] {
+ check.error(argList[1], SwappedMakeArgs, invalidArg+"length and capacity swapped")
+ // safe to continue
+ }
+ x.mode = value
+ x.typ = T
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, types...))
+ }
+
+ case _Max, _Min:
+ // max(x, ...)
+ // min(x, ...)
+ check.verifyVersionf(call.Fun, go1_21, bin.name)
+
+ op := token.LSS
+ if id == _Max {
+ op = token.GTR
+ }
+
+ for i, a := range args {
+ if a.mode == invalid {
+ return
+ }
+
+ if !allOrdered(a.typ) {
+ check.errorf(a, InvalidMinMaxOperand, invalidArg+"%s cannot be ordered", a)
+ return
+ }
+
+ // The first argument is already in x and there's nothing left to do.
+ if i > 0 {
+ check.matchTypes(x, a)
+ if x.mode == invalid {
+ return
+ }
+
+ if !Identical(x.typ, a.typ) {
+ check.errorf(a, MismatchedTypes, invalidArg+"mismatched types %s (previous argument) and %s (type of %s)", x.typ, a.typ, a.expr)
+ return
+ }
+
+ if x.mode == constant_ && a.mode == constant_ {
+ if constant.Compare(a.val, op, x.val) {
+ *x = *a
+ }
+ } else {
+ x.mode = value
+ }
+ }
+ }
+
+ // If nargs == 1, make sure x.mode is either a value or a constant.
+ if x.mode != constant_ {
+ x.mode = value
+ // A value must not be untyped.
+ check.assignment(x, &emptyInterface, "argument to "+bin.name)
+ if x.mode == invalid {
+ return
+ }
+ }
+
+ // Use the final type computed above for all arguments.
+ for _, a := range args {
+ check.updateExprType(a.expr, x.typ, true)
+ }
+
+ if check.recordTypes() && x.mode != constant_ {
+ types := make([]Type, nargs)
+ for i := range types {
+ types[i] = x.typ
+ }
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, types...))
+ }
+
+ case _New:
+ // new(T)
+ // (no argument evaluated yet)
+ T := check.varType(argList[0])
+ if !isValid(T) {
+ return
+ }
+
+ x.mode = value
+ x.typ = &Pointer{base: T}
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, T))
+ }
+
+ case _Panic:
+ // panic(x)
+ // record panic call if inside a function with result parameters
+ // (for use in Checker.isTerminating)
+ if check.sig != nil && check.sig.results.Len() > 0 {
+ // function has result parameters
+ p := check.isPanic
+ if p == nil {
+ // allocate lazily
+ p = make(map[*syntax.CallExpr]bool)
+ check.isPanic = p
+ }
+ p[call] = true
+ }
+
+ check.assignment(x, &emptyInterface, "argument to panic")
+ if x.mode == invalid {
+ return
+ }
+
+ x.mode = novalue
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(nil, &emptyInterface))
+ }
+
+ case _Print, _Println:
+ // print(x, y, ...)
+ // println(x, y, ...)
+ var params []Type
+ if nargs > 0 {
+ params = make([]Type, nargs)
+ for i, a := range args {
+ check.assignment(a, nil, "argument to "+predeclaredFuncs[id].name)
+ if a.mode == invalid {
+ return
+ }
+ params[i] = a.typ
+ }
+ }
+
+ x.mode = novalue
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(nil, params...))
+ }
+
+ case _Recover:
+ // recover() interface{}
+ x.mode = value
+ x.typ = &emptyInterface
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ))
+ }
+
+ case _Add:
+ // unsafe.Add(ptr unsafe.Pointer, len IntegerType) unsafe.Pointer
+ check.verifyVersionf(call.Fun, go1_17, "unsafe.Add")
+
+ check.assignment(x, Typ[UnsafePointer], "argument to unsafe.Add")
+ if x.mode == invalid {
+ return
+ }
+
+ y := args[1]
+ if !check.isValidIndex(y, InvalidUnsafeAdd, "length", true) {
+ return
+ }
+
+ x.mode = value
+ x.typ = Typ[UnsafePointer]
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, x.typ, y.typ))
+ }
+
+ case _Alignof:
+ // unsafe.Alignof(x T) uintptr
+ check.assignment(x, nil, "argument to unsafe.Alignof")
+ if x.mode == invalid {
+ return
+ }
+
+ if hasVarSize(x.typ, nil) {
+ x.mode = value
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.alignof(x.typ))
+ // result is constant - no need to record signature
+ }
+ x.typ = Typ[Uintptr]
+
+ case _Offsetof:
+ // unsafe.Offsetof(x T) uintptr, where x must be a selector
+ // (no argument evaluated yet)
+ arg0 := argList[0]
+ selx, _ := syntax.Unparen(arg0).(*syntax.SelectorExpr)
+ if selx == nil {
+ check.errorf(arg0, BadOffsetofSyntax, invalidArg+"%s is not a selector expression", arg0)
+ check.use(arg0)
+ return
+ }
+
+ check.expr(nil, x, selx.X)
+ if x.mode == invalid {
+ return
+ }
+
+ base := derefStructPtr(x.typ)
+ sel := selx.Sel.Value
+ obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel)
+ switch obj.(type) {
+ case nil:
+ check.errorf(x, MissingFieldOrMethod, invalidArg+"%s has no single field %s", base, sel)
+ return
+ case *Func:
+ // TODO(gri) Using derefStructPtr may result in methods being found
+ // that don't actually exist. An error either way, but the error
+ // message is confusing. See: https://play.golang.org/p/al75v23kUy ,
+ // but go/types reports: "invalid argument: x.m is a method value".
+ check.errorf(arg0, InvalidOffsetof, invalidArg+"%s is a method value", arg0)
+ return
+ }
+ if indirect {
+ check.errorf(x, InvalidOffsetof, invalidArg+"field %s is embedded via a pointer in %s", sel, base)
+ return
+ }
+
+ // TODO(gri) Should we pass x.typ instead of base (and have indirect report if derefStructPtr indirected)?
+ check.recordSelection(selx, FieldVal, base, obj, index, false)
+
+ // record the selector expression (was bug - go.dev/issue/47895)
+ {
+ mode := value
+ if x.mode == variable || indirect {
+ mode = variable
+ }
+ check.record(&operand{mode, selx, obj.Type(), nil, 0})
+ }
+
+ // The field offset is considered a variable even if the field is declared before
+ // the part of the struct which is variable-sized. This makes both the rules
+ // simpler and also permits (or at least doesn't prevent) a compiler from re-
+ // arranging struct fields if it wanted to.
+ if hasVarSize(base, nil) {
+ x.mode = value
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type()))
+ }
+ } else {
+ offs := check.conf.offsetof(base, index)
+ if offs < 0 {
+ check.errorf(x, TypeTooLarge, "%s is too large", x)
+ return
+ }
+ x.mode = constant_
+ x.val = constant.MakeInt64(offs)
+ // result is constant - no need to record signature
+ }
+ x.typ = Typ[Uintptr]
+
+ case _Sizeof:
+ // unsafe.Sizeof(x T) uintptr
+ check.assignment(x, nil, "argument to unsafe.Sizeof")
+ if x.mode == invalid {
+ return
+ }
+
+ if hasVarSize(x.typ, nil) {
+ x.mode = value
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ size := check.conf.sizeof(x.typ)
+ if size < 0 {
+ check.errorf(x, TypeTooLarge, "%s is too large", x)
+ return
+ }
+ x.mode = constant_
+ x.val = constant.MakeInt64(size)
+ // result is constant - no need to record signature
+ }
+ x.typ = Typ[Uintptr]
+
+ case _Slice:
+ // unsafe.Slice(ptr *T, len IntegerType) []T
+ check.verifyVersionf(call.Fun, go1_17, "unsafe.Slice")
+
+ ptr, _ := coreType(x.typ).(*Pointer)
+ if ptr == nil {
+ check.errorf(x, InvalidUnsafeSlice, invalidArg+"%s is not a pointer", x)
+ return
+ }
+
+ y := args[1]
+ if !check.isValidIndex(y, InvalidUnsafeSlice, "length", false) {
+ return
+ }
+
+ x.mode = value
+ x.typ = NewSlice(ptr.base)
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, ptr, y.typ))
+ }
+
+ case _SliceData:
+ // unsafe.SliceData(slice []T) *T
+ check.verifyVersionf(call.Fun, go1_20, "unsafe.SliceData")
+
+ slice, _ := coreType(x.typ).(*Slice)
+ if slice == nil {
+ check.errorf(x, InvalidUnsafeSliceData, invalidArg+"%s is not a slice", x)
+ return
+ }
+
+ x.mode = value
+ x.typ = NewPointer(slice.elem)
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, slice))
+ }
+
+ case _String:
+ // unsafe.String(ptr *byte, len IntegerType) string
+ check.verifyVersionf(call.Fun, go1_20, "unsafe.String")
+
+ check.assignment(x, NewPointer(universeByte), "argument to unsafe.String")
+ if x.mode == invalid {
+ return
+ }
+
+ y := args[1]
+ if !check.isValidIndex(y, InvalidUnsafeString, "length", false) {
+ return
+ }
+
+ x.mode = value
+ x.typ = Typ[String]
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, NewPointer(universeByte), y.typ))
+ }
+
+ case _StringData:
+ // unsafe.StringData(str string) *byte
+ check.verifyVersionf(call.Fun, go1_20, "unsafe.StringData")
+
+ check.assignment(x, Typ[String], "argument to unsafe.StringData")
+ if x.mode == invalid {
+ return
+ }
+
+ x.mode = value
+ x.typ = NewPointer(universeByte)
+ if check.recordTypes() {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, Typ[String]))
+ }
+
+ case _Assert:
+ // assert(pred) causes a typechecker error if pred is false.
+ // The result of assert is the value of pred if there is no error.
+ // Note: assert is only available in self-test mode.
+ if x.mode != constant_ || !isBoolean(x.typ) {
+ check.errorf(x, Test, invalidArg+"%s is not a boolean constant", x)
+ return
+ }
+ if x.val.Kind() != constant.Bool {
+ check.errorf(x, Test, "internal error: value of %s should be a boolean constant", x)
+ return
+ }
+ if !constant.BoolVal(x.val) {
+ check.errorf(call, Test, "%v failed", call)
+ // compile-time assertion failure - safe to continue
+ }
+ // result is constant - no need to record signature
+
+ case _Trace:
+ // trace(x, y, z, ...) dumps the positions, expressions, and
+ // values of its arguments. The result of trace is the value
+ // of the first argument.
+ // Note: trace is only available in self-test mode.
+ // (no argument evaluated yet)
+ if nargs == 0 {
+ check.dump("%v: trace() without arguments", atPos(call))
+ x.mode = novalue
+ break
+ }
+ var t operand
+ x1 := x
+ for _, arg := range argList {
+ check.rawExpr(nil, x1, arg, nil, false) // permit trace for types, e.g.: new(trace(T))
+ check.dump("%v: %s", atPos(x1), x1)
+ x1 = &t // use incoming x only for first argument
+ }
+ if x.mode == invalid {
+ return
+ }
+ // trace is only available in test mode - no need to record signature
+
+ default:
+ unreachable()
+ }
+
+ assert(x.mode != invalid)
+ return true
+}
+
+// hasVarSize reports if the size of type t is variable due to type parameters
+// or if the type is infinitely-sized due to a cycle for which the type has not
+// yet been checked.
+func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
+ // Cycles are only possible through *Named types.
+ // The seen map is used to detect cycles and track
+ // the results of previously seen types.
+ if named := asNamed(t); named != nil {
+ if v, ok := seen[named]; ok {
+ return v
+ }
+ if seen == nil {
+ seen = make(map[*Named]bool)
+ }
+ seen[named] = true // possibly cyclic until proven otherwise
+ defer func() {
+ seen[named] = varSized // record final determination for named
+ }()
+ }
+
+ switch u := under(t).(type) {
+ case *Array:
+ return hasVarSize(u.elem, seen)
+ case *Struct:
+ for _, f := range u.fields {
+ if hasVarSize(f.typ, seen) {
+ return true
+ }
+ }
+ case *Interface:
+ return isTypeParam(t)
+ case *Named, *Union:
+ unreachable()
+ }
+ return false
+}
+
+// applyTypeFunc applies f to x. If x is a type parameter,
+// the result is a type parameter constrained by a new
+// interface bound. The type bounds for that interface
+// are computed by applying f to each of the type bounds
+// of x. If any of these applications of f return nil,
+// applyTypeFunc returns nil.
+// If x is not a type parameter, the result is f(x).
+func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId) Type {
+ if tp, _ := x.typ.(*TypeParam); tp != nil {
+ // Test if t satisfies the requirements for the argument
+ // type and collect possible result types at the same time.
+ var terms []*Term
+ if !tp.is(func(t *term) bool {
+ if t == nil {
+ return false
+ }
+ if r := f(t.typ); r != nil {
+ terms = append(terms, NewTerm(t.tilde, r))
+ return true
+ }
+ return false
+ }) {
+ return nil
+ }
+
+ // We can type-check this fine but we're introducing a synthetic
+ // type parameter for the result. It's not clear what the API
+ // implications are here. Report an error for 1.18 (see go.dev/issue/50912),
+ // but continue type-checking.
+ var code Code
+ switch id {
+ case _Real:
+ code = InvalidReal
+ case _Imag:
+ code = InvalidImag
+ case _Complex:
+ code = InvalidComplex
+ default:
+ unreachable()
+ }
+ check.softErrorf(x, code, "%s not supported as argument to %s for go1.18 (see go.dev/issue/50937)", x, predeclaredFuncs[id].name)
+
+ // Construct a suitable new type parameter for the result type.
+ // The type parameter is placed in the current package so export/import
+ // works as expected.
+ tpar := NewTypeName(nopos, check.pkg, tp.obj.name, nil)
+ ptyp := check.newTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
+ ptyp.index = tp.index
+
+ return ptyp
+ }
+
+ return f(x.typ)
+}
+
+// makeSig makes a signature for the given argument and result types.
+// Default types are used for untyped arguments, and res may be nil.
+func makeSig(res Type, args ...Type) *Signature {
+ list := make([]*Var, len(args))
+ for i, param := range args {
+ list[i] = NewVar(nopos, nil, "", Default(param))
+ }
+ params := NewTuple(list...)
+ var result *Tuple
+ if res != nil {
+ assert(!isUntyped(res))
+ result = NewTuple(NewVar(nopos, nil, "", res))
+ }
+ return &Signature{params: params, results: result}
+}
+
+// arrayPtrDeref returns A if typ is of the form *A and A is an array;
+// otherwise it returns typ.
+func arrayPtrDeref(typ Type) Type {
+ if p, ok := typ.(*Pointer); ok {
+ if a, _ := under(p.base).(*Array); a != nil {
+ return a
+ }
+ }
+ return typ
+}
+
+// unparen returns e with any enclosing parentheses stripped.
+func unparen(e syntax.Expr) syntax.Expr {
+ for {
+ p, ok := e.(*syntax.ParenExpr)
+ if !ok {
+ return e
+ }
+ e = p.X
+ }
+}
diff --git a/src/cmd/compile/internal/types2/builtins_test.go b/src/cmd/compile/internal/types2/builtins_test.go
new file mode 100644
index 0000000..875ee5a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/builtins_test.go
@@ -0,0 +1,250 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+var builtinCalls = []struct {
+ name, src, sig string
+}{
+ {"append", `var s []int; _ = append(s)`, `func([]int, ...int) []int`},
+ {"append", `var s []int; _ = append(s, 0)`, `func([]int, ...int) []int`},
+ {"append", `var s []int; _ = (append)(s, 0)`, `func([]int, ...int) []int`},
+ {"append", `var s []byte; _ = ((append))(s, 0)`, `func([]byte, ...byte) []byte`},
+ {"append", `var s []byte; _ = append(s, "foo"...)`, `func([]byte, string...) []byte`},
+ {"append", `type T []byte; var s T; var str string; _ = append(s, str...)`, `func(p.T, string...) p.T`},
+ {"append", `type T []byte; type U string; var s T; var str U; _ = append(s, str...)`, `func(p.T, p.U...) p.T`},
+
+ {"cap", `var s [10]int; _ = cap(s)`, `invalid type`}, // constant
+ {"cap", `var s [10]int; _ = cap(&s)`, `invalid type`}, // constant
+ {"cap", `var s []int64; _ = cap(s)`, `func([]int64) int`},
+ {"cap", `var c chan<-bool; _ = cap(c)`, `func(chan<- bool) int`},
+ {"cap", `type S []byte; var s S; _ = cap(s)`, `func(p.S) int`},
+ {"cap", `var s P; _ = cap(s)`, `func(P) int`},
+
+ {"len", `_ = len("foo")`, `invalid type`}, // constant
+ {"len", `var s string; _ = len(s)`, `func(string) int`},
+ {"len", `var s [10]int; _ = len(s)`, `invalid type`}, // constant
+ {"len", `var s [10]int; _ = len(&s)`, `invalid type`}, // constant
+ {"len", `var s []int64; _ = len(s)`, `func([]int64) int`},
+ {"len", `var c chan<-bool; _ = len(c)`, `func(chan<- bool) int`},
+ {"len", `var m map[string]float32; _ = len(m)`, `func(map[string]float32) int`},
+ {"len", `type S []byte; var s S; _ = len(s)`, `func(p.S) int`},
+ {"len", `var s P; _ = len(s)`, `func(P) int`},
+
+ {"clear", `var m map[float64]int; clear(m)`, `func(map[float64]int)`},
+ {"clear", `var s []byte; clear(s)`, `func([]byte)`},
+
+ {"close", `var c chan int; close(c)`, `func(chan int)`},
+ {"close", `var c chan<- chan string; close(c)`, `func(chan<- chan string)`},
+
+ {"complex", `_ = complex(1, 0)`, `invalid type`}, // constant
+ {"complex", `var re float32; _ = complex(re, 1.0)`, `func(float32, float32) complex64`},
+ {"complex", `var im float64; _ = complex(1, im)`, `func(float64, float64) complex128`},
+ {"complex", `type F32 float32; var re, im F32; _ = complex(re, im)`, `func(p.F32, p.F32) complex64`},
+ {"complex", `type F64 float64; var re, im F64; _ = complex(re, im)`, `func(p.F64, p.F64) complex128`},
+
+ {"copy", `var src, dst []byte; copy(dst, src)`, `func([]byte, []byte) int`},
+ {"copy", `type T [][]int; var src, dst T; _ = copy(dst, src)`, `func(p.T, p.T) int`},
+ {"copy", `var src string; var dst []byte; copy(dst, src)`, `func([]byte, string) int`},
+ {"copy", `type T string; type U []byte; var src T; var dst U; copy(dst, src)`, `func(p.U, p.T) int`},
+ {"copy", `var dst []byte; copy(dst, "hello")`, `func([]byte, string) int`},
+
+ {"delete", `var m map[string]bool; delete(m, "foo")`, `func(map[string]bool, string)`},
+ {"delete", `type (K string; V int); var m map[K]V; delete(m, "foo")`, `func(map[p.K]p.V, p.K)`},
+
+ {"imag", `_ = imag(1i)`, `invalid type`}, // constant
+ {"imag", `var c complex64; _ = imag(c)`, `func(complex64) float32`},
+ {"imag", `var c complex128; _ = imag(c)`, `func(complex128) float64`},
+ {"imag", `type C64 complex64; var c C64; _ = imag(c)`, `func(p.C64) float32`},
+ {"imag", `type C128 complex128; var c C128; _ = imag(c)`, `func(p.C128) float64`},
+
+ {"real", `_ = real(1i)`, `invalid type`}, // constant
+ {"real", `var c complex64; _ = real(c)`, `func(complex64) float32`},
+ {"real", `var c complex128; _ = real(c)`, `func(complex128) float64`},
+ {"real", `type C64 complex64; var c C64; _ = real(c)`, `func(p.C64) float32`},
+ {"real", `type C128 complex128; var c C128; _ = real(c)`, `func(p.C128) float64`},
+
+ {"make", `_ = make([]int, 10)`, `func([]int, int) []int`},
+ {"make", `type T []byte; _ = make(T, 10, 20)`, `func(p.T, int, int) p.T`},
+
+ // go.dev/issue/37349
+ {"make", ` _ = make([]int, 0 )`, `func([]int, int) []int`},
+ {"make", `var l int; _ = make([]int, l )`, `func([]int, int) []int`},
+ {"make", ` _ = make([]int, 0, 0)`, `func([]int, int, int) []int`},
+ {"make", `var l int; _ = make([]int, l, 0)`, `func([]int, int, int) []int`},
+ {"make", `var c int; _ = make([]int, 0, c)`, `func([]int, int, int) []int`},
+ {"make", `var l, c int; _ = make([]int, l, c)`, `func([]int, int, int) []int`},
+
+ // go.dev/issue/37393
+ {"make", ` _ = make([]int , 0 )`, `func([]int, int) []int`},
+ {"make", `var l byte ; _ = make([]int8 , l )`, `func([]int8, byte) []int8`},
+ {"make", ` _ = make([]int16 , 0, 0)`, `func([]int16, int, int) []int16`},
+ {"make", `var l int16; _ = make([]string , l, 0)`, `func([]string, int16, int) []string`},
+ {"make", `var c int32; _ = make([]float64 , 0, c)`, `func([]float64, int, int32) []float64`},
+ {"make", `var l, c uint ; _ = make([]complex128, l, c)`, `func([]complex128, uint, uint) []complex128`},
+
+ // go.dev/issue/45667
+ {"make", `const l uint = 1; _ = make([]int, l)`, `func([]int, uint) []int`},
+
+ {"max", ` _ = max(0 )`, `invalid type`}, // constant
+ {"max", `var x int ; _ = max(x )`, `func(int) int`},
+ {"max", `var x int ; _ = max(0, x )`, `func(int, int) int`},
+ {"max", `var x string ; _ = max("a", x )`, `func(string, string) string`},
+ {"max", `var x float32; _ = max(0, 1.0, x)`, `func(float32, float32, float32) float32`},
+
+ {"min", ` _ = min(0 )`, `invalid type`}, // constant
+ {"min", `var x int ; _ = min(x )`, `func(int) int`},
+ {"min", `var x int ; _ = min(0, x )`, `func(int, int) int`},
+ {"min", `var x string ; _ = min("a", x )`, `func(string, string) string`},
+ {"min", `var x float32; _ = min(0, 1.0, x)`, `func(float32, float32, float32) float32`},
+
+ {"new", `_ = new(int)`, `func(int) *int`},
+ {"new", `type T struct{}; _ = new(T)`, `func(p.T) *p.T`},
+
+ {"panic", `panic(0)`, `func(interface{})`},
+ {"panic", `panic("foo")`, `func(interface{})`},
+
+ {"print", `print()`, `func()`},
+ {"print", `print(0)`, `func(int)`},
+ {"print", `print(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`},
+
+ {"println", `println()`, `func()`},
+ {"println", `println(0)`, `func(int)`},
+ {"println", `println(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`},
+
+ {"recover", `recover()`, `func() interface{}`},
+ {"recover", `_ = recover()`, `func() interface{}`},
+
+ {"Add", `var p unsafe.Pointer; _ = unsafe.Add(p, -1.0)`, `func(unsafe.Pointer, int) unsafe.Pointer`},
+ {"Add", `var p unsafe.Pointer; var n uintptr; _ = unsafe.Add(p, n)`, `func(unsafe.Pointer, uintptr) unsafe.Pointer`},
+ {"Add", `_ = unsafe.Add(nil, 0)`, `func(unsafe.Pointer, int) unsafe.Pointer`},
+
+ {"Alignof", `_ = unsafe.Alignof(0)`, `invalid type`}, // constant
+ {"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant
+ {"Alignof", `var x P; _ = unsafe.Alignof(x)`, `func(P) uintptr`},
+
+ {"Offsetof", `var x struct{f bool}; _ = unsafe.Offsetof(x.f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f bool}; _ = unsafe.Offsetof((&x).f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f P}; _ = unsafe.Offsetof((&x).f)`, `func(P) uintptr`},
+
+ {"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant
+ {"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant
+ {"Sizeof", `var x P; _ = unsafe.Sizeof(x)`, `func(P) uintptr`},
+
+ {"Slice", `var p *int; _ = unsafe.Slice(p, 1)`, `func(*int, int) []int`},
+ {"Slice", `var p *byte; var n uintptr; _ = unsafe.Slice(p, n)`, `func(*byte, uintptr) []byte`},
+ {"Slice", `type B *byte; var b B; _ = unsafe.Slice(b, 0)`, `func(*byte, int) []byte`},
+
+ {"SliceData", "var s []int; _ = unsafe.SliceData(s)", `func([]int) *int`},
+ {"SliceData", "type S []int; var s S; _ = unsafe.SliceData(s)", `func([]int) *int`},
+
+ {"String", `var p *byte; _ = unsafe.String(p, 1)`, `func(*byte, int) string`},
+ {"String", `type B *byte; var b B; _ = unsafe.String(b, 0)`, `func(*byte, int) string`},
+
+ {"StringData", `var s string; _ = unsafe.StringData(s)`, `func(string) *byte`},
+ {"StringData", `_ = unsafe.StringData("foo")`, `func(string) *byte`},
+
+ {"assert", `assert(true)`, `invalid type`}, // constant
+ {"assert", `type B bool; const pred B = 1 < 2; assert(pred)`, `invalid type`}, // constant
+
+ // no tests for trace since it produces output as a side-effect
+}
+
+func TestBuiltinSignatures(t *testing.T) {
+ DefPredeclaredTestFuncs()
+
+ seen := map[string]bool{"trace": true} // no test for trace built-in; add it manually
+ for _, call := range builtinCalls {
+ testBuiltinSignature(t, call.name, call.src, call.sig)
+ seen[call.name] = true
+ }
+
+ // make sure we didn't miss one
+ for _, name := range Universe.Names() {
+ if _, ok := Universe.Lookup(name).(*Builtin); ok && !seen[name] {
+ t.Errorf("missing test for %s", name)
+ }
+ }
+ for _, name := range Unsafe.Scope().Names() {
+ if _, ok := Unsafe.Scope().Lookup(name).(*Builtin); ok && !seen[name] {
+ t.Errorf("missing test for unsafe.%s", name)
+ }
+ }
+}
+
+func testBuiltinSignature(t *testing.T, name, src0, want string) {
+ src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _[P ~[]byte]() { %s }`, src0)
+
+ uses := make(map[*syntax.Name]Object)
+ types := make(map[syntax.Expr]TypeAndValue)
+ mustTypecheck(src, nil, &Info{Uses: uses, Types: types})
+
+ // find called function
+ n := 0
+ var fun syntax.Expr
+ for x := range types {
+ if call, _ := x.(*syntax.CallExpr); call != nil {
+ fun = call.Fun
+ n++
+ }
+ }
+ if n != 1 {
+ t.Errorf("%s: got %d CallExprs; want 1", src0, n)
+ return
+ }
+
+ // check recorded types for fun and descendents (may be parenthesized)
+ for {
+ // the recorded type for the built-in must match the wanted signature
+ typ := types[fun].Type
+ if typ == nil {
+ t.Errorf("%s: no type recorded for %s", src0, syntax.String(fun))
+ return
+ }
+ if got := typ.String(); got != want {
+ t.Errorf("%s: got type %s; want %s", src0, got, want)
+ return
+ }
+
+ // called function must be a (possibly parenthesized, qualified)
+ // identifier denoting the expected built-in
+ switch p := fun.(type) {
+ case *syntax.Name:
+ obj := uses[p]
+ if obj == nil {
+ t.Errorf("%s: no object found for %s", src0, p.Value)
+ return
+ }
+ bin, _ := obj.(*Builtin)
+ if bin == nil {
+ t.Errorf("%s: %s does not denote a built-in", src0, p.Value)
+ return
+ }
+ if bin.Name() != name {
+ t.Errorf("%s: got built-in %s; want %s", src0, bin.Name(), name)
+ return
+ }
+ return // we're done
+
+ case *syntax.ParenExpr:
+ fun = p.X // unpack
+
+ case *syntax.SelectorExpr:
+ // built-in from package unsafe - ignore details
+ return // we're done
+
+ default:
+ t.Errorf("%s: invalid function call", src0)
+ return
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
new file mode 100644
index 0000000..db7d86e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/call.go
@@ -0,0 +1,999 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of call and selector expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+ "strings"
+ "unicode"
+)
+
+// funcInst type-checks a function instantiation.
+// The incoming x must be a generic function.
+// If inst != nil, it provides some or all of the type arguments (inst.Index).
+// If target != nil, it may be used to infer missing type arguments of x, if any.
+// At least one of T or inst must be provided.
+//
+// There are two modes of operation:
+//
+// 1. If infer == true, funcInst infers missing type arguments as needed and
+// instantiates the function x. The returned results are nil.
+//
+// 2. If infer == false and inst provides all type arguments, funcInst
+// instantiates the function x. The returned results are nil.
+// If inst doesn't provide enough type arguments, funcInst returns the
+// available arguments and the corresponding expression list; x remains
+// unchanged.
+//
+// If an error (other than a version error) occurs in any case, it is reported
+// and x.mode is set to invalid.
+func (check *Checker) funcInst(T *target, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) {
+ assert(T != nil || inst != nil)
+
+ var instErrPos poser
+ if inst != nil {
+ instErrPos = inst.Pos()
+ } else {
+ instErrPos = pos
+ }
+ versionErr := !check.verifyVersionf(instErrPos, go1_18, "function instantiation")
+
+ // targs and xlist are the type arguments and corresponding type expressions, or nil.
+ var targs []Type
+ var xlist []syntax.Expr
+ if inst != nil {
+ xlist = syntax.UnpackListExpr(inst.Index)
+ targs = check.typeList(xlist)
+ if targs == nil {
+ x.mode = invalid
+ x.expr = inst
+ return nil, nil
+ }
+ assert(len(targs) == len(xlist))
+ }
+
+ // Check the number of type arguments (got) vs number of type parameters (want).
+ // Note that x is a function value, not a type expression, so we don't need to
+ // call under below.
+ sig := x.typ.(*Signature)
+ got, want := len(targs), sig.TypeParams().Len()
+ if got > want {
+ // Providing too many type arguments is always an error.
+ check.errorf(xlist[got-1], WrongTypeArgCount, "got %d type arguments but want %d", got, want)
+ x.mode = invalid
+ x.expr = inst
+ return nil, nil
+ }
+
+ if got < want {
+ if !infer {
+ return targs, xlist
+ }
+
+ // If the uninstantiated or partially instantiated function x is used in
+ // an assignment (tsig != nil), infer missing type arguments by treating
+ // the assignment
+ //
+ // var tvar tsig = x
+ //
+ // like a call g(tvar) of the synthetic generic function g
+ //
+ // func g[type_parameters_of_x](func_type_of_x)
+ //
+ var args []*operand
+ var params []*Var
+ var reverse bool
+ if T != nil && sig.tparams != nil {
+ if !versionErr && !check.allowVersion(check.pkg, instErrPos, go1_21) {
+ if inst != nil {
+ check.versionErrorf(instErrPos, go1_21, "partially instantiated function in assignment")
+ } else {
+ check.versionErrorf(instErrPos, go1_21, "implicitly instantiated function in assignment")
+ }
+ }
+ gsig := NewSignatureType(nil, nil, nil, sig.params, sig.results, sig.variadic)
+ params = []*Var{NewVar(x.Pos(), check.pkg, "", gsig)}
+ // The type of the argument operand is tsig, which is the type of the LHS in an assignment
+ // or the result type in a return statement. Create a pseudo-expression for that operand
+ // that makes sense when reported in error messages from infer, below.
+ expr := syntax.NewName(x.Pos(), T.desc)
+ args = []*operand{{mode: value, expr: expr, typ: T.sig}}
+ reverse = true
+ }
+
+ // Rename type parameters to avoid problems with recursive instantiations.
+ // Note that NewTuple(params...) below is (*Tuple)(nil) if len(params) == 0, as desired.
+ tparams, params2 := check.renameTParams(pos, sig.TypeParams().list(), NewTuple(params...))
+
+ targs = check.infer(pos, tparams, targs, params2.(*Tuple), args, reverse)
+ if targs == nil {
+ // error was already reported
+ x.mode = invalid
+ x.expr = inst
+ return nil, nil
+ }
+ got = len(targs)
+ }
+ assert(got == want)
+
+ // instantiate function signature
+ expr := x.expr // if we don't have an index expression, keep the existing expression of x
+ if inst != nil {
+ expr = inst
+ }
+ sig = check.instantiateSignature(x.Pos(), expr, sig, targs, xlist)
+
+ x.typ = sig
+ x.mode = value
+ x.expr = expr
+ return nil, nil
+}
+
+func (check *Checker) instantiateSignature(pos syntax.Pos, expr syntax.Expr, typ *Signature, targs []Type, xlist []syntax.Expr) (res *Signature) {
+ assert(check != nil)
+ assert(len(targs) == typ.TypeParams().Len())
+
+ if check.conf.Trace {
+ check.trace(pos, "-- instantiating signature %s with %s", typ, targs)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s (under = %s)", res, res.Underlying())
+ }()
+ }
+
+ inst := check.instance(pos, typ, targs, nil, check.context()).(*Signature)
+ assert(inst.TypeParams().Len() == 0) // signature is not generic anymore
+ check.recordInstance(expr, targs, inst)
+ assert(len(xlist) <= len(targs))
+
+ // verify instantiation lazily (was go.dev/issue/50450)
+ check.later(func() {
+ tparams := typ.TypeParams().list()
+ if i, err := check.verify(pos, tparams, targs, check.context()); err != nil {
+ // best position for error reporting
+ pos := pos
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ check.softErrorf(pos, InvalidTypeArg, "%s", err)
+ } else {
+ check.mono.recordInstance(check.pkg, pos, tparams, targs, xlist)
+ }
+ }).describef(pos, "verify instantiation")
+
+ return inst
+}
+
+func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
+ var inst *syntax.IndexExpr // function instantiation, if any
+ if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil {
+ if check.indexExpr(x, iexpr) {
+ // Delay function instantiation to argument checking,
+ // where we combine type and value arguments for type
+ // inference.
+ assert(x.mode == value)
+ inst = iexpr
+ }
+ x.expr = iexpr
+ check.record(x)
+ } else {
+ check.exprOrType(x, call.Fun, true)
+ }
+ // x.typ may be generic
+
+ switch x.mode {
+ case invalid:
+ check.use(call.ArgList...)
+ x.expr = call
+ return statement
+
+ case typexpr:
+ // conversion
+ check.nonGeneric(nil, x)
+ if x.mode == invalid {
+ return conversion
+ }
+ T := x.typ
+ x.mode = invalid
+ switch n := len(call.ArgList); n {
+ case 0:
+ check.errorf(call, WrongArgCount, "missing argument in conversion to %s", T)
+ case 1:
+ check.expr(nil, x, call.ArgList[0])
+ if x.mode != invalid {
+ if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) {
+ if !t.IsMethodSet() {
+ check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T)
+ break
+ }
+ }
+ if call.HasDots {
+ check.errorf(call.ArgList[0], BadDotDotDotSyntax, "invalid use of ... in conversion to %s", T)
+ break
+ }
+ check.conversion(x, T)
+ }
+ default:
+ check.use(call.ArgList...)
+ check.errorf(call.ArgList[n-1], WrongArgCount, "too many arguments in conversion to %s", T)
+ }
+ x.expr = call
+ return conversion
+
+ case builtin:
+ // no need to check for non-genericity here
+ id := x.id
+ if !check.builtin(x, call, id) {
+ x.mode = invalid
+ }
+ x.expr = call
+ // a non-constant result implies a function call
+ if x.mode != invalid && x.mode != constant_ {
+ check.hasCallOrRecv = true
+ }
+ return predeclaredFuncs[id].kind
+ }
+
+ // ordinary function/method call
+ // signature may be generic
+ cgocall := x.mode == cgofunc
+
+ // a type parameter may be "called" if all types have the same signature
+ sig, _ := coreType(x.typ).(*Signature)
+ if sig == nil {
+ check.errorf(x, InvalidCall, invalidOp+"cannot call non-function %s", x)
+ x.mode = invalid
+ x.expr = call
+ return statement
+ }
+
+ // Capture wasGeneric before sig is potentially instantiated below.
+ wasGeneric := sig.TypeParams().Len() > 0
+
+ // evaluate type arguments, if any
+ var xlist []syntax.Expr
+ var targs []Type
+ if inst != nil {
+ xlist = syntax.UnpackListExpr(inst.Index)
+ targs = check.typeList(xlist)
+ if targs == nil {
+ check.use(call.ArgList...)
+ x.mode = invalid
+ x.expr = call
+ return statement
+ }
+ assert(len(targs) == len(xlist))
+
+ // check number of type arguments (got) vs number of type parameters (want)
+ got, want := len(targs), sig.TypeParams().Len()
+ if got > want {
+ check.errorf(xlist[want], WrongTypeArgCount, "got %d type arguments but want %d", got, want)
+ check.use(call.ArgList...)
+ x.mode = invalid
+ x.expr = call
+ return statement
+ }
+
+ // If sig is generic and all type arguments are provided, preempt function
+ // argument type inference by explicitly instantiating the signature. This
+ // ensures that we record accurate type information for sig, even if there
+ // is an error checking its arguments (for example, if an incorrect number
+ // of arguments is supplied).
+ if got == want && want > 0 {
+ check.verifyVersionf(inst, go1_18, "function instantiation")
+ sig = check.instantiateSignature(inst.Pos(), inst, sig, targs, xlist)
+ // targs have been consumed; proceed with checking arguments of the
+ // non-generic signature.
+ targs = nil
+ xlist = nil
+ }
+ }
+
+ // evaluate arguments
+ args, atargs, atxlist := check.genericExprList(call.ArgList)
+ sig = check.arguments(call, sig, targs, xlist, args, atargs, atxlist)
+
+ if wasGeneric && sig.TypeParams().Len() == 0 {
+ // update the recorded type of call.Fun to its instantiated type
+ check.recordTypeAndValue(call.Fun, value, sig, nil)
+ }
+
+ // determine result
+ switch sig.results.Len() {
+ case 0:
+ x.mode = novalue
+ case 1:
+ if cgocall {
+ x.mode = commaerr
+ } else {
+ x.mode = value
+ }
+ x.typ = sig.results.vars[0].typ // unpack tuple
+ default:
+ x.mode = value
+ x.typ = sig.results
+ }
+ x.expr = call
+ check.hasCallOrRecv = true
+
+ // if type inference failed, a parameterized result must be invalidated
+ // (operands cannot have a parameterized type)
+ if x.mode == value && sig.TypeParams().Len() > 0 && isParameterized(sig.TypeParams().list(), x.typ) {
+ x.mode = invalid
+ }
+
+ return statement
+}
+
+// exprList evaluates a list of expressions and returns the corresponding operands.
+// A single-element expression list may evaluate to multiple operands.
+func (check *Checker) exprList(elist []syntax.Expr) (xlist []*operand) {
+ if n := len(elist); n == 1 {
+ xlist, _ = check.multiExpr(elist[0], false)
+ } else if n > 1 {
+ // multiple (possibly invalid) values
+ xlist = make([]*operand, n)
+ for i, e := range elist {
+ var x operand
+ check.expr(nil, &x, e)
+ xlist[i] = &x
+ }
+ }
+ return
+}
+
+// genericExprList is like exprList but result operands may be uninstantiated or partially
+// instantiated generic functions (where constraint information is insufficient to infer
+// the missing type arguments) for Go 1.21 and later.
+// For each non-generic or uninstantiated generic operand, the corresponding targsList and
+// xlistList elements do not exist (targsList and xlistList are nil) or the elements are nil.
+// For each partially instantiated generic function operand, the corresponding targsList and
+// xlistList elements are the operand's partial type arguments and type expression lists.
+func (check *Checker) genericExprList(elist []syntax.Expr) (resList []*operand, targsList [][]Type, xlistList [][]syntax.Expr) {
+ if debug {
+ defer func() {
+ // targsList and xlistList must have matching lengths
+ assert(len(targsList) == len(xlistList))
+ // type arguments must only exist for partially instantiated functions
+ for i, x := range resList {
+ if i < len(targsList) {
+ if n := len(targsList[i]); n > 0 {
+ // x must be a partially instantiated function
+ assert(n < x.typ.(*Signature).TypeParams().Len())
+ }
+ }
+ }
+ }()
+ }
+
+ // Before Go 1.21, uninstantiated or partially instantiated argument functions are
+ // nor permitted. Checker.funcInst must infer missing type arguments in that case.
+ infer := true // for -lang < go1.21
+ n := len(elist)
+ if n > 0 && check.allowVersion(check.pkg, elist[0], go1_21) {
+ infer = false
+ }
+
+ if n == 1 {
+ // single value (possibly a partially instantiated function), or a multi-valued expression
+ e := elist[0]
+ var x operand
+ if inst, _ := e.(*syntax.IndexExpr); inst != nil && check.indexExpr(&x, inst) {
+ // x is a generic function.
+ targs, xlist := check.funcInst(nil, x.Pos(), &x, inst, infer)
+ if targs != nil {
+ // x was not instantiated: collect the (partial) type arguments.
+ targsList = [][]Type{targs}
+ xlistList = [][]syntax.Expr{xlist}
+ // Update x.expr so that we can record the partially instantiated function.
+ x.expr = inst
+ } else {
+ // x was instantiated: we must record it here because we didn't
+ // use the usual expression evaluators.
+ check.record(&x)
+ }
+ resList = []*operand{&x}
+ } else {
+ // x is not a function instantiation (it may still be a generic function).
+ check.rawExpr(nil, &x, e, nil, true)
+ check.exclude(&x, 1<<novalue|1<<builtin|1<<typexpr)
+ if t, ok := x.typ.(*Tuple); ok && x.mode != invalid {
+ // x is a function call returning multiple values; it cannot be generic.
+ resList = make([]*operand, t.Len())
+ for i, v := range t.vars {
+ resList[i] = &operand{mode: value, expr: e, typ: v.typ}
+ }
+ } else {
+ // x is exactly one value (possibly invalid or uninstantiated generic function).
+ resList = []*operand{&x}
+ }
+ }
+ } else if n > 1 {
+ // multiple values
+ resList = make([]*operand, n)
+ targsList = make([][]Type, n)
+ xlistList = make([][]syntax.Expr, n)
+ for i, e := range elist {
+ var x operand
+ if inst, _ := e.(*syntax.IndexExpr); inst != nil && check.indexExpr(&x, inst) {
+ // x is a generic function.
+ targs, xlist := check.funcInst(nil, x.Pos(), &x, inst, infer)
+ if targs != nil {
+ // x was not instantiated: collect the (partial) type arguments.
+ targsList[i] = targs
+ xlistList[i] = xlist
+ // Update x.expr so that we can record the partially instantiated function.
+ x.expr = inst
+ } else {
+ // x was instantiated: we must record it here because we didn't
+ // use the usual expression evaluators.
+ check.record(&x)
+ }
+ } else {
+ // x is exactly one value (possibly invalid or uninstantiated generic function).
+ check.genericExpr(&x, e)
+ }
+ resList[i] = &x
+ }
+ }
+
+ return
+}
+
+// arguments type-checks arguments passed to a function call with the given signature.
+// The function and its arguments may be generic, and possibly partially instantiated.
+// targs and xlist are the function's type arguments (and corresponding expressions).
+// args are the function arguments. If an argument args[i] is a partially instantiated
+// generic function, atargs[i] and atxlist[i] are the corresponding type arguments
+// (and corresponding expressions).
+// If the callee is variadic, arguments adjusts its signature to match the provided
+// arguments. The type parameters and arguments of the callee and all its arguments
+// are used together to infer any missing type arguments, and the callee and argument
+// functions are instantiated as necessary.
+// The result signature is the (possibly adjusted and instantiated) function signature.
+// If an error occurred, the result signature is the incoming sig.
+func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []Type, xlist []syntax.Expr, args []*operand, atargs [][]Type, atxlist [][]syntax.Expr) (rsig *Signature) {
+ rsig = sig
+
+ // Function call argument/parameter count requirements
+ //
+ // | standard call | dotdotdot call |
+ // --------------+------------------+----------------+
+ // standard func | nargs == npars | invalid |
+ // --------------+------------------+----------------+
+ // variadic func | nargs >= npars-1 | nargs == npars |
+ // --------------+------------------+----------------+
+
+ nargs := len(args)
+ npars := sig.params.Len()
+ ddd := call.HasDots
+
+ // set up parameters
+ sigParams := sig.params // adjusted for variadic functions (may be nil for empty parameter lists!)
+ adjusted := false // indicates if sigParams is different from sig.params
+ if sig.variadic {
+ if ddd {
+ // variadic_func(a, b, c...)
+ if len(call.ArgList) == 1 && nargs > 1 {
+ // f()... is not permitted if f() is multi-valued
+ //check.errorf(call.Ellipsis, "cannot use ... with %d-valued %s", nargs, call.ArgList[0])
+ check.errorf(call, InvalidDotDotDot, "cannot use ... with %d-valued %s", nargs, call.ArgList[0])
+ return
+ }
+ } else {
+ // variadic_func(a, b, c)
+ if nargs >= npars-1 {
+ // Create custom parameters for arguments: keep
+ // the first npars-1 parameters and add one for
+ // each argument mapping to the ... parameter.
+ vars := make([]*Var, npars-1) // npars > 0 for variadic functions
+ copy(vars, sig.params.vars)
+ last := sig.params.vars[npars-1]
+ typ := last.typ.(*Slice).elem
+ for len(vars) < nargs {
+ vars = append(vars, NewParam(last.pos, last.pkg, last.name, typ))
+ }
+ sigParams = NewTuple(vars...) // possibly nil!
+ adjusted = true
+ npars = nargs
+ } else {
+ // nargs < npars-1
+ npars-- // for correct error message below
+ }
+ }
+ } else {
+ if ddd {
+ // standard_func(a, b, c...)
+ //check.errorf(call.Ellipsis, "cannot use ... in call to non-variadic %s", call.Fun)
+ check.errorf(call, NonVariadicDotDotDot, "cannot use ... in call to non-variadic %s", call.Fun)
+ return
+ }
+ // standard_func(a, b, c)
+ }
+
+ // check argument count
+ if nargs != npars {
+ var at poser = call
+ qualifier := "not enough"
+ if nargs > npars {
+ at = args[npars].expr // report at first extra argument
+ qualifier = "too many"
+ } else if nargs > 0 {
+ at = args[nargs-1].expr // report at last argument
+ }
+ // take care of empty parameter lists represented by nil tuples
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ var err error_
+ err.code = WrongArgCount
+ err.errorf(at, "%s arguments in call to %s", qualifier, call.Fun)
+ err.errorf(nopos, "have %s", check.typesSummary(operandTypes(args), false))
+ err.errorf(nopos, "want %s", check.typesSummary(varTypes(params), sig.variadic))
+ check.report(&err)
+ return
+ }
+
+ // collect type parameters of callee and generic function arguments
+ var tparams []*TypeParam
+
+ // collect type parameters of callee
+ n := sig.TypeParams().Len()
+ if n > 0 {
+ if !check.allowVersion(check.pkg, call.Pos(), go1_18) {
+ if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil {
+ check.versionErrorf(iexpr, go1_18, "function instantiation")
+ } else {
+ check.versionErrorf(call, go1_18, "implicit function instantiation")
+ }
+ }
+ // rename type parameters to avoid problems with recursive calls
+ var tmp Type
+ tparams, tmp = check.renameTParams(call.Pos(), sig.TypeParams().list(), sigParams)
+ sigParams = tmp.(*Tuple)
+ // make sure targs and tparams have the same length
+ for len(targs) < len(tparams) {
+ targs = append(targs, nil)
+ }
+ }
+ assert(len(tparams) == len(targs))
+
+ // collect type parameters from generic function arguments
+ var genericArgs []int // indices of generic function arguments
+ if enableReverseTypeInference {
+ for i, arg := range args {
+ // generic arguments cannot have a defined (*Named) type - no need for underlying type below
+ if asig, _ := arg.typ.(*Signature); asig != nil && asig.TypeParams().Len() > 0 {
+ // The argument type is a generic function signature. This type is
+ // pointer-identical with (it's copied from) the type of the generic
+ // function argument and thus the function object.
+ // Before we change the type (type parameter renaming, below), make
+ // a clone of it as otherwise we implicitly modify the object's type
+ // (go.dev/issues/63260).
+ asig = clone(asig)
+ // Rename type parameters for cases like f(g, g); this gives each
+ // generic function argument a unique type identity (go.dev/issues/59956).
+ // TODO(gri) Consider only doing this if a function argument appears
+ // multiple times, which is rare (possible optimization).
+ atparams, tmp := check.renameTParams(call.Pos(), asig.TypeParams().list(), asig)
+ asig = tmp.(*Signature)
+ asig.tparams = &TypeParamList{atparams} // renameTParams doesn't touch associated type parameters
+ arg.typ = asig // new type identity for the function argument
+ tparams = append(tparams, atparams...)
+ // add partial list of type arguments, if any
+ if i < len(atargs) {
+ targs = append(targs, atargs[i]...)
+ }
+ // make sure targs and tparams have the same length
+ for len(targs) < len(tparams) {
+ targs = append(targs, nil)
+ }
+ genericArgs = append(genericArgs, i)
+ }
+ }
+ }
+ assert(len(tparams) == len(targs))
+
+ // at the moment we only support implicit instantiations of argument functions
+ _ = len(genericArgs) > 0 && check.verifyVersionf(args[genericArgs[0]], go1_21, "implicitly instantiated function as argument")
+
+ // tparams holds the type parameters of the callee and generic function arguments, if any:
+ // the first n type parameters belong to the callee, followed by mi type parameters for each
+ // of the generic function arguments, where mi = args[i].typ.(*Signature).TypeParams().Len().
+
+ // infer missing type arguments of callee and function arguments
+ if len(tparams) > 0 {
+ targs = check.infer(call.Pos(), tparams, targs, sigParams, args, false)
+ if targs == nil {
+ // TODO(gri) If infer inferred the first targs[:n], consider instantiating
+ // the call signature for better error messages/gopls behavior.
+ // Perhaps instantiate as much as we can, also for arguments.
+ // This will require changes to how infer returns its results.
+ return // error already reported
+ }
+
+ // update result signature: instantiate if needed
+ if n > 0 {
+ rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist)
+ // If the callee's parameter list was adjusted we need to update (instantiate)
+ // it separately. Otherwise we can simply use the result signature's parameter
+ // list.
+ if adjusted {
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple)
+ } else {
+ sigParams = rsig.params
+ }
+ }
+
+ // compute argument signatures: instantiate if needed
+ j := n
+ for _, i := range genericArgs {
+ arg := args[i]
+ asig := arg.typ.(*Signature)
+ k := j + asig.TypeParams().Len()
+ // targs[j:k] are the inferred type arguments for asig
+ arg.typ = check.instantiateSignature(call.Pos(), arg.expr, asig, targs[j:k], nil) // TODO(gri) provide xlist if possible (partial instantiations)
+ check.record(arg) // record here because we didn't use the usual expr evaluators
+ j = k
+ }
+ }
+
+ // check arguments
+ if len(args) > 0 {
+ context := check.sprintf("argument to %s", call.Fun)
+ for i, a := range args {
+ check.assignment(a, sigParams.vars[i].typ, context)
+ }
+ }
+
+ return
+}
+
+var cgoPrefixes = [...]string{
+ "_Ciconst_",
+ "_Cfconst_",
+ "_Csconst_",
+ "_Ctype_",
+ "_Cvar_", // actually a pointer to the var
+ "_Cfpvar_fp_",
+ "_Cfunc_",
+ "_Cmacro_", // function to evaluate the expanded expression
+}
+
+func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName, wantType bool) {
+ // these must be declared before the "goto Error" statements
+ var (
+ obj Object
+ index []int
+ indirect bool
+ )
+
+ sel := e.Sel.Value
+ // If the identifier refers to a package, handle everything here
+ // so we don't need a "package" mode for operands: package names
+ // can only appear in qualified identifiers which are mapped to
+ // selector expressions.
+ if ident, ok := e.X.(*syntax.Name); ok {
+ obj := check.lookup(ident.Value)
+ if pname, _ := obj.(*PkgName); pname != nil {
+ assert(pname.pkg == check.pkg)
+ check.recordUse(ident, pname)
+ pname.used = true
+ pkg := pname.imported
+
+ var exp Object
+ funcMode := value
+ if pkg.cgo {
+ // cgo special cases C.malloc: it's
+ // rewritten to _CMalloc and does not
+ // support two-result calls.
+ if sel == "malloc" {
+ sel = "_CMalloc"
+ } else {
+ funcMode = cgofunc
+ }
+ for _, prefix := range cgoPrefixes {
+ // cgo objects are part of the current package (in file
+ // _cgo_gotypes.go). Use regular lookup.
+ _, exp = check.scope.LookupParent(prefix+sel, check.pos)
+ if exp != nil {
+ break
+ }
+ }
+ if exp == nil {
+ check.errorf(e.Sel, UndeclaredImportedName, "undefined: %s", syntax.Expr(e)) // cast to syntax.Expr to silence vet
+ goto Error
+ }
+ check.objDecl(exp, nil)
+ } else {
+ exp = pkg.scope.Lookup(sel)
+ if exp == nil {
+ if !pkg.fake {
+ check.errorf(e.Sel, UndeclaredImportedName, "undefined: %s", syntax.Expr(e))
+ }
+ goto Error
+ }
+ if !exp.Exported() {
+ check.errorf(e.Sel, UnexportedName, "%s not exported by package %s", sel, pkg.name)
+ // ok to continue
+ }
+ }
+ check.recordUse(e.Sel, exp)
+
+ // Simplified version of the code for *syntax.Names:
+ // - imported objects are always fully initialized
+ switch exp := exp.(type) {
+ case *Const:
+ assert(exp.Val() != nil)
+ x.mode = constant_
+ x.typ = exp.typ
+ x.val = exp.val
+ case *TypeName:
+ x.mode = typexpr
+ x.typ = exp.typ
+ case *Var:
+ x.mode = variable
+ x.typ = exp.typ
+ if pkg.cgo && strings.HasPrefix(exp.name, "_Cvar_") {
+ x.typ = x.typ.(*Pointer).base
+ }
+ case *Func:
+ x.mode = funcMode
+ x.typ = exp.typ
+ if pkg.cgo && strings.HasPrefix(exp.name, "_Cmacro_") {
+ x.mode = value
+ x.typ = x.typ.(*Signature).results.vars[0].typ
+ }
+ case *Builtin:
+ x.mode = builtin
+ x.typ = exp.typ
+ x.id = exp.id
+ default:
+ check.dump("%v: unexpected object %v", atPos(e.Sel), exp)
+ unreachable()
+ }
+ x.expr = e
+ return
+ }
+ }
+
+ check.exprOrType(x, e.X, false)
+ switch x.mode {
+ case typexpr:
+ // don't crash for "type T T.x" (was go.dev/issue/51509)
+ if def != nil && def.typ == x.typ {
+ check.cycleError([]Object{def})
+ goto Error
+ }
+ case builtin:
+ check.errorf(e.Pos(), UncalledBuiltin, "cannot select on %s", x)
+ goto Error
+ case invalid:
+ goto Error
+ }
+
+ // Avoid crashing when checking an invalid selector in a method declaration
+ // (i.e., where def is not set):
+ //
+ // type S[T any] struct{}
+ // type V = S[any]
+ // func (fs *S[T]) M(x V.M) {}
+ //
+ // All codepaths below return a non-type expression. If we get here while
+ // expecting a type expression, it is an error.
+ //
+ // See go.dev/issue/57522 for more details.
+ //
+ // TODO(rfindley): We should do better by refusing to check selectors in all cases where
+ // x.typ is incomplete.
+ if wantType {
+ check.errorf(e.Sel, NotAType, "%s is not a type", syntax.Expr(e))
+ goto Error
+ }
+
+ obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ if obj == nil {
+ // Don't report another error if the underlying type was invalid (go.dev/issue/49541).
+ if !isValid(under(x.typ)) {
+ goto Error
+ }
+
+ if index != nil {
+ // TODO(gri) should provide actual type where the conflict happens
+ check.errorf(e.Sel, AmbiguousSelector, "ambiguous selector %s.%s", x.expr, sel)
+ goto Error
+ }
+
+ if indirect {
+ if x.mode == typexpr {
+ check.errorf(e.Sel, InvalidMethodExpr, "invalid method expression %s.%s (needs pointer receiver (*%s).%s)", x.typ, sel, x.typ, sel)
+ } else {
+ check.errorf(e.Sel, InvalidMethodExpr, "cannot call pointer method %s on %s", sel, x.typ)
+ }
+ goto Error
+ }
+
+ var why string
+ if isInterfacePtr(x.typ) {
+ why = check.interfacePtrError(x.typ)
+ } else {
+ why = check.sprintf("type %s has no field or method %s", x.typ, sel)
+ // Check if capitalization of sel matters and provide better error message in that case.
+ // TODO(gri) This code only looks at the first character but LookupFieldOrMethod has an
+ // (internal) mechanism for case-insensitive lookup. Should use that instead.
+ if len(sel) > 0 {
+ var changeCase string
+ if r := rune(sel[0]); unicode.IsUpper(r) {
+ changeCase = string(unicode.ToLower(r)) + sel[1:]
+ } else {
+ changeCase = string(unicode.ToUpper(r)) + sel[1:]
+ }
+ if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
+ why += ", but does have " + changeCase
+ }
+ }
+ }
+ check.errorf(e.Sel, MissingFieldOrMethod, "%s.%s undefined (%s)", x.expr, sel, why)
+ goto Error
+ }
+
+ // methods may not have a fully set up signature yet
+ if m, _ := obj.(*Func); m != nil {
+ check.objDecl(m, nil)
+ }
+
+ if x.mode == typexpr {
+ // method expression
+ m, _ := obj.(*Func)
+ if m == nil {
+ // TODO(gri) should check if capitalization of sel matters and provide better error message in that case
+ check.errorf(e.Sel, MissingFieldOrMethod, "%s.%s undefined (type %s has no method %s)", x.expr, sel, x.typ, sel)
+ goto Error
+ }
+
+ check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
+
+ sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, InvalidDeclCycle, "illegal cycle in method declaration")
+ goto Error
+ }
+
+ // The receiver type becomes the type of the first function
+ // argument of the method expression's function type.
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
+ x.mode = value
+ x.typ = &Signature{
+ tparams: sig.tparams,
+ params: NewTuple(params...),
+ results: sig.results,
+ variadic: sig.variadic,
+ }
+
+ check.addDeclDep(m)
+
+ } else {
+ // regular selector
+ switch obj := obj.(type) {
+ case *Var:
+ check.recordSelection(e, FieldVal, x.typ, obj, index, indirect)
+ if x.mode == variable || indirect {
+ x.mode = variable
+ } else {
+ x.mode = value
+ }
+ x.typ = obj.typ
+
+ case *Func:
+ // TODO(gri) If we needed to take into account the receiver's
+ // addressability, should we report the type &(x.typ) instead?
+ check.recordSelection(e, MethodVal, x.typ, obj, index, indirect)
+
+ x.mode = value
+
+ // remove receiver
+ sig := *obj.typ.(*Signature)
+ sig.recv = nil
+ x.typ = &sig
+
+ check.addDeclDep(obj)
+
+ default:
+ unreachable()
+ }
+ }
+
+ // everything went well
+ x.expr = e
+ return
+
+Error:
+ x.mode = invalid
+ x.expr = e
+}
+
+// use type-checks each argument.
+// Useful to make sure expressions are evaluated
+// (and variables are "used") in the presence of
+// other errors. Arguments may be nil.
+// Reports if all arguments evaluated without error.
+func (check *Checker) use(args ...syntax.Expr) bool { return check.useN(args, false) }
+
+// useLHS is like use, but doesn't "use" top-level identifiers.
+// It should be called instead of use if the arguments are
+// expressions on the lhs of an assignment.
+func (check *Checker) useLHS(args ...syntax.Expr) bool { return check.useN(args, true) }
+
+func (check *Checker) useN(args []syntax.Expr, lhs bool) bool {
+ ok := true
+ for _, e := range args {
+ if !check.use1(e, lhs) {
+ ok = false
+ }
+ }
+ return ok
+}
+
+func (check *Checker) use1(e syntax.Expr, lhs bool) bool {
+ var x operand
+ x.mode = value // anything but invalid
+ switch n := syntax.Unparen(e).(type) {
+ case nil:
+ // nothing to do
+ case *syntax.Name:
+ // don't report an error evaluating blank
+ if n.Value == "_" {
+ break
+ }
+ // If the lhs is an identifier denoting a variable v, this assignment
+ // is not a 'use' of v. Remember current value of v.used and restore
+ // after evaluating the lhs via check.rawExpr.
+ var v *Var
+ var v_used bool
+ if lhs {
+ if _, obj := check.scope.LookupParent(n.Value, nopos); obj != nil {
+ // It's ok to mark non-local variables, but ignore variables
+ // from other packages to avoid potential race conditions with
+ // dot-imported variables.
+ if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg {
+ v = w
+ v_used = v.used
+ }
+ }
+ }
+ check.exprOrType(&x, n, true)
+ if v != nil {
+ v.used = v_used // restore v.used
+ }
+ case *syntax.ListExpr:
+ return check.useN(n.ElemList, lhs)
+ default:
+ check.rawExpr(nil, &x, e, nil, true)
+ }
+ return x.mode != invalid
+}
diff --git a/src/cmd/compile/internal/types2/chan.go b/src/cmd/compile/internal/types2/chan.go
new file mode 100644
index 0000000..77650df
--- /dev/null
+++ b/src/cmd/compile/internal/types2/chan.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Chan represents a channel type.
+type Chan struct {
+ dir ChanDir
+ elem Type
+}
+
+// A ChanDir value indicates a channel direction.
+type ChanDir int
+
+// The direction of a channel is indicated by one of these constants.
+const (
+ SendRecv ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// NewChan returns a new channel type for the given direction and element type.
+func NewChan(dir ChanDir, elem Type) *Chan {
+ return &Chan{dir: dir, elem: elem}
+}
+
+// Dir returns the direction of channel c.
+func (c *Chan) Dir() ChanDir { return c.dir }
+
+// Elem returns the element type of channel c.
+func (c *Chan) Elem() Type { return c.elem }
+
+func (c *Chan) Underlying() Type { return c }
+func (c *Chan) String() string { return TypeString(c, nil) }
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
new file mode 100644
index 0000000..0582367
--- /dev/null
+++ b/src/cmd/compile/internal/types2/check.go
@@ -0,0 +1,704 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Check function, which drives type-checking.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+ "go/constant"
+ "internal/godebug"
+ . "internal/types/errors"
+)
+
+// nopos indicates an unknown position
+var nopos syntax.Pos
+
+// debugging/development support
+const debug = false // leave on during development
+
+// gotypesalias controls the use of Alias types.
+var gotypesalias = godebug.New("#gotypesalias")
+
+// exprInfo stores information about an untyped expression.
+type exprInfo struct {
+ isLhs bool // expression is lhs operand of a shift with delayed type-check
+ mode operandMode
+ typ *Basic
+ val constant.Value // constant value; or nil (if not a constant)
+}
+
+// An environment represents the environment within which an object is
+// type-checked.
+type environment struct {
+ decl *declInfo // package-level declaration whose init expression/function body is checked
+ scope *Scope // top-most scope for lookups
+ pos syntax.Pos // if valid, identifiers are looked up as if at position pos (used by Eval)
+ iota constant.Value // value of iota in a constant declaration; nil otherwise
+ errpos syntax.Pos // if valid, identifier position of a constant with inherited initializer
+ inTParamList bool // set if inside a type parameter list
+ sig *Signature // function signature if inside a function; nil otherwise
+ isPanic map[*syntax.CallExpr]bool // set of panic call expressions (used for termination check)
+ hasLabel bool // set if a function makes use of labels (only ~1% of functions); unused outside functions
+ hasCallOrRecv bool // set if an expression contains a function call or channel receive operation
+}
+
+// lookup looks up name in the current environment and returns the matching object, or nil.
+func (env *environment) lookup(name string) Object {
+ _, obj := env.scope.LookupParent(name, env.pos)
+ return obj
+}
+
+// An importKey identifies an imported package by import path and source directory
+// (directory containing the file containing the import). In practice, the directory
+// may always be the same, or may not matter. Given an (import path, directory), an
+// importer must always return the same package (but given two different import paths,
+// an importer may still return the same package by mapping them to the same package
+// paths).
+type importKey struct {
+ path, dir string
+}
+
+// A dotImportKey describes a dot-imported object in the given scope.
+type dotImportKey struct {
+ scope *Scope
+ name string
+}
+
+// An action describes a (delayed) action.
+type action struct {
+ f func() // action to be executed
+ desc *actionDesc // action description; may be nil, requires debug to be set
+}
+
+// If debug is set, describef sets a printf-formatted description for action a.
+// Otherwise, it is a no-op.
+func (a *action) describef(pos poser, format string, args ...interface{}) {
+ if debug {
+ a.desc = &actionDesc{pos, format, args}
+ }
+}
+
+// An actionDesc provides information on an action.
+// For debugging only.
+type actionDesc struct {
+ pos poser
+ format string
+ args []interface{}
+}
+
+// A Checker maintains the state of the type checker.
+// It must be created with NewChecker.
+type Checker struct {
+ // package information
+ // (initialized by NewChecker, valid for the life-time of checker)
+
+ // If enableAlias is set, alias declarations produce an Alias type.
+ // Otherwise the alias information is only in the type name, which
+ // points directly to the actual (aliased) type.
+ enableAlias bool
+
+ conf *Config
+ ctxt *Context // context for de-duplicating instances
+ pkg *Package
+ *Info
+ version goVersion // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
+ objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
+ impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
+ valids instanceLookup // valid *Named (incl. instantiated) types per the validType check
+
+ // pkgPathMap maps package names to the set of distinct import paths we've
+ // seen for that name, anywhere in the import graph. It is used for
+ // disambiguating package names in error messages.
+ //
+ // pkgPathMap is allocated lazily, so that we don't pay the price of building
+ // it on the happy path. seenPkgMap tracks the packages that we've already
+ // walked.
+ pkgPathMap map[string]map[string]bool
+ seenPkgMap map[*Package]bool
+
+ // information collected during type-checking of a set of package files
+ // (initialized by Files, valid only for the duration of check.Files;
+ // maps and lists are allocated on demand)
+ files []*syntax.File // list of package files
+ versions map[*syntax.PosBase]string // maps file bases to version strings (each file has an entry)
+ imports []*PkgName // list of imported packages
+ dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through
+ recvTParamMap map[*syntax.Name]*TypeParam // maps blank receiver type parameters to their type
+ brokenAliases map[*TypeName]bool // set of aliases with broken (not yet determined) types
+ unionTypeSets map[*Union]*_TypeSet // computed type sets for union types
+ mono monoGraph // graph for detecting non-monomorphizable instantiation loops
+
+ firstErr error // first error encountered
+ methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods
+ untyped map[syntax.Expr]exprInfo // map of expressions without final type
+ delayed []action // stack of delayed action segments; segments are processed in FIFO order
+ objPath []Object // path of object dependencies during type inference (for cycle reporting)
+ cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking
+
+ // environment within which the current object is type-checked (valid only
+ // for the duration of type-checking a specific object)
+ environment
+
+ // debugging
+ indent int // indentation for tracing
+}
+
+// addDeclDep adds the dependency edge (check.decl -> to) if check.decl exists
+func (check *Checker) addDeclDep(to Object) {
+ from := check.decl
+ if from == nil {
+ return // not in a package-level init expression
+ }
+ if _, found := check.objMap[to]; !found {
+ return // to is not a package-level object
+ }
+ from.addDep(to)
+}
+
+// Note: The following three alias-related functions are only used
+// when Alias types are not enabled.
+
+// brokenAlias records that alias doesn't have a determined type yet.
+// It also sets alias.typ to Typ[Invalid].
+// Not used if check.enableAlias is set.
+func (check *Checker) brokenAlias(alias *TypeName) {
+ assert(!check.enableAlias)
+ if check.brokenAliases == nil {
+ check.brokenAliases = make(map[*TypeName]bool)
+ }
+ check.brokenAliases[alias] = true
+ alias.typ = Typ[Invalid]
+}
+
+// validAlias records that alias has the valid type typ (possibly Typ[Invalid]).
+func (check *Checker) validAlias(alias *TypeName, typ Type) {
+ assert(!check.enableAlias)
+ delete(check.brokenAliases, alias)
+ alias.typ = typ
+}
+
+// isBrokenAlias reports whether alias doesn't have a determined type yet.
+func (check *Checker) isBrokenAlias(alias *TypeName) bool {
+ assert(!check.enableAlias)
+ return check.brokenAliases[alias]
+}
+
+func (check *Checker) rememberUntyped(e syntax.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) {
+ m := check.untyped
+ if m == nil {
+ m = make(map[syntax.Expr]exprInfo)
+ check.untyped = m
+ }
+ m[e] = exprInfo{lhs, mode, typ, val}
+}
+
+// later pushes f on to the stack of actions that will be processed later;
+// either at the end of the current statement, or in case of a local constant
+// or variable declaration, before the constant or variable is in scope
+// (so that f still sees the scope before any new declarations).
+// later returns the pushed action so one can provide a description
+// via action.describef for debugging, if desired.
+func (check *Checker) later(f func()) *action {
+ i := len(check.delayed)
+ check.delayed = append(check.delayed, action{f: f})
+ return &check.delayed[i]
+}
+
+// push pushes obj onto the object path and returns its index in the path.
+func (check *Checker) push(obj Object) int {
+ check.objPath = append(check.objPath, obj)
+ return len(check.objPath) - 1
+}
+
+// pop pops and returns the topmost object from the object path.
+func (check *Checker) pop() Object {
+ i := len(check.objPath) - 1
+ obj := check.objPath[i]
+ check.objPath[i] = nil
+ check.objPath = check.objPath[:i]
+ return obj
+}
+
+type cleaner interface {
+ cleanup()
+}
+
+// needsCleanup records objects/types that implement the cleanup method
+// which will be called at the end of type-checking.
+func (check *Checker) needsCleanup(c cleaner) {
+ check.cleaners = append(check.cleaners, c)
+}
+
+// NewChecker returns a new Checker instance for a given package.
+// Package files may be added incrementally via checker.Files.
+func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
+ // make sure we have a configuration
+ if conf == nil {
+ conf = new(Config)
+ }
+
+ // make sure we have an info struct
+ if info == nil {
+ info = new(Info)
+ }
+
+ // Note: clients may call NewChecker with the Unsafe package, which is
+ // globally shared and must not be mutated. Therefore NewChecker must not
+ // mutate *pkg.
+ //
+ // (previously, pkg.goVersion was mutated here: go.dev/issue/61212)
+
+ return &Checker{
+ enableAlias: gotypesalias.Value() == "1",
+ conf: conf,
+ ctxt: conf.Context,
+ pkg: pkg,
+ Info: info,
+ version: asGoVersion(conf.GoVersion),
+ objMap: make(map[Object]*declInfo),
+ impMap: make(map[importKey]*Package),
+ }
+}
+
+// initFiles initializes the files-specific portion of checker.
+// The provided files must all belong to the same package.
+func (check *Checker) initFiles(files []*syntax.File) {
+ // start with a clean slate (check.Files may be called multiple times)
+ check.files = nil
+ check.imports = nil
+ check.dotImportMap = nil
+
+ check.firstErr = nil
+ check.methods = nil
+ check.untyped = nil
+ check.delayed = nil
+ check.objPath = nil
+ check.cleaners = nil
+
+ // determine package name and collect valid files
+ pkg := check.pkg
+ for _, file := range files {
+ switch name := file.PkgName.Value; pkg.name {
+ case "":
+ if name != "_" {
+ pkg.name = name
+ } else {
+ check.error(file.PkgName, BlankPkgName, "invalid package name _")
+ }
+ fallthrough
+
+ case name:
+ check.files = append(check.files, file)
+
+ default:
+ check.errorf(file, MismatchedPkgName, "package %s; expected %s", name, pkg.name)
+ // ignore this file
+ }
+ }
+
+ // reuse Info.FileVersions if provided
+ versions := check.Info.FileVersions
+ if versions == nil {
+ versions = make(map[*syntax.PosBase]string)
+ }
+ check.versions = versions
+
+ pkgVersionOk := check.version.isValid()
+ downgradeOk := check.version.cmp(go1_21) >= 0
+
+ // determine Go version for each file
+ for _, file := range check.files {
+ // use unaltered Config.GoVersion by default
+ // (This version string may contain dot-release numbers as in go1.20.1,
+ // unlike file versions which are Go language versions only, if valid.)
+ v := check.conf.GoVersion
+ // use the file version, if applicable
+ // (file versions are either the empty string or of the form go1.dd)
+ if pkgVersionOk {
+ fileVersion := asGoVersion(file.GoVersion)
+ if fileVersion.isValid() {
+ cmp := fileVersion.cmp(check.version)
+ // Go 1.21 introduced the feature of setting the go.mod
+ // go line to an early version of Go and allowing //go:build lines
+ // to “upgrade” (cmp > 0) the Go version in a given file.
+ // We can do that backwards compatibly.
+ //
+ // Go 1.21 also introduced the feature of allowing //go:build lines
+ // to “downgrade” (cmp < 0) the Go version in a given file.
+ // That can't be done compatibly in general, since before the
+ // build lines were ignored and code got the module's Go version.
+ // To work around this, downgrades are only allowed when the
+ // module's Go version is Go 1.21 or later.
+ //
+ // If there is no valid check.version, then we don't really know what
+ // Go version to apply.
+ // Legacy tools may do this, and they historically have accepted everything.
+ // Preserve that behavior by ignoring //go:build constraints entirely in that
+ // case (!pkgVersionOk).
+ if cmp > 0 || cmp < 0 && downgradeOk {
+ v = file.GoVersion
+ }
+ }
+ }
+ versions[base(file.Pos())] = v // base(file.Pos()) may be nil for tests
+ }
+}
+
+// A bailout panic is used for early termination.
+type bailout struct{}
+
+func (check *Checker) handleBailout(err *error) {
+ switch p := recover().(type) {
+ case nil, bailout:
+ // normal return or early exit
+ *err = check.firstErr
+ default:
+ // re-panic
+ panic(p)
+ }
+}
+
+// Files checks the provided files as part of the checker's package.
+func (check *Checker) Files(files []*syntax.File) error { return check.checkFiles(files) }
+
+var errBadCgo = errors.New("cannot use FakeImportC and go115UsesCgo together")
+
+func (check *Checker) checkFiles(files []*syntax.File) (err error) {
+ if check.pkg == Unsafe {
+ // Defensive handling for Unsafe, which cannot be type checked, and must
+ // not be mutated. See https://go.dev/issue/61212 for an example of where
+ // Unsafe is passed to NewChecker.
+ return nil
+ }
+
+ // Note: NewChecker doesn't return an error, so we need to check the version here.
+ if check.version.cmp(go_current) > 0 {
+ return fmt.Errorf("package requires newer Go version %v", check.version)
+ }
+ if check.conf.FakeImportC && check.conf.go115UsesCgo {
+ return errBadCgo
+ }
+
+ defer check.handleBailout(&err)
+
+ print := func(msg string) {
+ if check.conf.Trace {
+ fmt.Println()
+ fmt.Println(msg)
+ }
+ }
+
+ print("== initFiles ==")
+ check.initFiles(files)
+
+ print("== collectObjects ==")
+ check.collectObjects()
+
+ print("== packageObjects ==")
+ check.packageObjects()
+
+ print("== processDelayed ==")
+ check.processDelayed(0) // incl. all functions
+
+ print("== cleanup ==")
+ check.cleanup()
+
+ print("== initOrder ==")
+ check.initOrder()
+
+ if !check.conf.DisableUnusedImportCheck {
+ print("== unusedImports ==")
+ check.unusedImports()
+ }
+
+ print("== recordUntyped ==")
+ check.recordUntyped()
+
+ if check.firstErr == nil {
+ // TODO(mdempsky): Ensure monomorph is safe when errors exist.
+ check.monomorph()
+ }
+
+ check.pkg.goVersion = check.conf.GoVersion
+ check.pkg.complete = true
+
+ // no longer needed - release memory
+ check.imports = nil
+ check.dotImportMap = nil
+ check.pkgPathMap = nil
+ check.seenPkgMap = nil
+ check.recvTParamMap = nil
+ check.brokenAliases = nil
+ check.unionTypeSets = nil
+ check.ctxt = nil
+
+ // TODO(gri) There's more memory we should release at this point.
+
+ return
+}
+
+// processDelayed processes all delayed actions pushed after top.
+func (check *Checker) processDelayed(top int) {
+ // If each delayed action pushes a new action, the
+ // stack will continue to grow during this loop.
+ // However, it is only processing functions (which
+ // are processed in a delayed fashion) that may
+ // add more actions (such as nested functions), so
+ // this is a sufficiently bounded process.
+ for i := top; i < len(check.delayed); i++ {
+ a := &check.delayed[i]
+ if check.conf.Trace {
+ if a.desc != nil {
+ check.trace(a.desc.pos.Pos(), "-- "+a.desc.format, a.desc.args...)
+ } else {
+ check.trace(nopos, "-- delayed %p", a.f)
+ }
+ }
+ a.f() // may append to check.delayed
+ if check.conf.Trace {
+ fmt.Println()
+ }
+ }
+ assert(top <= len(check.delayed)) // stack must not have shrunk
+ check.delayed = check.delayed[:top]
+}
+
+// cleanup runs cleanup for all collected cleaners.
+func (check *Checker) cleanup() {
+ // Don't use a range clause since Named.cleanup may add more cleaners.
+ for i := 0; i < len(check.cleaners); i++ {
+ check.cleaners[i].cleanup()
+ }
+ check.cleaners = nil
+}
+
+func (check *Checker) record(x *operand) {
+ // convert x into a user-friendly set of values
+ // TODO(gri) this code can be simplified
+ var typ Type
+ var val constant.Value
+ switch x.mode {
+ case invalid:
+ typ = Typ[Invalid]
+ case novalue:
+ typ = (*Tuple)(nil)
+ case constant_:
+ typ = x.typ
+ val = x.val
+ default:
+ typ = x.typ
+ }
+ assert(x.expr != nil && typ != nil)
+
+ if isUntyped(typ) {
+ // delay type and value recording until we know the type
+ // or until the end of type checking
+ check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val)
+ } else {
+ check.recordTypeAndValue(x.expr, x.mode, typ, val)
+ }
+}
+
+func (check *Checker) recordUntyped() {
+ if !debug && !check.recordTypes() {
+ return // nothing to do
+ }
+
+ for x, info := range check.untyped {
+ if debug && isTyped(info.typ) {
+ check.dump("%v: %s (type %s) is typed", atPos(x), x, info.typ)
+ unreachable()
+ }
+ check.recordTypeAndValue(x, info.mode, info.typ, info.val)
+ }
+}
+
+func (check *Checker) recordTypeAndValue(x syntax.Expr, mode operandMode, typ Type, val constant.Value) {
+ assert(x != nil)
+ assert(typ != nil)
+ if mode == invalid {
+ return // omit
+ }
+ if mode == constant_ {
+ assert(val != nil)
+ // We check allBasic(typ, IsConstType) here as constant expressions may be
+ // recorded as type parameters.
+ assert(!isValid(typ) || allBasic(typ, IsConstType))
+ }
+ if m := check.Types; m != nil {
+ m[x] = TypeAndValue{mode, typ, val}
+ }
+ if check.StoreTypesInSyntax {
+ tv := TypeAndValue{mode, typ, val}
+ stv := syntax.TypeAndValue{Type: typ, Value: val}
+ if tv.IsVoid() {
+ stv.SetIsVoid()
+ }
+ if tv.IsType() {
+ stv.SetIsType()
+ }
+ if tv.IsBuiltin() {
+ stv.SetIsBuiltin()
+ }
+ if tv.IsValue() {
+ stv.SetIsValue()
+ }
+ if tv.IsNil() {
+ stv.SetIsNil()
+ }
+ if tv.Addressable() {
+ stv.SetAddressable()
+ }
+ if tv.Assignable() {
+ stv.SetAssignable()
+ }
+ if tv.HasOk() {
+ stv.SetHasOk()
+ }
+ x.SetTypeInfo(stv)
+ }
+}
+
+func (check *Checker) recordBuiltinType(f syntax.Expr, sig *Signature) {
+ // f must be a (possibly parenthesized, possibly qualified)
+ // identifier denoting a built-in (including unsafe's non-constant
+ // functions Add and Slice): record the signature for f and possible
+ // children.
+ for {
+ check.recordTypeAndValue(f, builtin, sig, nil)
+ switch p := f.(type) {
+ case *syntax.Name, *syntax.SelectorExpr:
+ return // we're done
+ case *syntax.ParenExpr:
+ f = p.X
+ default:
+ unreachable()
+ }
+ }
+}
+
+// recordCommaOkTypes updates recorded types to reflect that x is used in a commaOk context
+// (and therefore has tuple type).
+func (check *Checker) recordCommaOkTypes(x syntax.Expr, a []*operand) {
+ assert(x != nil)
+ assert(len(a) == 2)
+ if a[0].mode == invalid {
+ return
+ }
+ t0, t1 := a[0].typ, a[1].typ
+ assert(isTyped(t0) && isTyped(t1) && (isBoolean(t1) || t1 == universeError))
+ if m := check.Types; m != nil {
+ for {
+ tv := m[x]
+ assert(tv.Type != nil) // should have been recorded already
+ pos := x.Pos()
+ tv.Type = NewTuple(
+ NewVar(pos, check.pkg, "", t0),
+ NewVar(pos, check.pkg, "", t1),
+ )
+ m[x] = tv
+ // if x is a parenthesized expression (p.X), update p.X
+ p, _ := x.(*syntax.ParenExpr)
+ if p == nil {
+ break
+ }
+ x = p.X
+ }
+ }
+ if check.StoreTypesInSyntax {
+ // Note: this loop is duplicated because the type of tv is different.
+ // Above it is types2.TypeAndValue, here it is syntax.TypeAndValue.
+ for {
+ tv := x.GetTypeInfo()
+ assert(tv.Type != nil) // should have been recorded already
+ pos := x.Pos()
+ tv.Type = NewTuple(
+ NewVar(pos, check.pkg, "", t0),
+ NewVar(pos, check.pkg, "", t1),
+ )
+ x.SetTypeInfo(tv)
+ p, _ := x.(*syntax.ParenExpr)
+ if p == nil {
+ break
+ }
+ x = p.X
+ }
+ }
+}
+
+// recordInstance records instantiation information into check.Info, if the
+// Instances map is non-nil. The given expr must be an ident, selector, or
+// index (list) expr with ident or selector operand.
+//
+// TODO(rfindley): the expr parameter is fragile. See if we can access the
+// instantiated identifier in some other way.
+func (check *Checker) recordInstance(expr syntax.Expr, targs []Type, typ Type) {
+ ident := instantiatedIdent(expr)
+ assert(ident != nil)
+ assert(typ != nil)
+ if m := check.Instances; m != nil {
+ m[ident] = Instance{newTypeList(targs), typ}
+ }
+}
+
+func instantiatedIdent(expr syntax.Expr) *syntax.Name {
+ var selOrIdent syntax.Expr
+ switch e := expr.(type) {
+ case *syntax.IndexExpr:
+ selOrIdent = e.X
+ case *syntax.SelectorExpr, *syntax.Name:
+ selOrIdent = e
+ }
+ switch x := selOrIdent.(type) {
+ case *syntax.Name:
+ return x
+ case *syntax.SelectorExpr:
+ return x.Sel
+ }
+ panic("instantiated ident not found")
+}
+
+func (check *Checker) recordDef(id *syntax.Name, obj Object) {
+ assert(id != nil)
+ if m := check.Defs; m != nil {
+ m[id] = obj
+ }
+}
+
+func (check *Checker) recordUse(id *syntax.Name, obj Object) {
+ assert(id != nil)
+ assert(obj != nil)
+ if m := check.Uses; m != nil {
+ m[id] = obj
+ }
+}
+
+func (check *Checker) recordImplicit(node syntax.Node, obj Object) {
+ assert(node != nil)
+ assert(obj != nil)
+ if m := check.Implicits; m != nil {
+ m[node] = obj
+ }
+}
+
+func (check *Checker) recordSelection(x *syntax.SelectorExpr, kind SelectionKind, recv Type, obj Object, index []int, indirect bool) {
+ assert(obj != nil && (recv == nil || len(index) > 0))
+ check.recordUse(x.Sel, obj)
+ if m := check.Selections; m != nil {
+ m[x] = &Selection{kind, recv, obj, index, indirect}
+ }
+}
+
+func (check *Checker) recordScope(node syntax.Node, scope *Scope) {
+ assert(node != nil)
+ assert(scope != nil)
+ if m := check.Scopes; m != nil {
+ m[node] = scope
+ }
+}
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
new file mode 100644
index 0000000..a9d6202
--- /dev/null
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -0,0 +1,461 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a typechecker test harness. The packages specified
+// in tests are typechecked. Error messages reported by the typechecker are
+// compared against the errors expected in the test files.
+//
+// Expected errors are indicated in the test files by putting comments
+// of the form /* ERROR pattern */ or /* ERRORx pattern */ (or a similar
+// //-style line comment) immediately following the tokens where errors
+// are reported. There must be exactly one blank before and after the
+// ERROR/ERRORx indicator, and the pattern must be a properly quoted Go
+// string.
+//
+// The harness will verify that each ERROR pattern is a substring of the
+// error reported at that source position, and that each ERRORx pattern
+// is a regular expression matching the respective error.
+// Consecutive comments may be used to indicate multiple errors reported
+// at the same position.
+//
+// For instance, the following test source indicates that an "undeclared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "undeclared" */ + 1
+// }
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+var (
+ haltOnError = flag.Bool("halt", false, "halt on error")
+ verifyErrors = flag.Bool("verify", false, "verify errors (rather than list them) in TestManual")
+)
+
+func parseFiles(t *testing.T, filenames []string, srcs [][]byte, mode syntax.Mode) ([]*syntax.File, []error) {
+ var files []*syntax.File
+ var errlist []error
+ errh := func(err error) { errlist = append(errlist, err) }
+ for i, filename := range filenames {
+ base := syntax.NewFileBase(filename)
+ r := bytes.NewReader(srcs[i])
+ file, err := syntax.Parse(base, r, errh, nil, mode)
+ if file == nil {
+ t.Fatalf("%s: %s", filename, err)
+ }
+ files = append(files, file)
+ }
+ return files, errlist
+}
+
+func unpackError(err error) (syntax.Pos, string) {
+ switch err := err.(type) {
+ case syntax.Error:
+ return err.Pos, err.Msg
+ case Error:
+ return err.Pos, err.Msg
+ default:
+ return nopos, err.Error()
+ }
+}
+
+// absDiff returns the absolute difference between x and y.
+func absDiff(x, y uint) uint {
+ if x < y {
+ return y - x
+ }
+ return x - y
+}
+
+// parseFlags parses flags from the first line of the given source if the line
+// starts with "//" (line comment) followed by "-" (possibly with spaces
+// between). Otherwise the line is ignored.
+func parseFlags(src []byte, flags *flag.FlagSet) error {
+ // we must have a line comment that starts with a "-"
+ const prefix = "//"
+ if !bytes.HasPrefix(src, []byte(prefix)) {
+ return nil // first line is not a line comment
+ }
+ src = src[len(prefix):]
+ if i := bytes.Index(src, []byte("-")); i < 0 || len(bytes.TrimSpace(src[:i])) != 0 {
+ return nil // comment doesn't start with a "-"
+ }
+ end := bytes.Index(src, []byte("\n"))
+ const maxLen = 256
+ if end < 0 || end > maxLen {
+ return fmt.Errorf("flags comment line too long")
+ }
+
+ return flags.Parse(strings.Fields(string(src[:end])))
+}
+
+// testFiles type-checks the package consisting of the given files, and
+// compares the resulting errors with the ERROR annotations in the source.
+// Except for manual tests, each package is type-checked twice, once without
+// use of Alias types, and once with Alias types.
+//
+// The srcs slice contains the file content for the files named in the
+// filenames slice. The colDelta parameter specifies the tolerance for position
+// mismatch when comparing errors. The manual parameter specifies whether this
+// is a 'manual' test.
+//
+// If provided, opts may be used to mutate the Config before type-checking.
+func testFiles(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) {
+ // Alias types are disabled by default
+ testFilesImpl(t, filenames, srcs, colDelta, manual, opts...)
+ if !manual {
+ t.Setenv("GODEBUG", "gotypesalias=1")
+ testFilesImpl(t, filenames, srcs, colDelta, manual, opts...)
+ }
+}
+
+func testFilesImpl(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) {
+ if len(filenames) == 0 {
+ t.Fatal("no source files")
+ }
+
+ // parse files
+ files, errlist := parseFiles(t, filenames, srcs, 0)
+ pkgName := "<no package>"
+ if len(files) > 0 {
+ pkgName = files[0].PkgName.Value
+ }
+ listErrors := manual && !*verifyErrors
+ if listErrors && len(errlist) > 0 {
+ t.Errorf("--- %s:", pkgName)
+ for _, err := range errlist {
+ t.Error(err)
+ }
+ }
+
+ // set up typechecker
+ var conf Config
+ conf.Trace = manual && testing.Verbose()
+ conf.Importer = defaultImporter()
+ conf.Error = func(err error) {
+ if *haltOnError {
+ defer panic(err)
+ }
+ if listErrors {
+ t.Error(err)
+ return
+ }
+ errlist = append(errlist, err)
+ }
+
+ // apply custom configuration
+ for _, opt := range opts {
+ opt(&conf)
+ }
+
+ // apply flag setting (overrides custom configuration)
+ var goexperiment, gotypesalias string
+ flags := flag.NewFlagSet("", flag.PanicOnError)
+ flags.StringVar(&conf.GoVersion, "lang", "", "")
+ flags.StringVar(&goexperiment, "goexperiment", "", "")
+ flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "")
+ flags.StringVar(&gotypesalias, "gotypesalias", "", "")
+ if err := parseFlags(srcs[0], flags); err != nil {
+ t.Fatal(err)
+ }
+
+ exp, err := buildcfg.ParseGOEXPERIMENT(runtime.GOOS, runtime.GOARCH, goexperiment)
+ if err != nil {
+ t.Fatal(err)
+ }
+ old := buildcfg.Experiment
+ defer func() {
+ buildcfg.Experiment = old
+ }()
+ buildcfg.Experiment = *exp
+
+ // By default, gotypesalias is not set.
+ if gotypesalias != "" {
+ t.Setenv("GODEBUG", "gotypesalias="+gotypesalias)
+ }
+
+ // Provide Config.Info with all maps so that info recording is tested.
+ info := Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ Instances: make(map[*syntax.Name]Instance),
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Implicits: make(map[syntax.Node]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ Scopes: make(map[syntax.Node]*Scope),
+ FileVersions: make(map[*syntax.PosBase]string),
+ }
+
+ // typecheck
+ conf.Check(pkgName, files, &info)
+ if listErrors {
+ return
+ }
+
+ // collect expected errors
+ errmap := make(map[string]map[uint][]syntax.Error)
+ for i, filename := range filenames {
+ if m := syntax.CommentMap(bytes.NewReader(srcs[i]), regexp.MustCompile("^ ERRORx? ")); len(m) > 0 {
+ errmap[filename] = m
+ }
+ }
+
+ // match against found errors
+ var indices []int // list indices of matching errors, reused for each error
+ for _, err := range errlist {
+ gotPos, gotMsg := unpackError(err)
+
+ // find list of errors for the respective error line
+ filename := gotPos.Base().Filename()
+ filemap := errmap[filename]
+ line := gotPos.Line()
+ var errList []syntax.Error
+ if filemap != nil {
+ errList = filemap[line]
+ }
+
+ // At least one of the errors in errList should match the current error.
+ indices = indices[:0]
+ for i, want := range errList {
+ pattern, substr := strings.CutPrefix(want.Msg, " ERROR ")
+ if !substr {
+ var found bool
+ pattern, found = strings.CutPrefix(want.Msg, " ERRORx ")
+ if !found {
+ panic("unreachable")
+ }
+ }
+ pattern, err := strconv.Unquote(strings.TrimSpace(pattern))
+ if err != nil {
+ t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err)
+ continue
+ }
+ if substr {
+ if !strings.Contains(gotMsg, pattern) {
+ continue
+ }
+ } else {
+ rx, err := regexp.Compile(pattern)
+ if err != nil {
+ t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err)
+ continue
+ }
+ if !rx.MatchString(gotMsg) {
+ continue
+ }
+ }
+ indices = append(indices, i)
+ }
+ if len(indices) == 0 {
+ t.Errorf("%s: no error expected: %q", gotPos, gotMsg)
+ continue
+ }
+ // len(indices) > 0
+
+ // If there are multiple matching errors, select the one with the closest column position.
+ index := -1 // index of matching error
+ var delta uint
+ for _, i := range indices {
+ if d := absDiff(gotPos.Col(), errList[i].Pos.Col()); index < 0 || d < delta {
+ index, delta = i, d
+ }
+ }
+
+ // The closest column position must be within expected colDelta.
+ if delta > colDelta {
+ t.Errorf("%s: got col = %d; want %d", gotPos, gotPos.Col(), errList[index].Pos.Col())
+ }
+
+ // eliminate from errList
+ if n := len(errList) - 1; n > 0 {
+ // not the last entry - slide entries down (don't reorder)
+ copy(errList[index:], errList[index+1:])
+ filemap[line] = errList[:n]
+ } else {
+ // last entry - remove errList from filemap
+ delete(filemap, line)
+ }
+
+ // if filemap is empty, eliminate from errmap
+ if len(filemap) == 0 {
+ delete(errmap, filename)
+ }
+ }
+
+ // there should be no expected errors left
+ if len(errmap) > 0 {
+ t.Errorf("--- %s: unreported errors:", pkgName)
+ for filename, filemap := range errmap {
+ for line, errList := range filemap {
+ for _, err := range errList {
+ t.Errorf("%s:%d:%d: %s", filename, line, err.Pos.Col(), err.Msg)
+ }
+ }
+ }
+ }
+}
+
+// boolFieldAddr(conf, name) returns the address of the boolean field conf.<name>.
+// For accessing unexported fields.
+func boolFieldAddr(conf *Config, name string) *bool {
+ v := reflect.Indirect(reflect.ValueOf(conf))
+ return (*bool)(v.FieldByName(name).Addr().UnsafePointer())
+}
+
+// TestManual is for manual testing of a package - either provided
+// as a list of filenames belonging to the package, or a directory
+// name containing the package files - after the test arguments
+// (and a separating "--"). For instance, to test the package made
+// of the files foo.go and bar.go, use:
+//
+// go test -run Manual -- foo.go bar.go
+//
+// If no source arguments are provided, the file testdata/manual.go
+// is used instead.
+// Provide the -verify flag to verify errors against ERROR comments
+// in the input files rather than having a list of errors reported.
+// The accepted Go language version can be controlled with the -lang
+// flag.
+func TestManual(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ filenames := flag.Args()
+ if len(filenames) == 0 {
+ filenames = []string{filepath.FromSlash("testdata/manual.go")}
+ }
+
+ info, err := os.Stat(filenames[0])
+ if err != nil {
+ t.Fatalf("TestManual: %v", err)
+ }
+
+ DefPredeclaredTestFuncs()
+ if info.IsDir() {
+ if len(filenames) > 1 {
+ t.Fatal("TestManual: must have only one directory argument")
+ }
+ testDir(t, filenames[0], 0, true)
+ } else {
+ testPkg(t, filenames, 0, true)
+ }
+}
+
+func TestLongConstants(t *testing.T) {
+ format := `package longconst; const _ = %s /* ERROR "constant overflow" */; const _ = %s // ERROR "excessively long constant"`
+ src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001))
+ testFiles(t, []string{"longconst.go"}, [][]byte{[]byte(src)}, 0, false)
+}
+
+func withSizes(sizes Sizes) func(*Config) {
+ return func(cfg *Config) {
+ cfg.Sizes = sizes
+ }
+}
+
+// TestIndexRepresentability tests that constant index operands must
+// be representable as int even if they already have a type that can
+// represent larger values.
+func TestIndexRepresentability(t *testing.T) {
+ const src = `package index; var s []byte; var _ = s[int64 /* ERRORx "int64\\(1\\) << 40 \\(.*\\) overflows int" */ (1) << 40]`
+ testFiles(t, []string{"index.go"}, [][]byte{[]byte(src)}, 0, false, withSizes(&StdSizes{4, 4}))
+}
+
+func TestIssue47243_TypedRHS(t *testing.T) {
+ // The RHS of the shift expression below overflows uint on 32bit platforms,
+ // but this is OK as it is explicitly typed.
+ const src = `package issue47243; var a uint64; var _ = a << uint64(4294967296)` // uint64(1<<32)
+ testFiles(t, []string{"p.go"}, [][]byte{[]byte(src)}, 0, false, withSizes(&StdSizes{4, 4}))
+}
+
+func TestCheck(t *testing.T) {
+ old := buildcfg.Experiment.RangeFunc
+ defer func() {
+ buildcfg.Experiment.RangeFunc = old
+ }()
+ buildcfg.Experiment.RangeFunc = true
+
+ DefPredeclaredTestFuncs()
+ testDirFiles(t, "../../../../internal/types/testdata/check", 50, false) // TODO(gri) narrow column tolerance
+}
+func TestSpec(t *testing.T) { testDirFiles(t, "../../../../internal/types/testdata/spec", 0, false) }
+func TestExamples(t *testing.T) {
+ testDirFiles(t, "../../../../internal/types/testdata/examples", 125, false)
+} // TODO(gri) narrow column tolerance
+func TestFixedbugs(t *testing.T) {
+ testDirFiles(t, "../../../../internal/types/testdata/fixedbugs", 100, false)
+} // TODO(gri) narrow column tolerance
+func TestLocal(t *testing.T) { testDirFiles(t, "testdata/local", 0, false) }
+
+func testDirFiles(t *testing.T, dir string, colDelta uint, manual bool) {
+ testenv.MustHaveGoBuild(t)
+ dir = filepath.FromSlash(dir)
+
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ for _, fi := range fis {
+ path := filepath.Join(dir, fi.Name())
+
+ // If fi is a directory, its files make up a single package.
+ if fi.IsDir() {
+ testDir(t, path, colDelta, manual)
+ } else {
+ t.Run(filepath.Base(path), func(t *testing.T) {
+ testPkg(t, []string{path}, colDelta, manual)
+ })
+ }
+ }
+}
+
+func testDir(t *testing.T, dir string, colDelta uint, manual bool) {
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var filenames []string
+ for _, fi := range fis {
+ filenames = append(filenames, filepath.Join(dir, fi.Name()))
+ }
+
+ t.Run(filepath.Base(dir), func(t *testing.T) {
+ testPkg(t, filenames, colDelta, manual)
+ })
+}
+
+func testPkg(t *testing.T, filenames []string, colDelta uint, manual bool) {
+ srcs := make([][]byte, len(filenames))
+ for i, filename := range filenames {
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ t.Fatalf("could not read %s: %v", filename, err)
+ }
+ srcs[i] = src
+ }
+ testFiles(t, filenames, srcs, colDelta, manual)
+}
diff --git a/src/cmd/compile/internal/types2/compilersupport.go b/src/cmd/compile/internal/types2/compilersupport.go
new file mode 100644
index 0000000..33dd8e8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/compilersupport.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions exported for the compiler.
+// Do not use internally.
+
+package types2
+
+// If t is a pointer, AsPointer returns that type, otherwise it returns nil.
+func AsPointer(t Type) *Pointer {
+ u, _ := t.Underlying().(*Pointer)
+ return u
+}
+
+// If t is a signature, AsSignature returns that type, otherwise it returns nil.
+func AsSignature(t Type) *Signature {
+ u, _ := t.Underlying().(*Signature)
+ return u
+}
+
+// If typ is a type parameter, CoreType returns the single underlying
+// type of all types in the corresponding type constraint if it exists, or
+// nil otherwise. If the type set contains only unrestricted and restricted
+// channel types (with identical element types), the single underlying type
+// is the restricted channel type if the restrictions are always the same.
+// If typ is not a type parameter, CoreType returns the underlying type.
+func CoreType(t Type) Type {
+ return coreType(t)
+}
diff --git a/src/cmd/compile/internal/types2/const.go b/src/cmd/compile/internal/types2/const.go
new file mode 100644
index 0000000..af27c72
--- /dev/null
+++ b/src/cmd/compile/internal/types2/const.go
@@ -0,0 +1,306 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements functions for untyped constant operands.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+ "go/token"
+ . "internal/types/errors"
+ "math"
+)
+
+// overflow checks that the constant x is representable by its type.
+// For untyped constants, it checks that the value doesn't become
+// arbitrarily large.
+func (check *Checker) overflow(x *operand, opPos syntax.Pos) {
+ assert(x.mode == constant_)
+
+ if x.val.Kind() == constant.Unknown {
+ // TODO(gri) We should report exactly what went wrong. At the
+ // moment we don't have the (go/constant) API for that.
+ // See also TODO in go/constant/value.go.
+ check.error(atPos(opPos), InvalidConstVal, "constant result is not representable")
+ return
+ }
+
+ // Typed constants must be representable in
+ // their type after each constant operation.
+ // x.typ cannot be a type parameter (type
+ // parameters cannot be constant types).
+ if isTyped(x.typ) {
+ check.representable(x, under(x.typ).(*Basic))
+ return
+ }
+
+ // Untyped integer values must not grow arbitrarily.
+ const prec = 512 // 512 is the constant precision
+ if x.val.Kind() == constant.Int && constant.BitLen(x.val) > prec {
+ op := opName(x.expr)
+ if op != "" {
+ op += " "
+ }
+ check.errorf(atPos(opPos), InvalidConstVal, "constant %soverflow", op)
+ x.val = constant.MakeUnknown()
+ }
+}
+
+// representableConst reports whether x can be represented as
+// value of the given basic type and for the configuration
+// provided (only needed for int/uint sizes).
+//
+// If rounded != nil, *rounded is set to the rounded value of x for
+// representable floating-point and complex values, and to an Int
+// value for integer values; it is left alone otherwise.
+// It is ok to provide the addressof the first argument for rounded.
+//
+// The check parameter may be nil if representableConst is invoked
+// (indirectly) through an exported API call (AssignableTo, ConvertibleTo)
+// because we don't need the Checker's config for those calls.
+func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *constant.Value) bool {
+ if x.Kind() == constant.Unknown {
+ return true // avoid follow-up errors
+ }
+
+ var conf *Config
+ if check != nil {
+ conf = check.conf
+ }
+
+ sizeof := func(T Type) int64 {
+ s := conf.sizeof(T)
+ return s
+ }
+
+ switch {
+ case isInteger(typ):
+ x := constant.ToInt(x)
+ if x.Kind() != constant.Int {
+ return false
+ }
+ if rounded != nil {
+ *rounded = x
+ }
+ if x, ok := constant.Int64Val(x); ok {
+ switch typ.kind {
+ case Int:
+ var s = uint(sizeof(typ)) * 8
+ return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1
+ case Int8:
+ const s = 8
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int16:
+ const s = 16
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int32:
+ const s = 32
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int64, UntypedInt:
+ return true
+ case Uint, Uintptr:
+ if s := uint(sizeof(typ)) * 8; s < 64 {
+ return 0 <= x && x <= int64(1)<<s-1
+ }
+ return 0 <= x
+ case Uint8:
+ const s = 8
+ return 0 <= x && x <= 1<<s-1
+ case Uint16:
+ const s = 16
+ return 0 <= x && x <= 1<<s-1
+ case Uint32:
+ const s = 32
+ return 0 <= x && x <= 1<<s-1
+ case Uint64:
+ return 0 <= x
+ default:
+ unreachable()
+ }
+ }
+ // x does not fit into int64
+ switch n := constant.BitLen(x); typ.kind {
+ case Uint, Uintptr:
+ var s = uint(sizeof(typ)) * 8
+ return constant.Sign(x) >= 0 && n <= int(s)
+ case Uint64:
+ return constant.Sign(x) >= 0 && n <= 64
+ case UntypedInt:
+ return true
+ }
+
+ case isFloat(typ):
+ x := constant.ToFloat(x)
+ if x.Kind() != constant.Float {
+ return false
+ }
+ switch typ.kind {
+ case Float32:
+ if rounded == nil {
+ return fitsFloat32(x)
+ }
+ r := roundFloat32(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case Float64:
+ if rounded == nil {
+ return fitsFloat64(x)
+ }
+ r := roundFloat64(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case UntypedFloat:
+ return true
+ default:
+ unreachable()
+ }
+
+ case isComplex(typ):
+ x := constant.ToComplex(x)
+ if x.Kind() != constant.Complex {
+ return false
+ }
+ switch typ.kind {
+ case Complex64:
+ if rounded == nil {
+ return fitsFloat32(constant.Real(x)) && fitsFloat32(constant.Imag(x))
+ }
+ re := roundFloat32(constant.Real(x))
+ im := roundFloat32(constant.Imag(x))
+ if re != nil && im != nil {
+ *rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+ return true
+ }
+ case Complex128:
+ if rounded == nil {
+ return fitsFloat64(constant.Real(x)) && fitsFloat64(constant.Imag(x))
+ }
+ re := roundFloat64(constant.Real(x))
+ im := roundFloat64(constant.Imag(x))
+ if re != nil && im != nil {
+ *rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+ return true
+ }
+ case UntypedComplex:
+ return true
+ default:
+ unreachable()
+ }
+
+ case isString(typ):
+ return x.Kind() == constant.String
+
+ case isBoolean(typ):
+ return x.Kind() == constant.Bool
+ }
+
+ return false
+}
+
+func fitsFloat32(x constant.Value) bool {
+ f32, _ := constant.Float32Val(x)
+ f := float64(f32)
+ return !math.IsInf(f, 0)
+}
+
+func roundFloat32(x constant.Value) constant.Value {
+ f32, _ := constant.Float32Val(x)
+ f := float64(f32)
+ if !math.IsInf(f, 0) {
+ return constant.MakeFloat64(f)
+ }
+ return nil
+}
+
+func fitsFloat64(x constant.Value) bool {
+ f, _ := constant.Float64Val(x)
+ return !math.IsInf(f, 0)
+}
+
+func roundFloat64(x constant.Value) constant.Value {
+ f, _ := constant.Float64Val(x)
+ if !math.IsInf(f, 0) {
+ return constant.MakeFloat64(f)
+ }
+ return nil
+}
+
+// representable checks that a constant operand is representable in the given
+// basic type.
+func (check *Checker) representable(x *operand, typ *Basic) {
+ v, code := check.representation(x, typ)
+ if code != 0 {
+ check.invalidConversion(code, x, typ)
+ x.mode = invalid
+ return
+ }
+ assert(v != nil)
+ x.val = v
+}
+
+// representation returns the representation of the constant operand x as the
+// basic type typ.
+//
+// If no such representation is possible, it returns a non-zero error code.
+func (check *Checker) representation(x *operand, typ *Basic) (constant.Value, Code) {
+ assert(x.mode == constant_)
+ v := x.val
+ if !representableConst(x.val, check, typ, &v) {
+ if isNumeric(x.typ) && isNumeric(typ) {
+ // numeric conversion : error msg
+ //
+ // integer -> integer : overflows
+ // integer -> float : overflows (actually not possible)
+ // float -> integer : truncated
+ // float -> float : overflows
+ //
+ if !isInteger(x.typ) && isInteger(typ) {
+ return nil, TruncatedFloat
+ } else {
+ return nil, NumericOverflow
+ }
+ }
+ return nil, InvalidConstVal
+ }
+ return v, 0
+}
+
+func (check *Checker) invalidConversion(code Code, x *operand, target Type) {
+ msg := "cannot convert %s to type %s"
+ switch code {
+ case TruncatedFloat:
+ msg = "%s truncated to %s"
+ case NumericOverflow:
+ msg = "%s overflows %s"
+ }
+ check.errorf(x, code, msg, x, target)
+}
+
+// convertUntyped attempts to set the type of an untyped value to the target type.
+func (check *Checker) convertUntyped(x *operand, target Type) {
+ newType, val, code := check.implicitTypeAndValue(x, target)
+ if code != 0 {
+ t := target
+ if !isTypeParam(target) {
+ t = safeUnderlying(target)
+ }
+ check.invalidConversion(code, x, t)
+ x.mode = invalid
+ return
+ }
+ if val != nil {
+ x.val = val
+ check.updateExprVal(x.expr, val)
+ }
+ if newType != x.typ {
+ x.typ = newType
+ check.updateExprType(x.expr, newType, false)
+ }
+}
diff --git a/src/cmd/compile/internal/types2/context.go b/src/cmd/compile/internal/types2/context.go
new file mode 100644
index 0000000..7723124
--- /dev/null
+++ b/src/cmd/compile/internal/types2/context.go
@@ -0,0 +1,144 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// This file contains a definition of the type-checking context; an opaque type
+// that may be supplied by users during instantiation.
+//
+// Contexts serve two purposes:
+// - reduce the duplication of identical instances
+// - short-circuit instantiation cycles
+//
+// For the latter purpose, we must always have a context during instantiation,
+// whether or not it is supplied by the user. For both purposes, it must be the
+// case that hashing a pointer-identical type produces consistent results
+// (somewhat obviously).
+//
+// However, neither of these purposes require that our hash is perfect, and so
+// this was not an explicit design goal of the context type. In fact, due to
+// concurrent use it is convenient not to guarantee de-duplication.
+//
+// Nevertheless, in the future it could be helpful to allow users to leverage
+// contexts to canonicalize instances, and it would probably be possible to
+// achieve such a guarantee.
+
+// A Context is an opaque type checking context. It may be used to share
+// identical type instances across type-checked packages or calls to
+// Instantiate. Contexts are safe for concurrent use.
+//
+// The use of a shared context does not guarantee that identical instances are
+// deduplicated in all cases.
+type Context struct {
+ mu sync.Mutex
+ typeMap map[string][]ctxtEntry // type hash -> instances entries
+ nextID int // next unique ID
+ originIDs map[Type]int // origin type -> unique ID
+}
+
+type ctxtEntry struct {
+ orig Type
+ targs []Type
+ instance Type // = orig[targs]
+}
+
+// NewContext creates a new Context.
+func NewContext() *Context {
+ return &Context{
+ typeMap: make(map[string][]ctxtEntry),
+ originIDs: make(map[Type]int),
+ }
+}
+
+// instanceHash returns a string representation of typ instantiated with targs.
+// The hash should be a perfect hash, though out of caution the type checker
+// does not assume this. The result is guaranteed to not contain blanks.
+func (ctxt *Context) instanceHash(orig Type, targs []Type) string {
+ assert(ctxt != nil)
+ assert(orig != nil)
+ var buf bytes.Buffer
+
+ h := newTypeHasher(&buf, ctxt)
+ h.string(strconv.Itoa(ctxt.getID(orig)))
+ // Because we've already written the unique origin ID this call to h.typ is
+ // unnecessary, but we leave it for hash readability. It can be removed later
+ // if performance is an issue.
+ h.typ(orig)
+ if len(targs) > 0 {
+ // TODO(rfindley): consider asserting on isGeneric(typ) here, if and when
+ // isGeneric handles *Signature types.
+ h.typeList(targs)
+ }
+
+ return strings.ReplaceAll(buf.String(), " ", "#")
+}
+
+// lookup returns an existing instantiation of orig with targs, if it exists.
+// Otherwise, it returns nil.
+func (ctxt *Context) lookup(h string, orig Type, targs []Type) Type {
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+
+ for _, e := range ctxt.typeMap[h] {
+ if identicalInstance(orig, targs, e.orig, e.targs) {
+ return e.instance
+ }
+ if debug {
+ // Panic during development to surface any imperfections in our hash.
+ panic(fmt.Sprintf("non-identical instances: (orig: %s, targs: %v) and %s", orig, targs, e.instance))
+ }
+ }
+
+ return nil
+}
+
+// update de-duplicates n against previously seen types with the hash h. If an
+// identical type is found with the type hash h, the previously seen type is
+// returned. Otherwise, n is returned, and recorded in the Context for the hash
+// h.
+func (ctxt *Context) update(h string, orig Type, targs []Type, inst Type) Type {
+ assert(inst != nil)
+
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+
+ for _, e := range ctxt.typeMap[h] {
+ if inst == nil || Identical(inst, e.instance) {
+ return e.instance
+ }
+ if debug {
+ // Panic during development to surface any imperfections in our hash.
+ panic(fmt.Sprintf("%s and %s are not identical", inst, e.instance))
+ }
+ }
+
+ ctxt.typeMap[h] = append(ctxt.typeMap[h], ctxtEntry{
+ orig: orig,
+ targs: targs,
+ instance: inst,
+ })
+
+ return inst
+}
+
+// getID returns a unique ID for the type t.
+func (ctxt *Context) getID(t Type) int {
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+ id, ok := ctxt.originIDs[t]
+ if !ok {
+ id = ctxt.nextID
+ ctxt.originIDs[t] = id
+ ctxt.nextID++
+ }
+ return id
+}
diff --git a/src/cmd/compile/internal/types2/context_test.go b/src/cmd/compile/internal/types2/context_test.go
new file mode 100644
index 0000000..aa649b1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/context_test.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "testing"
+)
+
+func TestContextHashCollisions(t *testing.T) {
+ if debug {
+ t.Skip("hash collisions are expected, and would fail debug assertions")
+ }
+ // Unit test the de-duplication fall-back logic in Context.
+ //
+ // We can't test this via Instantiate because this is only a fall-back in
+ // case our hash is imperfect.
+ //
+ // These lookups and updates use reasonable looking types in an attempt to
+ // make them robust to internal type assertions, but could equally well use
+ // arbitrary types.
+
+ // Create some distinct origin types. nullaryP and nullaryQ have no
+ // parameters and are identical (but have different type parameter names).
+ // unaryP has a parameter.
+ var nullaryP, nullaryQ, unaryP Type
+ {
+ // type nullaryP = func[P any]()
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface)
+ nullaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false)
+ }
+ {
+ // type nullaryQ = func[Q any]()
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "Q", nil), &emptyInterface)
+ nullaryQ = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false)
+ }
+ {
+ // type unaryP = func[P any](_ P)
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface)
+ params := NewTuple(NewVar(nopos, nil, "_", tparam))
+ unaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, params, nil, false)
+ }
+
+ ctxt := NewContext()
+
+ // Update the context with an instantiation of nullaryP.
+ inst := NewSignatureType(nil, nil, nil, nil, nil, false)
+ if got := ctxt.update("", nullaryP, []Type{Typ[Int]}, inst); got != inst {
+ t.Error("bad")
+ }
+
+ // unaryP is not identical to nullaryP, so we should not get inst when
+ // instantiated with identical type arguments.
+ if got := ctxt.lookup("", unaryP, []Type{Typ[Int]}); got != nil {
+ t.Error("bad")
+ }
+
+ // nullaryQ is identical to nullaryP, so we *should* get inst when
+ // instantiated with identical type arguments.
+ if got := ctxt.lookup("", nullaryQ, []Type{Typ[Int]}); got != inst {
+ t.Error("bad")
+ }
+
+ // ...but verify we don't get inst with different type arguments.
+ if got := ctxt.lookup("", nullaryQ, []Type{Typ[String]}); got != nil {
+ t.Error("bad")
+ }
+}
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
new file mode 100644
index 0000000..8027092
--- /dev/null
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -0,0 +1,311 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of conversions.
+
+package types2
+
+import (
+ "go/constant"
+ . "internal/types/errors"
+ "unicode"
+)
+
+// conversion type-checks the conversion T(x).
+// The result is in x.
+func (check *Checker) conversion(x *operand, T Type) {
+ constArg := x.mode == constant_
+
+ constConvertibleTo := func(T Type, val *constant.Value) bool {
+ switch t, _ := under(T).(*Basic); {
+ case t == nil:
+ // nothing to do
+ case representableConst(x.val, check, t, val):
+ return true
+ case isInteger(x.typ) && isString(t):
+ codepoint := unicode.ReplacementChar
+ if i, ok := constant.Uint64Val(x.val); ok && i <= unicode.MaxRune {
+ codepoint = rune(i)
+ }
+ if val != nil {
+ *val = constant.MakeString(string(codepoint))
+ }
+ return true
+ }
+ return false
+ }
+
+ var ok bool
+ var cause string
+ switch {
+ case constArg && isConstType(T):
+ // constant conversion
+ ok = constConvertibleTo(T, &x.val)
+ // A conversion from an integer constant to an integer type
+ // can only fail if there's overflow. Give a concise error.
+ // (go.dev/issue/63563)
+ if !ok && isInteger(x.typ) && isInteger(T) {
+ check.errorf(x, InvalidConversion, "constant %s overflows %s", x.val, T)
+ x.mode = invalid
+ return
+ }
+ case constArg && isTypeParam(T):
+ // x is convertible to T if it is convertible
+ // to each specific type in the type set of T.
+ // If T's type set is empty, or if it doesn't
+ // have specific types, constant x cannot be
+ // converted.
+ ok = T.(*TypeParam).underIs(func(u Type) bool {
+ // u is nil if there are no specific type terms
+ if u == nil {
+ cause = check.sprintf("%s does not contain specific types", T)
+ return false
+ }
+ if isString(x.typ) && isBytesOrRunes(u) {
+ return true
+ }
+ if !constConvertibleTo(u, nil) {
+ if isInteger(x.typ) && isInteger(u) {
+ // see comment above on constant conversion
+ cause = check.sprintf("constant %s overflows %s (in %s)", x.val, u, T)
+ } else {
+ cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T)
+ }
+ return false
+ }
+ return true
+ })
+ x.mode = value // type parameters are not constants
+ case x.convertibleTo(check, T, &cause):
+ // non-constant conversion
+ ok = true
+ x.mode = value
+ }
+
+ if !ok {
+ if cause != "" {
+ check.errorf(x, InvalidConversion, "cannot convert %s to type %s: %s", x, T, cause)
+ } else {
+ check.errorf(x, InvalidConversion, "cannot convert %s to type %s", x, T)
+ }
+ x.mode = invalid
+ return
+ }
+
+ // The conversion argument types are final. For untyped values the
+ // conversion provides the type, per the spec: "A constant may be
+ // given a type explicitly by a constant declaration or conversion,...".
+ if isUntyped(x.typ) {
+ final := T
+ // - For conversions to interfaces, except for untyped nil arguments,
+ // use the argument's default type.
+ // - For conversions of untyped constants to non-constant types, also
+ // use the default type (e.g., []byte("foo") should report string
+ // not []byte as type for the constant "foo").
+ // - For constant integer to string conversions, keep the argument type.
+ // (See also the TODO below.)
+ if x.typ == Typ[UntypedNil] {
+ // ok
+ } else if isNonTypeParamInterface(T) || constArg && !isConstType(T) {
+ final = Default(x.typ)
+ } else if x.mode == constant_ && isInteger(x.typ) && allString(T) {
+ final = x.typ
+ }
+ check.updateExprType(x.expr, final, true)
+ }
+
+ x.typ = T
+}
+
+// TODO(gri) convertibleTo checks if T(x) is valid. It assumes that the type
+// of x is fully known, but that's not the case for say string(1<<s + 1.0):
+// Here, the type of 1<<s + 1.0 will be UntypedFloat which will lead to the
+// (correct!) refusal of the conversion. But the reported error is essentially
+// "cannot convert untyped float value to string", yet the correct error (per
+// the spec) is that we cannot shift a floating-point value: 1 in 1<<s should
+// be converted to UntypedFloat because of the addition of 1.0. Fixing this
+// is tricky because we'd have to run updateExprType on the argument first.
+// (go.dev/issue/21982.)
+
+// convertibleTo reports whether T(x) is valid. In the failure case, *cause
+// may be set to the cause for the failure.
+// The check parameter may be nil if convertibleTo is invoked through an
+// exported API call, i.e., when all methods have been type-checked.
+func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
+ // "x is assignable to T"
+ if ok, _ := x.assignableTo(check, T, cause); ok {
+ return true
+ }
+
+ // "V and T have identical underlying types if tags are ignored
+ // and V and T are not type parameters"
+ V := x.typ
+ Vu := under(V)
+ Tu := under(T)
+ Vp, _ := V.(*TypeParam)
+ Tp, _ := T.(*TypeParam)
+ if IdenticalIgnoreTags(Vu, Tu) && Vp == nil && Tp == nil {
+ return true
+ }
+
+ // "V and T are unnamed pointer types and their pointer base types
+ // have identical underlying types if tags are ignored
+ // and their pointer base types are not type parameters"
+ if V, ok := V.(*Pointer); ok {
+ if T, ok := T.(*Pointer); ok {
+ if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) {
+ return true
+ }
+ }
+ }
+
+ // "V and T are both integer or floating point types"
+ if isIntegerOrFloat(Vu) && isIntegerOrFloat(Tu) {
+ return true
+ }
+
+ // "V and T are both complex types"
+ if isComplex(Vu) && isComplex(Tu) {
+ return true
+ }
+
+ // "V is an integer or a slice of bytes or runes and T is a string type"
+ if (isInteger(Vu) || isBytesOrRunes(Vu)) && isString(Tu) {
+ return true
+ }
+
+ // "V is a string and T is a slice of bytes or runes"
+ if isString(Vu) && isBytesOrRunes(Tu) {
+ return true
+ }
+
+ // package unsafe:
+ // "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
+ if (isPointer(Vu) || isUintptr(Vu)) && isUnsafePointer(Tu) {
+ return true
+ }
+ // "and vice versa"
+ if isUnsafePointer(Vu) && (isPointer(Tu) || isUintptr(Tu)) {
+ return true
+ }
+
+ // "V is a slice, T is an array or pointer-to-array type,
+ // and the slice and array types have identical element types."
+ if s, _ := Vu.(*Slice); s != nil {
+ switch a := Tu.(type) {
+ case *Array:
+ if Identical(s.Elem(), a.Elem()) {
+ if check == nil || check.allowVersion(check.pkg, x, go1_20) {
+ return true
+ }
+ // check != nil
+ if cause != nil {
+ // TODO(gri) consider restructuring versionErrorf so we can use it here and below
+ *cause = "conversion of slices to arrays requires go1.20 or later"
+ }
+ return false
+ }
+ case *Pointer:
+ if a, _ := under(a.Elem()).(*Array); a != nil {
+ if Identical(s.Elem(), a.Elem()) {
+ if check == nil || check.allowVersion(check.pkg, x, go1_17) {
+ return true
+ }
+ // check != nil
+ if cause != nil {
+ *cause = "conversion of slices to array pointers requires go1.17 or later"
+ }
+ return false
+ }
+ }
+ }
+ }
+
+ // optimization: if we don't have type parameters, we're done
+ if Vp == nil && Tp == nil {
+ return false
+ }
+
+ errorf := func(format string, args ...interface{}) {
+ if check != nil && cause != nil {
+ msg := check.sprintf(format, args...)
+ if *cause != "" {
+ msg += "\n\t" + *cause
+ }
+ *cause = msg
+ }
+ }
+
+ // generic cases with specific type terms
+ // (generic operands cannot be constants, so we can ignore x.val)
+ switch {
+ case Vp != nil && Tp != nil:
+ x := *x // don't clobber outer x
+ return Vp.is(func(V *term) bool {
+ if V == nil {
+ return false // no specific types
+ }
+ x.typ = V.typ
+ return Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
+ if !x.convertibleTo(check, T.typ, cause) {
+ errorf("cannot convert %s (in %s) to type %s (in %s)", V.typ, Vp, T.typ, Tp)
+ return false
+ }
+ return true
+ })
+ })
+ case Vp != nil:
+ x := *x // don't clobber outer x
+ return Vp.is(func(V *term) bool {
+ if V == nil {
+ return false // no specific types
+ }
+ x.typ = V.typ
+ if !x.convertibleTo(check, T, cause) {
+ errorf("cannot convert %s (in %s) to type %s", V.typ, Vp, T)
+ return false
+ }
+ return true
+ })
+ case Tp != nil:
+ return Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
+ if !x.convertibleTo(check, T.typ, cause) {
+ errorf("cannot convert %s to type %s (in %s)", x.typ, T.typ, Tp)
+ return false
+ }
+ return true
+ })
+ }
+
+ return false
+}
+
+func isUintptr(typ Type) bool {
+ t, _ := under(typ).(*Basic)
+ return t != nil && t.kind == Uintptr
+}
+
+func isUnsafePointer(typ Type) bool {
+ t, _ := under(typ).(*Basic)
+ return t != nil && t.kind == UnsafePointer
+}
+
+func isPointer(typ Type) bool {
+ _, ok := under(typ).(*Pointer)
+ return ok
+}
+
+func isBytesOrRunes(typ Type) bool {
+ if s, _ := under(typ).(*Slice); s != nil {
+ t, _ := under(s.elem).(*Basic)
+ return t != nil && (t.kind == Byte || t.kind == Rune)
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
new file mode 100644
index 0000000..f3e3418
--- /dev/null
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -0,0 +1,910 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ . "internal/types/errors"
+)
+
+func (err *error_) recordAltDecl(obj Object) {
+ if pos := obj.Pos(); pos.IsKnown() {
+ // We use "other" rather than "previous" here because
+ // the first declaration seen may not be textually
+ // earlier in the source.
+ err.errorf(pos, "other declaration of %s", obj.Name())
+ }
+}
+
+func (check *Checker) declare(scope *Scope, id *syntax.Name, obj Object, pos syntax.Pos) {
+ // spec: "The blank identifier, represented by the underscore
+ // character _, may be used in a declaration like any other
+ // identifier but the declaration does not introduce a new
+ // binding."
+ if obj.Name() != "_" {
+ if alt := scope.Insert(obj); alt != nil {
+ var err error_
+ err.code = DuplicateDecl
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return
+ }
+ obj.setScopePos(pos)
+ }
+ if id != nil {
+ check.recordDef(id, obj)
+ }
+}
+
+// pathString returns a string of the form a->b-> ... ->g for a path [a, b, ... g].
+func pathString(path []Object) string {
+ var s string
+ for i, p := range path {
+ if i > 0 {
+ s += "->"
+ }
+ s += p.Name()
+ }
+ return s
+}
+
+// objDecl type-checks the declaration of obj in its respective (file) environment.
+// For the meaning of def, see Checker.definedType, in typexpr.go.
+func (check *Checker) objDecl(obj Object, def *TypeName) {
+ if check.conf.Trace && obj.Type() == nil {
+ if check.indent == 0 {
+ fmt.Println() // empty line between top-level objects for readability
+ }
+ check.trace(obj.Pos(), "-- checking %s (%s, objPath = %s)", obj, obj.color(), pathString(check.objPath))
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(obj.Pos(), "=> %s (%s)", obj, obj.color())
+ }()
+ }
+
+ // Checking the declaration of obj means inferring its type
+ // (and possibly its value, for constants).
+ // An object's type (and thus the object) may be in one of
+ // three states which are expressed by colors:
+ //
+ // - an object whose type is not yet known is painted white (initial color)
+ // - an object whose type is in the process of being inferred is painted grey
+ // - an object whose type is fully inferred is painted black
+ //
+ // During type inference, an object's color changes from white to grey
+ // to black (pre-declared objects are painted black from the start).
+ // A black object (i.e., its type) can only depend on (refer to) other black
+ // ones. White and grey objects may depend on white and black objects.
+ // A dependency on a grey object indicates a cycle which may or may not be
+ // valid.
+ //
+ // When objects turn grey, they are pushed on the object path (a stack);
+ // they are popped again when they turn black. Thus, if a grey object (a
+ // cycle) is encountered, it is on the object path, and all the objects
+ // it depends on are the remaining objects on that path. Color encoding
+ // is such that the color value of a grey object indicates the index of
+ // that object in the object path.
+
+ // During type-checking, white objects may be assigned a type without
+ // traversing through objDecl; e.g., when initializing constants and
+ // variables. Update the colors of those objects here (rather than
+ // everywhere where we set the type) to satisfy the color invariants.
+ if obj.color() == white && obj.Type() != nil {
+ obj.setColor(black)
+ return
+ }
+
+ switch obj.color() {
+ case white:
+ assert(obj.Type() == nil)
+ // All color values other than white and black are considered grey.
+ // Because black and white are < grey, all values >= grey are grey.
+ // Use those values to encode the object's index into the object path.
+ obj.setColor(grey + color(check.push(obj)))
+ defer func() {
+ check.pop().setColor(black)
+ }()
+
+ case black:
+ assert(obj.Type() != nil)
+ return
+
+ default:
+ // Color values other than white or black are considered grey.
+ fallthrough
+
+ case grey:
+ // We have a (possibly invalid) cycle.
+ // In the existing code, this is marked by a non-nil type
+ // for the object except for constants and variables whose
+ // type may be non-nil (known), or nil if it depends on the
+ // not-yet known initialization value.
+ // In the former case, set the type to Typ[Invalid] because
+ // we have an initialization cycle. The cycle error will be
+ // reported later, when determining initialization order.
+ // TODO(gri) Report cycle here and simplify initialization
+ // order code.
+ switch obj := obj.(type) {
+ case *Const:
+ if !check.validCycle(obj) || obj.typ == nil {
+ obj.typ = Typ[Invalid]
+ }
+
+ case *Var:
+ if !check.validCycle(obj) || obj.typ == nil {
+ obj.typ = Typ[Invalid]
+ }
+
+ case *TypeName:
+ if !check.validCycle(obj) {
+ // break cycle
+ // (without this, calling underlying()
+ // below may lead to an endless loop
+ // if we have a cycle for a defined
+ // (*Named) type)
+ obj.typ = Typ[Invalid]
+ }
+
+ case *Func:
+ if !check.validCycle(obj) {
+ // Don't set obj.typ to Typ[Invalid] here
+ // because plenty of code type-asserts that
+ // functions have a *Signature type. Grey
+ // functions have their type set to an empty
+ // signature which makes it impossible to
+ // initialize a variable with the function.
+ }
+
+ default:
+ unreachable()
+ }
+ assert(obj.Type() != nil)
+ return
+ }
+
+ d := check.objMap[obj]
+ if d == nil {
+ check.dump("%v: %s should have been declared", obj.Pos(), obj)
+ unreachable()
+ }
+
+ // save/restore current environment and set up object environment
+ defer func(env environment) {
+ check.environment = env
+ }(check.environment)
+ check.environment = environment{
+ scope: d.file,
+ }
+
+ // Const and var declarations must not have initialization
+ // cycles. We track them by remembering the current declaration
+ // in check.decl. Initialization expressions depending on other
+ // consts, vars, or functions, add dependencies to the current
+ // check.decl.
+ switch obj := obj.(type) {
+ case *Const:
+ check.decl = d // new package-level const decl
+ check.constDecl(obj, d.vtyp, d.init, d.inherited)
+ case *Var:
+ check.decl = d // new package-level var decl
+ check.varDecl(obj, d.lhs, d.vtyp, d.init)
+ case *TypeName:
+ // invalid recursive types are detected via path
+ check.typeDecl(obj, d.tdecl, def)
+ check.collectMethods(obj) // methods can only be added to top-level types
+ case *Func:
+ // functions may be recursive - no need to track dependencies
+ check.funcDecl(obj, d)
+ default:
+ unreachable()
+ }
+}
+
+// validCycle reports whether the cycle starting with obj is valid and
+// reports an error if it is not.
+func (check *Checker) validCycle(obj Object) (valid bool) {
+ // The object map contains the package scope objects and the non-interface methods.
+ if debug {
+ info := check.objMap[obj]
+ inObjMap := info != nil && (info.fdecl == nil || info.fdecl.Recv == nil) // exclude methods
+ isPkgObj := obj.Parent() == check.pkg.scope
+ if isPkgObj != inObjMap {
+ check.dump("%v: inconsistent object map for %s (isPkgObj = %v, inObjMap = %v)", obj.Pos(), obj, isPkgObj, inObjMap)
+ unreachable()
+ }
+ }
+
+ // Count cycle objects.
+ assert(obj.color() >= grey)
+ start := obj.color() - grey // index of obj in objPath
+ cycle := check.objPath[start:]
+ tparCycle := false // if set, the cycle is through a type parameter list
+ nval := 0 // number of (constant or variable) values in the cycle; valid if !generic
+ ndef := 0 // number of type definitions in the cycle; valid if !generic
+loop:
+ for _, obj := range cycle {
+ switch obj := obj.(type) {
+ case *Const, *Var:
+ nval++
+ case *TypeName:
+ // If we reach a generic type that is part of a cycle
+ // and we are in a type parameter list, we have a cycle
+ // through a type parameter list, which is invalid.
+ if check.inTParamList && isGeneric(obj.typ) {
+ tparCycle = true
+ break loop
+ }
+
+ // Determine if the type name is an alias or not. For
+ // package-level objects, use the object map which
+ // provides syntactic information (which doesn't rely
+ // on the order in which the objects are set up). For
+ // local objects, we can rely on the order, so use
+ // the object's predicate.
+ // TODO(gri) It would be less fragile to always access
+ // the syntactic information. We should consider storing
+ // this information explicitly in the object.
+ var alias bool
+ if check.enableAlias {
+ alias = obj.IsAlias()
+ } else {
+ if d := check.objMap[obj]; d != nil {
+ alias = d.tdecl.Alias // package-level object
+ } else {
+ alias = obj.IsAlias() // function local object
+ }
+ }
+ if !alias {
+ ndef++
+ }
+ case *Func:
+ // ignored for now
+ default:
+ unreachable()
+ }
+ }
+
+ if check.conf.Trace {
+ check.trace(obj.Pos(), "## cycle detected: objPath = %s->%s (len = %d)", pathString(cycle), obj.Name(), len(cycle))
+ if tparCycle {
+ check.trace(obj.Pos(), "## cycle contains: generic type in a type parameter list")
+ } else {
+ check.trace(obj.Pos(), "## cycle contains: %d values, %d type definitions", nval, ndef)
+ }
+ defer func() {
+ if valid {
+ check.trace(obj.Pos(), "=> cycle is valid")
+ } else {
+ check.trace(obj.Pos(), "=> error: cycle is invalid")
+ }
+ }()
+ }
+
+ if !tparCycle {
+ // A cycle involving only constants and variables is invalid but we
+ // ignore them here because they are reported via the initialization
+ // cycle check.
+ if nval == len(cycle) {
+ return true
+ }
+
+ // A cycle involving only types (and possibly functions) must have at least
+ // one type definition to be permitted: If there is no type definition, we
+ // have a sequence of alias type names which will expand ad infinitum.
+ if nval == 0 && ndef > 0 {
+ return true
+ }
+ }
+
+ check.cycleError(cycle)
+ return false
+}
+
+// cycleError reports a declaration cycle starting with
+// the object in cycle that is "first" in the source.
+func (check *Checker) cycleError(cycle []Object) {
+ // name returns the (possibly qualified) object name.
+ // This is needed because with generic types, cycles
+ // may refer to imported types. See go.dev/issue/50788.
+ // TODO(gri) This functionality is used elsewhere. Factor it out.
+ name := func(obj Object) string {
+ return packagePrefix(obj.Pkg(), check.qualifier) + obj.Name()
+ }
+
+ // TODO(gri) Should we start with the last (rather than the first) object in the cycle
+ // since that is the earliest point in the source where we start seeing the
+ // cycle? That would be more consistent with other error messages.
+ i := firstInSrc(cycle)
+ obj := cycle[i]
+ objName := name(obj)
+ // If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
+ tname, _ := obj.(*TypeName)
+ if tname != nil && tname.IsAlias() {
+ // If we use Alias nodes, it is initialized with Typ[Invalid].
+ // TODO(gri) Adjust this code if we initialize with nil.
+ if !check.enableAlias {
+ check.validAlias(tname, Typ[Invalid])
+ }
+ }
+
+ // report a more concise error for self references
+ if len(cycle) == 1 {
+ if tname != nil {
+ check.errorf(obj, InvalidDeclCycle, "invalid recursive type: %s refers to itself", objName)
+ } else {
+ check.errorf(obj, InvalidDeclCycle, "invalid cycle in declaration: %s refers to itself", objName)
+ }
+ return
+ }
+
+ var err error_
+ err.code = InvalidDeclCycle
+ if tname != nil {
+ err.errorf(obj, "invalid recursive type %s", objName)
+ } else {
+ err.errorf(obj, "invalid cycle in declaration of %s", objName)
+ }
+ for range cycle {
+ err.errorf(obj, "%s refers to", objName)
+ i++
+ if i >= len(cycle) {
+ i = 0
+ }
+ obj = cycle[i]
+ objName = name(obj)
+ }
+ err.errorf(obj, "%s", objName)
+ check.report(&err)
+}
+
+// firstInSrc reports the index of the object with the "smallest"
+// source position in path. path must not be empty.
+func firstInSrc(path []Object) int {
+ fst, pos := 0, path[0].Pos()
+ for i, t := range path[1:] {
+ if cmpPos(t.Pos(), pos) < 0 {
+ fst, pos = i+1, t.Pos()
+ }
+ }
+ return fst
+}
+
+func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited bool) {
+ assert(obj.typ == nil)
+
+ // use the correct value of iota and errpos
+ defer func(iota constant.Value, errpos syntax.Pos) {
+ check.iota = iota
+ check.errpos = errpos
+ }(check.iota, check.errpos)
+ check.iota = obj.val
+ check.errpos = nopos
+
+ // provide valid constant value under all circumstances
+ obj.val = constant.MakeUnknown()
+
+ // determine type, if any
+ if typ != nil {
+ t := check.typ(typ)
+ if !isConstType(t) {
+ // don't report an error if the type is an invalid C (defined) type
+ // (go.dev/issue/22090)
+ if isValid(under(t)) {
+ check.errorf(typ, InvalidConstType, "invalid constant type %s", t)
+ }
+ obj.typ = Typ[Invalid]
+ return
+ }
+ obj.typ = t
+ }
+
+ // check initialization
+ var x operand
+ if init != nil {
+ if inherited {
+ // The initialization expression is inherited from a previous
+ // constant declaration, and (error) positions refer to that
+ // expression and not the current constant declaration. Use
+ // the constant identifier position for any errors during
+ // init expression evaluation since that is all we have
+ // (see issues go.dev/issue/42991, go.dev/issue/42992).
+ check.errpos = obj.pos
+ }
+ check.expr(nil, &x, init)
+ }
+ check.initConst(obj, &x)
+}
+
+func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
+ assert(obj.typ == nil)
+
+ // determine type, if any
+ if typ != nil {
+ obj.typ = check.varType(typ)
+ // We cannot spread the type to all lhs variables if there
+ // are more than one since that would mark them as checked
+ // (see Checker.objDecl) and the assignment of init exprs,
+ // if any, would not be checked.
+ //
+ // TODO(gri) If we have no init expr, we should distribute
+ // a given type otherwise we need to re-evalate the type
+ // expr for each lhs variable, leading to duplicate work.
+ }
+
+ // check initialization
+ if init == nil {
+ if typ == nil {
+ // error reported before by arityMatch
+ obj.typ = Typ[Invalid]
+ }
+ return
+ }
+
+ if lhs == nil || len(lhs) == 1 {
+ assert(lhs == nil || lhs[0] == obj)
+ var x operand
+ check.expr(newTarget(obj.typ, obj.name), &x, init)
+ check.initVar(obj, &x, "variable declaration")
+ return
+ }
+
+ if debug {
+ // obj must be one of lhs
+ found := false
+ for _, lhs := range lhs {
+ if obj == lhs {
+ found = true
+ break
+ }
+ }
+ if !found {
+ panic("inconsistent lhs")
+ }
+ }
+
+ // We have multiple variables on the lhs and one init expr.
+ // Make sure all variables have been given the same type if
+ // one was specified, otherwise they assume the type of the
+ // init expression values (was go.dev/issue/15755).
+ if typ != nil {
+ for _, lhs := range lhs {
+ lhs.typ = obj.typ
+ }
+ }
+
+ check.initVars(lhs, []syntax.Expr{init}, nil)
+}
+
+// isImportedConstraint reports whether typ is an imported type constraint.
+func (check *Checker) isImportedConstraint(typ Type) bool {
+ named := asNamed(typ)
+ if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil {
+ return false
+ }
+ u, _ := named.under().(*Interface)
+ return u != nil && !u.IsMethodSet()
+}
+
+func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeName) {
+ assert(obj.typ == nil)
+
+ var rhs Type
+ check.later(func() {
+ if t := asNamed(obj.typ); t != nil { // type may be invalid
+ check.validType(t)
+ }
+ // If typ is local, an error was already reported where typ is specified/defined.
+ _ = check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs)
+ }).describef(obj, "validType(%s)", obj.Name())
+
+ aliasDecl := tdecl.Alias
+ if aliasDecl && tdecl.TParamList != nil {
+ // The parser will ensure this but we may still get an invalid AST.
+ // Complain and continue as regular type definition.
+ check.error(tdecl, BadDecl, "generic type cannot be alias")
+ aliasDecl = false
+ }
+
+ // alias declaration
+ if aliasDecl {
+ check.verifyVersionf(tdecl, go1_9, "type aliases")
+ if check.enableAlias {
+ // TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark
+ // the alias as incomplete. Currently this causes problems
+ // with certain cycles. Investigate.
+ alias := check.newAlias(obj, Typ[Invalid])
+ setDefType(def, alias)
+ rhs = check.definedType(tdecl.Type, obj)
+ assert(rhs != nil)
+ alias.fromRHS = rhs
+ Unalias(alias) // resolve alias.actual
+ } else {
+ check.brokenAlias(obj)
+ rhs = check.typ(tdecl.Type)
+ check.validAlias(obj, rhs)
+ }
+ return
+ }
+
+ // type definition or generic type declaration
+ named := check.newNamed(obj, nil, nil)
+ setDefType(def, named)
+
+ if tdecl.TParamList != nil {
+ check.openScope(tdecl, "type parameters")
+ defer check.closeScope()
+ check.collectTypeParams(&named.tparams, tdecl.TParamList)
+ }
+
+ // determine underlying type of named
+ rhs = check.definedType(tdecl.Type, obj)
+ assert(rhs != nil)
+ named.fromRHS = rhs
+
+ // If the underlying type was not set while type-checking the right-hand
+ // side, it is invalid and an error should have been reported elsewhere.
+ if named.underlying == nil {
+ named.underlying = Typ[Invalid]
+ }
+
+ // Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639).
+ // We don't need this restriction anymore if we make the underlying type of a type
+ // parameter its constraint interface: if the RHS is a lone type parameter, we will
+ // use its underlying type (like we do for any RHS in a type declaration), and its
+ // underlying type is an interface and the type declaration is well defined.
+ if isTypeParam(rhs) {
+ check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration")
+ named.underlying = Typ[Invalid]
+ }
+}
+
+func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Field) {
+ tparams := make([]*TypeParam, len(list))
+
+ // Declare type parameters up-front.
+ // The scope of type parameters starts at the beginning of the type parameter
+ // list (so we can have mutually recursive parameterized type bounds).
+ if len(list) > 0 {
+ scopePos := list[0].Pos()
+ for i, f := range list {
+ tparams[i] = check.declareTypeParam(f.Name, scopePos)
+ }
+ }
+
+ // Set the type parameters before collecting the type constraints because
+ // the parameterized type may be used by the constraints (go.dev/issue/47887).
+ // Example: type T[P T[P]] interface{}
+ *dst = bindTParams(tparams)
+
+ // Signal to cycle detection that we are in a type parameter list.
+ // We can only be inside one type parameter list at any given time:
+ // function closures may appear inside a type parameter list but they
+ // cannot be generic, and their bodies are processed in delayed and
+ // sequential fashion. Note that with each new declaration, we save
+ // the existing environment and restore it when done; thus inTParamList
+ // is true exactly only when we are in a specific type parameter list.
+ assert(!check.inTParamList)
+ check.inTParamList = true
+ defer func() {
+ check.inTParamList = false
+ }()
+
+ // Keep track of bounds for later validation.
+ var bound Type
+ for i, f := range list {
+ // Optimization: Re-use the previous type bound if it hasn't changed.
+ // This also preserves the grouped output of type parameter lists
+ // when printing type strings.
+ if i == 0 || f.Type != list[i-1].Type {
+ bound = check.bound(f.Type)
+ if isTypeParam(bound) {
+ // We may be able to allow this since it is now well-defined what
+ // the underlying type and thus type set of a type parameter is.
+ // But we may need some additional form of cycle detection within
+ // type parameter lists.
+ check.error(f.Type, MisplacedTypeParam, "cannot use a type parameter as constraint")
+ bound = Typ[Invalid]
+ }
+ }
+ tparams[i].bound = bound
+ }
+}
+
+func (check *Checker) bound(x syntax.Expr) Type {
+ // A type set literal of the form ~T and A|B may only appear as constraint;
+ // embed it in an implicit interface so that only interface type-checking
+ // needs to take care of such type expressions.
+ if op, _ := x.(*syntax.Operation); op != nil && (op.Op == syntax.Tilde || op.Op == syntax.Or) {
+ t := check.typ(&syntax.InterfaceType{MethodList: []*syntax.Field{{Type: x}}})
+ // mark t as implicit interface if all went well
+ if t, _ := t.(*Interface); t != nil {
+ t.implicit = true
+ }
+ return t
+ }
+ return check.typ(x)
+}
+
+func (check *Checker) declareTypeParam(name *syntax.Name, scopePos syntax.Pos) *TypeParam {
+ // Use Typ[Invalid] for the type constraint to ensure that a type
+ // is present even if the actual constraint has not been assigned
+ // yet.
+ // TODO(gri) Need to systematically review all uses of type parameter
+ // constraints to make sure we don't rely on them if they
+ // are not properly set yet.
+ tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil)
+ tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
+ check.declare(check.scope, name, tname, scopePos)
+ return tpar
+}
+
+func (check *Checker) collectMethods(obj *TypeName) {
+ // get associated methods
+ // (Checker.collectObjects only collects methods with non-blank names;
+ // Checker.resolveBaseTypeName ensures that obj is not an alias name
+ // if it has attached methods.)
+ methods := check.methods[obj]
+ if methods == nil {
+ return
+ }
+ delete(check.methods, obj)
+ assert(!check.objMap[obj].tdecl.Alias) // don't use TypeName.IsAlias (requires fully set up object)
+
+ // use an objset to check for name conflicts
+ var mset objset
+
+ // spec: "If the base type is a struct type, the non-blank method
+ // and field names must be distinct."
+ base := asNamed(obj.typ) // shouldn't fail but be conservative
+ if base != nil {
+ assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type
+
+ // See go.dev/issue/52529: we must delay the expansion of underlying here, as
+ // base may not be fully set-up.
+ check.later(func() {
+ check.checkFieldUniqueness(base)
+ }).describef(obj, "verifying field uniqueness for %v", base)
+
+ // Checker.Files may be called multiple times; additional package files
+ // may add methods to already type-checked types. Add pre-existing methods
+ // so that we can detect redeclarations.
+ for i := 0; i < base.NumMethods(); i++ {
+ m := base.Method(i)
+ assert(m.name != "_")
+ assert(mset.insert(m) == nil)
+ }
+ }
+
+ // add valid methods
+ for _, m := range methods {
+ // spec: "For a base type, the non-blank names of methods bound
+ // to it must be unique."
+ assert(m.name != "_")
+ if alt := mset.insert(m); alt != nil {
+ if alt.Pos().IsKnown() {
+ check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared at %s", obj.Name(), m.name, alt.Pos())
+ } else {
+ check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared", obj.Name(), m.name)
+ }
+ continue
+ }
+
+ if base != nil {
+ base.AddMethod(m)
+ }
+ }
+}
+
+func (check *Checker) checkFieldUniqueness(base *Named) {
+ if t, _ := base.under().(*Struct); t != nil {
+ var mset objset
+ for i := 0; i < base.NumMethods(); i++ {
+ m := base.Method(i)
+ assert(m.name != "_")
+ assert(mset.insert(m) == nil)
+ }
+
+ // Check that any non-blank field names of base are distinct from its
+ // method names.
+ for _, fld := range t.fields {
+ if fld.name != "_" {
+ if alt := mset.insert(fld); alt != nil {
+ // Struct fields should already be unique, so we should only
+ // encounter an alternate via collision with a method name.
+ _ = alt.(*Func)
+
+ // For historical consistency, we report the primary error on the
+ // method, and the alt decl on the field.
+ var err error_
+ err.code = DuplicateFieldAndMethod
+ err.errorf(alt, "field and method with the same name %s", fld.name)
+ err.recordAltDecl(fld)
+ check.report(&err)
+ }
+ }
+ }
+ }
+}
+
+func (check *Checker) funcDecl(obj *Func, decl *declInfo) {
+ assert(obj.typ == nil)
+
+ // func declarations cannot use iota
+ assert(check.iota == nil)
+
+ sig := new(Signature)
+ obj.typ = sig // guard against cycles
+
+ // Avoid cycle error when referring to method while type-checking the signature.
+ // This avoids a nuisance in the best case (non-parameterized receiver type) and
+ // since the method is not a type, we get an error. If we have a parameterized
+ // receiver type, instantiating the receiver type leads to the instantiation of
+ // its methods, and we don't want a cycle error in that case.
+ // TODO(gri) review if this is correct and/or whether we still need this?
+ saved := obj.color_
+ obj.color_ = black
+ fdecl := decl.fdecl
+ check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type)
+ obj.color_ = saved
+
+ // Set the scope's extent to the complete "func (...) { ... }"
+ // so that Scope.Innermost works correctly.
+ sig.scope.pos = fdecl.Pos()
+ sig.scope.end = syntax.EndPos(fdecl)
+
+ if len(fdecl.TParamList) > 0 && fdecl.Body == nil {
+ check.softErrorf(fdecl, BadDecl, "generic function is missing function body")
+ }
+
+ // function body must be type-checked after global declarations
+ // (functions implemented elsewhere have no body)
+ if !check.conf.IgnoreFuncBodies && fdecl.Body != nil {
+ check.later(func() {
+ check.funcBody(decl, obj.name, sig, fdecl.Body, nil)
+ }).describef(obj, "func %s", obj.name)
+ }
+}
+
+func (check *Checker) declStmt(list []syntax.Decl) {
+ pkg := check.pkg
+
+ first := -1 // index of first ConstDecl in the current group, or -1
+ var last *syntax.ConstDecl // last ConstDecl with init expressions, or nil
+ for index, decl := range list {
+ if _, ok := decl.(*syntax.ConstDecl); !ok {
+ first = -1 // we're not in a constant declaration
+ }
+
+ switch s := decl.(type) {
+ case *syntax.ConstDecl:
+ top := len(check.delayed)
+
+ // iota is the index of the current constDecl within the group
+ if first < 0 || s.Group == nil || list[index-1].(*syntax.ConstDecl).Group != s.Group {
+ first = index
+ last = nil
+ }
+ iota := constant.MakeInt64(int64(index - first))
+
+ // determine which initialization expressions to use
+ inherited := true
+ switch {
+ case s.Type != nil || s.Values != nil:
+ last = s
+ inherited = false
+ case last == nil:
+ last = new(syntax.ConstDecl) // make sure last exists
+ inherited = false
+ }
+
+ // declare all constants
+ lhs := make([]*Const, len(s.NameList))
+ values := syntax.UnpackListExpr(last.Values)
+ for i, name := range s.NameList {
+ obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
+ lhs[i] = obj
+
+ var init syntax.Expr
+ if i < len(values) {
+ init = values[i]
+ }
+
+ check.constDecl(obj, last.Type, init, inherited)
+ }
+
+ // Constants must always have init values.
+ check.arity(s.Pos(), s.NameList, values, true, inherited)
+
+ // process function literals in init expressions before scope changes
+ check.processDelayed(top)
+
+ // spec: "The scope of a constant or variable identifier declared
+ // inside a function begins at the end of the ConstSpec or VarSpec
+ // (ShortVarDecl for short variable declarations) and ends at the
+ // end of the innermost containing block."
+ scopePos := syntax.EndPos(s)
+ for i, name := range s.NameList {
+ check.declare(check.scope, name, lhs[i], scopePos)
+ }
+
+ case *syntax.VarDecl:
+ top := len(check.delayed)
+
+ lhs0 := make([]*Var, len(s.NameList))
+ for i, name := range s.NameList {
+ lhs0[i] = NewVar(name.Pos(), pkg, name.Value, nil)
+ }
+
+ // initialize all variables
+ values := syntax.UnpackListExpr(s.Values)
+ for i, obj := range lhs0 {
+ var lhs []*Var
+ var init syntax.Expr
+ switch len(values) {
+ case len(s.NameList):
+ // lhs and rhs match
+ init = values[i]
+ case 1:
+ // rhs is expected to be a multi-valued expression
+ lhs = lhs0
+ init = values[0]
+ default:
+ if i < len(values) {
+ init = values[i]
+ }
+ }
+ check.varDecl(obj, lhs, s.Type, init)
+ if len(values) == 1 {
+ // If we have a single lhs variable we are done either way.
+ // If we have a single rhs expression, it must be a multi-
+ // valued expression, in which case handling the first lhs
+ // variable will cause all lhs variables to have a type
+ // assigned, and we are done as well.
+ if debug {
+ for _, obj := range lhs0 {
+ assert(obj.typ != nil)
+ }
+ }
+ break
+ }
+ }
+
+ // If we have no type, we must have values.
+ if s.Type == nil || values != nil {
+ check.arity(s.Pos(), s.NameList, values, false, false)
+ }
+
+ // process function literals in init expressions before scope changes
+ check.processDelayed(top)
+
+ // declare all variables
+ // (only at this point are the variable scopes (parents) set)
+ scopePos := syntax.EndPos(s) // see constant declarations
+ for i, name := range s.NameList {
+ // see constant declarations
+ check.declare(check.scope, name, lhs0[i], scopePos)
+ }
+
+ case *syntax.TypeDecl:
+ obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil)
+ // spec: "The scope of a type identifier declared inside a function
+ // begins at the identifier in the TypeSpec and ends at the end of
+ // the innermost containing block."
+ scopePos := s.Name.Pos()
+ check.declare(check.scope, s.Name, obj, scopePos)
+ // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl)
+ obj.setColor(grey + color(check.push(obj)))
+ check.typeDecl(obj, s, nil)
+ check.pop().setColor(black)
+
+ default:
+ check.errorf(s, InvalidSyntaxTree, "unknown syntax.Decl node %T", s)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go
new file mode 100644
index 0000000..ba4dc87
--- /dev/null
+++ b/src/cmd/compile/internal/types2/errorcalls_test.go
@@ -0,0 +1,95 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "strconv"
+ "testing"
+)
+
+const (
+ errorfMinArgCount = 4
+ errorfFormatIndex = 2
+)
+
+// TestErrorCalls makes sure that check.errorf calls have at least
+// errorfMinArgCount arguments (otherwise we should use check.error)
+// and use balanced parentheses/brackets.
+func TestErrorCalls(t *testing.T) {
+ files, err := pkgFiles(".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, file := range files {
+ syntax.Inspect(file, func(n syntax.Node) bool {
+ call, _ := n.(*syntax.CallExpr)
+ if call == nil {
+ return true
+ }
+ selx, _ := call.Fun.(*syntax.SelectorExpr)
+ if selx == nil {
+ return true
+ }
+ if !(isName(selx.X, "check") && isName(selx.Sel, "errorf")) {
+ return true
+ }
+ // check.errorf calls should have at least errorfMinArgCount arguments:
+ // position, code, format string, and arguments to format
+ if n := len(call.ArgList); n < errorfMinArgCount {
+ t.Errorf("%s: got %d arguments, want at least %d", call.Pos(), n, errorfMinArgCount)
+ return false
+ }
+ format := call.ArgList[errorfFormatIndex]
+ syntax.Inspect(format, func(n syntax.Node) bool {
+ if lit, _ := n.(*syntax.BasicLit); lit != nil && lit.Kind == syntax.StringLit {
+ if s, err := strconv.Unquote(lit.Value); err == nil {
+ if !balancedParentheses(s) {
+ t.Errorf("%s: unbalanced parentheses/brackets", lit.Pos())
+ }
+ }
+ return false
+ }
+ return true
+ })
+ return false
+ })
+ }
+}
+
+func isName(n syntax.Node, name string) bool {
+ if n, ok := n.(*syntax.Name); ok {
+ return n.Value == name
+ }
+ return false
+}
+
+func balancedParentheses(s string) bool {
+ var stack []byte
+ for _, ch := range s {
+ var open byte
+ switch ch {
+ case '(', '[', '{':
+ stack = append(stack, byte(ch))
+ continue
+ case ')':
+ open = '('
+ case ']':
+ open = '['
+ case '}':
+ open = '{'
+ default:
+ continue
+ }
+ // closing parenthesis/bracket must have matching opening
+ top := len(stack) - 1
+ if top < 0 || stack[top] != open {
+ return false
+ }
+ stack = stack[:top]
+ }
+ return len(stack) == 0
+}
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
new file mode 100644
index 0000000..b8414b4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -0,0 +1,332 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various error reporters.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ . "internal/types/errors"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+func assert(p bool) {
+ if !p {
+ msg := "assertion failed"
+ // Include information about the assertion location. Due to panic recovery,
+ // this location is otherwise buried in the middle of the panicking stack.
+ if _, file, line, ok := runtime.Caller(1); ok {
+ msg = fmt.Sprintf("%s:%d: %s", file, line, msg)
+ }
+ panic(msg)
+ }
+}
+
+func unreachable() {
+ panic("unreachable")
+}
+
+// An error_ represents a type-checking error.
+// To report an error_, call Checker.report.
+type error_ struct {
+ desc []errorDesc
+ code Code
+ soft bool // TODO(gri) eventually determine this from an error code
+}
+
+// An errorDesc describes part of a type-checking error.
+type errorDesc struct {
+ pos syntax.Pos
+ format string
+ args []interface{}
+}
+
+func (err *error_) empty() bool {
+ return err.desc == nil
+}
+
+func (err *error_) pos() syntax.Pos {
+ if err.empty() {
+ return nopos
+ }
+ return err.desc[0].pos
+}
+
+func (err *error_) msg(qf Qualifier) string {
+ if err.empty() {
+ return "no error"
+ }
+ var buf strings.Builder
+ for i := range err.desc {
+ p := &err.desc[i]
+ if i > 0 {
+ fmt.Fprint(&buf, "\n\t")
+ if p.pos.IsKnown() {
+ fmt.Fprintf(&buf, "%s: ", p.pos)
+ }
+ }
+ buf.WriteString(sprintf(qf, false, p.format, p.args...))
+ }
+ return buf.String()
+}
+
+// String is for testing.
+func (err *error_) String() string {
+ if err.empty() {
+ return "no error"
+ }
+ return fmt.Sprintf("%s: %s", err.pos(), err.msg(nil))
+}
+
+// errorf adds formatted error information to err.
+// It may be called multiple times to provide additional information.
+func (err *error_) errorf(at poser, format string, args ...interface{}) {
+ err.desc = append(err.desc, errorDesc{atPos(at), format, args})
+}
+
+func sprintf(qf Qualifier, tpSubscripts bool, format string, args ...interface{}) string {
+ for i, arg := range args {
+ switch a := arg.(type) {
+ case nil:
+ arg = "<nil>"
+ case operand:
+ panic("got operand instead of *operand")
+ case *operand:
+ arg = operandString(a, qf)
+ case syntax.Pos:
+ arg = a.String()
+ case syntax.Expr:
+ arg = syntax.String(a)
+ case []syntax.Expr:
+ var buf strings.Builder
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(syntax.String(x))
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
+ case Object:
+ arg = ObjectString(a, qf)
+ case Type:
+ var buf bytes.Buffer
+ w := newTypeWriter(&buf, qf)
+ w.tpSubscripts = tpSubscripts
+ w.typ(a)
+ arg = buf.String()
+ case []Type:
+ var buf bytes.Buffer
+ w := newTypeWriter(&buf, qf)
+ w.tpSubscripts = tpSubscripts
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ w.typ(x)
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
+ case []*TypeParam:
+ var buf bytes.Buffer
+ w := newTypeWriter(&buf, qf)
+ w.tpSubscripts = tpSubscripts
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ w.typ(x)
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
+ }
+ args[i] = arg
+ }
+ return fmt.Sprintf(format, args...)
+}
+
+func (check *Checker) qualifier(pkg *Package) string {
+ // Qualify the package unless it's the package being type-checked.
+ if pkg != check.pkg {
+ if check.pkgPathMap == nil {
+ check.pkgPathMap = make(map[string]map[string]bool)
+ check.seenPkgMap = make(map[*Package]bool)
+ check.markImports(check.pkg)
+ }
+ // If the same package name was used by multiple packages, display the full path.
+ if len(check.pkgPathMap[pkg.name]) > 1 {
+ return strconv.Quote(pkg.path)
+ }
+ return pkg.name
+ }
+ return ""
+}
+
+// markImports recursively walks pkg and its imports, to record unique import
+// paths in pkgPathMap.
+func (check *Checker) markImports(pkg *Package) {
+ if check.seenPkgMap[pkg] {
+ return
+ }
+ check.seenPkgMap[pkg] = true
+
+ forName, ok := check.pkgPathMap[pkg.name]
+ if !ok {
+ forName = make(map[string]bool)
+ check.pkgPathMap[pkg.name] = forName
+ }
+ forName[pkg.path] = true
+
+ for _, imp := range pkg.imports {
+ check.markImports(imp)
+ }
+}
+
+// check may be nil.
+func (check *Checker) sprintf(format string, args ...interface{}) string {
+ var qf Qualifier
+ if check != nil {
+ qf = check.qualifier
+ }
+ return sprintf(qf, false, format, args...)
+}
+
+func (check *Checker) report(err *error_) {
+ if err.empty() {
+ panic("no error to report")
+ }
+ check.err(err.pos(), err.code, err.msg(check.qualifier), err.soft)
+}
+
+func (check *Checker) trace(pos syntax.Pos, format string, args ...interface{}) {
+ fmt.Printf("%s:\t%s%s\n",
+ pos,
+ strings.Repeat(". ", check.indent),
+ sprintf(check.qualifier, true, format, args...),
+ )
+}
+
+// dump is only needed for debugging
+func (check *Checker) dump(format string, args ...interface{}) {
+ fmt.Println(sprintf(check.qualifier, true, format, args...))
+}
+
+func (check *Checker) err(at poser, code Code, msg string, soft bool) {
+ switch code {
+ case InvalidSyntaxTree:
+ msg = "invalid syntax tree: " + msg
+ case 0:
+ panic("no error code provided")
+ }
+
+ // Cheap trick: Don't report errors with messages containing
+ // "invalid operand" or "invalid type" as those tend to be
+ // follow-on errors which don't add useful information. Only
+ // exclude them if these strings are not at the beginning,
+ // and only if we have at least one error already reported.
+ if check.firstErr != nil && (strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0) {
+ return
+ }
+
+ pos := atPos(at)
+
+ // If we are encountering an error while evaluating an inherited
+ // constant initialization expression, pos is the position of in
+ // the original expression, and not of the currently declared
+ // constant identifier. Use the provided errpos instead.
+ // TODO(gri) We may also want to augment the error message and
+ // refer to the position (pos) in the original expression.
+ if check.errpos.IsKnown() {
+ assert(check.iota != nil)
+ pos = check.errpos
+ }
+
+ // If we have a URL for error codes, add a link to the first line.
+ if code != 0 && check.conf.ErrorURL != "" {
+ u := fmt.Sprintf(check.conf.ErrorURL, code)
+ if i := strings.Index(msg, "\n"); i >= 0 {
+ msg = msg[:i] + u + msg[i:]
+ } else {
+ msg += u
+ }
+ }
+
+ err := Error{pos, stripAnnotations(msg), msg, soft, code}
+ if check.firstErr == nil {
+ check.firstErr = err
+ }
+
+ if check.conf.Trace {
+ check.trace(pos, "ERROR: %s", msg)
+ }
+
+ f := check.conf.Error
+ if f == nil {
+ panic(bailout{}) // report only first error
+ }
+ f(err)
+}
+
+const (
+ invalidArg = "invalid argument: "
+ invalidOp = "invalid operation: "
+)
+
+type poser interface {
+ Pos() syntax.Pos
+}
+
+func (check *Checker) error(at poser, code Code, msg string) {
+ check.err(at, code, msg, false)
+}
+
+func (check *Checker) errorf(at poser, code Code, format string, args ...interface{}) {
+ check.err(at, code, check.sprintf(format, args...), false)
+}
+
+func (check *Checker) softErrorf(at poser, code Code, format string, args ...interface{}) {
+ check.err(at, code, check.sprintf(format, args...), true)
+}
+
+func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...interface{}) {
+ msg := check.sprintf(format, args...)
+ msg = fmt.Sprintf("%s requires %s or later", msg, v)
+ check.err(at, UnsupportedFeature, msg, true)
+}
+
+// atPos reports the left (= start) position of at.
+func atPos(at poser) syntax.Pos {
+ switch x := at.(type) {
+ case *operand:
+ if x.expr != nil {
+ return syntax.StartPos(x.expr)
+ }
+ case syntax.Node:
+ return syntax.StartPos(x)
+ }
+ return at.Pos()
+}
+
+// stripAnnotations removes internal (type) annotations from s.
+func stripAnnotations(s string) string {
+ var buf strings.Builder
+ for _, r := range s {
+ // strip #'s and subscript digits
+ if r < '₀' || '₀'+10 <= r { // '₀' == U+2080
+ buf.WriteRune(r)
+ }
+ }
+ if buf.Len() < len(s) {
+ return buf.String()
+ }
+ return s
+}
diff --git a/src/cmd/compile/internal/types2/errors_test.go b/src/cmd/compile/internal/types2/errors_test.go
new file mode 100644
index 0000000..ac73ca4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/errors_test.go
@@ -0,0 +1,44 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "testing"
+
+func TestError(t *testing.T) {
+ var err error_
+ want := "no error"
+ if got := err.String(); got != want {
+ t.Errorf("empty error: got %q, want %q", got, want)
+ }
+
+ want = "<unknown position>: foo 42"
+ err.errorf(nopos, "foo %d", 42)
+ if got := err.String(); got != want {
+ t.Errorf("simple error: got %q, want %q", got, want)
+ }
+
+ want = "<unknown position>: foo 42\n\tbar 43"
+ err.errorf(nopos, "bar %d", 43)
+ if got := err.String(); got != want {
+ t.Errorf("simple error: got %q, want %q", got, want)
+ }
+}
+
+func TestStripAnnotations(t *testing.T) {
+ for _, test := range []struct {
+ in, want string
+ }{
+ {"", ""},
+ {" ", " "},
+ {"foo", "foo"},
+ {"foo₀", "foo"},
+ {"foo(T₀)", "foo(T)"},
+ } {
+ got := stripAnnotations(test.in)
+ if got != test.want {
+ t.Errorf("%q: got %q; want %q", test.in, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/example_test.go b/src/cmd/compile/internal/types2/example_test.go
new file mode 100644
index 0000000..7031fdb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/example_test.go
@@ -0,0 +1,252 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Only run where builders (build.golang.org) have
+// access to compiled packages for import.
+//
+//go:build !android && !ios && !js && !wasip1
+
+package types2_test
+
+// This file shows examples of basic usage of the go/types API.
+//
+// To locate a Go package, use (*go/build.Context).Import.
+// To load, parse, and type-check a complete Go program
+// from source, use golang.org/x/tools/go/loader.
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "log"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// ExampleScope prints the tree of Scopes of a package created from a
+// set of parsed files.
+func ExampleScope() {
+ // Parse the source files for a package.
+ var files []*syntax.File
+ for _, src := range []string{
+ `package main
+import "fmt"
+func main() {
+ freezing := FToC(-18)
+ fmt.Println(freezing, Boiling) }
+`,
+ `package main
+import "fmt"
+type Celsius float64
+func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) }
+func FToC(f float64) Celsius { return Celsius(f - 32 / 9 * 5) }
+const Boiling Celsius = 100
+func Unused() { {}; {{ var x int; _ = x }} } // make sure empty block scopes get printed
+`,
+ } {
+ files = append(files, mustParse(src))
+ }
+
+ // Type-check a package consisting of these files.
+ // Type information for the imported "fmt" package
+ // comes from $GOROOT/pkg/$GOOS_$GOOARCH/fmt.a.
+ conf := types2.Config{Importer: defaultImporter()}
+ pkg, err := conf.Check("temperature", files, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Print the tree of scopes.
+ // For determinism, we redact addresses.
+ var buf strings.Builder
+ pkg.Scope().WriteTo(&buf, 0, true)
+ rx := regexp.MustCompile(` 0x[a-fA-F0-9]*`)
+ fmt.Println(rx.ReplaceAllString(buf.String(), ""))
+
+ // Output:
+ // package "temperature" scope {
+ // . const temperature.Boiling temperature.Celsius
+ // . type temperature.Celsius float64
+ // . func temperature.FToC(f float64) temperature.Celsius
+ // . func temperature.Unused()
+ // . func temperature.main()
+ // . main scope {
+ // . . package fmt
+ // . . function scope {
+ // . . . var freezing temperature.Celsius
+ // . . }
+ // . }
+ // . main scope {
+ // . . package fmt
+ // . . function scope {
+ // . . . var c temperature.Celsius
+ // . . }
+ // . . function scope {
+ // . . . var f float64
+ // . . }
+ // . . function scope {
+ // . . . block scope {
+ // . . . }
+ // . . . block scope {
+ // . . . . block scope {
+ // . . . . . var x int
+ // . . . . }
+ // . . . }
+ // . . }
+ // . }
+ // }
+}
+
+// ExampleInfo prints various facts recorded by the type checker in a
+// types2.Info struct: definitions of and references to each named object,
+// and the type, value, and mode of every expression in the package.
+func ExampleInfo() {
+ // Parse a single source file.
+ const input = `
+package fib
+
+type S string
+
+var a, b, c = len(b), S(c), "hello"
+
+func fib(x int) int {
+ if x < 2 {
+ return x
+ }
+ return fib(x-1) - fib(x-2)
+}`
+ // Type-check the package.
+ // We create an empty map for each kind of input
+ // we're interested in, and Check populates them.
+ info := types2.Info{
+ Types: make(map[syntax.Expr]types2.TypeAndValue),
+ Defs: make(map[*syntax.Name]types2.Object),
+ Uses: make(map[*syntax.Name]types2.Object),
+ }
+ pkg := mustTypecheck(input, nil, &info)
+
+ // Print package-level variables in initialization order.
+ fmt.Printf("InitOrder: %v\n\n", info.InitOrder)
+
+ // For each named object, print the line and
+ // column of its definition and each of its uses.
+ fmt.Println("Defs and Uses of each named object:")
+ usesByObj := make(map[types2.Object][]string)
+ for id, obj := range info.Uses {
+ posn := id.Pos()
+ lineCol := fmt.Sprintf("%d:%d", posn.Line(), posn.Col())
+ usesByObj[obj] = append(usesByObj[obj], lineCol)
+ }
+ var items []string
+ for obj, uses := range usesByObj {
+ sort.Strings(uses)
+ item := fmt.Sprintf("%s:\n defined at %s\n used at %s",
+ types2.ObjectString(obj, types2.RelativeTo(pkg)),
+ obj.Pos(),
+ strings.Join(uses, ", "))
+ items = append(items, item)
+ }
+ sort.Strings(items) // sort by line:col, in effect
+ fmt.Println(strings.Join(items, "\n"))
+ fmt.Println()
+
+ // TODO(gri) Enable once positions are updated/verified
+ // fmt.Println("Types and Values of each expression:")
+ // items = nil
+ // for expr, tv := range info.Types {
+ // var buf strings.Builder
+ // posn := expr.Pos()
+ // tvstr := tv.Type.String()
+ // if tv.Value != nil {
+ // tvstr += " = " + tv.Value.String()
+ // }
+ // // line:col | expr | mode : type = value
+ // fmt.Fprintf(&buf, "%2d:%2d | %-19s | %-7s : %s",
+ // posn.Line(), posn.Col(), types2.ExprString(expr),
+ // mode(tv), tvstr)
+ // items = append(items, buf.String())
+ // }
+ // sort.Strings(items)
+ // fmt.Println(strings.Join(items, "\n"))
+
+ // Output:
+ // InitOrder: [c = "hello" b = S(c) a = len(b)]
+ //
+ // Defs and Uses of each named object:
+ // builtin len:
+ // defined at <unknown position>
+ // used at 6:15
+ // func fib(x int) int:
+ // defined at fib:8:6
+ // used at 12:20, 12:9
+ // type S string:
+ // defined at fib:4:6
+ // used at 6:23
+ // type int:
+ // defined at <unknown position>
+ // used at 8:12, 8:17
+ // type string:
+ // defined at <unknown position>
+ // used at 4:8
+ // var b S:
+ // defined at fib:6:8
+ // used at 6:19
+ // var c string:
+ // defined at fib:6:11
+ // used at 6:25
+ // var x int:
+ // defined at fib:8:10
+ // used at 10:10, 12:13, 12:24, 9:5
+}
+
+// TODO(gri) Enable once positions are updated/verified
+// Types and Values of each expression:
+// 4: 8 | string | type : string
+// 6:15 | len | builtin : func(string) int
+// 6:15 | len(b) | value : int
+// 6:19 | b | var : fib.S
+// 6:23 | S | type : fib.S
+// 6:23 | S(c) | value : fib.S
+// 6:25 | c | var : string
+// 6:29 | "hello" | value : string = "hello"
+// 8:12 | int | type : int
+// 8:17 | int | type : int
+// 9: 5 | x | var : int
+// 9: 5 | x < 2 | value : untyped bool
+// 9: 9 | 2 | value : int = 2
+// 10:10 | x | var : int
+// 12: 9 | fib | value : func(x int) int
+// 12: 9 | fib(x - 1) | value : int
+// 12: 9 | fib(x - 1) - fib(x - 2) | value : int
+// 12:13 | x | var : int
+// 12:13 | x - 1 | value : int
+// 12:15 | 1 | value : int = 1
+// 12:20 | fib | value : func(x int) int
+// 12:20 | fib(x - 2) | value : int
+// 12:24 | x | var : int
+// 12:24 | x - 2 | value : int
+// 12:26 | 2 | value : int = 2
+
+func mode(tv types2.TypeAndValue) string {
+ switch {
+ case tv.IsVoid():
+ return "void"
+ case tv.IsType():
+ return "type"
+ case tv.IsBuiltin():
+ return "builtin"
+ case tv.IsNil():
+ return "nil"
+ case tv.Assignable():
+ if tv.Addressable() {
+ return "var"
+ }
+ return "mapindex"
+ case tv.IsValue():
+ return "value"
+ default:
+ return "unknown"
+ }
+}
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
new file mode 100644
index 0000000..124d970
--- /dev/null
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -0,0 +1,1699 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "go/token"
+ . "internal/types/errors"
+)
+
+/*
+Basic algorithm:
+
+Expressions are checked recursively, top down. Expression checker functions
+are generally of the form:
+
+ func f(x *operand, e *syntax.Expr, ...)
+
+where e is the expression to be checked, and x is the result of the check.
+The check performed by f may fail in which case x.mode == invalid, and
+related error messages will have been issued by f.
+
+If a hint argument is present, it is the composite literal element type
+of an outer composite literal; it is used to type-check composite literal
+elements that have no explicit type specification in the source
+(e.g.: []T{{...}, {...}}, the hint is the type T in this case).
+
+All expressions are checked via rawExpr, which dispatches according
+to expression kind. Upon returning, rawExpr is recording the types and
+constant values for all expressions that have an untyped type (those types
+may change on the way up in the expression tree). Usually these are constants,
+but the results of comparisons or non-constant shifts of untyped constants
+may also be untyped, but not constant.
+
+Untyped expressions may eventually become fully typed (i.e., not untyped),
+typically when the value is assigned to a variable, or is used otherwise.
+The updateExprType method is used to record this final type and update
+the recorded types: the type-checked expression tree is again traversed down,
+and the new type is propagated as needed. Untyped constant expression values
+that become fully typed must now be representable by the full type (constant
+sub-expression trees are left alone except for their roots). This mechanism
+ensures that a client sees the actual (run-time) type an untyped value would
+have. It also permits type-checking of lhs shift operands "as if the shift
+were not present": when updateExprType visits an untyped lhs shift operand
+and assigns it it's final type, that type must be an integer type, and a
+constant lhs must be representable as an integer.
+
+When an expression gets its final type, either on the way out from rawExpr,
+on the way down in updateExprType, or at the end of the type checker run,
+the type (and constant value, if any) is recorded via Info.Types, if present.
+*/
+
+type opPredicates map[syntax.Operator]func(Type) bool
+
+var unaryOpPredicates opPredicates
+
+func init() {
+ // Setting unaryOpPredicates in init avoids declaration cycles.
+ unaryOpPredicates = opPredicates{
+ syntax.Add: allNumeric,
+ syntax.Sub: allNumeric,
+ syntax.Xor: allInteger,
+ syntax.Not: allBoolean,
+ }
+}
+
+func (check *Checker) op(m opPredicates, x *operand, op syntax.Operator) bool {
+ if pred := m[op]; pred != nil {
+ if !pred(x.typ) {
+ check.errorf(x, UndefinedOp, invalidOp+"operator %s not defined on %s", op, x)
+ return false
+ }
+ } else {
+ check.errorf(x, InvalidSyntaxTree, "unknown operator %s", op)
+ return false
+ }
+ return true
+}
+
+// opPos returns the position of the operator if x is an operation;
+// otherwise it returns the start position of x.
+func opPos(x syntax.Expr) syntax.Pos {
+ switch op := x.(type) {
+ case nil:
+ return nopos // don't crash
+ case *syntax.Operation:
+ return op.Pos()
+ default:
+ return syntax.StartPos(x)
+ }
+}
+
+// opName returns the name of the operation if x is an operation
+// that might overflow; otherwise it returns the empty string.
+func opName(x syntax.Expr) string {
+ if e, _ := x.(*syntax.Operation); e != nil {
+ op := int(e.Op)
+ if e.Y == nil {
+ if op < len(op2str1) {
+ return op2str1[op]
+ }
+ } else {
+ if op < len(op2str2) {
+ return op2str2[op]
+ }
+ }
+ }
+ return ""
+}
+
+var op2str1 = [...]string{
+ syntax.Xor: "bitwise complement",
+}
+
+// This is only used for operations that may cause overflow.
+var op2str2 = [...]string{
+ syntax.Add: "addition",
+ syntax.Sub: "subtraction",
+ syntax.Xor: "bitwise XOR",
+ syntax.Mul: "multiplication",
+ syntax.Shl: "shift",
+}
+
+// If typ is a type parameter, underIs returns the result of typ.underIs(f).
+// Otherwise, underIs returns the result of f(under(typ)).
+func underIs(typ Type, f func(Type) bool) bool {
+ if tpar, _ := typ.(*TypeParam); tpar != nil {
+ return tpar.underIs(f)
+ }
+ return f(under(typ))
+}
+
+func (check *Checker) unary(x *operand, e *syntax.Operation) {
+ check.expr(nil, x, e.X)
+ if x.mode == invalid {
+ return
+ }
+
+ op := e.Op
+ switch op {
+ case syntax.And:
+ // spec: "As an exception to the addressability
+ // requirement x may also be a composite literal."
+ if _, ok := syntax.Unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable {
+ check.errorf(x, UnaddressableOperand, invalidOp+"cannot take address of %s", x)
+ x.mode = invalid
+ return
+ }
+ x.mode = value
+ x.typ = &Pointer{base: x.typ}
+ return
+
+ case syntax.Recv:
+ u := coreType(x.typ)
+ if u == nil {
+ check.errorf(x, InvalidReceive, invalidOp+"cannot receive from %s (no core type)", x)
+ x.mode = invalid
+ return
+ }
+ ch, _ := u.(*Chan)
+ if ch == nil {
+ check.errorf(x, InvalidReceive, invalidOp+"cannot receive from non-channel %s", x)
+ x.mode = invalid
+ return
+ }
+ if ch.dir == SendOnly {
+ check.errorf(x, InvalidReceive, invalidOp+"cannot receive from send-only channel %s", x)
+ x.mode = invalid
+ return
+ }
+ x.mode = commaok
+ x.typ = ch.elem
+ check.hasCallOrRecv = true
+ return
+
+ case syntax.Tilde:
+ // Provide a better error position and message than what check.op below would do.
+ if !allInteger(x.typ) {
+ check.error(e, UndefinedOp, "cannot use ~ outside of interface or type constraint")
+ x.mode = invalid
+ return
+ }
+ check.error(e, UndefinedOp, "cannot use ~ outside of interface or type constraint (use ^ for bitwise complement)")
+ op = syntax.Xor
+ }
+
+ if !check.op(unaryOpPredicates, x, op) {
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant_ {
+ if x.val.Kind() == constant.Unknown {
+ // nothing to do (and don't cause an error below in the overflow check)
+ return
+ }
+ var prec uint
+ if isUnsigned(x.typ) {
+ prec = uint(check.conf.sizeof(x.typ) * 8)
+ }
+ x.val = constant.UnaryOp(op2tok[op], x.val, prec)
+ x.expr = e
+ check.overflow(x, opPos(x.expr))
+ return
+ }
+
+ x.mode = value
+ // x.typ remains unchanged
+}
+
+func isShift(op syntax.Operator) bool {
+ return op == syntax.Shl || op == syntax.Shr
+}
+
+func isComparison(op syntax.Operator) bool {
+ // Note: tokens are not ordered well to make this much easier
+ switch op {
+ case syntax.Eql, syntax.Neq, syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq:
+ return true
+ }
+ return false
+}
+
+// updateExprType updates the type of x to typ and invokes itself
+// recursively for the operands of x, depending on expression kind.
+// If typ is still an untyped and not the final type, updateExprType
+// only updates the recorded untyped type for x and possibly its
+// operands. Otherwise (i.e., typ is not an untyped type anymore,
+// or it is the final type for x), the type and value are recorded.
+// Also, if x is a constant, it must be representable as a value of typ,
+// and if x is the (formerly untyped) lhs operand of a non-constant
+// shift, it must be an integer value.
+func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
+ check.updateExprType0(nil, x, typ, final)
+}
+
+func (check *Checker) updateExprType0(parent, x syntax.Expr, typ Type, final bool) {
+ old, found := check.untyped[x]
+ if !found {
+ return // nothing to do
+ }
+
+ // update operands of x if necessary
+ switch x := x.(type) {
+ case *syntax.BadExpr,
+ *syntax.FuncLit,
+ *syntax.CompositeLit,
+ *syntax.IndexExpr,
+ *syntax.SliceExpr,
+ *syntax.AssertExpr,
+ *syntax.ListExpr,
+ //*syntax.StarExpr,
+ *syntax.KeyValueExpr,
+ *syntax.ArrayType,
+ *syntax.StructType,
+ *syntax.FuncType,
+ *syntax.InterfaceType,
+ *syntax.MapType,
+ *syntax.ChanType:
+ // These expression are never untyped - nothing to do.
+ // The respective sub-expressions got their final types
+ // upon assignment or use.
+ if debug {
+ check.dump("%v: found old type(%s): %s (new: %s)", atPos(x), x, old.typ, typ)
+ unreachable()
+ }
+ return
+
+ case *syntax.CallExpr:
+ // Resulting in an untyped constant (e.g., built-in complex).
+ // The respective calls take care of calling updateExprType
+ // for the arguments if necessary.
+
+ case *syntax.Name, *syntax.BasicLit, *syntax.SelectorExpr:
+ // An identifier denoting a constant, a constant literal,
+ // or a qualified identifier (imported untyped constant).
+ // No operands to take care of.
+
+ case *syntax.ParenExpr:
+ check.updateExprType0(x, x.X, typ, final)
+
+ // case *syntax.UnaryExpr:
+ // // If x is a constant, the operands were constants.
+ // // The operands don't need to be updated since they
+ // // never get "materialized" into a typed value. If
+ // // left in the untyped map, they will be processed
+ // // at the end of the type check.
+ // if old.val != nil {
+ // break
+ // }
+ // check.updateExprType0(x, x.X, typ, final)
+
+ case *syntax.Operation:
+ if x.Y == nil {
+ // unary expression
+ if x.Op == syntax.Mul {
+ // see commented out code for StarExpr above
+ // TODO(gri) needs cleanup
+ if debug {
+ panic("unimplemented")
+ }
+ return
+ }
+ // If x is a constant, the operands were constants.
+ // The operands don't need to be updated since they
+ // never get "materialized" into a typed value. If
+ // left in the untyped map, they will be processed
+ // at the end of the type check.
+ if old.val != nil {
+ break
+ }
+ check.updateExprType0(x, x.X, typ, final)
+ break
+ }
+
+ // binary expression
+ if old.val != nil {
+ break // see comment for unary expressions
+ }
+ if isComparison(x.Op) {
+ // The result type is independent of operand types
+ // and the operand types must have final types.
+ } else if isShift(x.Op) {
+ // The result type depends only on lhs operand.
+ // The rhs type was updated when checking the shift.
+ check.updateExprType0(x, x.X, typ, final)
+ } else {
+ // The operand types match the result type.
+ check.updateExprType0(x, x.X, typ, final)
+ check.updateExprType0(x, x.Y, typ, final)
+ }
+
+ default:
+ unreachable()
+ }
+
+ // If the new type is not final and still untyped, just
+ // update the recorded type.
+ if !final && isUntyped(typ) {
+ old.typ = under(typ).(*Basic)
+ check.untyped[x] = old
+ return
+ }
+
+ // Otherwise we have the final (typed or untyped type).
+ // Remove it from the map of yet untyped expressions.
+ delete(check.untyped, x)
+
+ if old.isLhs {
+ // If x is the lhs of a shift, its final type must be integer.
+ // We already know from the shift check that it is representable
+ // as an integer if it is a constant.
+ if !allInteger(typ) {
+ check.errorf(x, InvalidShiftOperand, invalidOp+"shifted operand %s (type %s) must be integer", x, typ)
+ return
+ }
+ // Even if we have an integer, if the value is a constant we
+ // still must check that it is representable as the specific
+ // int type requested (was go.dev/issue/22969). Fall through here.
+ }
+ if old.val != nil {
+ // If x is a constant, it must be representable as a value of typ.
+ c := operand{old.mode, x, old.typ, old.val, 0}
+ check.convertUntyped(&c, typ)
+ if c.mode == invalid {
+ return
+ }
+ }
+
+ // Everything's fine, record final type and value for x.
+ check.recordTypeAndValue(x, old.mode, typ, old.val)
+}
+
+// updateExprVal updates the value of x to val.
+func (check *Checker) updateExprVal(x syntax.Expr, val constant.Value) {
+ if info, ok := check.untyped[x]; ok {
+ info.val = val
+ check.untyped[x] = info
+ }
+}
+
+// implicitTypeAndValue returns the implicit type of x when used in a context
+// where the target type is expected. If no such implicit conversion is
+// possible, it returns a nil Type and non-zero error code.
+//
+// If x is a constant operand, the returned constant.Value will be the
+// representation of x in this context.
+func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, Code) {
+ if x.mode == invalid || isTyped(x.typ) || !isValid(target) {
+ return x.typ, nil, 0
+ }
+ // x is untyped
+
+ if isUntyped(target) {
+ // both x and target are untyped
+ if m := maxType(x.typ, target); m != nil {
+ return m, nil, 0
+ }
+ return nil, nil, InvalidUntypedConversion
+ }
+
+ if x.isNil() {
+ assert(isUntyped(x.typ))
+ if hasNil(target) {
+ return target, nil, 0
+ }
+ return nil, nil, InvalidUntypedConversion
+ }
+
+ switch u := under(target).(type) {
+ case *Basic:
+ if x.mode == constant_ {
+ v, code := check.representation(x, u)
+ if code != 0 {
+ return nil, nil, code
+ }
+ return target, v, code
+ }
+ // Non-constant untyped values may appear as the
+ // result of comparisons (untyped bool), intermediate
+ // (delayed-checked) rhs operands of shifts, and as
+ // the value nil.
+ switch x.typ.(*Basic).kind {
+ case UntypedBool:
+ if !isBoolean(target) {
+ return nil, nil, InvalidUntypedConversion
+ }
+ case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex:
+ if !isNumeric(target) {
+ return nil, nil, InvalidUntypedConversion
+ }
+ case UntypedString:
+ // Non-constant untyped string values are not permitted by the spec and
+ // should not occur during normal typechecking passes, but this path is
+ // reachable via the AssignableTo API.
+ if !isString(target) {
+ return nil, nil, InvalidUntypedConversion
+ }
+ default:
+ return nil, nil, InvalidUntypedConversion
+ }
+ case *Interface:
+ if isTypeParam(target) {
+ if !u.typeSet().underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ t, _, _ := check.implicitTypeAndValue(x, u)
+ return t != nil
+ }) {
+ return nil, nil, InvalidUntypedConversion
+ }
+ break
+ }
+ // Update operand types to the default type rather than the target
+ // (interface) type: values must have concrete dynamic types.
+ // Untyped nil was handled upfront.
+ if !u.Empty() {
+ return nil, nil, InvalidUntypedConversion // cannot assign untyped values to non-empty interfaces
+ }
+ return Default(x.typ), nil, 0 // default type for nil is nil
+ default:
+ return nil, nil, InvalidUntypedConversion
+ }
+ return target, nil, 0
+}
+
+// If switchCase is true, the operator op is ignored.
+func (check *Checker) comparison(x, y *operand, op syntax.Operator, switchCase bool) {
+ // Avoid spurious errors if any of the operands has an invalid type (go.dev/issue/54405).
+ if !isValid(x.typ) || !isValid(y.typ) {
+ x.mode = invalid
+ return
+ }
+
+ if switchCase {
+ op = syntax.Eql
+ }
+
+ errOp := x // operand for which error is reported, if any
+ cause := "" // specific error cause, if any
+
+ // spec: "In any comparison, the first operand must be assignable
+ // to the type of the second operand, or vice versa."
+ code := MismatchedTypes
+ ok, _ := x.assignableTo(check, y.typ, nil)
+ if !ok {
+ ok, _ = y.assignableTo(check, x.typ, nil)
+ }
+ if !ok {
+ // Report the error on the 2nd operand since we only
+ // know after seeing the 2nd operand whether we have
+ // a type mismatch.
+ errOp = y
+ cause = check.sprintf("mismatched types %s and %s", x.typ, y.typ)
+ goto Error
+ }
+
+ // check if comparison is defined for operands
+ code = UndefinedOp
+ switch op {
+ case syntax.Eql, syntax.Neq:
+ // spec: "The equality operators == and != apply to operands that are comparable."
+ switch {
+ case x.isNil() || y.isNil():
+ // Comparison against nil requires that the other operand type has nil.
+ typ := x.typ
+ if x.isNil() {
+ typ = y.typ
+ }
+ if !hasNil(typ) {
+ // This case should only be possible for "nil == nil".
+ // Report the error on the 2nd operand since we only
+ // know after seeing the 2nd operand whether we have
+ // an invalid comparison.
+ errOp = y
+ goto Error
+ }
+
+ case !Comparable(x.typ):
+ errOp = x
+ cause = check.incomparableCause(x.typ)
+ goto Error
+
+ case !Comparable(y.typ):
+ errOp = y
+ cause = check.incomparableCause(y.typ)
+ goto Error
+ }
+
+ case syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq:
+ // spec: The ordering operators <, <=, >, and >= apply to operands that are ordered."
+ switch {
+ case !allOrdered(x.typ):
+ errOp = x
+ goto Error
+ case !allOrdered(y.typ):
+ errOp = y
+ goto Error
+ }
+
+ default:
+ unreachable()
+ }
+
+ // comparison is ok
+ if x.mode == constant_ && y.mode == constant_ {
+ x.val = constant.MakeBool(constant.Compare(x.val, op2tok[op], y.val))
+ // The operands are never materialized; no need to update
+ // their types.
+ } else {
+ x.mode = value
+ // The operands have now their final types, which at run-
+ // time will be materialized. Update the expression trees.
+ // If the current types are untyped, the materialized type
+ // is the respective default type.
+ check.updateExprType(x.expr, Default(x.typ), true)
+ check.updateExprType(y.expr, Default(y.typ), true)
+ }
+
+ // spec: "Comparison operators compare two operands and yield
+ // an untyped boolean value."
+ x.typ = Typ[UntypedBool]
+ return
+
+Error:
+ // We have an offending operand errOp and possibly an error cause.
+ if cause == "" {
+ if isTypeParam(x.typ) || isTypeParam(y.typ) {
+ // TODO(gri) should report the specific type causing the problem, if any
+ if !isTypeParam(x.typ) {
+ errOp = y
+ }
+ cause = check.sprintf("type parameter %s is not comparable with %s", errOp.typ, op)
+ } else {
+ cause = check.sprintf("operator %s not defined on %s", op, check.kindString(errOp.typ)) // catch-all
+ }
+ }
+ if switchCase {
+ check.errorf(x, code, "invalid case %s in switch on %s (%s)", x.expr, y.expr, cause) // error position always at 1st operand
+ } else {
+ check.errorf(errOp, code, invalidOp+"%s %s %s (%s)", x.expr, op, y.expr, cause)
+ }
+ x.mode = invalid
+}
+
+// incomparableCause returns a more specific cause why typ is not comparable.
+// If there is no more specific cause, the result is "".
+func (check *Checker) incomparableCause(typ Type) string {
+ switch under(typ).(type) {
+ case *Slice, *Signature, *Map:
+ return check.kindString(typ) + " can only be compared to nil"
+ }
+ // see if we can extract a more specific error
+ var cause string
+ comparable(typ, true, nil, func(format string, args ...interface{}) {
+ cause = check.sprintf(format, args...)
+ })
+ return cause
+}
+
+// kindString returns the type kind as a string.
+func (check *Checker) kindString(typ Type) string {
+ switch under(typ).(type) {
+ case *Array:
+ return "array"
+ case *Slice:
+ return "slice"
+ case *Struct:
+ return "struct"
+ case *Pointer:
+ return "pointer"
+ case *Signature:
+ return "func"
+ case *Interface:
+ if isTypeParam(typ) {
+ return check.sprintf("type parameter %s", typ)
+ }
+ return "interface"
+ case *Map:
+ return "map"
+ case *Chan:
+ return "chan"
+ default:
+ return check.sprintf("%s", typ) // catch-all
+ }
+}
+
+// If e != nil, it must be the shift expression; it may be nil for non-constant shifts.
+func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) {
+ // TODO(gri) This function seems overly complex. Revisit.
+
+ var xval constant.Value
+ if x.mode == constant_ {
+ xval = constant.ToInt(x.val)
+ }
+
+ if allInteger(x.typ) || isUntyped(x.typ) && xval != nil && xval.Kind() == constant.Int {
+ // The lhs is of integer type or an untyped constant representable
+ // as an integer. Nothing to do.
+ } else {
+ // shift has no chance
+ check.errorf(x, InvalidShiftOperand, invalidOp+"shifted operand %s must be integer", x)
+ x.mode = invalid
+ return
+ }
+
+ // spec: "The right operand in a shift expression must have integer type
+ // or be an untyped constant representable by a value of type uint."
+
+ // Check that constants are representable by uint, but do not convert them
+ // (see also go.dev/issue/47243).
+ var yval constant.Value
+ if y.mode == constant_ {
+ // Provide a good error message for negative shift counts.
+ yval = constant.ToInt(y.val) // consider -1, 1.0, but not -1.1
+ if yval.Kind() == constant.Int && constant.Sign(yval) < 0 {
+ check.errorf(y, InvalidShiftCount, invalidOp+"negative shift count %s", y)
+ x.mode = invalid
+ return
+ }
+
+ if isUntyped(y.typ) {
+ // Caution: Check for representability here, rather than in the switch
+ // below, because isInteger includes untyped integers (was bug go.dev/issue/43697).
+ check.representable(y, Typ[Uint])
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ }
+ } else {
+ // Check that RHS is otherwise at least of integer type.
+ switch {
+ case allInteger(y.typ):
+ if !allUnsigned(y.typ) && !check.verifyVersionf(y, go1_13, invalidOp+"signed shift count %s", y) {
+ x.mode = invalid
+ return
+ }
+ case isUntyped(y.typ):
+ // This is incorrect, but preserves pre-existing behavior.
+ // See also go.dev/issue/47410.
+ check.convertUntyped(y, Typ[Uint])
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ default:
+ check.errorf(y, InvalidShiftCount, invalidOp+"shift count %s must be integer", y)
+ x.mode = invalid
+ return
+ }
+ }
+
+ if x.mode == constant_ {
+ if y.mode == constant_ {
+ // if either x or y has an unknown value, the result is unknown
+ if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown {
+ x.val = constant.MakeUnknown()
+ // ensure the correct type - see comment below
+ if !isInteger(x.typ) {
+ x.typ = Typ[UntypedInt]
+ }
+ return
+ }
+ // rhs must be within reasonable bounds in constant shifts
+ const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 (see go.dev/issue/44057)
+ s, ok := constant.Uint64Val(yval)
+ if !ok || s > shiftBound {
+ check.errorf(y, InvalidShiftCount, invalidOp+"invalid shift count %s", y)
+ x.mode = invalid
+ return
+ }
+ // The lhs is representable as an integer but may not be an integer
+ // (e.g., 2.0, an untyped float) - this can only happen for untyped
+ // non-integer numeric constants. Correct the type so that the shift
+ // result is of integer type.
+ if !isInteger(x.typ) {
+ x.typ = Typ[UntypedInt]
+ }
+ // x is a constant so xval != nil and it must be of Int kind.
+ x.val = constant.Shift(xval, op2tok[op], uint(s))
+ x.expr = e
+ check.overflow(x, opPos(x.expr))
+ return
+ }
+
+ // non-constant shift with constant lhs
+ if isUntyped(x.typ) {
+ // spec: "If the left operand of a non-constant shift
+ // expression is an untyped constant, the type of the
+ // constant is what it would be if the shift expression
+ // were replaced by its left operand alone.".
+ //
+ // Delay operand checking until we know the final type
+ // by marking the lhs expression as lhs shift operand.
+ //
+ // Usually (in correct programs), the lhs expression
+ // is in the untyped map. However, it is possible to
+ // create incorrect programs where the same expression
+ // is evaluated twice (via a declaration cycle) such
+ // that the lhs expression type is determined in the
+ // first round and thus deleted from the map, and then
+ // not found in the second round (double insertion of
+ // the same expr node still just leads to one entry for
+ // that node, and it can only be deleted once).
+ // Be cautious and check for presence of entry.
+ // Example: var e, f = int(1<<""[f]) // go.dev/issue/11347
+ if info, found := check.untyped[x.expr]; found {
+ info.isLhs = true
+ check.untyped[x.expr] = info
+ }
+ // keep x's type
+ x.mode = value
+ return
+ }
+ }
+
+ // non-constant shift - lhs must be an integer
+ if !allInteger(x.typ) {
+ check.errorf(x, InvalidShiftOperand, invalidOp+"shifted operand %s must be integer", x)
+ x.mode = invalid
+ return
+ }
+
+ x.mode = value
+}
+
+var binaryOpPredicates opPredicates
+
+func init() {
+ // Setting binaryOpPredicates in init avoids declaration cycles.
+ binaryOpPredicates = opPredicates{
+ syntax.Add: allNumericOrString,
+ syntax.Sub: allNumeric,
+ syntax.Mul: allNumeric,
+ syntax.Div: allNumeric,
+ syntax.Rem: allInteger,
+
+ syntax.And: allInteger,
+ syntax.Or: allInteger,
+ syntax.Xor: allInteger,
+ syntax.AndNot: allInteger,
+
+ syntax.AndAnd: allBoolean,
+ syntax.OrOr: allBoolean,
+ }
+}
+
+// If e != nil, it must be the binary expression; it may be nil for non-constant expressions
+// (when invoked for an assignment operation where the binary expression is implicit).
+func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op syntax.Operator) {
+ var y operand
+
+ check.expr(nil, x, lhs)
+ check.expr(nil, &y, rhs)
+
+ if x.mode == invalid {
+ return
+ }
+ if y.mode == invalid {
+ x.mode = invalid
+ x.expr = y.expr
+ return
+ }
+
+ if isShift(op) {
+ check.shift(x, &y, e, op)
+ return
+ }
+
+ check.matchTypes(x, &y)
+ if x.mode == invalid {
+ return
+ }
+
+ if isComparison(op) {
+ check.comparison(x, &y, op, false)
+ return
+ }
+
+ if !Identical(x.typ, y.typ) {
+ // only report an error if we have valid types
+ // (otherwise we had an error reported elsewhere already)
+ if isValid(x.typ) && isValid(y.typ) {
+ if e != nil {
+ check.errorf(x, MismatchedTypes, invalidOp+"%s (mismatched types %s and %s)", e, x.typ, y.typ)
+ } else {
+ check.errorf(x, MismatchedTypes, invalidOp+"%s %s= %s (mismatched types %s and %s)", lhs, op, rhs, x.typ, y.typ)
+ }
+ }
+ x.mode = invalid
+ return
+ }
+
+ if !check.op(binaryOpPredicates, x, op) {
+ x.mode = invalid
+ return
+ }
+
+ if op == syntax.Div || op == syntax.Rem {
+ // check for zero divisor
+ if (x.mode == constant_ || allInteger(x.typ)) && y.mode == constant_ && constant.Sign(y.val) == 0 {
+ check.error(&y, DivByZero, invalidOp+"division by zero")
+ x.mode = invalid
+ return
+ }
+
+ // check for divisor underflow in complex division (see go.dev/issue/20227)
+ if x.mode == constant_ && y.mode == constant_ && isComplex(x.typ) {
+ re, im := constant.Real(y.val), constant.Imag(y.val)
+ re2, im2 := constant.BinaryOp(re, token.MUL, re), constant.BinaryOp(im, token.MUL, im)
+ if constant.Sign(re2) == 0 && constant.Sign(im2) == 0 {
+ check.error(&y, DivByZero, invalidOp+"division by zero")
+ x.mode = invalid
+ return
+ }
+ }
+ }
+
+ if x.mode == constant_ && y.mode == constant_ {
+ // if either x or y has an unknown value, the result is unknown
+ if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown {
+ x.val = constant.MakeUnknown()
+ // x.typ is unchanged
+ return
+ }
+ // force integer division for integer operands
+ tok := op2tok[op]
+ if op == syntax.Div && isInteger(x.typ) {
+ tok = token.QUO_ASSIGN
+ }
+ x.val = constant.BinaryOp(x.val, tok, y.val)
+ x.expr = e
+ check.overflow(x, opPos(x.expr))
+ return
+ }
+
+ x.mode = value
+ // x.typ is unchanged
+}
+
+// matchTypes attempts to convert any untyped types x and y such that they match.
+// If an error occurs, x.mode is set to invalid.
+func (check *Checker) matchTypes(x, y *operand) {
+ // mayConvert reports whether the operands x and y may
+ // possibly have matching types after converting one
+ // untyped operand to the type of the other.
+ // If mayConvert returns true, we try to convert the
+ // operands to each other's types, and if that fails
+ // we report a conversion failure.
+ // If mayConvert returns false, we continue without an
+ // attempt at conversion, and if the operand types are
+ // not compatible, we report a type mismatch error.
+ mayConvert := func(x, y *operand) bool {
+ // If both operands are typed, there's no need for an implicit conversion.
+ if isTyped(x.typ) && isTyped(y.typ) {
+ return false
+ }
+ // An untyped operand may convert to its default type when paired with an empty interface
+ // TODO(gri) This should only matter for comparisons (the only binary operation that is
+ // valid with interfaces), but in that case the assignability check should take
+ // care of the conversion. Verify and possibly eliminate this extra test.
+ if isNonTypeParamInterface(x.typ) || isNonTypeParamInterface(y.typ) {
+ return true
+ }
+ // A boolean type can only convert to another boolean type.
+ if allBoolean(x.typ) != allBoolean(y.typ) {
+ return false
+ }
+ // A string type can only convert to another string type.
+ if allString(x.typ) != allString(y.typ) {
+ return false
+ }
+ // Untyped nil can only convert to a type that has a nil.
+ if x.isNil() {
+ return hasNil(y.typ)
+ }
+ if y.isNil() {
+ return hasNil(x.typ)
+ }
+ // An untyped operand cannot convert to a pointer.
+ // TODO(gri) generalize to type parameters
+ if isPointer(x.typ) || isPointer(y.typ) {
+ return false
+ }
+ return true
+ }
+
+ if mayConvert(x, y) {
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ }
+}
+
+// exprKind describes the kind of an expression; the kind
+// determines if an expression is valid in 'statement context'.
+type exprKind int
+
+const (
+ conversion exprKind = iota
+ expression
+ statement
+)
+
+// target represent the (signature) type and description of the LHS
+// variable of an assignment, or of a function result variable.
+type target struct {
+ sig *Signature
+ desc string
+}
+
+// newTarget creates a new target for the given type and description.
+// The result is nil if typ is not a signature.
+func newTarget(typ Type, desc string) *target {
+ if typ != nil {
+ if sig, _ := under(typ).(*Signature); sig != nil {
+ return &target{sig, desc}
+ }
+ }
+ return nil
+}
+
+// rawExpr typechecks expression e and initializes x with the expression
+// value or type. If an error occurred, x.mode is set to invalid.
+// If a non-nil target T is given and e is a generic function,
+// T is used to infer the type arguments for e.
+// If hint != nil, it is the type of a composite literal element.
+// If allowGeneric is set, the operand type may be an uninstantiated
+// parameterized type or function value.
+func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind {
+ if check.conf.Trace {
+ check.trace(e.Pos(), "-- expr %s", e)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(e.Pos(), "=> %s", x)
+ }()
+ }
+
+ kind := check.exprInternal(T, x, e, hint)
+
+ if !allowGeneric {
+ check.nonGeneric(T, x)
+ }
+
+ check.record(x)
+
+ return kind
+}
+
+// If x is a generic type, or a generic function whose type arguments cannot be inferred
+// from a non-nil target T, nonGeneric reports an error and invalidates x.mode and x.typ.
+// Otherwise it leaves x alone.
+func (check *Checker) nonGeneric(T *target, x *operand) {
+ if x.mode == invalid || x.mode == novalue {
+ return
+ }
+ var what string
+ switch t := x.typ.(type) {
+ case *Named:
+ if isGeneric(t) {
+ what = "type"
+ }
+ case *Signature:
+ if t.tparams != nil {
+ if enableReverseTypeInference && T != nil {
+ check.funcInst(T, x.Pos(), x, nil, true)
+ return
+ }
+ what = "function"
+ }
+ }
+ if what != "" {
+ check.errorf(x.expr, WrongTypeArgCount, "cannot use generic %s %s without instantiation", what, x.expr)
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+ }
+}
+
+// exprInternal contains the core of type checking of expressions.
+// Must only be called by rawExpr.
+// (See rawExpr for an explanation of the parameters.)
+func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Type) exprKind {
+ // make sure x has a valid state in case of bailout
+ // (was go.dev/issue/5770)
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+
+ switch e := e.(type) {
+ case nil:
+ unreachable()
+
+ case *syntax.BadExpr:
+ goto Error // error was reported before
+
+ case *syntax.Name:
+ check.ident(x, e, nil, false)
+
+ case *syntax.DotsType:
+ // dots are handled explicitly where they are legal
+ // (array composite literals and parameter lists)
+ check.error(e, BadDotDotDotSyntax, "invalid use of '...'")
+ goto Error
+
+ case *syntax.BasicLit:
+ if e.Bad {
+ goto Error // error reported during parsing
+ }
+ switch e.Kind {
+ case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
+ check.langCompat(e)
+ // The max. mantissa precision for untyped numeric values
+ // is 512 bits, or 4048 bits for each of the two integer
+ // parts of a fraction for floating-point numbers that are
+ // represented accurately in the go/constant package.
+ // Constant literals that are longer than this many bits
+ // are not meaningful; and excessively long constants may
+ // consume a lot of space and time for a useless conversion.
+ // Cap constant length with a generous upper limit that also
+ // allows for separators between all digits.
+ const limit = 10000
+ if len(e.Value) > limit {
+ check.errorf(e, InvalidConstVal, "excessively long constant: %s... (%d chars)", e.Value[:10], len(e.Value))
+ goto Error
+ }
+ }
+ x.setConst(e.Kind, e.Value)
+ if x.mode == invalid {
+ // The parser already establishes syntactic correctness.
+ // If we reach here it's because of number under-/overflow.
+ // TODO(gri) setConst (and in turn the go/constant package)
+ // should return an error describing the issue.
+ check.errorf(e, InvalidConstVal, "malformed constant: %s", e.Value)
+ goto Error
+ }
+ // Ensure that integer values don't overflow (go.dev/issue/54280).
+ x.expr = e // make sure that check.overflow below has an error position
+ check.overflow(x, opPos(x.expr))
+
+ case *syntax.FuncLit:
+ if sig, ok := check.typ(e.Type).(*Signature); ok {
+ // Set the Scope's extent to the complete "func (...) {...}"
+ // so that Scope.Innermost works correctly.
+ sig.scope.pos = e.Pos()
+ sig.scope.end = syntax.EndPos(e)
+ if !check.conf.IgnoreFuncBodies && e.Body != nil {
+ // Anonymous functions are considered part of the
+ // init expression/func declaration which contains
+ // them: use existing package-level declaration info.
+ decl := check.decl // capture for use in closure below
+ iota := check.iota // capture for use in closure below (go.dev/issue/22345)
+ // Don't type-check right away because the function may
+ // be part of a type definition to which the function
+ // body refers. Instead, type-check as soon as possible,
+ // but before the enclosing scope contents changes (go.dev/issue/22992).
+ check.later(func() {
+ check.funcBody(decl, "<function literal>", sig, e.Body, iota)
+ }).describef(e, "func literal")
+ }
+ x.mode = value
+ x.typ = sig
+ } else {
+ check.errorf(e, InvalidSyntaxTree, "invalid function literal %v", e)
+ goto Error
+ }
+
+ case *syntax.CompositeLit:
+ var typ, base Type
+
+ switch {
+ case e.Type != nil:
+ // composite literal type present - use it
+ // [...]T array types may only appear with composite literals.
+ // Check for them here so we don't have to handle ... in general.
+ if atyp, _ := e.Type.(*syntax.ArrayType); atyp != nil && atyp.Len == nil {
+ // We have an "open" [...]T array type.
+ // Create a new ArrayType with unknown length (-1)
+ // and finish setting it up after analyzing the literal.
+ typ = &Array{len: -1, elem: check.varType(atyp.Elem)}
+ base = typ
+ break
+ }
+ typ = check.typ(e.Type)
+ base = typ
+
+ case hint != nil:
+ // no composite literal type present - use hint (element type of enclosing type)
+ typ = hint
+ base, _ = deref(coreType(typ)) // *T implies &T{}
+ if base == nil {
+ check.errorf(e, InvalidLit, "invalid composite literal element type %s (no core type)", typ)
+ goto Error
+ }
+
+ default:
+ // TODO(gri) provide better error messages depending on context
+ check.error(e, UntypedLit, "missing type in composite literal")
+ goto Error
+ }
+
+ switch utyp := coreType(base).(type) {
+ case *Struct:
+ // Prevent crash if the struct referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.fields == nil {
+ check.error(e, InvalidTypeCycle, "invalid recursive type")
+ goto Error
+ }
+ if len(e.ElemList) == 0 {
+ break
+ }
+ // Convention for error messages on invalid struct literals:
+ // we mention the struct type only if it clarifies the error
+ // (e.g., a duplicate field error doesn't need the struct type).
+ fields := utyp.fields
+ if _, ok := e.ElemList[0].(*syntax.KeyValueExpr); ok {
+ // all elements must have keys
+ visited := make([]bool, len(fields))
+ for _, e := range e.ElemList {
+ kv, _ := e.(*syntax.KeyValueExpr)
+ if kv == nil {
+ check.error(e, MixedStructLit, "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ key, _ := kv.Key.(*syntax.Name)
+ // do all possible checks early (before exiting due to errors)
+ // so we don't drop information on the floor
+ check.expr(nil, x, kv.Value)
+ if key == nil {
+ check.errorf(kv, InvalidLitField, "invalid field name %s in struct literal", kv.Key)
+ continue
+ }
+ i := fieldIndex(utyp.fields, check.pkg, key.Value)
+ if i < 0 {
+ check.errorf(kv.Key, MissingLitField, "unknown field %s in struct literal of type %s", key.Value, base)
+ continue
+ }
+ fld := fields[i]
+ check.recordUse(key, fld)
+ etyp := fld.typ
+ check.assignment(x, etyp, "struct literal")
+ // 0 <= i < len(fields)
+ if visited[i] {
+ check.errorf(kv, DuplicateLitField, "duplicate field name %s in struct literal", key.Value)
+ continue
+ }
+ visited[i] = true
+ }
+ } else {
+ // no element must have a key
+ for i, e := range e.ElemList {
+ if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
+ check.error(kv, MixedStructLit, "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ check.expr(nil, x, e)
+ if i >= len(fields) {
+ check.errorf(x, InvalidStructLit, "too many values in struct literal of type %s", base)
+ break // cannot continue
+ }
+ // i < len(fields)
+ fld := fields[i]
+ if !fld.Exported() && fld.pkg != check.pkg {
+ check.errorf(x, UnexportedLitField, "implicit assignment to unexported field %s in struct literal of type %s", fld.name, base)
+ continue
+ }
+ etyp := fld.typ
+ check.assignment(x, etyp, "struct literal")
+ }
+ if len(e.ElemList) < len(fields) {
+ check.errorf(e.Rbrace, InvalidStructLit, "too few values in struct literal of type %s", base)
+ // ok to continue
+ }
+ }
+
+ case *Array:
+ // Prevent crash if the array referred to is not yet set up. Was go.dev/issue/18643.
+ // This is a stop-gap solution. Should use Checker.objPath to report entire
+ // path starting with earliest declaration in the source. TODO(gri) fix this.
+ if utyp.elem == nil {
+ check.error(e, InvalidTypeCycle, "invalid recursive type")
+ goto Error
+ }
+ n := check.indexedElts(e.ElemList, utyp.elem, utyp.len)
+ // If we have an array of unknown length (usually [...]T arrays, but also
+ // arrays [n]T where n is invalid) set the length now that we know it and
+ // record the type for the array (usually done by check.typ which is not
+ // called for [...]T). We handle [...]T arrays and arrays with invalid
+ // length the same here because it makes sense to "guess" the length for
+ // the latter if we have a composite literal; e.g. for [n]int{1, 2, 3}
+ // where n is invalid for some reason, it seems fair to assume it should
+ // be 3 (see also Checked.arrayLength and go.dev/issue/27346).
+ if utyp.len < 0 {
+ utyp.len = n
+ // e.Type is missing if we have a composite literal element
+ // that is itself a composite literal with omitted type. In
+ // that case there is nothing to record (there is no type in
+ // the source at that point).
+ if e.Type != nil {
+ check.recordTypeAndValue(e.Type, typexpr, utyp, nil)
+ }
+ }
+
+ case *Slice:
+ // Prevent crash if the slice referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.elem == nil {
+ check.error(e, InvalidTypeCycle, "invalid recursive type")
+ goto Error
+ }
+ check.indexedElts(e.ElemList, utyp.elem, -1)
+
+ case *Map:
+ // Prevent crash if the map referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.key == nil || utyp.elem == nil {
+ check.error(e, InvalidTypeCycle, "invalid recursive type")
+ goto Error
+ }
+ // If the map key type is an interface (but not a type parameter),
+ // the type of a constant key must be considered when checking for
+ // duplicates.
+ keyIsInterface := isNonTypeParamInterface(utyp.key)
+ visited := make(map[interface{}][]Type, len(e.ElemList))
+ for _, e := range e.ElemList {
+ kv, _ := e.(*syntax.KeyValueExpr)
+ if kv == nil {
+ check.error(e, MissingLitKey, "missing key in map literal")
+ continue
+ }
+ check.exprWithHint(x, kv.Key, utyp.key)
+ check.assignment(x, utyp.key, "map literal")
+ if x.mode == invalid {
+ continue
+ }
+ if x.mode == constant_ {
+ duplicate := false
+ xkey := keyVal(x.val)
+ if keyIsInterface {
+ for _, vtyp := range visited[xkey] {
+ if Identical(vtyp, x.typ) {
+ duplicate = true
+ break
+ }
+ }
+ visited[xkey] = append(visited[xkey], x.typ)
+ } else {
+ _, duplicate = visited[xkey]
+ visited[xkey] = nil
+ }
+ if duplicate {
+ check.errorf(x, DuplicateLitKey, "duplicate key %s in map literal", x.val)
+ continue
+ }
+ }
+ check.exprWithHint(x, kv.Value, utyp.elem)
+ check.assignment(x, utyp.elem, "map literal")
+ }
+
+ default:
+ // when "using" all elements unpack KeyValueExpr
+ // explicitly because check.use doesn't accept them
+ for _, e := range e.ElemList {
+ if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
+ // Ideally, we should also "use" kv.Key but we can't know
+ // if it's an externally defined struct key or not. Going
+ // forward anyway can lead to other errors. Give up instead.
+ e = kv.Value
+ }
+ check.use(e)
+ }
+ // if utyp is invalid, an error was reported before
+ if isValid(utyp) {
+ check.errorf(e, InvalidLit, "invalid composite literal type %s", typ)
+ goto Error
+ }
+ }
+
+ x.mode = value
+ x.typ = typ
+
+ case *syntax.ParenExpr:
+ // type inference doesn't go past parentheses (targe type T = nil)
+ kind := check.rawExpr(nil, x, e.X, nil, false)
+ x.expr = e
+ return kind
+
+ case *syntax.SelectorExpr:
+ check.selector(x, e, nil, false)
+
+ case *syntax.IndexExpr:
+ if check.indexExpr(x, e) {
+ if !enableReverseTypeInference {
+ T = nil
+ }
+ check.funcInst(T, e.Pos(), x, e, true)
+ }
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *syntax.SliceExpr:
+ check.sliceExpr(x, e)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *syntax.AssertExpr:
+ check.expr(nil, x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+ // x.(type) expressions are encoded via TypeSwitchGuards
+ if e.Type == nil {
+ check.error(e, InvalidSyntaxTree, "invalid use of AssertExpr")
+ goto Error
+ }
+ if isTypeParam(x.typ) {
+ check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x)
+ goto Error
+ }
+ if _, ok := under(x.typ).(*Interface); !ok {
+ check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x)
+ goto Error
+ }
+ T := check.varType(e.Type)
+ if !isValid(T) {
+ goto Error
+ }
+ check.typeAssertion(e, x, T, false)
+ x.mode = commaok
+ x.typ = T
+
+ case *syntax.TypeSwitchGuard:
+ // x.(type) expressions are handled explicitly in type switches
+ check.error(e, InvalidSyntaxTree, "use of .(type) outside type switch")
+ check.use(e.X)
+ goto Error
+
+ case *syntax.CallExpr:
+ return check.callExpr(x, e)
+
+ case *syntax.ListExpr:
+ // catch-all for unexpected expression lists
+ check.error(e, InvalidSyntaxTree, "unexpected list of expressions")
+ goto Error
+
+ // case *syntax.UnaryExpr:
+ // check.expr(x, e.X)
+ // if x.mode == invalid {
+ // goto Error
+ // }
+ // check.unary(x, e, e.Op)
+ // if x.mode == invalid {
+ // goto Error
+ // }
+ // if e.Op == token.ARROW {
+ // x.expr = e
+ // return statement // receive operations may appear in statement context
+ // }
+
+ // case *syntax.BinaryExpr:
+ // check.binary(x, e, e.X, e.Y, e.Op)
+ // if x.mode == invalid {
+ // goto Error
+ // }
+
+ case *syntax.Operation:
+ if e.Y == nil {
+ // unary expression
+ if e.Op == syntax.Mul {
+ // pointer indirection
+ check.exprOrType(x, e.X, false)
+ switch x.mode {
+ case invalid:
+ goto Error
+ case typexpr:
+ check.validVarType(e.X, x.typ)
+ x.typ = &Pointer{base: x.typ}
+ default:
+ var base Type
+ if !underIs(x.typ, func(u Type) bool {
+ p, _ := u.(*Pointer)
+ if p == nil {
+ check.errorf(x, InvalidIndirection, invalidOp+"cannot indirect %s", x)
+ return false
+ }
+ if base != nil && !Identical(p.base, base) {
+ check.errorf(x, InvalidIndirection, invalidOp+"pointers of %s must have identical base types", x)
+ return false
+ }
+ base = p.base
+ return true
+ }) {
+ goto Error
+ }
+ x.mode = variable
+ x.typ = base
+ }
+ break
+ }
+
+ check.unary(x, e)
+ if x.mode == invalid {
+ goto Error
+ }
+ if e.Op == syntax.Recv {
+ x.expr = e
+ return statement // receive operations may appear in statement context
+ }
+ break
+ }
+
+ // binary expression
+ check.binary(x, e, e.X, e.Y, e.Op)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *syntax.KeyValueExpr:
+ // key:value expressions are handled in composite literals
+ check.error(e, InvalidSyntaxTree, "no key:value expected")
+ goto Error
+
+ case *syntax.ArrayType, *syntax.SliceType, *syntax.StructType, *syntax.FuncType,
+ *syntax.InterfaceType, *syntax.MapType, *syntax.ChanType:
+ x.mode = typexpr
+ x.typ = check.typ(e)
+ // Note: rawExpr (caller of exprInternal) will call check.recordTypeAndValue
+ // even though check.typ has already called it. This is fine as both
+ // times the same expression and type are recorded. It is also not a
+ // performance issue because we only reach here for composite literal
+ // types, which are comparatively rare.
+
+ default:
+ panic(fmt.Sprintf("%s: unknown expression type %T", atPos(e), e))
+ }
+
+ // everything went well
+ x.expr = e
+ return expression
+
+Error:
+ x.mode = invalid
+ x.expr = e
+ return statement // avoid follow-up errors
+}
+
+// keyVal maps a complex, float, integer, string or boolean constant value
+// to the corresponding complex128, float64, int64, uint64, string, or bool
+// Go value if possible; otherwise it returns x.
+// A complex constant that can be represented as a float (such as 1.2 + 0i)
+// is returned as a floating point value; if a floating point value can be
+// represented as an integer (such as 1.0) it is returned as an integer value.
+// This ensures that constants of different kind but equal value (such as
+// 1.0 + 0i, 1.0, 1) result in the same value.
+func keyVal(x constant.Value) interface{} {
+ switch x.Kind() {
+ case constant.Complex:
+ f := constant.ToFloat(x)
+ if f.Kind() != constant.Float {
+ r, _ := constant.Float64Val(constant.Real(x))
+ i, _ := constant.Float64Val(constant.Imag(x))
+ return complex(r, i)
+ }
+ x = f
+ fallthrough
+ case constant.Float:
+ i := constant.ToInt(x)
+ if i.Kind() != constant.Int {
+ v, _ := constant.Float64Val(x)
+ return v
+ }
+ x = i
+ fallthrough
+ case constant.Int:
+ if v, ok := constant.Int64Val(x); ok {
+ return v
+ }
+ if v, ok := constant.Uint64Val(x); ok {
+ return v
+ }
+ case constant.String:
+ return constant.StringVal(x)
+ case constant.Bool:
+ return constant.BoolVal(x)
+ }
+ return x
+}
+
+// typeAssertion checks x.(T). The type of x must be an interface.
+func (check *Checker) typeAssertion(e syntax.Expr, x *operand, T Type, typeSwitch bool) {
+ var cause string
+ if check.assertableTo(x.typ, T, &cause) {
+ return // success
+ }
+
+ if typeSwitch {
+ check.errorf(e, ImpossibleAssert, "impossible type switch case: %s\n\t%s cannot have dynamic type %s %s", e, x, T, cause)
+ return
+ }
+
+ check.errorf(e, ImpossibleAssert, "impossible type assertion: %s\n\t%s does not implement %s %s", e, T, x.typ, cause)
+}
+
+// expr typechecks expression e and initializes x with the expression value.
+// If a non-nil target T is given and e is a generic function or
+// a function call, T is used to infer the type arguments for e.
+// The result must be a single value.
+// If an error occurred, x.mode is set to invalid.
+func (check *Checker) expr(T *target, x *operand, e syntax.Expr) {
+ check.rawExpr(T, x, e, nil, false)
+ check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
+ check.singleValue(x)
+}
+
+// genericExpr is like expr but the result may also be generic.
+func (check *Checker) genericExpr(x *operand, e syntax.Expr) {
+ check.rawExpr(nil, x, e, nil, true)
+ check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
+ check.singleValue(x)
+}
+
+// multiExpr typechecks e and returns its value (or values) in list.
+// If allowCommaOk is set and e is a map index, comma-ok, or comma-err
+// expression, the result is a two-element list containing the value
+// of e, and an untyped bool value or an error value, respectively.
+// If an error occurred, list[0] is not valid.
+func (check *Checker) multiExpr(e syntax.Expr, allowCommaOk bool) (list []*operand, commaOk bool) {
+ var x operand
+ check.rawExpr(nil, &x, e, nil, false)
+ check.exclude(&x, 1<<novalue|1<<builtin|1<<typexpr)
+
+ if t, ok := x.typ.(*Tuple); ok && x.mode != invalid {
+ // multiple values
+ list = make([]*operand, t.Len())
+ for i, v := range t.vars {
+ list[i] = &operand{mode: value, expr: e, typ: v.typ}
+ }
+ return
+ }
+
+ // exactly one (possibly invalid or comma-ok) value
+ list = []*operand{&x}
+ if allowCommaOk && (x.mode == mapindex || x.mode == commaok || x.mode == commaerr) {
+ x2 := &operand{mode: value, expr: e, typ: Typ[UntypedBool]}
+ if x.mode == commaerr {
+ x2.typ = universeError
+ }
+ list = append(list, x2)
+ commaOk = true
+ }
+
+ return
+}
+
+// exprWithHint typechecks expression e and initializes x with the expression value;
+// hint is the type of a composite literal element.
+// If an error occurred, x.mode is set to invalid.
+func (check *Checker) exprWithHint(x *operand, e syntax.Expr, hint Type) {
+ assert(hint != nil)
+ check.rawExpr(nil, x, e, hint, false)
+ check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
+ check.singleValue(x)
+}
+
+// exprOrType typechecks expression or type e and initializes x with the expression value or type.
+// If allowGeneric is set, the operand type may be an uninstantiated parameterized type or function
+// value.
+// If an error occurred, x.mode is set to invalid.
+func (check *Checker) exprOrType(x *operand, e syntax.Expr, allowGeneric bool) {
+ check.rawExpr(nil, x, e, nil, allowGeneric)
+ check.exclude(x, 1<<novalue)
+ check.singleValue(x)
+}
+
+// exclude reports an error if x.mode is in modeset and sets x.mode to invalid.
+// The modeset may contain any of 1<<novalue, 1<<builtin, 1<<typexpr.
+func (check *Checker) exclude(x *operand, modeset uint) {
+ if modeset&(1<<x.mode) != 0 {
+ var msg string
+ var code Code
+ switch x.mode {
+ case novalue:
+ if modeset&(1<<typexpr) != 0 {
+ msg = "%s used as value"
+ } else {
+ msg = "%s used as value or type"
+ }
+ code = TooManyValues
+ case builtin:
+ msg = "%s must be called"
+ code = UncalledBuiltin
+ case typexpr:
+ msg = "%s is not an expression"
+ code = NotAnExpr
+ default:
+ unreachable()
+ }
+ check.errorf(x, code, msg, x)
+ x.mode = invalid
+ }
+}
+
+// singleValue reports an error if x describes a tuple and sets x.mode to invalid.
+func (check *Checker) singleValue(x *operand) {
+ if x.mode == value {
+ // tuple types are never named - no need for underlying type below
+ if t, ok := x.typ.(*Tuple); ok {
+ assert(t.Len() != 1)
+ check.errorf(x, TooManyValues, "multiple-value %s in single-value context", x)
+ x.mode = invalid
+ }
+ }
+}
+
+// op2tok translates syntax.Operators into token.Tokens.
+var op2tok = [...]token.Token{
+ syntax.Def: token.ILLEGAL,
+ syntax.Not: token.NOT,
+ syntax.Recv: token.ILLEGAL,
+
+ syntax.OrOr: token.LOR,
+ syntax.AndAnd: token.LAND,
+
+ syntax.Eql: token.EQL,
+ syntax.Neq: token.NEQ,
+ syntax.Lss: token.LSS,
+ syntax.Leq: token.LEQ,
+ syntax.Gtr: token.GTR,
+ syntax.Geq: token.GEQ,
+
+ syntax.Add: token.ADD,
+ syntax.Sub: token.SUB,
+ syntax.Or: token.OR,
+ syntax.Xor: token.XOR,
+
+ syntax.Mul: token.MUL,
+ syntax.Div: token.QUO,
+ syntax.Rem: token.REM,
+ syntax.And: token.AND,
+ syntax.AndNot: token.AND_NOT,
+ syntax.Shl: token.SHL,
+ syntax.Shr: token.SHR,
+}
diff --git a/src/cmd/compile/internal/types2/gccgosizes.go b/src/cmd/compile/internal/types2/gccgosizes.go
new file mode 100644
index 0000000..4602001
--- /dev/null
+++ b/src/cmd/compile/internal/types2/gccgosizes.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a copy of the file generated during the gccgo build process.
+// Last update 2019-01-22.
+
+package types2
+
+var gccgoArchSizes = map[string]*StdSizes{
+ "386": {4, 4},
+ "alpha": {8, 8},
+ "amd64": {8, 8},
+ "amd64p32": {4, 8},
+ "arm": {4, 8},
+ "armbe": {4, 8},
+ "arm64": {8, 8},
+ "arm64be": {8, 8},
+ "ia64": {8, 8},
+ "loong64": {8, 8},
+ "m68k": {4, 2},
+ "mips": {4, 8},
+ "mipsle": {4, 8},
+ "mips64": {8, 8},
+ "mips64le": {8, 8},
+ "mips64p32": {4, 8},
+ "mips64p32le": {4, 8},
+ "nios2": {4, 8},
+ "ppc": {4, 8},
+ "ppc64": {8, 8},
+ "ppc64le": {8, 8},
+ "riscv": {4, 8},
+ "riscv64": {8, 8},
+ "s390": {4, 8},
+ "s390x": {8, 8},
+ "sh": {4, 8},
+ "shbe": {4, 8},
+ "sparc": {4, 8},
+ "sparc64": {8, 8},
+ "wasm": {8, 8},
+}
diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go
new file mode 100644
index 0000000..d204d9f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/gcsizes.go
@@ -0,0 +1,170 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+type gcSizes struct {
+ WordSize int64 // word size in bytes - must be >= 4 (32bits)
+ MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *gcSizes) Alignof(T Type) (result int64) {
+ defer func() {
+ assert(result >= 1)
+ }()
+
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := under(T).(type) {
+ case *Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return s.Alignof(t.elem)
+ case *Struct:
+ if len(t.fields) == 0 && IsSyncAtomicAlign64(T) {
+ // Special case: sync/atomic.align64 is an
+ // empty struct we recognize as a signal that
+ // the struct it contains must be
+ // 64-bit-aligned.
+ //
+ // This logic is equivalent to the logic in
+ // cmd/compile/internal/types/size.go:calcStructOffset
+ return 8
+ }
+
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for _, f := range t.fields {
+ if a := s.Alignof(f.typ); a > max {
+ max = a
+ }
+ }
+ return max
+ case *Slice, *Interface:
+ // Multiword data structures are effectively structs
+ // in which each element has size WordSize.
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Alignof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize
+ case *Basic:
+ // Strings are like slices and interfaces.
+ if t.Info()&IsString != 0 {
+ return s.WordSize
+ }
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ a := s.Sizeof(T) // may be 0 or negative
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ // complex{64,128} are aligned like [2]float{32,64}.
+ if isComplex(T) {
+ a /= 2
+ }
+ if a > s.MaxAlign {
+ return s.MaxAlign
+ }
+ return a
+}
+
+func (s *gcSizes) Offsetsof(fields []*Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var offs int64
+ for i, f := range fields {
+ if offs < 0 {
+ // all remaining offsets are too large
+ offsets[i] = -1
+ continue
+ }
+ // offs >= 0
+ a := s.Alignof(f.typ)
+ offs = align(offs, a) // possibly < 0 if align overflows
+ offsets[i] = offs
+ if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 {
+ offs += d // ok to overflow to < 0
+ } else {
+ offs = -1 // f.typ or offs is too large
+ }
+ }
+ return offsets
+}
+
+func (s *gcSizes) Sizeof(T Type) int64 {
+ switch t := under(T).(type) {
+ case *Basic:
+ assert(isTyped(T))
+ k := t.kind
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ if k == String {
+ return s.WordSize * 2
+ }
+ case *Array:
+ n := t.len
+ if n <= 0 {
+ return 0
+ }
+ // n > 0
+ esize := s.Sizeof(t.elem)
+ if esize < 0 {
+ return -1 // element too large
+ }
+ if esize == 0 {
+ return 0 // 0-size element
+ }
+ // esize > 0
+ // Final size is esize * n; and size must be <= maxInt64.
+ const maxInt64 = 1<<63 - 1
+ if esize > maxInt64/n {
+ return -1 // esize * n overflows
+ }
+ return esize * n
+ case *Slice:
+ return s.WordSize * 3
+ case *Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ offsets := s.Offsetsof(t.fields)
+ offs := offsets[n-1]
+ size := s.Sizeof(t.fields[n-1].typ)
+ if offs < 0 || size < 0 {
+ return -1 // type too large
+ }
+ // gc: The last field of a non-zero-sized struct is not allowed to
+ // have size 0.
+ if offs > 0 && size == 0 {
+ size = 1
+ }
+ // gc: Size includes alignment padding.
+ return align(offs+size, s.Alignof(t)) // may overflow to < 0 which is ok
+ case *Interface:
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Sizeof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize * 2
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ return s.WordSize // catch-all
+}
+
+// gcSizesFor returns the Sizes used by gc for an architecture.
+// The result is a nil *gcSizes pointer (which is not a valid types.Sizes)
+// if a compiler/architecture pair is not known.
+func gcSizesFor(compiler, arch string) *gcSizes {
+ if compiler != "gc" {
+ return nil
+ }
+ return gcArchSizes[arch]
+}
diff --git a/src/cmd/compile/internal/types2/hilbert_test.go b/src/cmd/compile/internal/types2/hilbert_test.go
new file mode 100644
index 0000000..df8a3e7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/hilbert_test.go
@@ -0,0 +1,206 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+var (
+ H = flag.Int("H", 5, "Hilbert matrix size")
+ out = flag.String("out", "", "write generated program to out")
+)
+
+func TestHilbert(t *testing.T) {
+ // generate source
+ src := program(*H, *out)
+ if *out != "" {
+ os.WriteFile(*out, src, 0666)
+ return
+ }
+
+ DefPredeclaredTestFuncs() // declare assert (used by code generated by verify)
+ mustTypecheck(string(src), nil, nil)
+}
+
+func program(n int, out string) []byte {
+ var g gen
+
+ g.p(`// Code generated by: go test -run=Hilbert -H=%d -out=%q. DO NOT EDIT.
+
+// +`+`build ignore
+
+// This program tests arbitrary precision constant arithmetic
+// by generating the constant elements of a Hilbert matrix H,
+// its inverse I, and the product P = H*I. The product should
+// be the identity matrix.
+package main
+
+func main() {
+ if !ok {
+ printProduct()
+ return
+ }
+ println("PASS")
+}
+
+`, n, out)
+ g.hilbert(n)
+ g.inverse(n)
+ g.product(n)
+ g.verify(n)
+ g.printProduct(n)
+ g.binomials(2*n - 1)
+ g.factorials(2*n - 1)
+
+ return g.Bytes()
+}
+
+type gen struct {
+ bytes.Buffer
+}
+
+func (g *gen) p(format string, args ...interface{}) {
+ fmt.Fprintf(&g.Buffer, format, args...)
+}
+
+func (g *gen) hilbert(n int) {
+ g.p(`// Hilbert matrix, n = %d
+const (
+`, n)
+ for i := 0; i < n; i++ {
+ g.p("\t")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("h%d_%d", i, j)
+ }
+ if i == 0 {
+ g.p(" = ")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("1.0/(iota + %d)", j+1)
+ }
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) inverse(n int) {
+ g.p(`// Inverse Hilbert matrix
+const (
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ s := "+"
+ if (i+j)&1 != 0 {
+ s = "-"
+ }
+ g.p("\ti%d_%d = %s%d * b%d_%d * b%d_%d * b%d_%d * b%d_%d\n",
+ i, j, s, i+j+1, n+i, n-j-1, n+j, n-i-1, i+j, i, i+j, i)
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) product(n int) {
+ g.p(`// Product matrix
+const (
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ g.p("\tp%d_%d = ", i, j)
+ for k := 0; k < n; k++ {
+ if k > 0 {
+ g.p(" + ")
+ }
+ g.p("h%d_%d*i%d_%d", i, k, k, j)
+ }
+ g.p("\n")
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) verify(n int) {
+ g.p(`// Verify that product is the identity matrix
+const ok =
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ if j == 0 {
+ g.p("\t")
+ } else {
+ g.p(" && ")
+ }
+ v := 0
+ if i == j {
+ v = 1
+ }
+ g.p("p%d_%d == %d", i, j, v)
+ }
+ g.p(" &&\n")
+ }
+ g.p("\ttrue\n\n")
+
+ // verify ok at type-check time
+ if *out == "" {
+ g.p("const _ = assert(ok)\n\n")
+ }
+}
+
+func (g *gen) printProduct(n int) {
+ g.p("func printProduct() {\n")
+ for i := 0; i < n; i++ {
+ g.p("\tprintln(")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("p%d_%d", i, j)
+ }
+ g.p(")\n")
+ }
+ g.p("}\n\n")
+}
+
+func (g *gen) binomials(n int) {
+ g.p(`// Binomials
+const (
+`)
+ for j := 0; j <= n; j++ {
+ if j > 0 {
+ g.p("\n")
+ }
+ for k := 0; k <= j; k++ {
+ g.p("\tb%d_%d = f%d / (f%d*f%d)\n", j, k, j, k, j-k)
+ }
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) factorials(n int) {
+ g.p(`// Factorials
+const (
+ f0 = 1
+ f1 = 1
+`)
+ for i := 2; i <= n; i++ {
+ g.p("\tf%d = f%d * %d\n", i, i-1, i)
+ }
+ g.p(")\n\n")
+}
diff --git a/src/cmd/compile/internal/types2/importer_test.go b/src/cmd/compile/internal/types2/importer_test.go
new file mode 100644
index 0000000..6b9b500
--- /dev/null
+++ b/src/cmd/compile/internal/types2/importer_test.go
@@ -0,0 +1,35 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the (temporary) plumbing to get importing to work.
+
+package types2_test
+
+import (
+ gcimporter "cmd/compile/internal/importer"
+ "cmd/compile/internal/types2"
+ "io"
+)
+
+func defaultImporter() types2.Importer {
+ return &gcimports{
+ packages: make(map[string]*types2.Package),
+ }
+}
+
+type gcimports struct {
+ packages map[string]*types2.Package
+ lookup func(path string) (io.ReadCloser, error)
+}
+
+func (m *gcimports) Import(path string) (*types2.Package, error) {
+ return m.ImportFrom(path, "" /* no vendoring */, 0)
+}
+
+func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*types2.Package, error) {
+ if mode != 0 {
+ panic("mode must be 0")
+ }
+ return gcimporter.Import(m.packages, path, srcDir, m.lookup)
+}
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
new file mode 100644
index 0000000..4db2213
--- /dev/null
+++ b/src/cmd/compile/internal/types2/index.go
@@ -0,0 +1,464 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of index/slice expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+ . "internal/types/errors"
+)
+
+// If e is a valid function instantiation, indexExpr returns true.
+// In that case x represents the uninstantiated function value and
+// it is the caller's responsibility to instantiate the function.
+func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst bool) {
+ check.exprOrType(x, e.X, true)
+ // x may be generic
+
+ switch x.mode {
+ case invalid:
+ check.use(e.Index)
+ return false
+
+ case typexpr:
+ // type instantiation
+ x.mode = invalid
+ // TODO(gri) here we re-evaluate e.X - try to avoid this
+ x.typ = check.varType(e)
+ if isValid(x.typ) {
+ x.mode = typexpr
+ }
+ return false
+
+ case value:
+ if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
+ // function instantiation
+ return true
+ }
+ }
+
+ // x should not be generic at this point, but be safe and check
+ check.nonGeneric(nil, x)
+ if x.mode == invalid {
+ return false
+ }
+
+ // ordinary index expression
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch typ := under(x.typ).(type) {
+ case *Basic:
+ if isString(typ) {
+ valid = true
+ if x.mode == constant_ {
+ length = int64(len(constant.StringVal(x.val)))
+ }
+ // an indexed string always yields a byte value
+ // (not a constant) even if the string and the
+ // index are constant
+ x.mode = value
+ x.typ = universeByte // use 'byte' name
+ }
+
+ case *Array:
+ valid = true
+ length = typ.len
+ if x.mode != variable {
+ x.mode = value
+ }
+ x.typ = typ.elem
+
+ case *Pointer:
+ if typ, _ := under(typ.base).(*Array); typ != nil {
+ valid = true
+ length = typ.len
+ x.mode = variable
+ x.typ = typ.elem
+ }
+
+ case *Slice:
+ valid = true
+ x.mode = variable
+ x.typ = typ.elem
+
+ case *Map:
+ index := check.singleIndex(e)
+ if index == nil {
+ x.mode = invalid
+ return false
+ }
+ var key operand
+ check.expr(nil, &key, index)
+ check.assignment(&key, typ.key, "map index")
+ // ok to continue even if indexing failed - map element type is known
+ x.mode = mapindex
+ x.typ = typ.elem
+ x.expr = e
+ return false
+
+ case *Interface:
+ if !isTypeParam(x.typ) {
+ break
+ }
+ // TODO(gri) report detailed failure cause for better error messages
+ var key, elem Type // key != nil: we must have all maps
+ mode := variable // non-maps result mode
+ // TODO(gri) factor out closure and use it for non-typeparam cases as well
+ if typ.typeSet().underIs(func(u Type) bool {
+ l := int64(-1) // valid if >= 0
+ var k, e Type // k is only set for maps
+ switch t := u.(type) {
+ case *Basic:
+ if isString(t) {
+ e = universeByte
+ mode = value
+ }
+ case *Array:
+ l = t.len
+ e = t.elem
+ if x.mode != variable {
+ mode = value
+ }
+ case *Pointer:
+ if t, _ := under(t.base).(*Array); t != nil {
+ l = t.len
+ e = t.elem
+ }
+ case *Slice:
+ e = t.elem
+ case *Map:
+ k = t.key
+ e = t.elem
+ }
+ if e == nil {
+ return false
+ }
+ if elem == nil {
+ // first type
+ length = l
+ key, elem = k, e
+ return true
+ }
+ // all map keys must be identical (incl. all nil)
+ // (that is, we cannot mix maps with other types)
+ if !Identical(key, k) {
+ return false
+ }
+ // all element types must be identical
+ if !Identical(elem, e) {
+ return false
+ }
+ // track the minimal length for arrays, if any
+ if l >= 0 && l < length {
+ length = l
+ }
+ return true
+ }) {
+ // For maps, the index expression must be assignable to the map key type.
+ if key != nil {
+ index := check.singleIndex(e)
+ if index == nil {
+ x.mode = invalid
+ return false
+ }
+ var k operand
+ check.expr(nil, &k, index)
+ check.assignment(&k, key, "map index")
+ // ok to continue even if indexing failed - map element type is known
+ x.mode = mapindex
+ x.typ = elem
+ x.expr = e
+ return false
+ }
+
+ // no maps
+ valid = true
+ x.mode = mode
+ x.typ = elem
+ }
+ }
+
+ if !valid {
+ check.errorf(e.Pos(), NonSliceableOperand, invalidOp+"cannot index %s", x)
+ check.use(e.Index)
+ x.mode = invalid
+ return false
+ }
+
+ index := check.singleIndex(e)
+ if index == nil {
+ x.mode = invalid
+ return false
+ }
+
+ // In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0)
+ // the element type may be accessed before it's set. Make sure we have
+ // a valid type.
+ if x.typ == nil {
+ x.typ = Typ[Invalid]
+ }
+
+ check.index(index, length)
+ return false
+}
+
+func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
+ check.expr(nil, x, e.X)
+ if x.mode == invalid {
+ check.use(e.Index[:]...)
+ return
+ }
+
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch u := coreString(x.typ).(type) {
+ case nil:
+ check.errorf(x, NonSliceableOperand, invalidOp+"cannot slice %s: %s has no core type", x, x.typ)
+ x.mode = invalid
+ return
+
+ case *Basic:
+ if isString(u) {
+ if e.Full {
+ at := e.Index[2]
+ if at == nil {
+ at = e // e.Index[2] should be present but be careful
+ }
+ check.error(at, InvalidSliceExpr, invalidOp+"3-index slice of string")
+ x.mode = invalid
+ return
+ }
+ valid = true
+ if x.mode == constant_ {
+ length = int64(len(constant.StringVal(x.val)))
+ }
+ // spec: "For untyped string operands the result
+ // is a non-constant value of type string."
+ if isUntyped(x.typ) {
+ x.typ = Typ[String]
+ }
+ }
+
+ case *Array:
+ valid = true
+ length = u.len
+ if x.mode != variable {
+ check.errorf(x, NonSliceableOperand, invalidOp+"%s (slice of unaddressable value)", x)
+ x.mode = invalid
+ return
+ }
+ x.typ = &Slice{elem: u.elem}
+
+ case *Pointer:
+ if u, _ := under(u.base).(*Array); u != nil {
+ valid = true
+ length = u.len
+ x.typ = &Slice{elem: u.elem}
+ }
+
+ case *Slice:
+ valid = true
+ // x.typ doesn't change
+ }
+
+ if !valid {
+ check.errorf(x, NonSliceableOperand, invalidOp+"cannot slice %s", x)
+ x.mode = invalid
+ return
+ }
+
+ x.mode = value
+
+ // spec: "Only the first index may be omitted; it defaults to 0."
+ if e.Full && (e.Index[1] == nil || e.Index[2] == nil) {
+ check.error(e, InvalidSyntaxTree, "2nd and 3rd index required in 3-index slice")
+ x.mode = invalid
+ return
+ }
+
+ // check indices
+ var ind [3]int64
+ for i, expr := range e.Index {
+ x := int64(-1)
+ switch {
+ case expr != nil:
+ // The "capacity" is only known statically for strings, arrays,
+ // and pointers to arrays, and it is the same as the length for
+ // those types.
+ max := int64(-1)
+ if length >= 0 {
+ max = length + 1
+ }
+ if _, v := check.index(expr, max); v >= 0 {
+ x = v
+ }
+ case i == 0:
+ // default is 0 for the first index
+ x = 0
+ case length >= 0:
+ // default is length (== capacity) otherwise
+ x = length
+ }
+ ind[i] = x
+ }
+
+ // constant indices must be in range
+ // (check.index already checks that existing indices >= 0)
+L:
+ for i, x := range ind[:len(ind)-1] {
+ if x > 0 {
+ for j, y := range ind[i+1:] {
+ if y >= 0 && y < x {
+ // The value y corresponds to the expression e.Index[i+1+j].
+ // Because y >= 0, it must have been set from the expression
+ // when checking indices and thus e.Index[i+1+j] is not nil.
+ check.errorf(e.Index[i+1+j], SwappedSliceIndices, "invalid slice indices: %d < %d", y, x)
+ break L // only report one error, ok to continue
+ }
+ }
+ }
+ }
+}
+
+// singleIndex returns the (single) index from the index expression e.
+// If the index is missing, or if there are multiple indices, an error
+// is reported and the result is nil.
+func (check *Checker) singleIndex(e *syntax.IndexExpr) syntax.Expr {
+ index := e.Index
+ if index == nil {
+ check.errorf(e, InvalidSyntaxTree, "missing index for %s", e.X)
+ return nil
+ }
+ if l, _ := index.(*syntax.ListExpr); l != nil {
+ if n := len(l.ElemList); n <= 1 {
+ check.errorf(e, InvalidSyntaxTree, "invalid use of ListExpr for index expression %v with %d indices", e, n)
+ return nil
+ }
+ // len(l.ElemList) > 1
+ check.error(l.ElemList[1], InvalidIndex, invalidOp+"more than one index")
+ index = l.ElemList[0] // continue with first index
+ }
+ return index
+}
+
+// index checks an index expression for validity.
+// If max >= 0, it is the upper bound for index.
+// If the result typ is != Typ[Invalid], index is valid and typ is its (possibly named) integer type.
+// If the result val >= 0, index is valid and val is its constant int value.
+func (check *Checker) index(index syntax.Expr, max int64) (typ Type, val int64) {
+ typ = Typ[Invalid]
+ val = -1
+
+ var x operand
+ check.expr(nil, &x, index)
+ if !check.isValidIndex(&x, InvalidIndex, "index", false) {
+ return
+ }
+
+ if x.mode != constant_ {
+ return x.typ, -1
+ }
+
+ if x.val.Kind() == constant.Unknown {
+ return
+ }
+
+ v, ok := constant.Int64Val(x.val)
+ assert(ok)
+ if max >= 0 && v >= max {
+ check.errorf(&x, InvalidIndex, invalidArg+"index %s out of bounds [0:%d]", x.val.String(), max)
+ return
+ }
+
+ // 0 <= v [ && v < max ]
+ return x.typ, v
+}
+
+// isValidIndex checks whether operand x satisfies the criteria for integer
+// index values. If allowNegative is set, a constant operand may be negative.
+// If the operand is not valid, an error is reported (using what as context)
+// and the result is false.
+func (check *Checker) isValidIndex(x *operand, code Code, what string, allowNegative bool) bool {
+ if x.mode == invalid {
+ return false
+ }
+
+ // spec: "a constant index that is untyped is given type int"
+ check.convertUntyped(x, Typ[Int])
+ if x.mode == invalid {
+ return false
+ }
+
+ // spec: "the index x must be of integer type or an untyped constant"
+ if !allInteger(x.typ) {
+ check.errorf(x, code, invalidArg+"%s %s must be integer", what, x)
+ return false
+ }
+
+ if x.mode == constant_ {
+ // spec: "a constant index must be non-negative ..."
+ if !allowNegative && constant.Sign(x.val) < 0 {
+ check.errorf(x, code, invalidArg+"%s %s must not be negative", what, x)
+ return false
+ }
+
+ // spec: "... and representable by a value of type int"
+ if !representableConst(x.val, check, Typ[Int], &x.val) {
+ check.errorf(x, code, invalidArg+"%s %s overflows int", what, x)
+ return false
+ }
+ }
+
+ return true
+}
+
+// indexedElts checks the elements (elts) of an array or slice composite literal
+// against the literal's element type (typ), and the element indices against
+// the literal length if known (length >= 0). It returns the length of the
+// literal (maximum index value + 1).
+func (check *Checker) indexedElts(elts []syntax.Expr, typ Type, length int64) int64 {
+ visited := make(map[int64]bool, len(elts))
+ var index, max int64
+ for _, e := range elts {
+ // determine and check index
+ validIndex := false
+ eval := e
+ if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
+ if typ, i := check.index(kv.Key, length); isValid(typ) {
+ if i >= 0 {
+ index = i
+ validIndex = true
+ } else {
+ check.errorf(e, InvalidLitIndex, "index %s must be integer constant", kv.Key)
+ }
+ }
+ eval = kv.Value
+ } else if length >= 0 && index >= length {
+ check.errorf(e, OversizeArrayLit, "index %d is out of bounds (>= %d)", index, length)
+ } else {
+ validIndex = true
+ }
+
+ // if we have a valid index, check for duplicate entries
+ if validIndex {
+ if visited[index] {
+ check.errorf(e, DuplicateLitKey, "duplicate index %d in array or slice literal", index)
+ }
+ visited[index] = true
+ }
+ index++
+ if index > max {
+ max = index
+ }
+
+ // check element against composite literal element type
+ var x operand
+ check.exprWithHint(&x, eval, typ)
+ check.assignment(&x, typ, "array or slice literal")
+ }
+ return max
+}
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
new file mode 100644
index 0000000..a520f70
--- /dev/null
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -0,0 +1,790 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type parameter inference.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ . "internal/types/errors"
+ "strings"
+)
+
+// If enableReverseTypeInference is set, uninstantiated and
+// partially instantiated generic functions may be assigned
+// (incl. returned) to variables of function type and type
+// inference will attempt to infer the missing type arguments.
+// Available with go1.21.
+const enableReverseTypeInference = true // disable for debugging
+
+// infer attempts to infer the complete set of type arguments for generic function instantiation/call
+// based on the given type parameters tparams, type arguments targs, function parameters params, and
+// function arguments args, if any. There must be at least one type parameter, no more type arguments
+// than type parameters, and params and args must match in number (incl. zero).
+// If reverse is set, an error message's contents are reversed for a better error message for some
+// errors related to reverse type inference (where the function call is synthetic).
+// If successful, infer returns the complete list of given and inferred type arguments, one for each
+// type parameter. Otherwise the result is nil and appropriate errors will be reported.
+func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand, reverse bool) (inferred []Type) {
+ // Don't verify result conditions if there's no error handler installed:
+ // in that case, an error leads to an exit panic and the result value may
+ // be incorrect. But in that case it doesn't matter because callers won't
+ // be able to use it either.
+ if check.conf.Error != nil {
+ defer func() {
+ assert(inferred == nil || len(inferred) == len(tparams) && !containsNil(inferred))
+ }()
+ }
+
+ if traceInference {
+ check.dump("== infer : %s%s ➞ %s", tparams, params, targs) // aligned with rename print below
+ defer func() {
+ check.dump("=> %s ➞ %s\n", tparams, inferred)
+ }()
+ }
+
+ // There must be at least one type parameter, and no more type arguments than type parameters.
+ n := len(tparams)
+ assert(n > 0 && len(targs) <= n)
+
+ // Parameters and arguments must match in number.
+ assert(params.Len() == len(args))
+
+ // If we already have all type arguments, we're done.
+ if len(targs) == n && !containsNil(targs) {
+ return targs
+ }
+
+ // If we have invalid (ordinary) arguments, an error was reported before.
+ // Avoid additional inference errors and exit early (go.dev/issue/60434).
+ for _, arg := range args {
+ if arg.mode == invalid {
+ return nil
+ }
+ }
+
+ // Make sure we have a "full" list of type arguments, some of which may
+ // be nil (unknown). Make a copy so as to not clobber the incoming slice.
+ if len(targs) < n {
+ targs2 := make([]Type, n)
+ copy(targs2, targs)
+ targs = targs2
+ }
+ // len(targs) == n
+
+ // Continue with the type arguments we have. Avoid matching generic
+ // parameters that already have type arguments against function arguments:
+ // It may fail because matching uses type identity while parameter passing
+ // uses assignment rules. Instantiate the parameter list with the type
+ // arguments we have, and continue with that parameter list.
+
+ // Substitute type arguments for their respective type parameters in params,
+ // if any. Note that nil targs entries are ignored by check.subst.
+ // We do this for better error messages; it's not needed for correctness.
+ // For instance, given:
+ //
+ // func f[P, Q any](P, Q) {}
+ //
+ // func _(s string) {
+ // f[int](s, s) // ERROR
+ // }
+ //
+ // With substitution, we get the error:
+ // "cannot use s (variable of type string) as int value in argument to f[int]"
+ //
+ // Without substitution we get the (worse) error:
+ // "type string of s does not match inferred type int for P"
+ // even though the type int was provided (not inferred) for P.
+ //
+ // TODO(gri) We might be able to finesse this in the error message reporting
+ // (which only happens in case of an error) and then avoid doing
+ // the substitution (which always happens).
+ if params.Len() > 0 {
+ smap := makeSubstMap(tparams, targs)
+ params = check.subst(nopos, params, smap, nil, check.context()).(*Tuple)
+ }
+
+ // Unify parameter and argument types for generic parameters with typed arguments
+ // and collect the indices of generic parameters with untyped arguments.
+ // Terminology: generic parameter = function parameter with a type-parameterized type
+ u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21))
+
+ errorf := func(tpar, targ Type, arg *operand) {
+ // provide a better error message if we can
+ targs := u.inferred(tparams)
+ if targs[0] == nil {
+ // The first type parameter couldn't be inferred.
+ // If none of them could be inferred, don't try
+ // to provide the inferred type in the error msg.
+ allFailed := true
+ for _, targ := range targs {
+ if targ != nil {
+ allFailed = false
+ break
+ }
+ }
+ if allFailed {
+ check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s (cannot infer %s)", targ, arg.expr, tpar, typeParamsString(tparams))
+ return
+ }
+ }
+ smap := makeSubstMap(tparams, targs)
+ // TODO(gri): pass a poser here, rather than arg.Pos().
+ inferred := check.subst(arg.Pos(), tpar, smap, nil, check.context())
+ // CannotInferTypeArgs indicates a failure of inference, though the actual
+ // error may be better attributed to a user-provided type argument (hence
+ // InvalidTypeArg). We can't differentiate these cases, so fall back on
+ // the more general CannotInferTypeArgs.
+ if inferred != tpar {
+ if reverse {
+ check.errorf(arg, CannotInferTypeArgs, "inferred type %s for %s does not match type %s of %s", inferred, tpar, targ, arg.expr)
+ } else {
+ check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match inferred type %s for %s", targ, arg.expr, inferred, tpar)
+ }
+ } else {
+ check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s", targ, arg.expr, tpar)
+ }
+ }
+
+ // indices of generic parameters with untyped arguments, for later use
+ var untyped []int
+
+ // --- 1 ---
+ // use information from function arguments
+
+ if traceInference {
+ u.tracef("== function parameters: %s", params)
+ u.tracef("-- function arguments : %s", args)
+ }
+
+ for i, arg := range args {
+ if arg.mode == invalid {
+ // An error was reported earlier. Ignore this arg
+ // and continue, we may still be able to infer all
+ // targs resulting in fewer follow-on errors.
+ // TODO(gri) determine if we still need this check
+ continue
+ }
+ par := params.At(i)
+ if isParameterized(tparams, par.typ) || isParameterized(tparams, arg.typ) {
+ // Function parameters are always typed. Arguments may be untyped.
+ // Collect the indices of untyped arguments and handle them later.
+ if isTyped(arg.typ) {
+ if !u.unify(par.typ, arg.typ, assign) {
+ errorf(par.typ, arg.typ, arg)
+ return nil
+ }
+ } else if _, ok := par.typ.(*TypeParam); ok && !arg.isNil() {
+ // Since default types are all basic (i.e., non-composite) types, an
+ // untyped argument will never match a composite parameter type; the
+ // only parameter type it can possibly match against is a *TypeParam.
+ // Thus, for untyped arguments we only need to look at parameter types
+ // that are single type parameters.
+ // Also, untyped nils don't have a default type and can be ignored.
+ untyped = append(untyped, i)
+ }
+ }
+ }
+
+ if traceInference {
+ inferred := u.inferred(tparams)
+ u.tracef("=> %s ➞ %s\n", tparams, inferred)
+ }
+
+ // --- 2 ---
+ // use information from type parameter constraints
+
+ if traceInference {
+ u.tracef("== type parameters: %s", tparams)
+ }
+
+ // Unify type parameters with their constraints as long
+ // as progress is being made.
+ //
+ // This is an O(n^2) algorithm where n is the number of
+ // type parameters: if there is progress, at least one
+ // type argument is inferred per iteration, and we have
+ // a doubly nested loop.
+ //
+ // In practice this is not a problem because the number
+ // of type parameters tends to be very small (< 5 or so).
+ // (It should be possible for unification to efficiently
+ // signal newly inferred type arguments; then the loops
+ // here could handle the respective type parameters only,
+ // but that will come at a cost of extra complexity which
+ // may not be worth it.)
+ for i := 0; ; i++ {
+ nn := u.unknowns()
+ if traceInference {
+ if i > 0 {
+ fmt.Println()
+ }
+ u.tracef("-- iteration %d", i)
+ }
+
+ for _, tpar := range tparams {
+ tx := u.at(tpar)
+ core, single := coreTerm(tpar)
+ if traceInference {
+ u.tracef("-- type parameter %s = %s: core(%s) = %s, single = %v", tpar, tx, tpar, core, single)
+ }
+
+ // If there is a core term (i.e., a core type with tilde information)
+ // unify the type parameter with the core type.
+ if core != nil {
+ // A type parameter can be unified with its core type in two cases.
+ switch {
+ case tx != nil:
+ // The corresponding type argument tx is known. There are 2 cases:
+ // 1) If the core type has a tilde, per spec requirement for tilde
+ // elements, the core type is an underlying (literal) type.
+ // And because of the tilde, the underlying type of tx must match
+ // against the core type.
+ // But because unify automatically matches a defined type against
+ // an underlying literal type, we can simply unify tx with the
+ // core type.
+ // 2) If the core type doesn't have a tilde, we also must unify tx
+ // with the core type.
+ if !u.unify(tx, core.typ, 0) {
+ // TODO(gri) Type parameters that appear in the constraint and
+ // for which we have type arguments inferred should
+ // use those type arguments for a better error message.
+ check.errorf(pos, CannotInferTypeArgs, "%s (type %s) does not satisfy %s", tpar, tx, tpar.Constraint())
+ return nil
+ }
+ case single && !core.tilde:
+ // The corresponding type argument tx is unknown and there's a single
+ // specific type and no tilde.
+ // In this case the type argument must be that single type; set it.
+ u.set(tpar, core.typ)
+ }
+ } else {
+ if tx != nil {
+ // We don't have a core type, but the type argument tx is known.
+ // It must have (at least) all the methods of the type constraint,
+ // and the method signatures must unify; otherwise tx cannot satisfy
+ // the constraint.
+ // TODO(gri) Now that unification handles interfaces, this code can
+ // be reduced to calling u.unify(tx, tpar.iface(), assign)
+ // (which will compare signatures exactly as we do below).
+ // We leave it as is for now because missingMethod provides
+ // a failure cause which allows for a better error message.
+ // Eventually, unify should return an error with cause.
+ var cause string
+ constraint := tpar.iface()
+ if m, _ := check.missingMethod(tx, constraint, true, func(x, y Type) bool { return u.unify(x, y, exact) }, &cause); m != nil {
+ // TODO(gri) better error message (see TODO above)
+ check.errorf(pos, CannotInferTypeArgs, "%s (type %s) does not satisfy %s %s", tpar, tx, tpar.Constraint(), cause)
+ return nil
+ }
+ }
+ }
+ }
+
+ if u.unknowns() == nn {
+ break // no progress
+ }
+ }
+
+ if traceInference {
+ inferred := u.inferred(tparams)
+ u.tracef("=> %s ➞ %s\n", tparams, inferred)
+ }
+
+ // --- 3 ---
+ // use information from untyped constants
+
+ if traceInference {
+ u.tracef("== untyped arguments: %v", untyped)
+ }
+
+ // Some generic parameters with untyped arguments may have been given a type by now.
+ // Collect all remaining parameters that don't have a type yet and determine the
+ // maximum untyped type for each of those parameters, if possible.
+ var maxUntyped map[*TypeParam]Type // lazily allocated (we may not need it)
+ for _, index := range untyped {
+ tpar := params.At(index).typ.(*TypeParam) // is type parameter by construction of untyped
+ if u.at(tpar) == nil {
+ arg := args[index] // arg corresponding to tpar
+ if maxUntyped == nil {
+ maxUntyped = make(map[*TypeParam]Type)
+ }
+ max := maxUntyped[tpar]
+ if max == nil {
+ max = arg.typ
+ } else {
+ m := maxType(max, arg.typ)
+ if m == nil {
+ check.errorf(arg, CannotInferTypeArgs, "mismatched types %s and %s (cannot infer %s)", max, arg.typ, tpar)
+ return nil
+ }
+ max = m
+ }
+ maxUntyped[tpar] = max
+ }
+ }
+ // maxUntyped contains the maximum untyped type for each type parameter
+ // which doesn't have a type yet. Set the respective default types.
+ for tpar, typ := range maxUntyped {
+ d := Default(typ)
+ assert(isTyped(d))
+ u.set(tpar, d)
+ }
+
+ // --- simplify ---
+
+ // u.inferred(tparams) now contains the incoming type arguments plus any additional type
+ // arguments which were inferred. The inferred non-nil entries may still contain
+ // references to other type parameters found in constraints.
+ // For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int
+ // was given, unification produced the type list [int, []C, *A]. We eliminate the
+ // remaining type parameters by substituting the type parameters in this type list
+ // until nothing changes anymore.
+ inferred = u.inferred(tparams)
+ if debug {
+ for i, targ := range targs {
+ assert(targ == nil || inferred[i] == targ)
+ }
+ }
+
+ // The data structure of each (provided or inferred) type represents a graph, where
+ // each node corresponds to a type and each (directed) vertex points to a component
+ // type. The substitution process described above repeatedly replaces type parameter
+ // nodes in these graphs with the graphs of the types the type parameters stand for,
+ // which creates a new (possibly bigger) graph for each type.
+ // The substitution process will not stop if the replacement graph for a type parameter
+ // also contains that type parameter.
+ // For instance, for [A interface{ *A }], without any type argument provided for A,
+ // unification produces the type list [*A]. Substituting A in *A with the value for
+ // A will lead to infinite expansion by producing [**A], [****A], [********A], etc.,
+ // because the graph A -> *A has a cycle through A.
+ // Generally, cycles may occur across multiple type parameters and inferred types
+ // (for instance, consider [P interface{ *Q }, Q interface{ func(P) }]).
+ // We eliminate cycles by walking the graphs for all type parameters. If a cycle
+ // through a type parameter is detected, killCycles nils out the respective type
+ // (in the inferred list) which kills the cycle, and marks the corresponding type
+ // parameter as not inferred.
+ //
+ // TODO(gri) If useful, we could report the respective cycle as an error. We don't
+ // do this now because type inference will fail anyway, and furthermore,
+ // constraints with cycles of this kind cannot currently be satisfied by
+ // any user-supplied type. But should that change, reporting an error
+ // would be wrong.
+ killCycles(tparams, inferred)
+
+ // dirty tracks the indices of all types that may still contain type parameters.
+ // We know that nil type entries and entries corresponding to provided (non-nil)
+ // type arguments are clean, so exclude them from the start.
+ var dirty []int
+ for i, typ := range inferred {
+ if typ != nil && (i >= len(targs) || targs[i] == nil) {
+ dirty = append(dirty, i)
+ }
+ }
+
+ for len(dirty) > 0 {
+ if traceInference {
+ u.tracef("-- simplify %s ➞ %s", tparams, inferred)
+ }
+ // TODO(gri) Instead of creating a new substMap for each iteration,
+ // provide an update operation for substMaps and only change when
+ // needed. Optimization.
+ smap := makeSubstMap(tparams, inferred)
+ n := 0
+ for _, index := range dirty {
+ t0 := inferred[index]
+ if t1 := check.subst(nopos, t0, smap, nil, check.context()); t1 != t0 {
+ // t0 was simplified to t1.
+ // If t0 was a generic function, but the simplified signature t1 does
+ // not contain any type parameters anymore, the function is not generic
+ // anymore. Remove it's type parameters. (go.dev/issue/59953)
+ // Note that if t0 was a signature, t1 must be a signature, and t1
+ // can only be a generic signature if it originated from a generic
+ // function argument. Those signatures are never defined types and
+ // thus there is no need to call under below.
+ // TODO(gri) Consider doing this in Checker.subst.
+ // Then this would fall out automatically here and also
+ // in instantiation (where we also explicitly nil out
+ // type parameters). See the *Signature TODO in subst.
+ if sig, _ := t1.(*Signature); sig != nil && sig.TypeParams().Len() > 0 && !isParameterized(tparams, sig) {
+ sig.tparams = nil
+ }
+ inferred[index] = t1
+ dirty[n] = index
+ n++
+ }
+ }
+ dirty = dirty[:n]
+ }
+
+ // Once nothing changes anymore, we may still have type parameters left;
+ // e.g., a constraint with core type *P may match a type parameter Q but
+ // we don't have any type arguments to fill in for *P or Q (go.dev/issue/45548).
+ // Don't let such inferences escape; instead treat them as unresolved.
+ for i, typ := range inferred {
+ if typ == nil || isParameterized(tparams, typ) {
+ obj := tparams[i].obj
+ check.errorf(pos, CannotInferTypeArgs, "cannot infer %s (%s)", obj.name, obj.pos)
+ return nil
+ }
+ }
+
+ return
+}
+
+// containsNil reports whether list contains a nil entry.
+func containsNil(list []Type) bool {
+ for _, t := range list {
+ if t == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// renameTParams renames the type parameters in the given type such that each type
+// parameter is given a new identity. renameTParams returns the new type parameters
+// and updated type. If the result type is unchanged from the argument type, none
+// of the type parameters in tparams occurred in the type.
+// If typ is a generic function, type parameters held with typ are not changed and
+// must be updated separately if desired.
+// The positions is only used for debug traces.
+func (check *Checker) renameTParams(pos syntax.Pos, tparams []*TypeParam, typ Type) ([]*TypeParam, Type) {
+ // For the purpose of type inference we must differentiate type parameters
+ // occurring in explicit type or value function arguments from the type
+ // parameters we are solving for via unification because they may be the
+ // same in self-recursive calls:
+ //
+ // func f[P constraint](x P) {
+ // f(x)
+ // }
+ //
+ // In this example, without type parameter renaming, the P used in the
+ // instantiation f[P] has the same pointer identity as the P we are trying
+ // to solve for through type inference. This causes problems for type
+ // unification. Because any such self-recursive call is equivalent to
+ // a mutually recursive call, type parameter renaming can be used to
+ // create separate, disentangled type parameters. The above example
+ // can be rewritten into the following equivalent code:
+ //
+ // func f[P constraint](x P) {
+ // f2(x)
+ // }
+ //
+ // func f2[P2 constraint](x P2) {
+ // f(x)
+ // }
+ //
+ // Type parameter renaming turns the first example into the second
+ // example by renaming the type parameter P into P2.
+ if len(tparams) == 0 {
+ return nil, typ // nothing to do
+ }
+
+ tparams2 := make([]*TypeParam, len(tparams))
+ for i, tparam := range tparams {
+ tname := NewTypeName(tparam.Obj().Pos(), tparam.Obj().Pkg(), tparam.Obj().Name(), nil)
+ tparams2[i] = NewTypeParam(tname, nil)
+ tparams2[i].index = tparam.index // == i
+ }
+
+ renameMap := makeRenameMap(tparams, tparams2)
+ for i, tparam := range tparams {
+ tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil, check.context())
+ }
+
+ return tparams2, check.subst(pos, typ, renameMap, nil, check.context())
+}
+
+// typeParamsString produces a string containing all the type parameter names
+// in list suitable for human consumption.
+func typeParamsString(list []*TypeParam) string {
+ // common cases
+ n := len(list)
+ switch n {
+ case 0:
+ return ""
+ case 1:
+ return list[0].obj.name
+ case 2:
+ return list[0].obj.name + " and " + list[1].obj.name
+ }
+
+ // general case (n > 2)
+ var buf strings.Builder
+ for i, tname := range list[:n-1] {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(tname.obj.name)
+ }
+ buf.WriteString(", and ")
+ buf.WriteString(list[n-1].obj.name)
+ return buf.String()
+}
+
+// isParameterized reports whether typ contains any of the type parameters of tparams.
+// If typ is a generic function, isParameterized ignores the type parameter declarations;
+// it only considers the signature proper (incoming and result parameters).
+func isParameterized(tparams []*TypeParam, typ Type) bool {
+ w := tpWalker{
+ tparams: tparams,
+ seen: make(map[Type]bool),
+ }
+ return w.isParameterized(typ)
+}
+
+type tpWalker struct {
+ tparams []*TypeParam
+ seen map[Type]bool
+}
+
+func (w *tpWalker) isParameterized(typ Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case *Basic:
+ // nothing to do
+
+ case *Alias:
+ return w.isParameterized(Unalias(t))
+
+ case *Array:
+ return w.isParameterized(t.elem)
+
+ case *Slice:
+ return w.isParameterized(t.elem)
+
+ case *Struct:
+ return w.varList(t.fields)
+
+ case *Pointer:
+ return w.isParameterized(t.base)
+
+ case *Tuple:
+ // This case does not occur from within isParameterized
+ // because tuples only appear in signatures where they
+ // are handled explicitly. But isParameterized is also
+ // called by Checker.callExpr with a function result tuple
+ // if instantiation failed (go.dev/issue/59890).
+ return t != nil && w.varList(t.vars)
+
+ case *Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+ // Similarly, the receiver of a method may declare (rather than
+ // use) type parameters, we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return t.params != nil && w.varList(t.params.vars) || t.results != nil && w.varList(t.results.vars)
+
+ case *Interface:
+ tset := t.typeSet()
+ for _, m := range tset.methods {
+ if w.isParameterized(m.typ) {
+ return true
+ }
+ }
+ return tset.is(func(t *term) bool {
+ return t != nil && w.isParameterized(t.typ)
+ })
+
+ case *Map:
+ return w.isParameterized(t.key) || w.isParameterized(t.elem)
+
+ case *Chan:
+ return w.isParameterized(t.elem)
+
+ case *Named:
+ for _, t := range t.TypeArgs().list() {
+ if w.isParameterized(t) {
+ return true
+ }
+ }
+
+ case *TypeParam:
+ return tparamIndex(w.tparams, t) >= 0
+
+ default:
+ panic(fmt.Sprintf("unexpected %T", typ))
+ }
+
+ return false
+}
+
+func (w *tpWalker) varList(list []*Var) bool {
+ for _, v := range list {
+ if w.isParameterized(v.typ) {
+ return true
+ }
+ }
+ return false
+}
+
+// If the type parameter has a single specific type S, coreTerm returns (S, true).
+// Otherwise, if tpar has a core type T, it returns a term corresponding to that
+// core type and false. In that case, if any term of tpar has a tilde, the core
+// term has a tilde. In all other cases coreTerm returns (nil, false).
+func coreTerm(tpar *TypeParam) (*term, bool) {
+ n := 0
+ var single *term // valid if n == 1
+ var tilde bool
+ tpar.is(func(t *term) bool {
+ if t == nil {
+ assert(n == 0)
+ return false // no terms
+ }
+ n++
+ single = t
+ if t.tilde {
+ tilde = true
+ }
+ return true
+ })
+ if n == 1 {
+ if debug {
+ assert(debug && under(single.typ) == coreType(tpar))
+ }
+ return single, true
+ }
+ if typ := coreType(tpar); typ != nil {
+ // A core type is always an underlying type.
+ // If any term of tpar has a tilde, we don't
+ // have a precise core type and we must return
+ // a tilde as well.
+ return &term{tilde, typ}, false
+ }
+ return nil, false
+}
+
+// killCycles walks through the given type parameters and looks for cycles
+// created by type parameters whose inferred types refer back to that type
+// parameter, either directly or indirectly. If such a cycle is detected,
+// it is killed by setting the corresponding inferred type to nil.
+//
+// TODO(gri) Determine if we can simply abort inference as soon as we have
+// found a single cycle.
+func killCycles(tparams []*TypeParam, inferred []Type) {
+ w := cycleFinder{tparams, inferred, make(map[Type]bool)}
+ for _, t := range tparams {
+ w.typ(t) // t != nil
+ }
+}
+
+type cycleFinder struct {
+ tparams []*TypeParam
+ inferred []Type
+ seen map[Type]bool
+}
+
+func (w *cycleFinder) typ(typ Type) {
+ if w.seen[typ] {
+ // We have seen typ before. If it is one of the type parameters
+ // in w.tparams, iterative substitution will lead to infinite expansion.
+ // Nil out the corresponding type which effectively kills the cycle.
+ if tpar, _ := typ.(*TypeParam); tpar != nil {
+ if i := tparamIndex(w.tparams, tpar); i >= 0 {
+ // cycle through tpar
+ w.inferred[i] = nil
+ }
+ }
+ // If we don't have one of our type parameters, the cycle is due
+ // to an ordinary recursive type and we can just stop walking it.
+ return
+ }
+ w.seen[typ] = true
+ defer delete(w.seen, typ)
+
+ switch t := typ.(type) {
+ case *Basic:
+ // nothing to do
+
+ case *Alias:
+ w.typ(Unalias(t))
+
+ case *Array:
+ w.typ(t.elem)
+
+ case *Slice:
+ w.typ(t.elem)
+
+ case *Struct:
+ w.varList(t.fields)
+
+ case *Pointer:
+ w.typ(t.base)
+
+ // case *Tuple:
+ // This case should not occur because tuples only appear
+ // in signatures where they are handled explicitly.
+
+ case *Signature:
+ if t.params != nil {
+ w.varList(t.params.vars)
+ }
+ if t.results != nil {
+ w.varList(t.results.vars)
+ }
+
+ case *Union:
+ for _, t := range t.terms {
+ w.typ(t.typ)
+ }
+
+ case *Interface:
+ for _, m := range t.methods {
+ w.typ(m.typ)
+ }
+ for _, t := range t.embeddeds {
+ w.typ(t)
+ }
+
+ case *Map:
+ w.typ(t.key)
+ w.typ(t.elem)
+
+ case *Chan:
+ w.typ(t.elem)
+
+ case *Named:
+ for _, tpar := range t.TypeArgs().list() {
+ w.typ(tpar)
+ }
+
+ case *TypeParam:
+ if i := tparamIndex(w.tparams, t); i >= 0 && w.inferred[i] != nil {
+ w.typ(w.inferred[i])
+ }
+
+ default:
+ panic(fmt.Sprintf("unexpected %T", typ))
+ }
+}
+
+func (w *cycleFinder) varList(list []*Var) {
+ for _, v := range list {
+ w.typ(v.typ)
+ }
+}
+
+// If tpar is a type parameter in list, tparamIndex returns the index
+// of the type parameter in list. Otherwise the result is < 0.
+func tparamIndex(list []*TypeParam, tpar *TypeParam) int {
+ for i, p := range list {
+ if p == tpar {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/src/cmd/compile/internal/types2/initorder.go b/src/cmd/compile/internal/types2/initorder.go
new file mode 100644
index 0000000..6e04172
--- /dev/null
+++ b/src/cmd/compile/internal/types2/initorder.go
@@ -0,0 +1,328 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "container/heap"
+ "fmt"
+ . "internal/types/errors"
+ "sort"
+)
+
+// initOrder computes the Info.InitOrder for package variables.
+func (check *Checker) initOrder() {
+ // An InitOrder may already have been computed if a package is
+ // built from several calls to (*Checker).Files. Clear it.
+ check.Info.InitOrder = check.Info.InitOrder[:0]
+
+ // Compute the object dependency graph and initialize
+ // a priority queue with the list of graph nodes.
+ pq := nodeQueue(dependencyGraph(check.objMap))
+ heap.Init(&pq)
+
+ const debug = false
+ if debug {
+ fmt.Printf("Computing initialization order for %s\n\n", check.pkg)
+ fmt.Println("Object dependency graph:")
+ for obj, d := range check.objMap {
+ // only print objects that may appear in the dependency graph
+ if obj, _ := obj.(dependency); obj != nil {
+ if len(d.deps) > 0 {
+ fmt.Printf("\t%s depends on\n", obj.Name())
+ for dep := range d.deps {
+ fmt.Printf("\t\t%s\n", dep.Name())
+ }
+ } else {
+ fmt.Printf("\t%s has no dependencies\n", obj.Name())
+ }
+ }
+ }
+ fmt.Println()
+
+ fmt.Println("Transposed object dependency graph (functions eliminated):")
+ for _, n := range pq {
+ fmt.Printf("\t%s depends on %d nodes\n", n.obj.Name(), n.ndeps)
+ for p := range n.pred {
+ fmt.Printf("\t\t%s is dependent\n", p.obj.Name())
+ }
+ }
+ fmt.Println()
+
+ fmt.Println("Processing nodes:")
+ }
+
+ // Determine initialization order by removing the highest priority node
+ // (the one with the fewest dependencies) and its edges from the graph,
+ // repeatedly, until there are no nodes left.
+ // In a valid Go program, those nodes always have zero dependencies (after
+ // removing all incoming dependencies), otherwise there are initialization
+ // cycles.
+ emitted := make(map[*declInfo]bool)
+ for len(pq) > 0 {
+ // get the next node
+ n := heap.Pop(&pq).(*graphNode)
+
+ if debug {
+ fmt.Printf("\t%s (src pos %d) depends on %d nodes now\n",
+ n.obj.Name(), n.obj.order(), n.ndeps)
+ }
+
+ // if n still depends on other nodes, we have a cycle
+ if n.ndeps > 0 {
+ cycle := findPath(check.objMap, n.obj, n.obj, make(map[Object]bool))
+ // If n.obj is not part of the cycle (e.g., n.obj->b->c->d->c),
+ // cycle will be nil. Don't report anything in that case since
+ // the cycle is reported when the algorithm gets to an object
+ // in the cycle.
+ // Furthermore, once an object in the cycle is encountered,
+ // the cycle will be broken (dependency count will be reduced
+ // below), and so the remaining nodes in the cycle don't trigger
+ // another error (unless they are part of multiple cycles).
+ if cycle != nil {
+ check.reportCycle(cycle)
+ }
+ // Ok to continue, but the variable initialization order
+ // will be incorrect at this point since it assumes no
+ // cycle errors.
+ }
+
+ // reduce dependency count of all dependent nodes
+ // and update priority queue
+ for p := range n.pred {
+ p.ndeps--
+ heap.Fix(&pq, p.index)
+ }
+
+ // record the init order for variables with initializers only
+ v, _ := n.obj.(*Var)
+ info := check.objMap[v]
+ if v == nil || !info.hasInitializer() {
+ continue
+ }
+
+ // n:1 variable declarations such as: a, b = f()
+ // introduce a node for each lhs variable (here: a, b);
+ // but they all have the same initializer - emit only
+ // one, for the first variable seen
+ if emitted[info] {
+ continue // initializer already emitted, if any
+ }
+ emitted[info] = true
+
+ infoLhs := info.lhs // possibly nil (see declInfo.lhs field comment)
+ if infoLhs == nil {
+ infoLhs = []*Var{v}
+ }
+ init := &Initializer{infoLhs, info.init}
+ check.Info.InitOrder = append(check.Info.InitOrder, init)
+ }
+
+ if debug {
+ fmt.Println()
+ fmt.Println("Initialization order:")
+ for _, init := range check.Info.InitOrder {
+ fmt.Printf("\t%s\n", init)
+ }
+ fmt.Println()
+ }
+}
+
+// findPath returns the (reversed) list of objects []Object{to, ... from}
+// such that there is a path of object dependencies from 'from' to 'to'.
+// If there is no such path, the result is nil.
+func findPath(objMap map[Object]*declInfo, from, to Object, seen map[Object]bool) []Object {
+ if seen[from] {
+ return nil
+ }
+ seen[from] = true
+
+ for d := range objMap[from].deps {
+ if d == to {
+ return []Object{d}
+ }
+ if P := findPath(objMap, d, to, seen); P != nil {
+ return append(P, d)
+ }
+ }
+
+ return nil
+}
+
+// reportCycle reports an error for the given cycle.
+func (check *Checker) reportCycle(cycle []Object) {
+ obj := cycle[0]
+
+ // report a more concise error for self references
+ if len(cycle) == 1 {
+ check.errorf(obj, InvalidInitCycle, "initialization cycle: %s refers to itself", obj.Name())
+ return
+ }
+
+ var err error_
+ err.code = InvalidInitCycle
+ err.errorf(obj, "initialization cycle for %s", obj.Name())
+ // subtle loop: print cycle[i] for i = 0, n-1, n-2, ... 1 for len(cycle) = n
+ for i := len(cycle) - 1; i >= 0; i-- {
+ err.errorf(obj, "%s refers to", obj.Name())
+ obj = cycle[i]
+ }
+ // print cycle[0] again to close the cycle
+ err.errorf(obj, "%s", obj.Name())
+ check.report(&err)
+}
+
+// ----------------------------------------------------------------------------
+// Object dependency graph
+
+// A dependency is an object that may be a dependency in an initialization
+// expression. Only constants, variables, and functions can be dependencies.
+// Constants are here because constant expression cycles are reported during
+// initialization order computation.
+type dependency interface {
+ Object
+ isDependency()
+}
+
+// A graphNode represents a node in the object dependency graph.
+// Each node p in n.pred represents an edge p->n, and each node
+// s in n.succ represents an edge n->s; with a->b indicating that
+// a depends on b.
+type graphNode struct {
+ obj dependency // object represented by this node
+ pred, succ nodeSet // consumers and dependencies of this node (lazily initialized)
+ index int // node index in graph slice/priority queue
+ ndeps int // number of outstanding dependencies before this object can be initialized
+}
+
+// cost returns the cost of removing this node, which involves copying each
+// predecessor to each successor (and vice-versa).
+func (n *graphNode) cost() int {
+ return len(n.pred) * len(n.succ)
+}
+
+type nodeSet map[*graphNode]bool
+
+func (s *nodeSet) add(p *graphNode) {
+ if *s == nil {
+ *s = make(nodeSet)
+ }
+ (*s)[p] = true
+}
+
+// dependencyGraph computes the object dependency graph from the given objMap,
+// with any function nodes removed. The resulting graph contains only constants
+// and variables.
+func dependencyGraph(objMap map[Object]*declInfo) []*graphNode {
+ // M is the dependency (Object) -> graphNode mapping
+ M := make(map[dependency]*graphNode)
+ for obj := range objMap {
+ // only consider nodes that may be an initialization dependency
+ if obj, _ := obj.(dependency); obj != nil {
+ M[obj] = &graphNode{obj: obj}
+ }
+ }
+
+ // compute edges for graph M
+ // (We need to include all nodes, even isolated ones, because they still need
+ // to be scheduled for initialization in correct order relative to other nodes.)
+ for obj, n := range M {
+ // for each dependency obj -> d (= deps[i]), create graph edges n->s and s->n
+ for d := range objMap[obj].deps {
+ // only consider nodes that may be an initialization dependency
+ if d, _ := d.(dependency); d != nil {
+ d := M[d]
+ n.succ.add(d)
+ d.pred.add(n)
+ }
+ }
+ }
+
+ var G, funcG []*graphNode // separate non-functions and functions
+ for _, n := range M {
+ if _, ok := n.obj.(*Func); ok {
+ funcG = append(funcG, n)
+ } else {
+ G = append(G, n)
+ }
+ }
+
+ // remove function nodes and collect remaining graph nodes in G
+ // (Mutually recursive functions may introduce cycles among themselves
+ // which are permitted. Yet such cycles may incorrectly inflate the dependency
+ // count for variables which in turn may not get scheduled for initialization
+ // in correct order.)
+ //
+ // Note that because we recursively copy predecessors and successors
+ // throughout the function graph, the cost of removing a function at
+ // position X is proportional to cost * (len(funcG)-X). Therefore, we should
+ // remove high-cost functions last.
+ sort.Slice(funcG, func(i, j int) bool {
+ return funcG[i].cost() < funcG[j].cost()
+ })
+ for _, n := range funcG {
+ // connect each predecessor p of n with each successor s
+ // and drop the function node (don't collect it in G)
+ for p := range n.pred {
+ // ignore self-cycles
+ if p != n {
+ // Each successor s of n becomes a successor of p, and
+ // each predecessor p of n becomes a predecessor of s.
+ for s := range n.succ {
+ // ignore self-cycles
+ if s != n {
+ p.succ.add(s)
+ s.pred.add(p)
+ }
+ }
+ delete(p.succ, n) // remove edge to n
+ }
+ }
+ for s := range n.succ {
+ delete(s.pred, n) // remove edge to n
+ }
+ }
+
+ // fill in index and ndeps fields
+ for i, n := range G {
+ n.index = i
+ n.ndeps = len(n.succ)
+ }
+
+ return G
+}
+
+// ----------------------------------------------------------------------------
+// Priority queue
+
+// nodeQueue implements the container/heap interface;
+// a nodeQueue may be used as a priority queue.
+type nodeQueue []*graphNode
+
+func (a nodeQueue) Len() int { return len(a) }
+
+func (a nodeQueue) Swap(i, j int) {
+ x, y := a[i], a[j]
+ a[i], a[j] = y, x
+ x.index, y.index = j, i
+}
+
+func (a nodeQueue) Less(i, j int) bool {
+ x, y := a[i], a[j]
+ // nodes are prioritized by number of incoming dependencies (1st key)
+ // and source order (2nd key)
+ return x.ndeps < y.ndeps || x.ndeps == y.ndeps && x.obj.order() < y.obj.order()
+}
+
+func (a *nodeQueue) Push(x interface{}) {
+ panic("unreachable")
+}
+
+func (a *nodeQueue) Pop() interface{} {
+ n := len(*a)
+ x := (*a)[n-1]
+ x.index = -1 // for safety
+ *a = (*a)[:n-1]
+ return x
+}
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
new file mode 100644
index 0000000..e33d4b4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -0,0 +1,366 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements instantiation of generic types
+// through substitution of type parameters by type arguments.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+ . "internal/types/errors"
+)
+
+// Instantiate instantiates the type orig with the given type arguments targs.
+// orig must be a *Named or a *Signature type. If there is no error, the
+// resulting Type is an instantiated type of the same kind (either a *Named or
+// a *Signature). Methods attached to a *Named type are also instantiated, and
+// associated with a new *Func that has the same position as the original
+// method, but nil function scope.
+//
+// If ctxt is non-nil, it may be used to de-duplicate the instance against
+// previous instances with the same identity. As a special case, generic
+// *Signature origin types are only considered identical if they are pointer
+// equivalent, so that instantiating distinct (but possibly identical)
+// signatures will yield different instances. The use of a shared context does
+// not guarantee that identical instances are deduplicated in all cases.
+//
+// If validate is set, Instantiate verifies that the number of type arguments
+// and parameters match, and that the type arguments satisfy their
+// corresponding type constraints. If verification fails, the resulting error
+// may wrap an *ArgumentError indicating which type argument did not satisfy
+// its corresponding type parameter constraint, and why.
+//
+// If validate is not set, Instantiate does not verify the type argument count
+// or whether the type arguments satisfy their constraints. Instantiate is
+// guaranteed to not return an error, but may panic. Specifically, for
+// *Signature types, Instantiate will panic immediately if the type argument
+// count is incorrect; for *Named types, a panic may occur later inside the
+// *Named API.
+func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
+ if ctxt == nil {
+ ctxt = NewContext()
+ }
+ if validate {
+ var tparams []*TypeParam
+ switch t := orig.(type) {
+ case *Named:
+ tparams = t.TypeParams().list()
+ case *Signature:
+ tparams = t.TypeParams().list()
+ }
+ if len(targs) != len(tparams) {
+ return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
+ }
+ if i, err := (*Checker)(nil).verify(nopos, tparams, targs, ctxt); err != nil {
+ return nil, &ArgumentError{i, err}
+ }
+ }
+
+ inst := (*Checker)(nil).instance(nopos, orig, targs, nil, ctxt)
+ return inst, nil
+}
+
+// instance instantiates the given original (generic) function or type with the
+// provided type arguments and returns the resulting instance. If an identical
+// instance exists already in the given contexts, it returns that instance,
+// otherwise it creates a new one.
+//
+// If expanding is non-nil, it is the Named instance type currently being
+// expanded. If ctxt is non-nil, it is the context associated with the current
+// type-checking pass or call to Instantiate. At least one of expanding or ctxt
+// must be non-nil.
+//
+// For Named types the resulting instance may be unexpanded.
+func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expanding *Named, ctxt *Context) (res Type) {
+ // The order of the contexts below matters: we always prefer instances in the
+ // expanding instance context in order to preserve reference cycles.
+ //
+ // Invariant: if expanding != nil, the returned instance will be the instance
+ // recorded in expanding.inst.ctxt.
+ var ctxts []*Context
+ if expanding != nil {
+ ctxts = append(ctxts, expanding.inst.ctxt)
+ }
+ if ctxt != nil {
+ ctxts = append(ctxts, ctxt)
+ }
+ assert(len(ctxts) > 0)
+
+ // Compute all hashes; hashes may differ across contexts due to different
+ // unique IDs for Named types within the hasher.
+ hashes := make([]string, len(ctxts))
+ for i, ctxt := range ctxts {
+ hashes[i] = ctxt.instanceHash(orig, targs)
+ }
+
+ // If local is non-nil, updateContexts return the type recorded in
+ // local.
+ updateContexts := func(res Type) Type {
+ for i := len(ctxts) - 1; i >= 0; i-- {
+ res = ctxts[i].update(hashes[i], orig, targs, res)
+ }
+ return res
+ }
+
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ for i, ctxt := range ctxts {
+ if inst := ctxt.lookup(hashes[i], orig, targs); inst != nil {
+ return updateContexts(inst)
+ }
+ }
+
+ switch orig := orig.(type) {
+ case *Named:
+ res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily
+
+ case *Signature:
+ assert(expanding == nil) // function instances cannot be reached from Named types
+
+ tparams := orig.TypeParams()
+ // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here)
+ if !check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) {
+ return Typ[Invalid]
+ }
+ if tparams.Len() == 0 {
+ return orig // nothing to do (minor optimization)
+ }
+ sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), nil, ctxt).(*Signature)
+ // If the signature doesn't use its type parameters, subst
+ // will not make a copy. In that case, make a copy now (so
+ // we can set tparams to nil w/o causing side-effects).
+ if sig == orig {
+ copy := *sig
+ sig = &copy
+ }
+ // After instantiating a generic signature, it is not generic
+ // anymore; we need to set tparams to nil.
+ sig.tparams = nil
+ res = sig
+
+ default:
+ // only types and functions can be generic
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig))
+ }
+
+ // Update all contexts; it's possible that we've lost a race.
+ return updateContexts(res)
+}
+
+// validateTArgLen checks that the number of type arguments (got) matches the
+// number of type parameters (want); if they don't match an error is reported.
+// If validation fails and check is nil, validateTArgLen panics.
+func (check *Checker) validateTArgLen(pos syntax.Pos, name string, want, got int) bool {
+ var qual string
+ switch {
+ case got < want:
+ qual = "not enough"
+ case got > want:
+ qual = "too many"
+ default:
+ return true
+ }
+
+ msg := check.sprintf("%s type arguments for type %s: have %d, want %d", qual, name, got, want)
+ if check != nil {
+ check.error(atPos(pos), WrongTypeArgCount, msg)
+ return false
+ }
+
+ panic(fmt.Sprintf("%v: %s", pos, msg))
+}
+
+func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
+ smap := makeSubstMap(tparams, targs)
+ for i, tpar := range tparams {
+ // Ensure that we have a (possibly implicit) interface as type bound (go.dev/issue/51048).
+ tpar.iface()
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiated
+ // the parameterized type.
+ bound := check.subst(pos, tpar.bound, smap, nil, ctxt)
+ var cause string
+ if !check.implements(pos, targs[i], bound, true, &cause) {
+ return i, errors.New(cause)
+ }
+ }
+ return -1, nil
+}
+
+// implements checks if V implements T. The receiver may be nil if implements
+// is called through an exported API call such as AssignableTo. If constraint
+// is set, T is a type constraint.
+//
+// If the provided cause is non-nil, it may be set to an error string
+// explaining why V does not implement (or satisfy, for constraints) T.
+func (check *Checker) implements(pos syntax.Pos, V, T Type, constraint bool, cause *string) bool {
+ Vu := under(V)
+ Tu := under(T)
+ if !isValid(Vu) || !isValid(Tu) {
+ return true // avoid follow-on errors
+ }
+ if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) {
+ return true // avoid follow-on errors (see go.dev/issue/49541 for an example)
+ }
+
+ verb := "implement"
+ if constraint {
+ verb = "satisfy"
+ }
+
+ Ti, _ := Tu.(*Interface)
+ if Ti == nil {
+ if cause != nil {
+ var detail string
+ if isInterfacePtr(Tu) {
+ detail = check.sprintf("type %s is pointer to interface, not interface", T)
+ } else {
+ detail = check.sprintf("%s is not an interface", T)
+ }
+ *cause = check.sprintf("%s does not %s %s (%s)", V, verb, T, detail)
+ }
+ return false
+ }
+
+ // Every type satisfies the empty interface.
+ if Ti.Empty() {
+ return true
+ }
+ // T is not the empty interface (i.e., the type set of T is restricted)
+
+ // An interface V with an empty type set satisfies any interface.
+ // (The empty set is a subset of any set.)
+ Vi, _ := Vu.(*Interface)
+ if Vi != nil && Vi.typeSet().IsEmpty() {
+ return true
+ }
+ // type set of V is not empty
+
+ // No type with non-empty type set satisfies the empty type set.
+ if Ti.typeSet().IsEmpty() {
+ if cause != nil {
+ *cause = check.sprintf("cannot %s %s (empty type set)", verb, T)
+ }
+ return false
+ }
+
+ // V must implement T's methods, if any.
+ if m, _ := check.missingMethod(V, T, true, Identical, cause); m != nil /* !Implements(V, T) */ {
+ if cause != nil {
+ *cause = check.sprintf("%s does not %s %s %s", V, verb, T, *cause)
+ }
+ return false
+ }
+
+ // Only check comparability if we don't have a more specific error.
+ checkComparability := func() bool {
+ if !Ti.IsComparable() {
+ return true
+ }
+ // If T is comparable, V must be comparable.
+ // If V is strictly comparable, we're done.
+ if comparable(V, false /* strict comparability */, nil, nil) {
+ return true
+ }
+ // For constraint satisfaction, use dynamic (spec) comparability
+ // so that ordinary, non-type parameter interfaces implement comparable.
+ if constraint && comparable(V, true /* spec comparability */, nil, nil) {
+ // V is comparable if we are at Go 1.20 or higher.
+ if check == nil || check.allowVersion(check.pkg, atPos(pos), go1_20) { // atPos needed so that go/types generate passes
+ return true
+ }
+ if cause != nil {
+ *cause = check.sprintf("%s to %s comparable requires go1.20 or later", V, verb)
+ }
+ return false
+ }
+ if cause != nil {
+ *cause = check.sprintf("%s does not %s comparable", V, verb)
+ }
+ return false
+ }
+
+ // V must also be in the set of types of T, if any.
+ // Constraints with empty type sets were already excluded above.
+ if !Ti.typeSet().hasTerms() {
+ return checkComparability() // nothing to do
+ }
+
+ // If V is itself an interface, each of its possible types must be in the set
+ // of T types (i.e., the V type set must be a subset of the T type set).
+ // Interfaces V with empty type sets were already excluded above.
+ if Vi != nil {
+ if !Vi.typeSet().subsetOf(Ti.typeSet()) {
+ // TODO(gri) report which type is missing
+ if cause != nil {
+ *cause = check.sprintf("%s does not %s %s", V, verb, T)
+ }
+ return false
+ }
+ return checkComparability()
+ }
+
+ // Otherwise, V's type must be included in the iface type set.
+ var alt Type
+ if Ti.typeSet().is(func(t *term) bool {
+ if !t.includes(V) {
+ // If V ∉ t.typ but V ∈ ~t.typ then remember this type
+ // so we can suggest it as an alternative in the error
+ // message.
+ if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) {
+ tt := *t
+ tt.tilde = true
+ if tt.includes(V) {
+ alt = t.typ
+ }
+ }
+ return true
+ }
+ return false
+ }) {
+ if cause != nil {
+ var detail string
+ switch {
+ case alt != nil:
+ detail = check.sprintf("possibly missing ~ for %s in %s", alt, T)
+ case mentions(Ti, V):
+ detail = check.sprintf("%s mentions %s, but %s is not in the type set of %s", T, V, V, T)
+ default:
+ detail = check.sprintf("%s missing in %s", V, Ti.typeSet().terms)
+ }
+ *cause = check.sprintf("%s does not %s %s (%s)", V, verb, T, detail)
+ }
+ return false
+ }
+
+ return checkComparability()
+}
+
+// mentions reports whether type T "mentions" typ in an (embedded) element or term
+// of T (whether typ is in the type set of T or not). For better error messages.
+func mentions(T, typ Type) bool {
+ switch T := T.(type) {
+ case *Interface:
+ for _, e := range T.embeddeds {
+ if mentions(e, typ) {
+ return true
+ }
+ }
+ case *Union:
+ for _, t := range T.terms {
+ if mentions(t.typ, typ) {
+ return true
+ }
+ }
+ default:
+ if Identical(T, typ) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/instantiate_test.go b/src/cmd/compile/internal/types2/instantiate_test.go
new file mode 100644
index 0000000..af772b9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/instantiate_test.go
@@ -0,0 +1,232 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package types2_test
+
+import (
+ . "cmd/compile/internal/types2"
+ "strings"
+ "testing"
+)
+
+func TestInstantiateEquality(t *testing.T) {
+ emptySignature := NewSignatureType(nil, nil, nil, nil, nil, false)
+ tests := []struct {
+ src string
+ name1 string
+ targs1 []Type
+ name2 string
+ targs2 []Type
+ wantEqual bool
+ }{
+ {
+ "package basictype; type T[P any] int",
+ "T", []Type{Typ[Int]},
+ "T", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package differenttypeargs; type T[P any] int",
+ "T", []Type{Typ[Int]},
+ "T", []Type{Typ[String]},
+ false,
+ },
+ {
+ "package typeslice; type T[P any] int",
+ "T", []Type{NewSlice(Typ[Int])},
+ "T", []Type{NewSlice(Typ[Int])},
+ true,
+ },
+ {
+ // interface{interface{...}} is equivalent to interface{...}
+ "package equivalentinterfaces; type T[P any] int",
+ "T", []Type{
+ NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil),
+ },
+ "T", []Type{
+ NewInterfaceType(
+ nil,
+ []Type{
+ NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil),
+ },
+ ),
+ },
+ true,
+ },
+ {
+ // int|string is equivalent to string|int
+ "package equivalenttypesets; type T[P any] int",
+ "T", []Type{
+ NewInterfaceType(nil, []Type{
+ NewUnion([]*Term{NewTerm(false, Typ[Int]), NewTerm(false, Typ[String])}),
+ }),
+ },
+ "T", []Type{
+ NewInterfaceType(nil, []Type{
+ NewUnion([]*Term{NewTerm(false, Typ[String]), NewTerm(false, Typ[Int])}),
+ }),
+ },
+ true,
+ },
+ {
+ "package basicfunc; func F[P any]() {}",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package funcslice; func F[P any]() {}",
+ "F", []Type{NewSlice(Typ[Int])},
+ "F", []Type{NewSlice(Typ[Int])},
+ true,
+ },
+ {
+ "package funcwithparams; func F[P any](x string) float64 { return 0 }",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package differentfuncargs; func F[P any](x string) float64 { return 0 }",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[String]},
+ false,
+ },
+ {
+ "package funcequality; func F1[P any](x int) {}; func F2[Q any](x int) {}",
+ "F1", []Type{Typ[Int]},
+ "F2", []Type{Typ[Int]},
+ false,
+ },
+ {
+ "package funcsymmetry; func F1[P any](x P) {}; func F2[Q any](x Q) {}",
+ "F1", []Type{Typ[Int]},
+ "F2", []Type{Typ[Int]},
+ false,
+ },
+ }
+
+ for _, test := range tests {
+ pkg := mustTypecheck(test.src, nil, nil)
+
+ t.Run(pkg.Name(), func(t *testing.T) {
+ ctxt := NewContext()
+
+ T1 := pkg.Scope().Lookup(test.name1).Type()
+ res1, err := Instantiate(ctxt, T1, test.targs1, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ T2 := pkg.Scope().Lookup(test.name2).Type()
+ res2, err := Instantiate(ctxt, T2, test.targs2, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if gotEqual := res1 == res2; gotEqual != test.wantEqual {
+ t.Errorf("%s == %s: %t, want %t", res1, res2, gotEqual, test.wantEqual)
+ }
+ })
+ }
+}
+
+func TestInstantiateNonEquality(t *testing.T) {
+ const src = "package p; type T[P any] int"
+ pkg1 := mustTypecheck(src, nil, nil)
+ pkg2 := mustTypecheck(src, nil, nil)
+ // We consider T1 and T2 to be distinct types, so their instances should not
+ // be deduplicated by the context.
+ T1 := pkg1.Scope().Lookup("T").Type().(*Named)
+ T2 := pkg2.Scope().Lookup("T").Type().(*Named)
+ ctxt := NewContext()
+ res1, err := Instantiate(ctxt, T1, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res2, err := Instantiate(ctxt, T2, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res1 == res2 {
+ t.Errorf("instance from pkg1 (%s) is pointer-equivalent to instance from pkg2 (%s)", res1, res2)
+ }
+ if Identical(res1, res2) {
+ t.Errorf("instance from pkg1 (%s) is identical to instance from pkg2 (%s)", res1, res2)
+ }
+}
+
+func TestMethodInstantiation(t *testing.T) {
+ const prefix = `package p
+
+type T[P any] struct{}
+
+var X T[int]
+
+`
+ tests := []struct {
+ decl string
+ want string
+ }{
+ {"func (r T[P]) m() P", "func (T[int]).m() int"},
+ {"func (r T[P]) m(P)", "func (T[int]).m(int)"},
+ {"func (r *T[P]) m(P)", "func (*T[int]).m(int)"},
+ {"func (r T[P]) m() T[P]", "func (T[int]).m() T[int]"},
+ {"func (r T[P]) m(T[P])", "func (T[int]).m(T[int])"},
+ {"func (r T[P]) m(T[P], P, string)", "func (T[int]).m(T[int], int, string)"},
+ {"func (r T[P]) m(T[P], T[string], T[int])", "func (T[int]).m(T[int], T[string], T[int])"},
+ }
+
+ for _, test := range tests {
+ src := prefix + test.decl
+ pkg := mustTypecheck(src, nil, nil)
+ typ := NewPointer(pkg.Scope().Lookup("X").Type())
+ obj, _, _ := LookupFieldOrMethod(typ, false, pkg, "m")
+ m, _ := obj.(*Func)
+ if m == nil {
+ t.Fatalf(`LookupFieldOrMethod(%s, "m") = %v, want func m`, typ, obj)
+ }
+ if got := ObjectString(m, RelativeTo(pkg)); got != test.want {
+ t.Errorf("instantiated %q, want %q", got, test.want)
+ }
+ }
+}
+
+func TestImmutableSignatures(t *testing.T) {
+ const src = `package p
+
+type T[P any] struct{}
+
+func (T[P]) m() {}
+
+var _ T[int]
+`
+ pkg := mustTypecheck(src, nil, nil)
+ typ := pkg.Scope().Lookup("T").Type().(*Named)
+ obj, _, _ := LookupFieldOrMethod(typ, false, pkg, "m")
+ if obj == nil {
+ t.Fatalf(`LookupFieldOrMethod(%s, "m") = %v, want func m`, typ, obj)
+ }
+
+ // Verify that the original method is not mutated by instantiating T (this
+ // bug manifested when subst did not return a new signature).
+ want := "func (T[P]).m()"
+ if got := stripAnnotations(ObjectString(obj, RelativeTo(pkg))); got != want {
+ t.Errorf("instantiated %q, want %q", got, want)
+ }
+}
+
+// Copied from errors.go.
+func stripAnnotations(s string) string {
+ var buf strings.Builder
+ for _, r := range s {
+ // strip #'s and subscript digits
+ if r < '₀' || '₀'+10 <= r { // '₀' == U+2080
+ buf.WriteRune(r)
+ }
+ }
+ if buf.Len() < len(s) {
+ return buf.String()
+ }
+ return s
+}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
new file mode 100644
index 0000000..4072098
--- /dev/null
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -0,0 +1,186 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// An Interface represents an interface type.
+type Interface struct {
+ check *Checker // for error reporting; nil once type set is computed
+ methods []*Func // ordered list of explicitly declared methods
+ embeddeds []Type // ordered list of explicitly embedded elements
+ embedPos *[]syntax.Pos // positions of embedded elements; or nil (for error messages) - use pointer to save space
+ implicit bool // interface is wrapper for type set literal (non-interface T, ~T, or A|B)
+ complete bool // indicates that all fields (except for tset) are set up
+
+ tset *_TypeSet // type set described by this interface, computed lazily
+}
+
+// typeSet returns the type set for interface t.
+func (t *Interface) typeSet() *_TypeSet { return computeInterfaceTypeSet(t.check, nopos, t) }
+
+// emptyInterface represents the empty interface
+var emptyInterface = Interface{complete: true, tset: &topTypeSet}
+
+// NewInterfaceType returns a new interface for the given methods and embedded types.
+// NewInterfaceType takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
+func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
+ if len(methods) == 0 && len(embeddeds) == 0 {
+ return &emptyInterface
+ }
+
+ // set method receivers if necessary
+ typ := (*Checker)(nil).newInterface()
+ for _, m := range methods {
+ if sig := m.typ.(*Signature); sig.recv == nil {
+ sig.recv = NewVar(m.pos, m.pkg, "", typ)
+ }
+ }
+
+ // sort for API stability
+ sortMethods(methods)
+
+ typ.methods = methods
+ typ.embeddeds = embeddeds
+ typ.complete = true
+
+ return typ
+}
+
+// check may be nil
+func (check *Checker) newInterface() *Interface {
+ typ := &Interface{check: check}
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+// MarkImplicit marks the interface t as implicit, meaning this interface
+// corresponds to a constraint literal such as ~T or A|B without explicit
+// interface embedding. MarkImplicit should be called before any concurrent use
+// of implicit interfaces.
+func (t *Interface) MarkImplicit() {
+ t.implicit = true
+}
+
+// NumExplicitMethods returns the number of explicitly declared methods of interface t.
+func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
+
+// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
+
+// NumEmbeddeds returns the number of embedded types in interface t.
+func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
+
+// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
+func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
+
+// NumMethods returns the total number of methods of interface t.
+func (t *Interface) NumMethods() int { return t.typeSet().NumMethods() }
+
+// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) }
+
+// Empty reports whether t is the empty interface.
+func (t *Interface) Empty() bool { return t.typeSet().IsAll() }
+
+// IsComparable reports whether each type in interface t's type set is comparable.
+func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable(nil) }
+
+// IsMethodSet reports whether the interface t is fully described by its method set.
+func (t *Interface) IsMethodSet() bool { return t.typeSet().IsMethodSet() }
+
+// IsImplicit reports whether the interface t is a wrapper for a type set literal.
+func (t *Interface) IsImplicit() bool { return t.implicit }
+
+func (t *Interface) Underlying() Type { return t }
+func (t *Interface) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (t *Interface) cleanup() {
+ t.typeSet() // any interface that escapes type checking must be safe for concurrent use
+ t.check = nil
+ t.embedPos = nil
+}
+
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *TypeName) {
+ addEmbedded := func(pos syntax.Pos, typ Type) {
+ ityp.embeddeds = append(ityp.embeddeds, typ)
+ if ityp.embedPos == nil {
+ ityp.embedPos = new([]syntax.Pos)
+ }
+ *ityp.embedPos = append(*ityp.embedPos, pos)
+ }
+
+ for _, f := range iface.MethodList {
+ if f.Name == nil {
+ addEmbedded(atPos(f.Type), parseUnion(check, f.Type))
+ continue
+ }
+ // f.Name != nil
+
+ // We have a method with name f.Name.
+ name := f.Name.Value
+ if name == "_" {
+ check.error(f.Name, BlankIfaceMethod, "methods must have a unique non-blank name")
+ continue // ignore
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if isValid(typ) {
+ check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ if named := asNamed(def.typ); named != nil {
+ recvTyp = named
+ }
+ }
+ sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
+ check.recordDef(f.Name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // All methods and embedded elements for this interface are collected;
+ // i.e., this interface may be used in a type set computation.
+ ityp.complete = true
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.tset = &topTypeSet
+ return
+ }
+
+ // sort for API stability
+ // (don't sort embeddeds: they must correspond to *embedPos entries)
+ sortMethods(ityp.methods)
+
+ // Compute type set as soon as possible to report any errors.
+ // Subsequent uses of type sets will use this computed type
+ // set and won't need to pass in a *Checker.
+ check.later(func() {
+ computeInterfaceTypeSet(check, iface.Pos(), ityp)
+ }).describef(iface, "compute type set for %s", ityp)
+}
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
new file mode 100644
index 0000000..0117571
--- /dev/null
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -0,0 +1,1095 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements tests for various issues.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "internal/testenv"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+func TestIssue5770(t *testing.T) {
+ _, err := typecheck(`package p; type S struct{T}`, nil, nil)
+ const want = "undefined: T"
+ if err == nil || !strings.Contains(err.Error(), want) {
+ t.Errorf("got: %v; want: %s", err, want)
+ }
+}
+
+func TestIssue5849(t *testing.T) {
+ src := `
+package p
+var (
+ s uint
+ _ = uint8(8)
+ _ = uint16(16) << s
+ _ = uint32(32 << s)
+ _ = uint64(64 << s + s)
+ _ = (interface{})("foo")
+ _ = (interface{})(nil)
+)`
+ types := make(map[syntax.Expr]TypeAndValue)
+ mustTypecheck(src, nil, &Info{Types: types})
+
+ for x, tv := range types {
+ var want Type
+ switch x := x.(type) {
+ case *syntax.BasicLit:
+ switch x.Value {
+ case `8`:
+ want = Typ[Uint8]
+ case `16`:
+ want = Typ[Uint16]
+ case `32`:
+ want = Typ[Uint32]
+ case `64`:
+ want = Typ[Uint] // because of "+ s", s is of type uint
+ case `"foo"`:
+ want = Typ[String]
+ }
+ case *syntax.Name:
+ if x.Value == "nil" {
+ want = NewInterfaceType(nil, nil) // interface{} (for now, go/types types this as "untyped nil")
+ }
+ }
+ if want != nil && !Identical(tv.Type, want) {
+ t.Errorf("got %s; want %s", tv.Type, want)
+ }
+ }
+}
+
+func TestIssue6413(t *testing.T) {
+ src := `
+package p
+func f() int {
+ defer f()
+ go f()
+ return 0
+}
+`
+ types := make(map[syntax.Expr]TypeAndValue)
+ mustTypecheck(src, nil, &Info{Types: types})
+
+ want := Typ[Int]
+ n := 0
+ for x, tv := range types {
+ if _, ok := x.(*syntax.CallExpr); ok {
+ if tv.Type != want {
+ t.Errorf("%s: got %s; want %s", x.Pos(), tv.Type, want)
+ }
+ n++
+ }
+ }
+
+ if n != 2 {
+ t.Errorf("got %d CallExprs; want 2", n)
+ }
+}
+
+func TestIssue7245(t *testing.T) {
+ src := `
+package p
+func (T) m() (res bool) { return }
+type T struct{} // receiver type after method declaration
+`
+ f := mustParse(src)
+
+ var conf Config
+ defs := make(map[*syntax.Name]Object)
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Defs: defs})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m := f.DeclList[0].(*syntax.FuncDecl)
+ res1 := defs[m.Name].(*Func).Type().(*Signature).Results().At(0)
+ res2 := defs[m.Type.ResultList[0].Name].(*Var)
+
+ if res1 != res2 {
+ t.Errorf("got %s (%p) != %s (%p)", res1, res2, res1, res2)
+ }
+}
+
+// This tests that uses of existing vars on the LHS of an assignment
+// are Uses, not Defs; and also that the (illegal) use of a non-var on
+// the LHS of an assignment is a Use nonetheless.
+func TestIssue7827(t *testing.T) {
+ const src = `
+package p
+func _() {
+ const w = 1 // defs w
+ x, y := 2, 3 // defs x, y
+ w, x, z := 4, 5, 6 // uses w, x, defs z; error: cannot assign to w
+ _, _, _ = x, y, z // uses x, y, z
+}
+`
+ const want = `L3 defs func p._()
+L4 defs const w untyped int
+L5 defs var x int
+L5 defs var y int
+L6 defs var z int
+L6 uses const w untyped int
+L6 uses var x int
+L7 uses var x int
+L7 uses var y int
+L7 uses var z int`
+
+ // don't abort at the first error
+ conf := Config{Error: func(err error) { t.Log(err) }}
+ defs := make(map[*syntax.Name]Object)
+ uses := make(map[*syntax.Name]Object)
+ _, err := typecheck(src, &conf, &Info{Defs: defs, Uses: uses})
+ if s := err.Error(); !strings.HasSuffix(s, "cannot assign to w") {
+ t.Errorf("Check: unexpected error: %s", s)
+ }
+
+ var facts []string
+ for id, obj := range defs {
+ if obj != nil {
+ fact := fmt.Sprintf("L%d defs %s", id.Pos().Line(), obj)
+ facts = append(facts, fact)
+ }
+ }
+ for id, obj := range uses {
+ fact := fmt.Sprintf("L%d uses %s", id.Pos().Line(), obj)
+ facts = append(facts, fact)
+ }
+ sort.Strings(facts)
+
+ got := strings.Join(facts, "\n")
+ if got != want {
+ t.Errorf("Unexpected defs/uses\ngot:\n%s\nwant:\n%s", got, want)
+ }
+}
+
+// This tests that the package associated with the types2.Object.Pkg method
+// is the type's package independent of the order in which the imports are
+// listed in the sources src1, src2 below.
+// The actual issue is in go/internal/gcimporter which has a corresponding
+// test; we leave this test here to verify correct behavior at the go/types
+// level.
+func TestIssue13898(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ const src0 = `
+package main
+
+import "go/types"
+
+func main() {
+ var info types.Info
+ for _, obj := range info.Uses {
+ _ = obj.Pkg()
+ }
+}
+`
+ // like src0, but also imports go/importer
+ const src1 = `
+package main
+
+import (
+ "go/types"
+ _ "go/importer"
+)
+
+func main() {
+ var info types.Info
+ for _, obj := range info.Uses {
+ _ = obj.Pkg()
+ }
+}
+`
+ // like src1 but with different import order
+ // (used to fail with this issue)
+ const src2 = `
+package main
+
+import (
+ _ "go/importer"
+ "go/types"
+)
+
+func main() {
+ var info types.Info
+ for _, obj := range info.Uses {
+ _ = obj.Pkg()
+ }
+}
+`
+ f := func(test, src string) {
+ info := &Info{Uses: make(map[*syntax.Name]Object)}
+ mustTypecheck(src, nil, info)
+
+ var pkg *Package
+ count := 0
+ for id, obj := range info.Uses {
+ if id.Value == "Pkg" {
+ pkg = obj.Pkg()
+ count++
+ }
+ }
+ if count != 1 {
+ t.Fatalf("%s: got %d entries named Pkg; want 1", test, count)
+ }
+ if pkg.Name() != "types" {
+ t.Fatalf("%s: got %v; want package types2", test, pkg)
+ }
+ }
+
+ f("src0", src0)
+ f("src1", src1)
+ f("src2", src2)
+}
+
+func TestIssue22525(t *testing.T) {
+ const src = `package p; func f() { var a, b, c, d, e int }`
+
+ got := "\n"
+ conf := Config{Error: func(err error) { got += err.Error() + "\n" }}
+ typecheck(src, &conf, nil) // do not crash
+ want := `
+p:1:27: a declared and not used
+p:1:30: b declared and not used
+p:1:33: c declared and not used
+p:1:36: d declared and not used
+p:1:39: e declared and not used
+`
+ if got != want {
+ t.Errorf("got: %swant: %s", got, want)
+ }
+}
+
+func TestIssue25627(t *testing.T) {
+ const prefix = `package p; import "unsafe"; type P *struct{}; type I interface{}; type T `
+ // The src strings (without prefix) are constructed such that the number of semicolons
+ // plus one corresponds to the number of fields expected in the respective struct.
+ for _, src := range []string{
+ `struct { x Missing }`,
+ `struct { Missing }`,
+ `struct { *Missing }`,
+ `struct { unsafe.Pointer }`,
+ `struct { P }`,
+ `struct { *I }`,
+ `struct { a int; b Missing; *Missing }`,
+ } {
+ f := mustParse(prefix + src)
+
+ conf := Config{Importer: defaultImporter(), Error: func(err error) {}}
+ info := &Info{Types: make(map[syntax.Expr]TypeAndValue)}
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+ if err != nil {
+ if _, ok := err.(Error); !ok {
+ t.Fatal(err)
+ }
+ }
+
+ syntax.Inspect(f, func(n syntax.Node) bool {
+ if decl, _ := n.(*syntax.TypeDecl); decl != nil {
+ if tv, ok := info.Types[decl.Type]; ok && decl.Name.Value == "T" {
+ want := strings.Count(src, ";") + 1
+ if got := tv.Type.(*Struct).NumFields(); got != want {
+ t.Errorf("%s: got %d fields; want %d", src, got, want)
+ }
+ }
+ }
+ return true
+ })
+ }
+}
+
+func TestIssue28005(t *testing.T) {
+ // method names must match defining interface name for this test
+ // (see last comment in this function)
+ sources := [...]string{
+ "package p; type A interface{ A() }",
+ "package p; type B interface{ B() }",
+ "package p; type X interface{ A; B }",
+ }
+
+ // compute original file ASTs
+ var orig [len(sources)]*syntax.File
+ for i, src := range sources {
+ orig[i] = mustParse(src)
+ }
+
+ // run the test for all order permutations of the incoming files
+ for _, perm := range [][len(sources)]int{
+ {0, 1, 2},
+ {0, 2, 1},
+ {1, 0, 2},
+ {1, 2, 0},
+ {2, 0, 1},
+ {2, 1, 0},
+ } {
+ // create file order permutation
+ files := make([]*syntax.File, len(sources))
+ for i := range perm {
+ files[i] = orig[perm[i]]
+ }
+
+ // type-check package with given file order permutation
+ var conf Config
+ info := &Info{Defs: make(map[*syntax.Name]Object)}
+ _, err := conf.Check("", files, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for interface object X
+ var obj Object
+ for name, def := range info.Defs {
+ if name.Value == "X" {
+ obj = def
+ break
+ }
+ }
+ if obj == nil {
+ t.Fatal("object X not found")
+ }
+ iface := obj.Type().Underlying().(*Interface) // object X must be an interface
+
+ // Each iface method m is embedded; and m's receiver base type name
+ // must match the method's name per the choice in the source file.
+ for i := 0; i < iface.NumMethods(); i++ {
+ m := iface.Method(i)
+ recvName := m.Type().(*Signature).Recv().Type().(*Named).Obj().Name()
+ if recvName != m.Name() {
+ t.Errorf("perm %v: got recv %s; want %s", perm, recvName, m.Name())
+ }
+ }
+ }
+}
+
+func TestIssue28282(t *testing.T) {
+ // create type interface { error }
+ et := Universe.Lookup("error").Type()
+ it := NewInterfaceType(nil, []Type{et})
+ // verify that after completing the interface, the embedded method remains unchanged
+ // (interfaces are "completed" lazily now, so the completion happens implicitly when
+ // accessing Method(0))
+ want := et.Underlying().(*Interface).Method(0)
+ got := it.Method(0)
+ if got != want {
+ t.Fatalf("%s.Method(0): got %q (%p); want %q (%p)", it, got, got, want, want)
+ }
+ // verify that lookup finds the same method in both interfaces (redundant check)
+ obj, _, _ := LookupFieldOrMethod(et, false, nil, "Error")
+ if obj != want {
+ t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", et, obj, obj, want, want)
+ }
+ obj, _, _ = LookupFieldOrMethod(it, false, nil, "Error")
+ if obj != want {
+ t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", it, obj, obj, want, want)
+ }
+}
+
+func TestIssue29029(t *testing.T) {
+ f1 := mustParse(`package p; type A interface { M() }`)
+ f2 := mustParse(`package p; var B interface { A }`)
+
+ // printInfo prints the *Func definitions recorded in info, one *Func per line.
+ printInfo := func(info *Info) string {
+ var buf strings.Builder
+ for _, obj := range info.Defs {
+ if fn, ok := obj.(*Func); ok {
+ fmt.Fprintln(&buf, fn)
+ }
+ }
+ return buf.String()
+ }
+
+ // The *Func (method) definitions for package p must be the same
+ // independent on whether f1 and f2 are type-checked together, or
+ // incrementally.
+
+ // type-check together
+ var conf Config
+ info := &Info{Defs: make(map[*syntax.Name]Object)}
+ check := NewChecker(&conf, NewPackage("", "p"), info)
+ if err := check.Files([]*syntax.File{f1, f2}); err != nil {
+ t.Fatal(err)
+ }
+ want := printInfo(info)
+
+ // type-check incrementally
+ info = &Info{Defs: make(map[*syntax.Name]Object)}
+ check = NewChecker(&conf, NewPackage("", "p"), info)
+ if err := check.Files([]*syntax.File{f1}); err != nil {
+ t.Fatal(err)
+ }
+ if err := check.Files([]*syntax.File{f2}); err != nil {
+ t.Fatal(err)
+ }
+ got := printInfo(info)
+
+ if got != want {
+ t.Errorf("\ngot : %swant: %s", got, want)
+ }
+}
+
+func TestIssue34151(t *testing.T) {
+ const asrc = `package a; type I interface{ M() }; type T struct { F interface { I } }`
+ const bsrc = `package b; import "a"; type T struct { F interface { a.I } }; var _ = a.T(T{})`
+
+ a := mustTypecheck(asrc, nil, nil)
+
+ conf := Config{Importer: importHelper{pkg: a}}
+ mustTypecheck(bsrc, &conf, nil)
+}
+
+type importHelper struct {
+ pkg *Package
+ fallback Importer
+}
+
+func (h importHelper) Import(path string) (*Package, error) {
+ if path == h.pkg.Path() {
+ return h.pkg, nil
+ }
+ if h.fallback == nil {
+ return nil, fmt.Errorf("got package path %q; want %q", path, h.pkg.Path())
+ }
+ return h.fallback.Import(path)
+}
+
+// TestIssue34921 verifies that we don't update an imported type's underlying
+// type when resolving an underlying type. Specifically, when determining the
+// underlying type of b.T (which is the underlying type of a.T, which is int)
+// we must not set the underlying type of a.T again since that would lead to
+// a race condition if package b is imported elsewhere, in a package that is
+// concurrently type-checked.
+func TestIssue34921(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error(r)
+ }
+ }()
+
+ var sources = []string{
+ `package a; type T int`,
+ `package b; import "a"; type T a.T`,
+ }
+
+ var pkg *Package
+ for _, src := range sources {
+ conf := Config{Importer: importHelper{pkg: pkg}}
+ pkg = mustTypecheck(src, &conf, nil) // pkg imported by the next package in this test
+ }
+}
+
+func TestIssue43088(t *testing.T) {
+ // type T1 struct {
+ // _ T2
+ // }
+ //
+ // type T2 struct {
+ // _ struct {
+ // _ T2
+ // }
+ // }
+ n1 := NewTypeName(nopos, nil, "T1", nil)
+ T1 := NewNamed(n1, nil, nil)
+ n2 := NewTypeName(nopos, nil, "T2", nil)
+ T2 := NewNamed(n2, nil, nil)
+ s1 := NewStruct([]*Var{NewField(nopos, nil, "_", T2, false)}, nil)
+ T1.SetUnderlying(s1)
+ s2 := NewStruct([]*Var{NewField(nopos, nil, "_", T2, false)}, nil)
+ s3 := NewStruct([]*Var{NewField(nopos, nil, "_", s2, false)}, nil)
+ T2.SetUnderlying(s3)
+
+ // These calls must terminate (no endless recursion).
+ Comparable(T1)
+ Comparable(T2)
+}
+
+func TestIssue44515(t *testing.T) {
+ typ := Unsafe.Scope().Lookup("Pointer").Type()
+
+ got := TypeString(typ, nil)
+ want := "unsafe.Pointer"
+ if got != want {
+ t.Errorf("got %q; want %q", got, want)
+ }
+
+ qf := func(pkg *Package) string {
+ if pkg == Unsafe {
+ return "foo"
+ }
+ return ""
+ }
+ got = TypeString(typ, qf)
+ want = "foo.Pointer"
+ if got != want {
+ t.Errorf("got %q; want %q", got, want)
+ }
+}
+
+func TestIssue43124(t *testing.T) {
+ // TODO(rFindley) move this to testdata by enhancing support for importing.
+
+ testenv.MustHaveGoBuild(t) // The go command is needed for the importer to determine the locations of stdlib .a files.
+
+ // All involved packages have the same name (template). Error messages should
+ // disambiguate between text/template and html/template by printing the full
+ // path.
+ const (
+ asrc = `package a; import "text/template"; func F(template.Template) {}; func G(int) {}`
+ bsrc = `
+package b
+
+import (
+ "a"
+ "html/template"
+)
+
+func _() {
+ // Packages should be fully qualified when there is ambiguity within the
+ // error string itself.
+ a.F(template /* ERRORx "cannot use.*html/template.* as .*text/template" */ .Template{})
+}
+`
+ csrc = `
+package c
+
+import (
+ "a"
+ "fmt"
+ "html/template"
+)
+
+// go.dev/issue/46905: make sure template is not the first package qualified.
+var _ fmt.Stringer = 1 // ERRORx "cannot use 1.*as fmt\\.Stringer"
+
+// Packages should be fully qualified when there is ambiguity in reachable
+// packages. In this case both a (and for that matter html/template) import
+// text/template.
+func _() { a.G(template /* ERRORx "cannot use .*html/template.*Template" */ .Template{}) }
+`
+
+ tsrc = `
+package template
+
+import "text/template"
+
+type T int
+
+// Verify that the current package name also causes disambiguation.
+var _ T = template /* ERRORx "cannot use.*text/template.* as T value" */.Template{}
+`
+ )
+
+ a := mustTypecheck(asrc, nil, nil)
+ imp := importHelper{pkg: a, fallback: defaultImporter()}
+
+ withImporter := func(cfg *Config) {
+ cfg.Importer = imp
+ }
+
+ testFiles(t, []string{"b.go"}, [][]byte{[]byte(bsrc)}, 0, false, withImporter)
+ testFiles(t, []string{"c.go"}, [][]byte{[]byte(csrc)}, 0, false, withImporter)
+ testFiles(t, []string{"t.go"}, [][]byte{[]byte(tsrc)}, 0, false, withImporter)
+}
+
+func TestIssue50646(t *testing.T) {
+ anyType := Universe.Lookup("any").Type()
+ comparableType := Universe.Lookup("comparable").Type()
+
+ if !Comparable(anyType) {
+ t.Error("any is not a comparable type")
+ }
+ if !Comparable(comparableType) {
+ t.Error("comparable is not a comparable type")
+ }
+
+ if Implements(anyType, comparableType.Underlying().(*Interface)) {
+ t.Error("any implements comparable")
+ }
+ if !Implements(comparableType, anyType.(*Interface)) {
+ t.Error("comparable does not implement any")
+ }
+
+ if AssignableTo(anyType, comparableType) {
+ t.Error("any assignable to comparable")
+ }
+ if !AssignableTo(comparableType, anyType) {
+ t.Error("comparable not assignable to any")
+ }
+}
+
+func TestIssue55030(t *testing.T) {
+ // makeSig makes the signature func(typ...)
+ makeSig := func(typ Type) {
+ par := NewVar(nopos, nil, "", typ)
+ params := NewTuple(par)
+ NewSignatureType(nil, nil, nil, params, nil, true)
+ }
+
+ // makeSig must not panic for the following (example) types:
+ // []int
+ makeSig(NewSlice(Typ[Int]))
+
+ // string
+ makeSig(Typ[String])
+
+ // P where P's core type is string
+ {
+ P := NewTypeName(nopos, nil, "P", nil) // [P string]
+ makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{Typ[String]})))
+ }
+
+ // P where P's core type is an (unnamed) slice
+ {
+ P := NewTypeName(nopos, nil, "P", nil) // [P []int]
+ makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{NewSlice(Typ[Int])})))
+ }
+
+ // P where P's core type is bytestring (i.e., string or []byte)
+ {
+ t1 := NewTerm(true, Typ[String]) // ~string
+ t2 := NewTerm(false, NewSlice(Typ[Byte])) // []byte
+ u := NewUnion([]*Term{t1, t2}) // ~string | []byte
+ P := NewTypeName(nopos, nil, "P", nil) // [P ~string | []byte]
+ makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{u})))
+ }
+}
+
+func TestIssue51093(t *testing.T) {
+ // Each test stands for a conversion of the form P(val)
+ // where P is a type parameter with typ as constraint.
+ // The test ensures that P(val) has the correct type P
+ // and is not a constant.
+ var tests = []struct {
+ typ string
+ val string
+ }{
+ {"bool", "false"},
+ {"int", "-1"},
+ {"uint", "1.0"},
+ {"rune", "'a'"},
+ {"float64", "3.5"},
+ {"complex64", "1.25"},
+ {"string", "\"foo\""},
+
+ // some more complex constraints
+ {"~byte", "1"},
+ {"~int | ~float64 | complex128", "1"},
+ {"~uint64 | ~rune", "'X'"},
+ }
+
+ for _, test := range tests {
+ src := fmt.Sprintf("package p; func _[P %s]() { _ = P(%s) }", test.typ, test.val)
+ types := make(map[syntax.Expr]TypeAndValue)
+ mustTypecheck(src, nil, &Info{Types: types})
+
+ var n int
+ for x, tv := range types {
+ if x, _ := x.(*syntax.CallExpr); x != nil {
+ // there must be exactly one CallExpr which is the P(val) conversion
+ n++
+ tpar, _ := tv.Type.(*TypeParam)
+ if tpar == nil {
+ t.Fatalf("%s: got type %s, want type parameter", syntax.String(x), tv.Type)
+ }
+ if name := tpar.Obj().Name(); name != "P" {
+ t.Fatalf("%s: got type parameter name %s, want P", syntax.String(x), name)
+ }
+ // P(val) must not be constant
+ if tv.Value != nil {
+ t.Errorf("%s: got constant value %s (%s), want no constant", syntax.String(x), tv.Value, tv.Value.String())
+ }
+ }
+ }
+
+ if n != 1 {
+ t.Fatalf("%s: got %d CallExpr nodes; want 1", src, 1)
+ }
+ }
+}
+
+func TestIssue54258(t *testing.T) {
+ tests := []struct{ main, b, want string }{
+ { //---------------------------------------------------------------
+ `package main
+import "b"
+type I0 interface {
+ M0(w struct{ f string })
+}
+var _ I0 = b.S{}
+`,
+ `package b
+type S struct{}
+func (S) M0(struct{ f string }) {}
+`,
+ `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I0 value in variable declaration: b[.]S does not implement I0 [(]wrong type for method M0[)]
+.*have M0[(]struct{f string /[*] package b [*]/ }[)]
+.*want M0[(]struct{f string /[*] package main [*]/ }[)]`},
+
+ { //---------------------------------------------------------------
+ `package main
+import "b"
+type I1 interface {
+ M1(struct{ string })
+}
+var _ I1 = b.S{}
+`,
+ `package b
+type S struct{}
+func (S) M1(struct{ string }) {}
+`,
+ `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I1 value in variable declaration: b[.]S does not implement I1 [(]wrong type for method M1[)]
+.*have M1[(]struct{string /[*] package b [*]/ }[)]
+.*want M1[(]struct{string /[*] package main [*]/ }[)]`},
+
+ { //---------------------------------------------------------------
+ `package main
+import "b"
+type I2 interface {
+ M2(y struct{ f struct{ f string } })
+}
+var _ I2 = b.S{}
+`,
+ `package b
+type S struct{}
+func (S) M2(struct{ f struct{ f string } }) {}
+`,
+ `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I2 value in variable declaration: b[.]S does not implement I2 [(]wrong type for method M2[)]
+.*have M2[(]struct{f struct{f string} /[*] package b [*]/ }[)]
+.*want M2[(]struct{f struct{f string} /[*] package main [*]/ }[)]`},
+
+ { //---------------------------------------------------------------
+ `package main
+import "b"
+type I3 interface {
+ M3(z struct{ F struct{ f string } })
+}
+var _ I3 = b.S{}
+`,
+ `package b
+type S struct{}
+func (S) M3(struct{ F struct{ f string } }) {}
+`,
+ `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I3 value in variable declaration: b[.]S does not implement I3 [(]wrong type for method M3[)]
+.*have M3[(]struct{F struct{f string /[*] package b [*]/ }}[)]
+.*want M3[(]struct{F struct{f string /[*] package main [*]/ }}[)]`},
+
+ { //---------------------------------------------------------------
+ `package main
+import "b"
+type I4 interface {
+ M4(_ struct { *string })
+}
+var _ I4 = b.S{}
+`,
+ `package b
+type S struct{}
+func (S) M4(struct { *string }) {}
+`,
+ `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I4 value in variable declaration: b[.]S does not implement I4 [(]wrong type for method M4[)]
+.*have M4[(]struct{[*]string /[*] package b [*]/ }[)]
+.*want M4[(]struct{[*]string /[*] package main [*]/ }[)]`},
+
+ { //---------------------------------------------------------------
+ `package main
+import "b"
+type t struct{ A int }
+type I5 interface {
+ M5(_ struct {b.S;t})
+}
+var _ I5 = b.S{}
+`,
+ `package b
+type S struct{}
+type t struct{ A int }
+func (S) M5(struct {S;t}) {}
+`,
+ `7:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I5 value in variable declaration: b[.]S does not implement I5 [(]wrong type for method M5[)]
+.*have M5[(]struct{b[.]S; b[.]t}[)]
+.*want M5[(]struct{b[.]S; t}[)]`},
+ }
+
+ test := func(main, b, want string) {
+ re := regexp.MustCompile(want)
+ bpkg := mustTypecheck(b, nil, nil)
+ mast := mustParse(main)
+ conf := Config{Importer: importHelper{pkg: bpkg}}
+ _, err := conf.Check(mast.PkgName.Value, []*syntax.File{mast}, nil)
+ if err == nil {
+ t.Error("Expected failure, but it did not")
+ } else if got := err.Error(); !re.MatchString(got) {
+ t.Errorf("Wanted match for\n\t%s\n but got\n\t%s", want, got)
+ } else if testing.Verbose() {
+ t.Logf("Saw expected\n\t%s", err.Error())
+ }
+ }
+ for _, t := range tests {
+ test(t.main, t.b, t.want)
+ }
+}
+
+func TestIssue59944(t *testing.T) {
+ testenv.MustHaveCGO(t)
+
+ // The typechecker should resolve methods declared on aliases of cgo types.
+ const src = `
+package p
+
+/*
+struct layout {
+ int field;
+};
+*/
+import "C"
+
+type Layout = C.struct_layout
+
+func (l *Layout) Binding() {}
+
+func _() {
+ _ = (*Layout).Binding
+}
+`
+
+ // code generated by cmd/cgo for the above source.
+ const cgoTypes = `
+// Code generated by cmd/cgo; DO NOT EDIT.
+
+package p
+
+import "unsafe"
+
+import "syscall"
+
+import _cgopackage "runtime/cgo"
+
+type _ _cgopackage.Incomplete
+var _ syscall.Errno
+func _Cgo_ptr(ptr unsafe.Pointer) unsafe.Pointer { return ptr }
+
+//go:linkname _Cgo_always_false runtime.cgoAlwaysFalse
+var _Cgo_always_false bool
+//go:linkname _Cgo_use runtime.cgoUse
+func _Cgo_use(interface{})
+type _Ctype_int int32
+
+type _Ctype_struct_layout struct {
+ field _Ctype_int
+}
+
+type _Ctype_void [0]byte
+
+//go:linkname _cgo_runtime_cgocall runtime.cgocall
+func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
+
+//go:linkname _cgoCheckPointer runtime.cgoCheckPointer
+func _cgoCheckPointer(interface{}, interface{})
+
+//go:linkname _cgoCheckResult runtime.cgoCheckResult
+func _cgoCheckResult(interface{})
+`
+ testFiles(t, []string{"p.go", "_cgo_gotypes.go"}, [][]byte{[]byte(src), []byte(cgoTypes)}, 0, false, func(cfg *Config) {
+ *boolFieldAddr(cfg, "go115UsesCgo") = true
+ })
+}
+
+func TestIssue61931(t *testing.T) {
+ const src = `
+package p
+
+func A(func(any), ...any) {}
+func B[T any](T) {}
+
+func _() {
+ A(B, nil // syntax error: missing ',' before newline in argument list
+}
+`
+ f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), func(error) {}, nil, 0)
+ if err == nil {
+ t.Fatal("expected syntax error")
+ }
+
+ var conf Config
+ conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic
+}
+
+func TestIssue61938(t *testing.T) {
+ const src = `
+package p
+
+func f[T any]() {}
+func _() { f() }
+`
+ // no error handler provided (this issue)
+ var conf Config
+ typecheck(src, &conf, nil) // must not panic
+
+ // with error handler (sanity check)
+ conf.Error = func(error) {}
+ typecheck(src, &conf, nil) // must not panic
+}
+
+func TestIssue63260(t *testing.T) {
+ const src = `
+package p
+
+func _() {
+ use(f[*string])
+}
+
+func use(func()) {}
+
+func f[I *T, T any]() {
+ var v T
+ _ = v
+}`
+
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ pkg := mustTypecheck(src, nil, &info)
+
+ // get type parameter T in signature of f
+ T := pkg.Scope().Lookup("f").Type().(*Signature).TypeParams().At(1)
+ if T.Obj().Name() != "T" {
+ t.Fatalf("got type parameter %s, want T", T)
+ }
+
+ // get type of variable v in body of f
+ var v Object
+ for name, obj := range info.Defs {
+ if name.Value == "v" {
+ v = obj
+ break
+ }
+ }
+ if v == nil {
+ t.Fatal("variable v not found")
+ }
+
+ // type of v and T must be pointer-identical
+ if v.Type() != T {
+ t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T)
+ }
+}
+
+func TestIssue44410(t *testing.T) {
+ const src = `
+package p
+
+type A = []int
+type S struct{ A }
+`
+
+ t.Setenv("GODEBUG", "gotypesalias=1")
+ pkg := mustTypecheck(src, nil, nil)
+
+ S := pkg.Scope().Lookup("S")
+ if S == nil {
+ t.Fatal("object S not found")
+ }
+
+ got := S.String()
+ const want = "type p.S struct{p.A}"
+ if got != want {
+ t.Fatalf("got %q; want %q", got, want)
+ }
+}
+
+func TestIssue59831(t *testing.T) {
+ // Package a exports a type S with an unexported method m;
+ // the tests check the error messages when m is not found.
+ const asrc = `package a; type S struct{}; func (S) m() {}`
+ apkg := mustTypecheck(asrc, nil, nil)
+
+ // Package b exports a type S with an exported method m;
+ // the tests check the error messages when M is not found.
+ const bsrc = `package b; type S struct{}; func (S) M() {}`
+ bpkg := mustTypecheck(bsrc, nil, nil)
+
+ tests := []struct {
+ imported *Package
+ src, err string
+ }{
+ // tests importing a (or nothing)
+ {apkg, `package a1; import "a"; var _ interface { M() } = a.S{}`,
+ "a.S does not implement interface{M()} (missing method M) have m() want M()"},
+
+ {apkg, `package a2; import "a"; var _ interface { m() } = a.S{}`,
+ "a.S does not implement interface{m()} (unexported method m)"}, // test for issue
+
+ {nil, `package a3; type S struct{}; func (S) m(); var _ interface { M() } = S{}`,
+ "S does not implement interface{M()} (missing method M) have m() want M()"},
+
+ {nil, `package a4; type S struct{}; func (S) m(); var _ interface { m() } = S{}`,
+ ""}, // no error expected
+
+ {nil, `package a5; type S struct{}; func (S) m(); var _ interface { n() } = S{}`,
+ "S does not implement interface{n()} (missing method n)"},
+
+ // tests importing b (or nothing)
+ {bpkg, `package b1; import "b"; var _ interface { m() } = b.S{}`,
+ "b.S does not implement interface{m()} (missing method m) have M() want m()"},
+
+ {bpkg, `package b2; import "b"; var _ interface { M() } = b.S{}`,
+ ""}, // no error expected
+
+ {nil, `package b3; type S struct{}; func (S) M(); var _ interface { M() } = S{}`,
+ ""}, // no error expected
+
+ {nil, `package b4; type S struct{}; func (S) M(); var _ interface { m() } = S{}`,
+ "S does not implement interface{m()} (missing method m) have M() want m()"},
+
+ {nil, `package b5; type S struct{}; func (S) M(); var _ interface { n() } = S{}`,
+ "S does not implement interface{n()} (missing method n)"},
+ }
+
+ for _, test := range tests {
+ // typecheck test source
+ conf := Config{Importer: importHelper{pkg: test.imported}}
+ pkg, err := typecheck(test.src, &conf, nil)
+ if err == nil {
+ if test.err != "" {
+ t.Errorf("package %s: got no error, want %q", pkg.Name(), test.err)
+ }
+ continue
+ }
+ if test.err == "" {
+ t.Errorf("package %s: got %q, want not error", pkg.Name(), err.Error())
+ }
+
+ // flatten reported error message
+ errmsg := strings.ReplaceAll(err.Error(), "\n", " ")
+ errmsg = strings.ReplaceAll(errmsg, "\t", "")
+
+ // verify error message
+ if !strings.Contains(errmsg, test.err) {
+ t.Errorf("package %s: got %q, want %q", pkg.Name(), errmsg, test.err)
+ }
+ }
+}
+
+func TestIssue64759(t *testing.T) {
+ const src = `
+//go:build go1.18
+package p
+
+func f[S ~[]E, E any](S) {}
+
+func _() {
+ f([]string{})
+}
+`
+ // Per the go:build directive, the source must typecheck
+ // even though the (module) Go version is set to go1.17.
+ conf := Config{GoVersion: "go1.17"}
+ mustTypecheck(src, &conf, nil)
+}
diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go
new file mode 100644
index 0000000..ffb3700
--- /dev/null
+++ b/src/cmd/compile/internal/types2/labels.go
@@ -0,0 +1,269 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+)
+
+// labels checks correct label use in body.
+func (check *Checker) labels(body *syntax.BlockStmt) {
+ // set of all labels in this body
+ all := NewScope(nil, body.Pos(), syntax.EndPos(body), "label")
+
+ fwdJumps := check.blockBranches(all, nil, nil, body.List)
+
+ // If there are any forward jumps left, no label was found for
+ // the corresponding goto statements. Either those labels were
+ // never defined, or they are inside blocks and not reachable
+ // for the respective gotos.
+ for _, jmp := range fwdJumps {
+ var msg string
+ var code Code
+ name := jmp.Label.Value
+ if alt := all.Lookup(name); alt != nil {
+ msg = "goto %s jumps into block"
+ alt.(*Label).used = true // avoid another error
+ code = JumpIntoBlock
+ } else {
+ msg = "label %s not declared"
+ code = UndeclaredLabel
+ }
+ check.errorf(jmp.Label, code, msg, name)
+ }
+
+ // spec: "It is illegal to define a label that is never used."
+ for name, obj := range all.elems {
+ obj = resolve(name, obj)
+ if lbl := obj.(*Label); !lbl.used {
+ check.softErrorf(lbl.pos, UnusedLabel, "label %s declared and not used", lbl.name)
+ }
+ }
+}
+
+// A block tracks label declarations in a block and its enclosing blocks.
+type block struct {
+ parent *block // enclosing block
+ lstmt *syntax.LabeledStmt // labeled statement to which this block belongs, or nil
+ labels map[string]*syntax.LabeledStmt // allocated lazily
+}
+
+// insert records a new label declaration for the current block.
+// The label must not have been declared before in any block.
+func (b *block) insert(s *syntax.LabeledStmt) {
+ name := s.Label.Value
+ if debug {
+ assert(b.gotoTarget(name) == nil)
+ }
+ labels := b.labels
+ if labels == nil {
+ labels = make(map[string]*syntax.LabeledStmt)
+ b.labels = labels
+ }
+ labels[name] = s
+}
+
+// gotoTarget returns the labeled statement in the current
+// or an enclosing block with the given label name, or nil.
+func (b *block) gotoTarget(name string) *syntax.LabeledStmt {
+ for s := b; s != nil; s = s.parent {
+ if t := s.labels[name]; t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// enclosingTarget returns the innermost enclosing labeled
+// statement with the given label name, or nil.
+func (b *block) enclosingTarget(name string) *syntax.LabeledStmt {
+ for s := b; s != nil; s = s.parent {
+ if t := s.lstmt; t != nil && t.Label.Value == name {
+ return t
+ }
+ }
+ return nil
+}
+
+// blockBranches processes a block's statement list and returns the set of outgoing forward jumps.
+// all is the scope of all declared labels, parent the set of labels declared in the immediately
+// enclosing block, and lstmt is the labeled statement this block is associated with (or nil).
+func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.LabeledStmt, list []syntax.Stmt) []*syntax.BranchStmt {
+ b := &block{parent, lstmt, nil}
+
+ var (
+ varDeclPos syntax.Pos
+ fwdJumps, badJumps []*syntax.BranchStmt
+ )
+
+ // All forward jumps jumping over a variable declaration are possibly
+ // invalid (they may still jump out of the block and be ok).
+ // recordVarDecl records them for the given position.
+ recordVarDecl := func(pos syntax.Pos) {
+ varDeclPos = pos
+ badJumps = append(badJumps[:0], fwdJumps...) // copy fwdJumps to badJumps
+ }
+
+ jumpsOverVarDecl := func(jmp *syntax.BranchStmt) bool {
+ if varDeclPos.IsKnown() {
+ for _, bad := range badJumps {
+ if jmp == bad {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ var stmtBranches func(syntax.Stmt)
+ stmtBranches = func(s syntax.Stmt) {
+ switch s := s.(type) {
+ case *syntax.DeclStmt:
+ for _, d := range s.DeclList {
+ if d, _ := d.(*syntax.VarDecl); d != nil {
+ recordVarDecl(d.Pos())
+ }
+ }
+
+ case *syntax.LabeledStmt:
+ // declare non-blank label
+ if name := s.Label.Value; name != "_" {
+ lbl := NewLabel(s.Label.Pos(), check.pkg, name)
+ if alt := all.Insert(lbl); alt != nil {
+ var err error_
+ err.code = DuplicateLabel
+ err.soft = true
+ err.errorf(lbl.pos, "label %s already declared", name)
+ err.recordAltDecl(alt)
+ check.report(&err)
+ // ok to continue
+ } else {
+ b.insert(s)
+ check.recordDef(s.Label, lbl)
+ }
+ // resolve matching forward jumps and remove them from fwdJumps
+ i := 0
+ for _, jmp := range fwdJumps {
+ if jmp.Label.Value == name {
+ // match
+ lbl.used = true
+ check.recordUse(jmp.Label, lbl)
+ if jumpsOverVarDecl(jmp) {
+ check.softErrorf(
+ jmp.Label,
+ JumpOverDecl,
+ "goto %s jumps over variable declaration at line %d",
+ name,
+ varDeclPos.Line(),
+ )
+ // ok to continue
+ }
+ } else {
+ // no match - record new forward jump
+ fwdJumps[i] = jmp
+ i++
+ }
+ }
+ fwdJumps = fwdJumps[:i]
+ lstmt = s
+ }
+ stmtBranches(s.Stmt)
+
+ case *syntax.BranchStmt:
+ if s.Label == nil {
+ return // checked in 1st pass (check.stmt)
+ }
+
+ // determine and validate target
+ name := s.Label.Value
+ switch s.Tok {
+ case syntax.Break:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for", "switch", or "select" statement, and that is the one
+ // whose execution terminates."
+ valid := false
+ if t := b.enclosingTarget(name); t != nil {
+ switch t.Stmt.(type) {
+ case *syntax.SwitchStmt, *syntax.SelectStmt, *syntax.ForStmt:
+ valid = true
+ }
+ }
+ if !valid {
+ check.errorf(s.Label, MisplacedLabel, "invalid break label %s", name)
+ return
+ }
+
+ case syntax.Continue:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for" statement, and that is the one whose execution advances."
+ valid := false
+ if t := b.enclosingTarget(name); t != nil {
+ switch t.Stmt.(type) {
+ case *syntax.ForStmt:
+ valid = true
+ }
+ }
+ if !valid {
+ check.errorf(s.Label, MisplacedLabel, "invalid continue label %s", name)
+ return
+ }
+
+ case syntax.Goto:
+ if b.gotoTarget(name) == nil {
+ // label may be declared later - add branch to forward jumps
+ fwdJumps = append(fwdJumps, s)
+ return
+ }
+
+ default:
+ check.errorf(s, InvalidSyntaxTree, "branch statement: %s %s", s.Tok, name)
+ return
+ }
+
+ // record label use
+ obj := all.Lookup(name)
+ obj.(*Label).used = true
+ check.recordUse(s.Label, obj)
+
+ case *syntax.AssignStmt:
+ if s.Op == syntax.Def {
+ recordVarDecl(s.Pos())
+ }
+
+ case *syntax.BlockStmt:
+ // Unresolved forward jumps inside the nested block
+ // become forward jumps in the current block.
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, lstmt, s.List)...)
+
+ case *syntax.IfStmt:
+ stmtBranches(s.Then)
+ if s.Else != nil {
+ stmtBranches(s.Else)
+ }
+
+ case *syntax.SwitchStmt:
+ b := &block{b, lstmt, nil}
+ for _, s := range s.Body {
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, nil, s.Body)...)
+ }
+
+ case *syntax.SelectStmt:
+ b := &block{b, lstmt, nil}
+ for _, s := range s.Body {
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, nil, s.Body)...)
+ }
+
+ case *syntax.ForStmt:
+ stmtBranches(s.Body)
+ }
+ }
+
+ for _, s := range list {
+ stmtBranches(s)
+ }
+
+ return fwdJumps
+}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
new file mode 100644
index 0000000..bc47c15
--- /dev/null
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -0,0 +1,603 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various field and method lookup functions.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "strings"
+)
+
+// Internal use of LookupFieldOrMethod: If the obj result is a method
+// associated with a concrete (non-interface) type, the method's signature
+// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
+// the method's type.
+
+// LookupFieldOrMethod looks up a field or method with given package and name
+// in T and returns the corresponding *Var or *Func, an index sequence, and a
+// bool indicating if there were any pointer indirections on the path to the
+// field or method. If addressable is set, T is the type of an addressable
+// variable (only matters for method lookups). T must not be nil.
+//
+// The last index entry is the field or method index in the (possibly embedded)
+// type where the entry was found, either:
+//
+// 1. the list of declared methods of a named type; or
+// 2. the list of all methods (method set) of an interface type; or
+// 3. the list of fields of a struct type.
+//
+// The earlier index entries are the indices of the embedded struct fields
+// traversed to get to the found entry, starting at depth 0.
+//
+// If no entry is found, a nil object is returned. In this case, the returned
+// index and indirect values have the following meaning:
+//
+// - If index != nil, the index sequence points to an ambiguous entry
+// (the same name appeared more than once at the same embedding level).
+//
+// - If indirect is set, a method with a pointer receiver type was found
+// but there was no pointer on the path from the actual receiver type to
+// the method's formal receiver base type, nor was the receiver addressable.
+func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+ if T == nil {
+ panic("LookupFieldOrMethod on nil type")
+ }
+
+ // Methods cannot be associated to a named pointer type.
+ // (spec: "The type denoted by T is called the receiver base type;
+ // it must not be a pointer or interface type and it must be declared
+ // in the same package as the method.").
+ // Thus, if we have a named pointer type, proceed with the underlying
+ // pointer type but discard the result if it is a method since we would
+ // not have found it for T (see also go.dev/issue/8590).
+ if t := asNamed(T); t != nil {
+ if p, _ := t.Underlying().(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, false)
+ if _, ok := obj.(*Func); ok {
+ return nil, nil, false
+ }
+ return
+ }
+ }
+
+ obj, index, indirect = lookupFieldOrMethodImpl(T, addressable, pkg, name, false)
+
+ // If we didn't find anything and if we have a type parameter with a core type,
+ // see if there is a matching field (but not a method, those need to be declared
+ // explicitly in the constraint). If the constraint is a named pointer type (see
+ // above), we are ok here because only fields are accepted as results.
+ const enableTParamFieldLookup = false // see go.dev/issue/51576
+ if enableTParamFieldLookup && obj == nil && isTypeParam(T) {
+ if t := coreType(T); t != nil {
+ obj, index, indirect = lookupFieldOrMethodImpl(t, addressable, pkg, name, false)
+ if _, ok := obj.(*Var); !ok {
+ obj, index, indirect = nil, nil, false // accept fields (variables) only
+ }
+ }
+ }
+ return
+}
+
+// lookupFieldOrMethodImpl is the implementation of LookupFieldOrMethod.
+// Notably, in contrast to LookupFieldOrMethod, it won't find struct fields
+// in base types of defined (*Named) pointer types T. For instance, given
+// the declaration:
+//
+// type T *struct{f int}
+//
+// lookupFieldOrMethodImpl won't find the field f in the defined (*Named) type T
+// (methods on T are not permitted in the first place).
+//
+// Thus, lookupFieldOrMethodImpl should only be called by LookupFieldOrMethod
+// and missingMethod (the latter doesn't care about struct fields).
+//
+// If foldCase is true, method names are considered equal if they are equal
+// with case folding, irrespective of which package they are in.
+//
+// The resulting object may not be fully type-checked.
+func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) {
+ // WARNING: The code in this function is extremely subtle - do not modify casually!
+
+ if name == "_" {
+ return // blank fields/methods are never found
+ }
+
+ // Importantly, we must not call under before the call to deref below (nor
+ // does deref call under), as doing so could incorrectly result in finding
+ // methods of the pointer base type when T is a (*Named) pointer type.
+ typ, isPtr := deref(T)
+
+ // *typ where typ is an interface (incl. a type parameter) has no methods.
+ if isPtr {
+ if _, ok := under(typ).(*Interface); ok {
+ return
+ }
+ }
+
+ // Start with typ as single entry at shallowest depth.
+ current := []embeddedType{{typ, nil, isPtr, false}}
+
+ // seen tracks named types that we have seen already, allocated lazily.
+ // Used to avoid endless searches in case of recursive types.
+ //
+ // We must use a lookup on identity rather than a simple map[*Named]bool as
+ // instantiated types may be identical but not equal.
+ var seen instanceLookup
+
+ // search current depth
+ for len(current) > 0 {
+ var next []embeddedType // embedded types found at current depth
+
+ // look for (pkg, name) in all types at current depth
+ for _, e := range current {
+ typ := e.typ
+
+ // If we have a named type, we may have associated methods.
+ // Look for those first.
+ if named := asNamed(typ); named != nil {
+ if alt := seen.lookup(named); alt != nil {
+ // We have seen this type before, at a more shallow depth
+ // (note that multiples of this type at the current depth
+ // were consolidated before). The type at that depth shadows
+ // this same type at the current depth, so we can ignore
+ // this one.
+ continue
+ }
+ seen.add(named)
+
+ // look for a matching attached method
+ if i, m := named.lookupMethod(pkg, name, foldCase); m != nil {
+ // potential match
+ // caution: method may not have a proper signature yet
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = m
+ indirect = e.indirect
+ continue // we can't have a matching field or interface method
+ }
+ }
+
+ switch t := under(typ).(type) {
+ case *Struct:
+ // look for a matching field and collect embedded types
+ for i, f := range t.fields {
+ if f.sameId(pkg, name) {
+ assert(f.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = f
+ indirect = e.indirect
+ continue // we can't have a matching interface method
+ }
+ // Collect embedded struct fields for searching the next
+ // lower depth, but only if we have not seen a match yet
+ // (if we have a match it is either the desired field or
+ // we have a name collision on the same depth; in either
+ // case we don't need to look further).
+ // Embedded fields are always of the form T or *T where
+ // T is a type name. If e.typ appeared multiple times at
+ // this depth, f.typ appears multiple times at the next
+ // depth.
+ if obj == nil && f.embedded {
+ typ, isPtr := deref(f.typ)
+ // TODO(gri) optimization: ignore types that can't
+ // have fields or methods (only Named, Struct, and
+ // Interface types need to be considered).
+ next = append(next, embeddedType{typ, concat(e.index, i), e.indirect || isPtr, e.multiples})
+ }
+ }
+
+ case *Interface:
+ // look for a matching method (interface may be a type parameter)
+ if i, m := t.typeSet().LookupMethod(pkg, name, foldCase); m != nil {
+ assert(m.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = m
+ indirect = e.indirect
+ }
+ }
+ }
+
+ if obj != nil {
+ // found a potential match
+ // spec: "A method call x.m() is valid if the method set of (the type of) x
+ // contains m and the argument list can be assigned to the parameter
+ // list of m. If x is addressable and &x's method set contains m, x.m()
+ // is shorthand for (&x).m()".
+ if f, _ := obj.(*Func); f != nil {
+ // determine if method has a pointer receiver
+ if f.hasPtrRecv() && !indirect && !addressable {
+ return nil, nil, true // pointer/addressable receiver required
+ }
+ }
+ return
+ }
+
+ current = consolidateMultiples(next)
+ }
+
+ return nil, nil, false // not found
+}
+
+// embeddedType represents an embedded type
+type embeddedType struct {
+ typ Type
+ index []int // embedded field indices, starting with index at depth 0
+ indirect bool // if set, there was a pointer indirection on the path to this field
+ multiples bool // if set, typ appears multiple times at this depth
+}
+
+// consolidateMultiples collects multiple list entries with the same type
+// into a single entry marked as containing multiples. The result is the
+// consolidated list.
+func consolidateMultiples(list []embeddedType) []embeddedType {
+ if len(list) <= 1 {
+ return list // at most one entry - nothing to do
+ }
+
+ n := 0 // number of entries w/ unique type
+ prev := make(map[Type]int) // index at which type was previously seen
+ for _, e := range list {
+ if i, found := lookupType(prev, e.typ); found {
+ list[i].multiples = true
+ // ignore this entry
+ } else {
+ prev[e.typ] = n
+ list[n] = e
+ n++
+ }
+ }
+ return list[:n]
+}
+
+func lookupType(m map[Type]int, typ Type) (int, bool) {
+ // fast path: maybe the types are equal
+ if i, found := m[typ]; found {
+ return i, true
+ }
+
+ for t, i := range m {
+ if Identical(t, typ) {
+ return i, true
+ }
+ }
+
+ return 0, false
+}
+
+type instanceLookup struct {
+ // buf is used to avoid allocating the map m in the common case of a small
+ // number of instances.
+ buf [3]*Named
+ m map[*Named][]*Named
+}
+
+func (l *instanceLookup) lookup(inst *Named) *Named {
+ for _, t := range l.buf {
+ if t != nil && Identical(inst, t) {
+ return t
+ }
+ }
+ for _, t := range l.m[inst.Origin()] {
+ if Identical(inst, t) {
+ return t
+ }
+ }
+ return nil
+}
+
+func (l *instanceLookup) add(inst *Named) {
+ for i, t := range l.buf {
+ if t == nil {
+ l.buf[i] = inst
+ return
+ }
+ }
+ if l.m == nil {
+ l.m = make(map[*Named][]*Named)
+ }
+ insts := l.m[inst.Origin()]
+ l.m[inst.Origin()] = append(insts, inst)
+}
+
+// MissingMethod returns (nil, false) if V implements T, otherwise it
+// returns a missing method required by T and whether it is missing or
+// just has the wrong type: either a pointer receiver or wrong signature.
+//
+// For non-interface types V, or if static is set, V implements T if all
+// methods of T are present in V. Otherwise (V is an interface and static
+// is not set), MissingMethod only checks that methods of T which are also
+// present in V have matching types (e.g., for a type assertion x.(T) where
+// x is of interface type V).
+func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) {
+ return (*Checker)(nil).missingMethod(V, T, static, Identical, nil)
+}
+
+// missingMethod is like MissingMethod but accepts a *Checker as receiver,
+// a comparator equivalent for type comparison, and a *string for error causes.
+// The receiver may be nil if missingMethod is invoked through an exported
+// API call (such as MissingMethod), i.e., when all methods have been type-
+// checked.
+// The underlying type of T must be an interface; T (rather than its under-
+// lying type) is used for better error messages (reported through *cause).
+// The comparator is used to compare signatures.
+// If a method is missing and cause is not nil, *cause describes the error.
+func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) {
+ methods := under(T).(*Interface).typeSet().methods // T must be an interface
+ if len(methods) == 0 {
+ return nil, false
+ }
+
+ const (
+ ok = iota
+ notFound
+ wrongName
+ unexported
+ wrongSig
+ ambigSel
+ ptrRecv
+ field
+ )
+
+ state := ok
+ var m *Func // method on T we're trying to implement
+ var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig)
+
+ if u, _ := under(V).(*Interface); u != nil {
+ tset := u.typeSet()
+ for _, m = range methods {
+ _, f = tset.LookupMethod(m.pkg, m.name, false)
+
+ if f == nil {
+ if !static {
+ continue
+ }
+ state = notFound
+ break
+ }
+
+ if !equivalent(f.typ, m.typ) {
+ state = wrongSig
+ break
+ }
+ }
+ } else {
+ for _, m = range methods {
+ obj, index, indirect := lookupFieldOrMethodImpl(V, false, m.pkg, m.name, false)
+
+ // check if m is ambiguous, on *V, or on V with case-folding
+ if obj == nil {
+ switch {
+ case index != nil:
+ state = ambigSel
+ case indirect:
+ state = ptrRecv
+ default:
+ state = notFound
+ obj, _, _ = lookupFieldOrMethodImpl(V, false, m.pkg, m.name, true /* fold case */)
+ f, _ = obj.(*Func)
+ if f != nil {
+ state = wrongName
+ if f.name == m.name {
+ // If the names are equal, f must be unexported
+ // (otherwise the package wouldn't matter).
+ state = unexported
+ }
+ }
+ }
+ break
+ }
+
+ // we must have a method (not a struct field)
+ f, _ = obj.(*Func)
+ if f == nil {
+ state = field
+ break
+ }
+
+ // methods may not have a fully set up signature yet
+ if check != nil {
+ check.objDecl(f, nil)
+ }
+
+ if !equivalent(f.typ, m.typ) {
+ state = wrongSig
+ break
+ }
+ }
+ }
+
+ if state == ok {
+ return nil, false
+ }
+
+ if cause != nil {
+ if f != nil {
+ // This method may be formatted in funcString below, so must have a fully
+ // set up signature.
+ if check != nil {
+ check.objDecl(f, nil)
+ }
+ }
+ switch state {
+ case notFound:
+ switch {
+ case isInterfacePtr(V):
+ *cause = "(" + check.interfacePtrError(V) + ")"
+ case isInterfacePtr(T):
+ *cause = "(" + check.interfacePtrError(T) + ")"
+ default:
+ *cause = check.sprintf("(missing method %s)", m.Name())
+ }
+ case wrongName:
+ fs, ms := check.funcString(f, false), check.funcString(m, false)
+ *cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
+ case unexported:
+ *cause = check.sprintf("(unexported method %s)", m.Name())
+ case wrongSig:
+ fs, ms := check.funcString(f, false), check.funcString(m, false)
+ if fs == ms {
+ // Don't report "want Foo, have Foo".
+ // Add package information to disambiguate (go.dev/issue/54258).
+ fs, ms = check.funcString(f, true), check.funcString(m, true)
+ }
+ if fs == ms {
+ // We still have "want Foo, have Foo".
+ // This is most likely due to different type parameters with
+ // the same name appearing in the instantiated signatures
+ // (go.dev/issue/61685).
+ // Rather than reporting this misleading error cause, for now
+ // just point out that the method signature is incorrect.
+ // TODO(gri) should find a good way to report the root cause
+ *cause = check.sprintf("(wrong type for method %s)", m.Name())
+ break
+ }
+ *cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms)
+ case ambigSel:
+ *cause = check.sprintf("(ambiguous selector %s.%s)", V, m.Name())
+ case ptrRecv:
+ *cause = check.sprintf("(method %s has pointer receiver)", m.Name())
+ case field:
+ *cause = check.sprintf("(%s.%s is a field, not a method)", V, m.Name())
+ default:
+ unreachable()
+ }
+ }
+
+ return m, state == wrongSig || state == ptrRecv
+}
+
+func isInterfacePtr(T Type) bool {
+ p, _ := under(T).(*Pointer)
+ return p != nil && IsInterface(p.base)
+}
+
+// check may be nil.
+func (check *Checker) interfacePtrError(T Type) string {
+ assert(isInterfacePtr(T))
+ if p, _ := under(T).(*Pointer); isTypeParam(p.base) {
+ return check.sprintf("type %s is pointer to type parameter, not type parameter", T)
+ }
+ return check.sprintf("type %s is pointer to interface, not interface", T)
+}
+
+// funcString returns a string of the form name + signature for f.
+// check may be nil.
+func (check *Checker) funcString(f *Func, pkgInfo bool) string {
+ buf := bytes.NewBufferString(f.name)
+ var qf Qualifier
+ if check != nil && !pkgInfo {
+ qf = check.qualifier
+ }
+ w := newTypeWriter(buf, qf)
+ w.pkgInfo = pkgInfo
+ w.paramNames = false
+ w.signature(f.typ.(*Signature))
+ return buf.String()
+}
+
+// assertableTo reports whether a value of type V can be asserted to have type T.
+// The receiver may be nil if assertableTo is invoked through an exported API call
+// (such as AssertableTo), i.e., when all methods have been type-checked.
+// The underlying type of V must be an interface.
+// If the result is false and cause is not nil, *cause describes the error.
+// TODO(gri) replace calls to this function with calls to newAssertableTo.
+func (check *Checker) assertableTo(V, T Type, cause *string) bool {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if IsInterface(T) {
+ return true
+ }
+ // TODO(gri) fix this for generalized interfaces
+ m, _ := check.missingMethod(T, V, false, Identical, cause)
+ return m == nil
+}
+
+// newAssertableTo reports whether a value of type V can be asserted to have type T.
+// It also implements behavior for interfaces that currently are only permitted
+// in constraint position (we have not yet defined that behavior in the spec).
+// The underlying type of V must be an interface.
+// If the result is false and cause is not nil, *cause is set to the error cause.
+func (check *Checker) newAssertableTo(pos syntax.Pos, V, T Type, cause *string) bool {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if IsInterface(T) {
+ return true
+ }
+ return check.implements(pos, T, V, false, cause)
+}
+
+// deref dereferences typ if it is a *Pointer (but not a *Named type
+// with an underlying pointer type!) and returns its base and true.
+// Otherwise it returns (typ, false).
+func deref(typ Type) (Type, bool) {
+ if p, _ := Unalias(typ).(*Pointer); p != nil {
+ // p.base should never be nil, but be conservative
+ if p.base == nil {
+ if debug {
+ panic("pointer with nil base type (possibly due to an invalid cyclic declaration)")
+ }
+ return Typ[Invalid], true
+ }
+ return p.base, true
+ }
+ return typ, false
+}
+
+// derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a
+// (named or unnamed) struct and returns its base. Otherwise it returns typ.
+func derefStructPtr(typ Type) Type {
+ if p, _ := under(typ).(*Pointer); p != nil {
+ if _, ok := under(p.base).(*Struct); ok {
+ return p.base
+ }
+ }
+ return typ
+}
+
+// concat returns the result of concatenating list and i.
+// The result does not share its underlying array with list.
+func concat(list []int, i int) []int {
+ var t []int
+ t = append(t, list...)
+ return append(t, i)
+}
+
+// fieldIndex returns the index for the field with matching package and name, or a value < 0.
+func fieldIndex(fields []*Var, pkg *Package, name string) int {
+ if name != "_" {
+ for i, f := range fields {
+ if f.sameId(pkg, name) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// lookupMethod returns the index of and method with matching package and name, or (-1, nil).
+// If foldCase is true, method names are considered equal if they are equal with case folding
+// and their packages are ignored (e.g., pkg1.m, pkg1.M, pkg2.m, and pkg2.M are all equal).
+func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) {
+ if name != "_" {
+ for i, m := range methods {
+ if m.sameId(pkg, name) || foldCase && strings.EqualFold(m.name, name) {
+ return i, m
+ }
+ }
+ }
+ return -1, nil
+}
diff --git a/src/cmd/compile/internal/types2/lookup_test.go b/src/cmd/compile/internal/types2/lookup_test.go
new file mode 100644
index 0000000..56fe48c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/lookup_test.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "path/filepath"
+ "runtime"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+// BenchmarkLookupFieldOrMethod measures types.LookupFieldOrMethod performance.
+// LookupFieldOrMethod is a performance hotspot for both type-checking and
+// external API calls.
+func BenchmarkLookupFieldOrMethod(b *testing.B) {
+ // Choose an arbitrary, large package.
+ path := filepath.Join(runtime.GOROOT(), "src", "net", "http")
+
+ files, err := pkgFiles(path)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ conf := Config{
+ Importer: defaultImporter(),
+ }
+
+ pkg, err := conf.Check("http", files, nil)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ scope := pkg.Scope()
+ names := scope.Names()
+
+ // Look up an arbitrary name for each type referenced in the package scope.
+ lookup := func() {
+ for _, name := range names {
+ typ := scope.Lookup(name).Type()
+ LookupFieldOrMethod(typ, true, pkg, "m")
+ }
+ }
+
+ // Perform a lookup once, to ensure that any lazily-evaluated state is
+ // complete.
+ lookup()
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ lookup()
+ }
+}
diff --git a/src/cmd/compile/internal/types2/main_test.go b/src/cmd/compile/internal/types2/main_test.go
new file mode 100644
index 0000000..42d2694
--- /dev/null
+++ b/src/cmd/compile/internal/types2/main_test.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "go/build"
+ "internal/testenv"
+ "os"
+ "testing"
+)
+
+func TestMain(m *testing.M) {
+ build.Default.GOROOT = testenv.GOROOT(nil)
+ os.Exit(m.Run())
+}
diff --git a/src/cmd/compile/internal/types2/map.go b/src/cmd/compile/internal/types2/map.go
new file mode 100644
index 0000000..0d3464c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/map.go
@@ -0,0 +1,24 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Map represents a map type.
+type Map struct {
+ key, elem Type
+}
+
+// NewMap returns a new map for the given key and element types.
+func NewMap(key, elem Type) *Map {
+ return &Map{key: key, elem: elem}
+}
+
+// Key returns the key type of map m.
+func (m *Map) Key() Type { return m.key }
+
+// Elem returns the element type of map m.
+func (m *Map) Elem() Type { return m.elem }
+
+func (t *Map) Underlying() Type { return t }
+func (t *Map) String() string { return TypeString(t, nil) }
diff --git a/src/cmd/compile/internal/types2/mono.go b/src/cmd/compile/internal/types2/mono.go
new file mode 100644
index 0000000..dae9230
--- /dev/null
+++ b/src/cmd/compile/internal/types2/mono.go
@@ -0,0 +1,339 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+)
+
+// This file implements a check to validate that a Go package doesn't
+// have unbounded recursive instantiation, which is not compatible
+// with compilers using static instantiation (such as
+// monomorphization).
+//
+// It implements a sort of "type flow" analysis by detecting which
+// type parameters are instantiated with other type parameters (or
+// types derived thereof). A package cannot be statically instantiated
+// if the graph has any cycles involving at least one derived type.
+//
+// Concretely, we construct a directed, weighted graph. Vertices are
+// used to represent type parameters as well as some defined
+// types. Edges are used to represent how types depend on each other:
+//
+// * Everywhere a type-parameterized function or type is instantiated,
+// we add edges to each type parameter from the vertices (if any)
+// representing each type parameter or defined type referenced by
+// the type argument. If the type argument is just the referenced
+// type itself, then the edge has weight 0, otherwise 1.
+//
+// * For every defined type declared within a type-parameterized
+// function or method, we add an edge of weight 1 to the defined
+// type from each ambient type parameter.
+//
+// For example, given:
+//
+// func f[A, B any]() {
+// type T int
+// f[T, map[A]B]()
+// }
+//
+// we construct vertices representing types A, B, and T. Because of
+// declaration "type T int", we construct edges T<-A and T<-B with
+// weight 1; and because of instantiation "f[T, map[A]B]" we construct
+// edges A<-T with weight 0, and B<-A and B<-B with weight 1.
+//
+// Finally, we look for any positive-weight cycles. Zero-weight cycles
+// are allowed because static instantiation will reach a fixed point.
+
+type monoGraph struct {
+ vertices []monoVertex
+ edges []monoEdge
+
+ // canon maps method receiver type parameters to their respective
+ // receiver type's type parameters.
+ canon map[*TypeParam]*TypeParam
+
+ // nameIdx maps a defined type or (canonical) type parameter to its
+ // vertex index.
+ nameIdx map[*TypeName]int
+}
+
+type monoVertex struct {
+ weight int // weight of heaviest known path to this vertex
+ pre int // previous edge (if any) in the above path
+ len int // length of the above path
+
+ // obj is the defined type or type parameter represented by this
+ // vertex.
+ obj *TypeName
+}
+
+type monoEdge struct {
+ dst, src int
+ weight int
+
+ pos syntax.Pos
+ typ Type
+}
+
+func (check *Checker) monomorph() {
+ // We detect unbounded instantiation cycles using a variant of
+ // Bellman-Ford's algorithm. Namely, instead of always running |V|
+ // iterations, we run until we either reach a fixed point or we've
+ // found a path of length |V|. This allows us to terminate earlier
+ // when there are no cycles, which should be the common case.
+
+ again := true
+ for again {
+ again = false
+
+ for i, edge := range check.mono.edges {
+ src := &check.mono.vertices[edge.src]
+ dst := &check.mono.vertices[edge.dst]
+
+ // N.B., we're looking for the greatest weight paths, unlike
+ // typical Bellman-Ford.
+ w := src.weight + edge.weight
+ if w <= dst.weight {
+ continue
+ }
+
+ dst.pre = i
+ dst.len = src.len + 1
+ if dst.len == len(check.mono.vertices) {
+ check.reportInstanceLoop(edge.dst)
+ return
+ }
+
+ dst.weight = w
+ again = true
+ }
+ }
+}
+
+func (check *Checker) reportInstanceLoop(v int) {
+ var stack []int
+ seen := make([]bool, len(check.mono.vertices))
+
+ // We have a path that contains a cycle and ends at v, but v may
+ // only be reachable from the cycle, not on the cycle itself. We
+ // start by walking backwards along the path until we find a vertex
+ // that appears twice.
+ for !seen[v] {
+ stack = append(stack, v)
+ seen[v] = true
+ v = check.mono.edges[check.mono.vertices[v].pre].src
+ }
+
+ // Trim any vertices we visited before visiting v the first
+ // time. Since v is the first vertex we found within the cycle, any
+ // vertices we visited earlier cannot be part of the cycle.
+ for stack[0] != v {
+ stack = stack[1:]
+ }
+
+ // TODO(mdempsky): Pivot stack so we report the cycle from the top?
+
+ var err error_
+ err.code = InvalidInstanceCycle
+ obj0 := check.mono.vertices[v].obj
+ err.errorf(obj0, "instantiation cycle:")
+
+ qf := RelativeTo(check.pkg)
+ for _, v := range stack {
+ edge := check.mono.edges[check.mono.vertices[v].pre]
+ obj := check.mono.vertices[edge.dst].obj
+
+ switch obj.Type().(type) {
+ default:
+ panic("unexpected type")
+ case *Named:
+ err.errorf(edge.pos, "%s implicitly parameterized by %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented
+ case *TypeParam:
+ err.errorf(edge.pos, "%s instantiated as %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented
+ }
+ }
+ check.report(&err)
+}
+
+// recordCanon records that tpar is the canonical type parameter
+// corresponding to method type parameter mpar.
+func (w *monoGraph) recordCanon(mpar, tpar *TypeParam) {
+ if w.canon == nil {
+ w.canon = make(map[*TypeParam]*TypeParam)
+ }
+ w.canon[mpar] = tpar
+}
+
+// recordInstance records that the given type parameters were
+// instantiated with the corresponding type arguments.
+func (w *monoGraph) recordInstance(pkg *Package, pos syntax.Pos, tparams []*TypeParam, targs []Type, xlist []syntax.Expr) {
+ for i, tpar := range tparams {
+ pos := pos
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ w.assign(pkg, pos, tpar, targs[i])
+ }
+}
+
+// assign records that tpar was instantiated as targ at pos.
+func (w *monoGraph) assign(pkg *Package, pos syntax.Pos, tpar *TypeParam, targ Type) {
+ // Go generics do not have an analog to C++`s template-templates,
+ // where a template parameter can itself be an instantiable
+ // template. So any instantiation cycles must occur within a single
+ // package. Accordingly, we can ignore instantiations of imported
+ // type parameters.
+ //
+ // TODO(mdempsky): Push this check up into recordInstance? All type
+ // parameters in a list will appear in the same package.
+ if tpar.Obj().Pkg() != pkg {
+ return
+ }
+
+ // flow adds an edge from vertex src representing that typ flows to tpar.
+ flow := func(src int, typ Type) {
+ weight := 1
+ if typ == targ {
+ weight = 0
+ }
+
+ w.addEdge(w.typeParamVertex(tpar), src, weight, pos, targ)
+ }
+
+ // Recursively walk the type argument to find any defined types or
+ // type parameters.
+ var do func(typ Type)
+ do = func(typ Type) {
+ switch typ := Unalias(typ).(type) {
+ default:
+ panic("unexpected type")
+
+ case *TypeParam:
+ assert(typ.Obj().Pkg() == pkg)
+ flow(w.typeParamVertex(typ), typ)
+
+ case *Named:
+ if src := w.localNamedVertex(pkg, typ.Origin()); src >= 0 {
+ flow(src, typ)
+ }
+
+ targs := typ.TypeArgs()
+ for i := 0; i < targs.Len(); i++ {
+ do(targs.At(i))
+ }
+
+ case *Array:
+ do(typ.Elem())
+ case *Basic:
+ // ok
+ case *Chan:
+ do(typ.Elem())
+ case *Map:
+ do(typ.Key())
+ do(typ.Elem())
+ case *Pointer:
+ do(typ.Elem())
+ case *Slice:
+ do(typ.Elem())
+
+ case *Interface:
+ for i := 0; i < typ.NumMethods(); i++ {
+ do(typ.Method(i).Type())
+ }
+ case *Signature:
+ tuple := func(tup *Tuple) {
+ for i := 0; i < tup.Len(); i++ {
+ do(tup.At(i).Type())
+ }
+ }
+ tuple(typ.Params())
+ tuple(typ.Results())
+ case *Struct:
+ for i := 0; i < typ.NumFields(); i++ {
+ do(typ.Field(i).Type())
+ }
+ }
+ }
+ do(targ)
+}
+
+// localNamedVertex returns the index of the vertex representing
+// named, or -1 if named doesn't need representation.
+func (w *monoGraph) localNamedVertex(pkg *Package, named *Named) int {
+ obj := named.Obj()
+ if obj.Pkg() != pkg {
+ return -1 // imported type
+ }
+
+ root := pkg.Scope()
+ if obj.Parent() == root {
+ return -1 // package scope, no ambient type parameters
+ }
+
+ if idx, ok := w.nameIdx[obj]; ok {
+ return idx
+ }
+
+ idx := -1
+
+ // Walk the type definition's scope to find any ambient type
+ // parameters that it's implicitly parameterized by.
+ for scope := obj.Parent(); scope != root; scope = scope.Parent() {
+ for _, elem := range scope.elems {
+ if elem, ok := elem.(*TypeName); ok && !elem.IsAlias() && cmpPos(elem.Pos(), obj.Pos()) < 0 {
+ if tpar, ok := elem.Type().(*TypeParam); ok {
+ if idx < 0 {
+ idx = len(w.vertices)
+ w.vertices = append(w.vertices, monoVertex{obj: obj})
+ }
+
+ w.addEdge(idx, w.typeParamVertex(tpar), 1, obj.Pos(), tpar)
+ }
+ }
+ }
+ }
+
+ if w.nameIdx == nil {
+ w.nameIdx = make(map[*TypeName]int)
+ }
+ w.nameIdx[obj] = idx
+ return idx
+}
+
+// typeParamVertex returns the index of the vertex representing tpar.
+func (w *monoGraph) typeParamVertex(tpar *TypeParam) int {
+ if x, ok := w.canon[tpar]; ok {
+ tpar = x
+ }
+
+ obj := tpar.Obj()
+
+ if idx, ok := w.nameIdx[obj]; ok {
+ return idx
+ }
+
+ if w.nameIdx == nil {
+ w.nameIdx = make(map[*TypeName]int)
+ }
+
+ idx := len(w.vertices)
+ w.vertices = append(w.vertices, monoVertex{obj: obj})
+ w.nameIdx[obj] = idx
+ return idx
+}
+
+func (w *monoGraph) addEdge(dst, src, weight int, pos syntax.Pos, typ Type) {
+ // TODO(mdempsky): Deduplicate redundant edges?
+ w.edges = append(w.edges, monoEdge{
+ dst: dst,
+ src: src,
+ weight: weight,
+
+ pos: pos,
+ typ: typ,
+ })
+}
diff --git a/src/cmd/compile/internal/types2/mono_test.go b/src/cmd/compile/internal/types2/mono_test.go
new file mode 100644
index 0000000..c2955a2
--- /dev/null
+++ b/src/cmd/compile/internal/types2/mono_test.go
@@ -0,0 +1,82 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/types2"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func checkMono(t *testing.T, body string) error {
+ src := "package x; import `unsafe`; var _ unsafe.Pointer;\n" + body
+
+ var buf strings.Builder
+ conf := types2.Config{
+ Error: func(err error) { fmt.Fprintln(&buf, err) },
+ Importer: defaultImporter(),
+ }
+ typecheck(src, &conf, nil)
+ if buf.Len() == 0 {
+ return nil
+ }
+ return errors.New(strings.TrimRight(buf.String(), "\n"))
+}
+
+func TestMonoGood(t *testing.T) {
+ for i, good := range goods {
+ if err := checkMono(t, good); err != nil {
+ t.Errorf("%d: unexpected failure: %v", i, err)
+ }
+ }
+}
+
+func TestMonoBad(t *testing.T) {
+ for i, bad := range bads {
+ if err := checkMono(t, bad); err == nil {
+ t.Errorf("%d: unexpected success", i)
+ } else {
+ t.Log(err)
+ }
+ }
+}
+
+var goods = []string{
+ "func F[T any](x T) { F(x) }",
+ "func F[T, U, V any]() { F[U, V, T](); F[V, T, U]() }",
+ "type Ring[A, B, C any] struct { L *Ring[B, C, A]; R *Ring[C, A, B] }",
+ "func F[T any]() { type U[T any] [unsafe.Sizeof(F[*T])]byte }",
+ "func F[T any]() { type U[T any] [unsafe.Sizeof(F[*T])]byte; var _ U[int] }",
+ "type U[T any] [unsafe.Sizeof(F[*T])]byte; func F[T any]() { var _ U[U[int]] }",
+ "func F[T any]() { type A = int; F[A]() }",
+}
+
+// TODO(mdempsky): Validate specific error messages and positioning.
+
+var bads = []string{
+ "func F[T any](x T) { F(&x) }",
+ "func F[T any]() { F[*T]() }",
+ "func F[T any]() { F[[]T]() }",
+ "func F[T any]() { F[[1]T]() }",
+ "func F[T any]() { F[chan T]() }",
+ "func F[T any]() { F[map[*T]int]() }",
+ "func F[T any]() { F[map[error]T]() }",
+ "func F[T any]() { F[func(T)]() }",
+ "func F[T any]() { F[func() T]() }",
+ "func F[T any]() { F[struct{ t T }]() }",
+ "func F[T any]() { F[interface{ t() T }]() }",
+ "type U[_ any] int; func F[T any]() { F[U[T]]() }",
+ "func F[T any]() { type U int; F[U]() }",
+ "func F[T any]() { type U int; F[*U]() }",
+ "type U[T any] int; func (U[T]) m() { var _ U[*T] }",
+ "type U[T any] int; func (*U[T]) m() { var _ U[*T] }",
+ "type U[T1 any] [unsafe.Sizeof(F[*T1])]byte; func F[T2 any]() { var _ U[T2] }",
+ "func F[A, B, C, D, E any]() { F[B, C, D, E, *A]() }",
+ "type U[_ any] int; const X = unsafe.Sizeof(func() { type A[T any] U[A[*T]] })",
+ "func F[T any]() { type A = *T; F[A]() }",
+ "type A[T any] struct { _ A[*T] }",
+}
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
new file mode 100644
index 0000000..893247d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/named.go
@@ -0,0 +1,658 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "sync"
+ "sync/atomic"
+)
+
+// Type-checking Named types is subtle, because they may be recursively
+// defined, and because their full details may be spread across multiple
+// declarations (via methods). For this reason they are type-checked lazily,
+// to avoid information being accessed before it is complete.
+//
+// Conceptually, it is helpful to think of named types as having two distinct
+// sets of information:
+// - "LHS" information, defining their identity: Obj() and TypeArgs()
+// - "RHS" information, defining their details: TypeParams(), Underlying(),
+// and methods.
+//
+// In this taxonomy, LHS information is available immediately, but RHS
+// information is lazy. Specifically, a named type N may be constructed in any
+// of the following ways:
+// 1. type-checked from the source
+// 2. loaded eagerly from export data
+// 3. loaded lazily from export data (when using unified IR)
+// 4. instantiated from a generic type
+//
+// In cases 1, 3, and 4, it is possible that the underlying type or methods of
+// N may not be immediately available.
+// - During type-checking, we allocate N before type-checking its underlying
+// type or methods, so that we may resolve recursive references.
+// - When loading from export data, we may load its methods and underlying
+// type lazily using a provided load function.
+// - After instantiating, we lazily expand the underlying type and methods
+// (note that instances may be created while still in the process of
+// type-checking the original type declaration).
+//
+// In cases 3 and 4 this lazy construction may also occur concurrently, due to
+// concurrent use of the type checker API (after type checking or importing has
+// finished). It is critical that we keep track of state, so that Named types
+// are constructed exactly once and so that we do not access their details too
+// soon.
+//
+// We achieve this by tracking state with an atomic state variable, and
+// guarding potentially concurrent calculations with a mutex. At any point in
+// time this state variable determines which data on N may be accessed. As
+// state monotonically progresses, any data available at state M may be
+// accessed without acquiring the mutex at state N, provided N >= M.
+//
+// GLOSSARY: Here are a few terms used in this file to describe Named types:
+// - We say that a Named type is "instantiated" if it has been constructed by
+// instantiating a generic named type with type arguments.
+// - We say that a Named type is "declared" if it corresponds to a type
+// declaration in the source. Instantiated named types correspond to a type
+// instantiation in the source, not a declaration. But their Origin type is
+// a declared type.
+// - We say that a Named type is "resolved" if its RHS information has been
+// loaded or fully type-checked. For Named types constructed from export
+// data, this may involve invoking a loader function to extract information
+// from export data. For instantiated named types this involves reading
+// information from their origin.
+// - We say that a Named type is "expanded" if it is an instantiated type and
+// type parameters in its underlying type and methods have been substituted
+// with the type arguments from the instantiation. A type may be partially
+// expanded if some but not all of these details have been substituted.
+// Similarly, we refer to these individual details (underlying type or
+// method) as being "expanded".
+// - When all information is known for a named type, we say it is "complete".
+//
+// Some invariants to keep in mind: each declared Named type has a single
+// corresponding object, and that object's type is the (possibly generic) Named
+// type. Declared Named types are identical if and only if their pointers are
+// identical. On the other hand, multiple instantiated Named types may be
+// identical even though their pointers are not identical. One has to use
+// Identical to compare them. For instantiated named types, their obj is a
+// synthetic placeholder that records their position of the corresponding
+// instantiation in the source (if they were constructed during type checking).
+//
+// To prevent infinite expansion of named instances that are created outside of
+// type-checking, instances share a Context with other instances created during
+// their expansion. Via the pidgeonhole principle, this guarantees that in the
+// presence of a cycle of named types, expansion will eventually find an
+// existing instance in the Context and short-circuit the expansion.
+//
+// Once an instance is complete, we can nil out this shared Context to unpin
+// memory, though this Context may still be held by other incomplete instances
+// in its "lineage".
+
+// A Named represents a named (defined) type.
+type Named struct {
+ check *Checker // non-nil during type-checking; nil otherwise
+ obj *TypeName // corresponding declared object for declared types; see above for instantiated types
+
+ // fromRHS holds the type (on RHS of declaration) this *Named type is derived
+ // from (for cycle reporting). Only used by validType, and therefore does not
+ // require synchronization.
+ fromRHS Type
+
+ // information for instantiated types; nil otherwise
+ inst *instance
+
+ mu sync.Mutex // guards all fields below
+ state_ uint32 // the current state of this type; must only be accessed atomically
+ underlying Type // possibly a *Named during setup; never a *Named once set up completely
+ tparams *TypeParamList // type parameters, or nil
+
+ // methods declared for this type (not the method set of this type)
+ // Signatures are type-checked lazily.
+ // For non-instantiated types, this is a fully populated list of methods. For
+ // instantiated types, methods are individually expanded when they are first
+ // accessed.
+ methods []*Func
+
+ // loader may be provided to lazily load type parameters, underlying type, and methods.
+ loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func)
+}
+
+// instance holds information that is only necessary for instantiated named
+// types.
+type instance struct {
+ orig *Named // original, uninstantiated type
+ targs *TypeList // type arguments
+ expandedMethods int // number of expanded methods; expandedMethods <= len(orig.methods)
+ ctxt *Context // local Context; set to nil after full expansion
+}
+
+// namedState represents the possible states that a named type may assume.
+type namedState uint32
+
+const (
+ unresolved namedState = iota // tparams, underlying type and methods might be unavailable
+ resolved // resolve has run; methods might be incomplete (for instances)
+ complete // all data is known
+)
+
+// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
+// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
+// The underlying type must not be a *Named.
+func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ if asNamed(underlying) != nil {
+ panic("underlying type must not be *Named")
+ }
+ return (*Checker)(nil).newNamed(obj, underlying, methods)
+}
+
+// resolve resolves the type parameters, methods, and underlying type of n.
+// This information may be loaded from a provided loader function, or computed
+// from an origin type (in the case of instances).
+//
+// After resolution, the type parameters, methods, and underlying type of n are
+// accessible; but if n is an instantiated type, its methods may still be
+// unexpanded.
+func (n *Named) resolve() *Named {
+ if n.state() >= resolved { // avoid locking below
+ return n
+ }
+
+ // TODO(rfindley): if n.check is non-nil we can avoid locking here, since
+ // type-checking is not concurrent. Evaluate if this is worth doing.
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ if n.state() >= resolved {
+ return n
+ }
+
+ if n.inst != nil {
+ assert(n.underlying == nil) // n is an unresolved instance
+ assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil
+
+ orig := n.inst.orig
+ orig.resolve()
+ underlying := n.expandUnderlying()
+
+ n.tparams = orig.tparams
+ n.underlying = underlying
+ n.fromRHS = orig.fromRHS // for cycle detection
+
+ if len(orig.methods) == 0 {
+ n.setState(complete) // nothing further to do
+ n.inst.ctxt = nil
+ } else {
+ n.setState(resolved)
+ }
+ return n
+ }
+
+ // TODO(mdempsky): Since we're passing n to the loader anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTypeParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions.
+ if n.loader != nil {
+ assert(n.underlying == nil)
+ assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil
+
+ tparams, underlying, methods := n.loader(n)
+
+ n.tparams = bindTParams(tparams)
+ n.underlying = underlying
+ n.fromRHS = underlying // for cycle detection
+ n.methods = methods
+ n.loader = nil
+ }
+
+ n.setState(complete)
+ return n
+}
+
+// state atomically accesses the current state of the receiver.
+func (n *Named) state() namedState {
+ return namedState(atomic.LoadUint32(&n.state_))
+}
+
+// setState atomically stores the given state for n.
+// Must only be called while holding n.mu.
+func (n *Named) setState(state namedState) {
+ atomic.StoreUint32(&n.state_, uint32(state))
+}
+
+// newNamed is like NewNamed but with a *Checker receiver.
+func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods}
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ // Ensure that typ is always sanity-checked.
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+// newNamedInstance creates a new named instance for the given origin and type
+// arguments, recording pos as the position of its synthetic object (for error
+// reporting).
+//
+// If set, expanding is the named type instance currently being expanded, that
+// led to the creation of this instance.
+func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type, expanding *Named) *Named {
+ assert(len(targs) > 0)
+
+ obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
+ inst := &instance{orig: orig, targs: newTypeList(targs)}
+
+ // Only pass the expanding context to the new instance if their packages
+ // match. Since type reference cycles are only possible within a single
+ // package, this is sufficient for the purposes of short-circuiting cycles.
+ // Avoiding passing the context in other cases prevents unnecessary coupling
+ // of types across packages.
+ if expanding != nil && expanding.Obj().pkg == obj.pkg {
+ inst.ctxt = expanding.inst.ctxt
+ }
+ typ := &Named{check: check, obj: obj, inst: inst}
+ obj.typ = typ
+ // Ensure that typ is always sanity-checked.
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+func (t *Named) cleanup() {
+ assert(t.inst == nil || t.inst.orig.inst == nil)
+ // Ensure that every defined type created in the course of type-checking has
+ // either non-*Named underlying type, or is unexpanded.
+ //
+ // This guarantees that we don't leak any types whose underlying type is
+ // *Named, because any unexpanded instances will lazily compute their
+ // underlying type by substituting in the underlying type of their origin.
+ // The origin must have either been imported or type-checked and expanded
+ // here, and in either case its underlying type will be fully expanded.
+ switch t.underlying.(type) {
+ case nil:
+ if t.TypeArgs().Len() == 0 {
+ panic("nil underlying")
+ }
+ case *Named:
+ t.under() // t.under may add entries to check.cleaners
+ }
+ t.check = nil
+}
+
+// Obj returns the type name for the declaration defining the named type t. For
+// instantiated types, this is same as the type name of the origin type.
+func (t *Named) Obj() *TypeName {
+ if t.inst == nil {
+ return t.obj
+ }
+ return t.inst.orig.obj
+}
+
+// Origin returns the generic type from which the named type t is
+// instantiated. If t is not an instantiated type, the result is t.
+func (t *Named) Origin() *Named {
+ if t.inst == nil {
+ return t
+ }
+ return t.inst.orig
+}
+
+// TypeParams returns the type parameters of the named type t, or nil.
+// The result is non-nil for an (originally) generic type even if it is instantiated.
+func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams }
+
+// SetTypeParams sets the type parameters of the named type t.
+// t must not have type arguments.
+func (t *Named) SetTypeParams(tparams []*TypeParam) {
+ assert(t.inst == nil)
+ t.resolve().tparams = bindTParams(tparams)
+}
+
+// TypeArgs returns the type arguments used to instantiate the named type t.
+func (t *Named) TypeArgs() *TypeList {
+ if t.inst == nil {
+ return nil
+ }
+ return t.inst.targs
+}
+
+// NumMethods returns the number of explicit methods defined for t.
+func (t *Named) NumMethods() int {
+ return len(t.Origin().resolve().methods)
+}
+
+// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+//
+// For an ordinary or instantiated type t, the receiver base type of this
+// method is the named type t. For an uninstantiated generic type t, each
+// method receiver is instantiated with its receiver type parameters.
+func (t *Named) Method(i int) *Func {
+ t.resolve()
+
+ if t.state() >= complete {
+ return t.methods[i]
+ }
+
+ assert(t.inst != nil) // only instances should have incomplete methods
+ orig := t.inst.orig
+
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if len(t.methods) != len(orig.methods) {
+ assert(len(t.methods) == 0)
+ t.methods = make([]*Func, len(orig.methods))
+ }
+
+ if t.methods[i] == nil {
+ assert(t.inst.ctxt != nil) // we should still have a context remaining from the resolution phase
+ t.methods[i] = t.expandMethod(i)
+ t.inst.expandedMethods++
+
+ // Check if we've created all methods at this point. If we have, mark the
+ // type as fully expanded.
+ if t.inst.expandedMethods == len(orig.methods) {
+ t.setState(complete)
+ t.inst.ctxt = nil // no need for a context anymore
+ }
+ }
+
+ return t.methods[i]
+}
+
+// expandMethod substitutes type arguments in the i'th method for an
+// instantiated receiver.
+func (t *Named) expandMethod(i int) *Func {
+ // t.orig.methods is not lazy. origm is the method instantiated with its
+ // receiver type parameters (the "origin" method).
+ origm := t.inst.orig.Method(i)
+ assert(origm != nil)
+
+ check := t.check
+ // Ensure that the original method is type-checked.
+ if check != nil {
+ check.objDecl(origm, nil)
+ }
+
+ origSig := origm.typ.(*Signature)
+ rbase, _ := deref(origSig.Recv().Type())
+
+ // If rbase is t, then origm is already the instantiated method we're looking
+ // for. In this case, we return origm to preserve the invariant that
+ // traversing Method->Receiver Type->Method should get back to the same
+ // method.
+ //
+ // This occurs if t is instantiated with the receiver type parameters, as in
+ // the use of m in func (r T[_]) m() { r.m() }.
+ if rbase == t {
+ return origm
+ }
+
+ sig := origSig
+ // We can only substitute if we have a correspondence between type arguments
+ // and type parameters. This check is necessary in the presence of invalid
+ // code.
+ if origSig.RecvTypeParams().Len() == t.inst.targs.Len() {
+ smap := makeSubstMap(origSig.RecvTypeParams().list(), t.inst.targs.list())
+ var ctxt *Context
+ if check != nil {
+ ctxt = check.context()
+ }
+ sig = check.subst(origm.pos, origSig, smap, t, ctxt).(*Signature)
+ }
+
+ if sig == origSig {
+ // No substitution occurred, but we still need to create a new signature to
+ // hold the instantiated receiver.
+ copy := *origSig
+ sig = &copy
+ }
+
+ var rtyp Type
+ if origm.hasPtrRecv() {
+ rtyp = NewPointer(t)
+ } else {
+ rtyp = t
+ }
+
+ sig.recv = substVar(origSig.recv, rtyp)
+ return substFunc(origm, sig)
+}
+
+// SetUnderlying sets the underlying type and marks t as complete.
+// t must not have type arguments.
+func (t *Named) SetUnderlying(underlying Type) {
+ assert(t.inst == nil)
+ if underlying == nil {
+ panic("underlying type must not be nil")
+ }
+ if asNamed(underlying) != nil {
+ panic("underlying type must not be *Named")
+ }
+ t.resolve().underlying = underlying
+ if t.fromRHS == nil {
+ t.fromRHS = underlying // for cycle detection
+ }
+}
+
+// AddMethod adds method m unless it is already in the method list.
+// t must not have type arguments.
+func (t *Named) AddMethod(m *Func) {
+ assert(t.inst == nil)
+ t.resolve()
+ if i, _ := lookupMethod(t.methods, m.pkg, m.name, false); i < 0 {
+ t.methods = append(t.methods, m)
+ }
+}
+
+// TODO(gri) Investigate if Unalias can be moved to where underlying is set.
+func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) }
+func (t *Named) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+//
+// TODO(rfindley): reorganize the loading and expansion methods under this
+// heading.
+
+// under returns the expanded underlying type of n0; possibly by following
+// forward chains of named types. If an underlying type is found, resolve
+// the chain by setting the underlying type for each defined type in the
+// chain before returning it. If no underlying type is found or a cycle
+// is detected, the result is Typ[Invalid]. If a cycle is detected and
+// n0.check != nil, the cycle is reported.
+//
+// This is necessary because the underlying type of named may be itself a
+// named type that is incomplete:
+//
+// type (
+// A B
+// B *C
+// C A
+// )
+//
+// The type of C is the (named) type of A which is incomplete,
+// and which has as its underlying type the named type B.
+func (n0 *Named) under() Type {
+ u := n0.Underlying()
+
+ // If the underlying type of a defined type is not a defined
+ // (incl. instance) type, then that is the desired underlying
+ // type.
+ var n1 *Named
+ switch u1 := u.(type) {
+ case nil:
+ // After expansion via Underlying(), we should never encounter a nil
+ // underlying.
+ panic("nil underlying")
+ default:
+ // common case
+ return u
+ case *Named:
+ // handled below
+ n1 = u1
+ }
+
+ if n0.check == nil {
+ panic("Named.check == nil but type is incomplete")
+ }
+
+ // Invariant: after this point n0 as well as any named types in its
+ // underlying chain should be set up when this function exits.
+ check := n0.check
+ n := n0
+
+ seen := make(map[*Named]int) // types that need their underlying type resolved
+ var path []Object // objects encountered, for cycle reporting
+
+loop:
+ for {
+ seen[n] = len(seen)
+ path = append(path, n.obj)
+ n = n1
+ if i, ok := seen[n]; ok {
+ // cycle
+ check.cycleError(path[i:])
+ u = Typ[Invalid]
+ break
+ }
+ u = n.Underlying()
+ switch u1 := u.(type) {
+ case nil:
+ u = Typ[Invalid]
+ break loop
+ default:
+ break loop
+ case *Named:
+ // Continue collecting *Named types in the chain.
+ n1 = u1
+ }
+ }
+
+ for n := range seen {
+ // We should never have to update the underlying type of an imported type;
+ // those underlying types should have been resolved during the import.
+ // Also, doing so would lead to a race condition (was go.dev/issue/31749).
+ // Do this check always, not just in debug mode (it's cheap).
+ if n.obj.pkg != check.pkg {
+ panic("imported type with unresolved underlying type")
+ }
+ n.underlying = u
+ }
+
+ return u
+}
+
+func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
+ n.resolve()
+ // If n is an instance, we may not have yet instantiated all of its methods.
+ // Look up the method index in orig, and only instantiate method at the
+ // matching index (if any).
+ i, _ := lookupMethod(n.Origin().methods, pkg, name, foldCase)
+ if i < 0 {
+ return -1, nil
+ }
+ // For instances, m.Method(i) will be different from the orig method.
+ return i, n.Method(i)
+}
+
+// context returns the type-checker context.
+func (check *Checker) context() *Context {
+ if check.ctxt == nil {
+ check.ctxt = NewContext()
+ }
+ return check.ctxt
+}
+
+// expandUnderlying substitutes type arguments in the underlying type n.orig,
+// returning the result. Returns Typ[Invalid] if there was an error.
+func (n *Named) expandUnderlying() Type {
+ check := n.check
+ if check != nil && check.conf.Trace {
+ check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying)
+ }()
+ }
+
+ assert(n.inst.orig.underlying != nil)
+ if n.inst.ctxt == nil {
+ n.inst.ctxt = NewContext()
+ }
+
+ orig := n.inst.orig
+ targs := n.inst.targs
+
+ if asNamed(orig.underlying) != nil {
+ // We should only get a Named underlying type here during type checking
+ // (for example, in recursive type declarations).
+ assert(check != nil)
+ }
+
+ if orig.tparams.Len() != targs.Len() {
+ // Mismatching arg and tparam length may be checked elsewhere.
+ return Typ[Invalid]
+ }
+
+ // Ensure that an instance is recorded before substituting, so that we
+ // resolve n for any recursive references.
+ h := n.inst.ctxt.instanceHash(orig, targs.list())
+ n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n)
+ assert(n == n2)
+
+ smap := makeSubstMap(orig.tparams.list(), targs.list())
+ var ctxt *Context
+ if check != nil {
+ ctxt = check.context()
+ }
+ underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt)
+ // If the underlying type of n is an interface, we need to set the receiver of
+ // its methods accurately -- we set the receiver of interface methods on
+ // the RHS of a type declaration to the defined type.
+ if iface, _ := underlying.(*Interface); iface != nil {
+ if methods, copied := replaceRecvType(iface.methods, orig, n); copied {
+ // If the underlying type doesn't actually use type parameters, it's
+ // possible that it wasn't substituted. In this case we need to create
+ // a new *Interface before modifying receivers.
+ if iface == orig.underlying {
+ old := iface
+ iface = check.newInterface()
+ iface.embeddeds = old.embeddeds
+ assert(old.complete) // otherwise we are copying incomplete data
+ iface.complete = old.complete
+ iface.implicit = old.implicit // should be false but be conservative
+ underlying = iface
+ }
+ iface.methods = methods
+ iface.tset = nil // recompute type set with new methods
+
+ // If check != nil, check.newInterface will have saved the interface for later completion.
+ if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated
+ iface.typeSet()
+ }
+ }
+ }
+
+ return underlying
+}
+
+// safeUnderlying returns the underlying type of typ without expanding
+// instances, to avoid infinite recursion.
+//
+// TODO(rfindley): eliminate this function or give it a better name.
+func safeUnderlying(typ Type) Type {
+ if t := asNamed(typ); t != nil {
+ return t.underlying
+ }
+ return typ.Underlying()
+}
diff --git a/src/cmd/compile/internal/types2/named_test.go b/src/cmd/compile/internal/types2/named_test.go
new file mode 100644
index 0000000..705dcae
--- /dev/null
+++ b/src/cmd/compile/internal/types2/named_test.go
@@ -0,0 +1,114 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "testing"
+
+ "cmd/compile/internal/syntax"
+ . "cmd/compile/internal/types2"
+)
+
+func BenchmarkNamed(b *testing.B) {
+ const src = `
+package p
+
+type T struct {
+ P int
+}
+
+func (T) M(int) {}
+func (T) N() (i int) { return }
+
+type G[P any] struct {
+ F P
+}
+
+func (G[P]) M(P) {}
+func (G[P]) N() (p P) { return }
+
+type Inst = G[int]
+ `
+ pkg := mustTypecheck(src, nil, nil)
+
+ var (
+ T = pkg.Scope().Lookup("T").Type()
+ G = pkg.Scope().Lookup("G").Type()
+ SrcInst = pkg.Scope().Lookup("Inst").Type()
+ UserInst = mustInstantiate(b, G, Typ[Int])
+ )
+
+ tests := []struct {
+ name string
+ typ Type
+ }{
+ {"nongeneric", T},
+ {"generic", G},
+ {"src instance", SrcInst},
+ {"user instance", UserInst},
+ }
+
+ b.Run("Underlying", func(b *testing.B) {
+ for _, test := range tests {
+ b.Run(test.name, func(b *testing.B) {
+ // Access underlying once, to trigger any lazy calculation.
+ _ = test.typ.Underlying()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = test.typ.Underlying()
+ }
+ })
+ }
+ })
+}
+
+func mustInstantiate(tb testing.TB, orig Type, targs ...Type) Type {
+ inst, err := Instantiate(nil, orig, targs, true)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ return inst
+}
+
+// Test that types do not expand infinitely, as in go.dev/issue/52715.
+func TestFiniteTypeExpansion(t *testing.T) {
+ const src = `
+package p
+
+type Tree[T any] struct {
+ *Node[T]
+}
+
+func (*Tree[R]) N(r R) R { return r }
+
+type Node[T any] struct {
+ *Tree[T]
+}
+
+func (Node[Q]) M(Q) {}
+
+type Inst = *Tree[int]
+`
+
+ f := mustParse(src)
+ pkg := NewPackage("p", f.PkgName.Value)
+ if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil {
+ t.Fatal(err)
+ }
+
+ firstFieldType := func(n *Named) *Named {
+ return n.Underlying().(*Struct).Field(0).Type().(*Pointer).Elem().(*Named)
+ }
+
+ Inst := pkg.Scope().Lookup("Inst").Type().(*Pointer).Elem().(*Named)
+ Node := firstFieldType(Inst)
+ Tree := firstFieldType(Node)
+ if !Identical(Inst, Tree) {
+ t.Fatalf("Not a cycle: got %v, want %v", Tree, Inst)
+ }
+ if Inst != Tree {
+ t.Errorf("Duplicate instances in cycle: %s (%p) -> %s (%p) -> %s (%p)", Inst, Inst, Node, Node, Tree, Tree)
+ }
+}
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
new file mode 100644
index 0000000..2515872
--- /dev/null
+++ b/src/cmd/compile/internal/types2/object.go
@@ -0,0 +1,619 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "unicode"
+ "unicode/utf8"
+)
+
+// An Object describes a named language entity such as a package,
+// constant, type, variable, function (incl. methods), or label.
+// All objects implement the Object interface.
+type Object interface {
+ Parent() *Scope // scope in which this object is declared; nil for methods and struct fields
+ Pos() syntax.Pos // position of object identifier in declaration
+ Pkg() *Package // package to which this object belongs; nil for labels and objects in the Universe scope
+ Name() string // package local object name
+ Type() Type // object type
+ Exported() bool // reports whether the name starts with a capital letter
+ Id() string // object name if exported, qualified name if not exported (see func Id)
+
+ // String returns a human-readable string of the object.
+ String() string
+
+ // order reflects a package-level object's source order: if object
+ // a is before object b in the source, then a.order() < b.order().
+ // order returns a value > 0 for package-level objects; it returns
+ // 0 for all other objects (including objects in file scopes).
+ order() uint32
+
+ // color returns the object's color.
+ color() color
+
+ // setType sets the type of the object.
+ setType(Type)
+
+ // setOrder sets the order number of the object. It must be > 0.
+ setOrder(uint32)
+
+ // setColor sets the object's color. It must not be white.
+ setColor(color color)
+
+ // setParent sets the parent scope of the object.
+ setParent(*Scope)
+
+ // sameId reports whether obj.Id() and Id(pkg, name) are the same.
+ sameId(pkg *Package, name string) bool
+
+ // scopePos returns the start position of the scope of this Object
+ scopePos() syntax.Pos
+
+ // setScopePos sets the start position of the scope for this Object.
+ setScopePos(pos syntax.Pos)
+}
+
+func isExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+// Id returns name if it is exported, otherwise it
+// returns the name qualified with the package path.
+func Id(pkg *Package, name string) string {
+ if isExported(name) {
+ return name
+ }
+ // unexported names need the package path for differentiation
+ // (if there's no package, make sure we don't start with '.'
+ // as that may change the order of methods between a setup
+ // inside a package and outside a package - which breaks some
+ // tests)
+ path := "_"
+ // pkg is nil for objects in Universe scope and possibly types
+ // introduced via Eval (see also comment in object.sameId)
+ if pkg != nil && pkg.path != "" {
+ path = pkg.path
+ }
+ return path + "." + name
+}
+
+// An object implements the common parts of an Object.
+type object struct {
+ parent *Scope
+ pos syntax.Pos
+ pkg *Package
+ name string
+ typ Type
+ order_ uint32
+ color_ color
+ scopePos_ syntax.Pos
+}
+
+// color encodes the color of an object (see Checker.objDecl for details).
+type color uint32
+
+// An object may be painted in one of three colors.
+// Color values other than white or black are considered grey.
+const (
+ white color = iota
+ black
+ grey // must be > white and black
+)
+
+func (c color) String() string {
+ switch c {
+ case white:
+ return "white"
+ case black:
+ return "black"
+ default:
+ return "grey"
+ }
+}
+
+// colorFor returns the (initial) color for an object depending on
+// whether its type t is known or not.
+func colorFor(t Type) color {
+ if t != nil {
+ return black
+ }
+ return white
+}
+
+// Parent returns the scope in which the object is declared.
+// The result is nil for methods and struct fields.
+func (obj *object) Parent() *Scope { return obj.parent }
+
+// Pos returns the declaration position of the object's identifier.
+func (obj *object) Pos() syntax.Pos { return obj.pos }
+
+// Pkg returns the package to which the object belongs.
+// The result is nil for labels and objects in the Universe scope.
+func (obj *object) Pkg() *Package { return obj.pkg }
+
+// Name returns the object's (package-local, unqualified) name.
+func (obj *object) Name() string { return obj.name }
+
+// Type returns the object's type.
+func (obj *object) Type() Type { return obj.typ }
+
+// Exported reports whether the object is exported (starts with a capital letter).
+// It doesn't take into account whether the object is in a local (function) scope
+// or not.
+func (obj *object) Exported() bool { return isExported(obj.name) }
+
+// Id is a wrapper for Id(obj.Pkg(), obj.Name()).
+func (obj *object) Id() string { return Id(obj.pkg, obj.name) }
+
+func (obj *object) String() string { panic("abstract") }
+func (obj *object) order() uint32 { return obj.order_ }
+func (obj *object) color() color { return obj.color_ }
+func (obj *object) scopePos() syntax.Pos { return obj.scopePos_ }
+
+func (obj *object) setParent(parent *Scope) { obj.parent = parent }
+func (obj *object) setType(typ Type) { obj.typ = typ }
+func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = order }
+func (obj *object) setColor(color color) { assert(color != white); obj.color_ = color }
+func (obj *object) setScopePos(pos syntax.Pos) { obj.scopePos_ = pos }
+
+func (obj *object) sameId(pkg *Package, name string) bool {
+ // spec:
+ // "Two identifiers are different if they are spelled differently,
+ // or if they appear in different packages and are not exported.
+ // Otherwise, they are the same."
+ if name != obj.name {
+ return false
+ }
+ // obj.Name == name
+ if obj.Exported() {
+ return true
+ }
+ // not exported, so packages must be the same (pkg == nil for
+ // fields in Universe scope; this can only happen for types
+ // introduced via Eval)
+ if pkg == nil || obj.pkg == nil {
+ return pkg == obj.pkg
+ }
+ // pkg != nil && obj.pkg != nil
+ return pkg.path == obj.pkg.path
+}
+
+// less reports whether object a is ordered before object b.
+//
+// Objects are ordered nil before non-nil, exported before
+// non-exported, then by name, and finally (for non-exported
+// functions) by package path.
+func (a *object) less(b *object) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported functions before non-exported.
+ ea := isExported(a.name)
+ eb := isExported(b.name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package.
+ if a.name != b.name {
+ return a.name < b.name
+ }
+ if !ea {
+ return a.pkg.path < b.pkg.path
+ }
+
+ return false
+}
+
+// A PkgName represents an imported Go package.
+// PkgNames don't have a type.
+type PkgName struct {
+ object
+ imported *Package
+ used bool // set if the package was used
+}
+
+// NewPkgName returns a new PkgName object representing an imported package.
+// The remaining arguments set the attributes found with all Objects.
+func NewPkgName(pos syntax.Pos, pkg *Package, name string, imported *Package) *PkgName {
+ return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, black, nopos}, imported, false}
+}
+
+// Imported returns the package that was imported.
+// It is distinct from Pkg(), which is the package containing the import statement.
+func (obj *PkgName) Imported() *Package { return obj.imported }
+
+// A Const represents a declared constant.
+type Const struct {
+ object
+ val constant.Value
+}
+
+// NewConst returns a new constant with value val.
+// The remaining arguments set the attributes found with all Objects.
+func NewConst(pos syntax.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const {
+ return &Const{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, val}
+}
+
+// Val returns the constant's value.
+func (obj *Const) Val() constant.Value { return obj.val }
+
+func (*Const) isDependency() {} // a constant may be a dependency of an initialization expression
+
+// A TypeName represents a name for a (defined or alias) type.
+type TypeName struct {
+ object
+}
+
+// NewTypeName returns a new type name denoting the given typ.
+// The remaining arguments set the attributes found with all Objects.
+//
+// The typ argument may be a defined (Named) type or an alias type.
+// It may also be nil such that the returned TypeName can be used as
+// argument for NewNamed, which will set the TypeName's type as a side-
+// effect.
+func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName {
+ return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
+}
+
+// NewTypeNameLazy returns a new defined type like NewTypeName, but it
+// lazily calls resolve to finish constructing the Named object.
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
+ obj := NewTypeName(pos, pkg, name, nil)
+ NewNamed(obj, nil, nil).loader = load
+ return obj
+}
+
+// IsAlias reports whether obj is an alias name for a type.
+func (obj *TypeName) IsAlias() bool {
+ switch t := obj.typ.(type) {
+ case nil:
+ return false
+ // case *Alias:
+ // handled by default case
+ case *Basic:
+ // unsafe.Pointer is not an alias.
+ if obj.pkg == Unsafe {
+ return false
+ }
+ // Any user-defined type name for a basic type is an alias for a
+ // basic type (because basic types are pre-declared in the Universe
+ // scope, outside any package scope), and so is any type name with
+ // a different name than the name of the basic type it refers to.
+ // Additionally, we need to look for "byte" and "rune" because they
+ // are aliases but have the same names (for better error messages).
+ return obj.pkg != nil || t.name != obj.name || t == universeByte || t == universeRune
+ case *Named:
+ return obj != t.obj
+ case *TypeParam:
+ return obj != t.obj
+ default:
+ return true
+ }
+}
+
+// A Variable represents a declared variable (including function parameters and results, and struct fields).
+type Var struct {
+ object
+ embedded bool // if set, the variable is an embedded struct field, and name is the type name
+ isField bool // var is struct field
+ used bool // set if the variable was used
+ origin *Var // if non-nil, the Var from which this one was instantiated
+}
+
+// NewVar returns a new variable.
+// The arguments set the attributes found with all Objects.
+func NewVar(pos syntax.Pos, pkg *Package, name string, typ Type) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
+}
+
+// NewParam returns a new variable representing a function parameter.
+func NewParam(pos syntax.Pos, pkg *Package, name string, typ Type) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, used: true} // parameters are always 'used'
+}
+
+// NewField returns a new variable representing a struct field.
+// For embedded fields, the name is the unqualified type name
+// under which the field is accessible.
+func NewField(pos syntax.Pos, pkg *Package, name string, typ Type, embedded bool) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, embedded: embedded, isField: true}
+}
+
+// Anonymous reports whether the variable is an embedded field.
+// Same as Embedded; only present for backward-compatibility.
+func (obj *Var) Anonymous() bool { return obj.embedded }
+
+// Embedded reports whether the variable is an embedded field.
+func (obj *Var) Embedded() bool { return obj.embedded }
+
+// IsField reports whether the variable is a struct field.
+func (obj *Var) IsField() bool { return obj.isField }
+
+// Origin returns the canonical Var for its receiver, i.e. the Var object
+// recorded in Info.Defs.
+//
+// For synthetic Vars created during instantiation (such as struct fields or
+// function parameters that depend on type arguments), this will be the
+// corresponding Var on the generic (uninstantiated) type. For all other Vars
+// Origin returns the receiver.
+func (obj *Var) Origin() *Var {
+ if obj.origin != nil {
+ return obj.origin
+ }
+ return obj
+}
+
+func (*Var) isDependency() {} // a variable may be a dependency of an initialization expression
+
+// A Func represents a declared function, concrete method, or abstract
+// (interface) method. Its Type() is always a *Signature.
+// An abstract method may belong to many interfaces due to embedding.
+type Func struct {
+ object
+ hasPtrRecv_ bool // only valid for methods that don't have a type yet; use hasPtrRecv() to read
+ origin *Func // if non-nil, the Func from which this one was instantiated
+}
+
+// NewFunc returns a new function with the given signature, representing
+// the function's type.
+func NewFunc(pos syntax.Pos, pkg *Package, name string, sig *Signature) *Func {
+ // don't store a (typed) nil signature
+ var typ Type
+ if sig != nil {
+ typ = sig
+ }
+ return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, false, nil}
+}
+
+// FullName returns the package- or receiver-type-qualified name of
+// function or method obj.
+func (obj *Func) FullName() string {
+ var buf bytes.Buffer
+ writeFuncName(&buf, obj, nil)
+ return buf.String()
+}
+
+// Scope returns the scope of the function's body block.
+// The result is nil for imported or instantiated functions and methods
+// (but there is also no mechanism to get to an instantiated function).
+func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope }
+
+// Origin returns the canonical Func for its receiver, i.e. the Func object
+// recorded in Info.Defs.
+//
+// For synthetic functions created during instantiation (such as methods on an
+// instantiated Named type or interface methods that depend on type arguments),
+// this will be the corresponding Func on the generic (uninstantiated) type.
+// For all other Funcs Origin returns the receiver.
+func (obj *Func) Origin() *Func {
+ if obj.origin != nil {
+ return obj.origin
+ }
+ return obj
+}
+
+// Pkg returns the package to which the function belongs.
+//
+// The result is nil for methods of types in the Universe scope,
+// like method Error of the error built-in interface type.
+func (obj *Func) Pkg() *Package { return obj.object.Pkg() }
+
+// hasPtrRecv reports whether the receiver is of the form *T for the given method obj.
+func (obj *Func) hasPtrRecv() bool {
+ // If a method's receiver type is set, use that as the source of truth for the receiver.
+ // Caution: Checker.funcDecl (decl.go) marks a function by setting its type to an empty
+ // signature. We may reach here before the signature is fully set up: we must explicitly
+ // check if the receiver is set (we cannot just look for non-nil obj.typ).
+ if sig, _ := obj.typ.(*Signature); sig != nil && sig.recv != nil {
+ _, isPtr := deref(sig.recv.typ)
+ return isPtr
+ }
+
+ // If a method's type is not set it may be a method/function that is:
+ // 1) client-supplied (via NewFunc with no signature), or
+ // 2) internally created but not yet type-checked.
+ // For case 1) we can't do anything; the client must know what they are doing.
+ // For case 2) we can use the information gathered by the resolver.
+ return obj.hasPtrRecv_
+}
+
+func (*Func) isDependency() {} // a function may be a dependency of an initialization expression
+
+// A Label represents a declared label.
+// Labels don't have a type.
+type Label struct {
+ object
+ used bool // set if the label was used
+}
+
+// NewLabel returns a new label.
+func NewLabel(pos syntax.Pos, pkg *Package, name string) *Label {
+ return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid], color_: black}, false}
+}
+
+// A Builtin represents a built-in function.
+// Builtins don't have a valid type.
+type Builtin struct {
+ object
+ id builtinId
+}
+
+func newBuiltin(id builtinId) *Builtin {
+ return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid], color_: black}, id}
+}
+
+// Nil represents the predeclared value nil.
+type Nil struct {
+ object
+}
+
+func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
+ var tname *TypeName
+ typ := obj.Type()
+
+ switch obj := obj.(type) {
+ case *PkgName:
+ fmt.Fprintf(buf, "package %s", obj.Name())
+ if path := obj.imported.path; path != "" && path != obj.name {
+ fmt.Fprintf(buf, " (%q)", path)
+ }
+ return
+
+ case *Const:
+ buf.WriteString("const")
+
+ case *TypeName:
+ tname = obj
+ buf.WriteString("type")
+ if isTypeParam(typ) {
+ buf.WriteString(" parameter")
+ }
+
+ case *Var:
+ if obj.isField {
+ buf.WriteString("field")
+ } else {
+ buf.WriteString("var")
+ }
+
+ case *Func:
+ buf.WriteString("func ")
+ writeFuncName(buf, obj, qf)
+ if typ != nil {
+ WriteSignature(buf, typ.(*Signature), qf)
+ }
+ return
+
+ case *Label:
+ buf.WriteString("label")
+ typ = nil
+
+ case *Builtin:
+ buf.WriteString("builtin")
+ typ = nil
+
+ case *Nil:
+ buf.WriteString("nil")
+ return
+
+ default:
+ panic(fmt.Sprintf("writeObject(%T)", obj))
+ }
+
+ buf.WriteByte(' ')
+
+ // For package-level objects, qualify the name.
+ if obj.Pkg() != nil && obj.Pkg().scope.Lookup(obj.Name()) == obj {
+ buf.WriteString(packagePrefix(obj.Pkg(), qf))
+ }
+ buf.WriteString(obj.Name())
+
+ if typ == nil {
+ return
+ }
+
+ if tname != nil {
+ switch t := typ.(type) {
+ case *Basic:
+ // Don't print anything more for basic types since there's
+ // no more information.
+ return
+ case *Named:
+ if t.TypeParams().Len() > 0 {
+ newTypeWriter(buf, qf).tParamList(t.TypeParams().list())
+ }
+ }
+ if tname.IsAlias() {
+ buf.WriteString(" =")
+ } else if t, _ := typ.(*TypeParam); t != nil {
+ typ = t.bound
+ } else {
+ // TODO(gri) should this be fromRHS for *Named?
+ typ = under(typ)
+ }
+ }
+
+ // Special handling for any: because WriteType will format 'any' as 'any',
+ // resulting in the object string `type any = any` rather than `type any =
+ // interface{}`. To avoid this, swap in a different empty interface.
+ if obj == universeAny {
+ assert(Identical(typ, &emptyInterface))
+ typ = &emptyInterface
+ }
+
+ buf.WriteByte(' ')
+ WriteType(buf, typ, qf)
+}
+
+func packagePrefix(pkg *Package, qf Qualifier) string {
+ if pkg == nil {
+ return ""
+ }
+ var s string
+ if qf != nil {
+ s = qf(pkg)
+ } else {
+ s = pkg.Path()
+ }
+ if s != "" {
+ s += "."
+ }
+ return s
+}
+
+// ObjectString returns the string form of obj.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func ObjectString(obj Object, qf Qualifier) string {
+ var buf bytes.Buffer
+ writeObject(&buf, obj, qf)
+ return buf.String()
+}
+
+func (obj *PkgName) String() string { return ObjectString(obj, nil) }
+func (obj *Const) String() string { return ObjectString(obj, nil) }
+func (obj *TypeName) String() string { return ObjectString(obj, nil) }
+func (obj *Var) String() string { return ObjectString(obj, nil) }
+func (obj *Func) String() string { return ObjectString(obj, nil) }
+func (obj *Label) String() string { return ObjectString(obj, nil) }
+func (obj *Builtin) String() string { return ObjectString(obj, nil) }
+func (obj *Nil) String() string { return ObjectString(obj, nil) }
+
+func writeFuncName(buf *bytes.Buffer, f *Func, qf Qualifier) {
+ if f.typ != nil {
+ sig := f.typ.(*Signature)
+ if recv := sig.Recv(); recv != nil {
+ buf.WriteByte('(')
+ if _, ok := recv.Type().(*Interface); ok {
+ // gcimporter creates abstract methods of
+ // named interfaces using the interface type
+ // (not the named type) as the receiver.
+ // Don't print it in full.
+ buf.WriteString("interface")
+ } else {
+ WriteType(buf, recv.Type(), qf)
+ }
+ buf.WriteByte(')')
+ buf.WriteByte('.')
+ } else if f.pkg != nil {
+ buf.WriteString(packagePrefix(f.pkg, qf))
+ }
+ }
+ buf.WriteString(f.name)
+}
diff --git a/src/cmd/compile/internal/types2/object_test.go b/src/cmd/compile/internal/types2/object_test.go
new file mode 100644
index 0000000..ef1a864
--- /dev/null
+++ b/src/cmd/compile/internal/types2/object_test.go
@@ -0,0 +1,156 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "internal/testenv"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+func TestIsAlias(t *testing.T) {
+ check := func(obj *TypeName, want bool) {
+ if got := obj.IsAlias(); got != want {
+ t.Errorf("%v: got IsAlias = %v; want %v", obj, got, want)
+ }
+ }
+
+ // predeclared types
+ check(Unsafe.Scope().Lookup("Pointer").(*TypeName), false)
+ for _, name := range Universe.Names() {
+ if obj, _ := Universe.Lookup(name).(*TypeName); obj != nil {
+ check(obj, name == "any" || name == "byte" || name == "rune")
+ }
+ }
+
+ // various other types
+ pkg := NewPackage("p", "p")
+ t1 := NewTypeName(nopos, pkg, "t1", nil)
+ n1 := NewNamed(t1, new(Struct), nil)
+ t5 := NewTypeName(nopos, pkg, "t5", nil)
+ NewTypeParam(t5, nil)
+ for _, test := range []struct {
+ name *TypeName
+ alias bool
+ }{
+ {NewTypeName(nopos, nil, "t0", nil), false}, // no type yet
+ {NewTypeName(nopos, pkg, "t0", nil), false}, // no type yet
+ {t1, false}, // type name refers to named type and vice versa
+ {NewTypeName(nopos, nil, "t2", NewInterfaceType(nil, nil)), true}, // type name refers to unnamed type
+ {NewTypeName(nopos, pkg, "t3", n1), true}, // type name refers to named type with different type name
+ {NewTypeName(nopos, nil, "t4", Typ[Int32]), true}, // type name refers to basic type with different name
+ {NewTypeName(nopos, nil, "int32", Typ[Int32]), false}, // type name refers to basic type with same name
+ {NewTypeName(nopos, pkg, "int32", Typ[Int32]), true}, // type name is declared in user-defined package (outside Universe)
+ {NewTypeName(nopos, nil, "rune", Typ[Rune]), true}, // type name refers to basic type rune which is an alias already
+ {t5, false}, // type name refers to type parameter and vice versa
+ } {
+ check(test.name, test.alias)
+ }
+}
+
+// TestEmbeddedMethod checks that an embedded method is represented by
+// the same Func Object as the original method. See also go.dev/issue/34421.
+func TestEmbeddedMethod(t *testing.T) {
+ const src = `package p; type I interface { error }`
+ pkg := mustTypecheck(src, nil, nil)
+
+ // get original error.Error method
+ eface := Universe.Lookup("error")
+ orig, _, _ := LookupFieldOrMethod(eface.Type(), false, nil, "Error")
+ if orig == nil {
+ t.Fatalf("original error.Error not found")
+ }
+
+ // get embedded error.Error method
+ iface := pkg.Scope().Lookup("I")
+ embed, _, _ := LookupFieldOrMethod(iface.Type(), false, nil, "Error")
+ if embed == nil {
+ t.Fatalf("embedded error.Error not found")
+ }
+
+ // original and embedded Error object should be identical
+ if orig != embed {
+ t.Fatalf("%s (%p) != %s (%p)", orig, orig, embed, embed)
+ }
+}
+
+var testObjects = []struct {
+ src string
+ obj string
+ want string
+}{
+ {"import \"io\"; var r io.Reader", "r", "var p.r io.Reader"},
+
+ {"const c = 1.2", "c", "const p.c untyped float"},
+ {"const c float64 = 3.14", "c", "const p.c float64"},
+
+ {"type t struct{f int}", "t", "type p.t struct{f int}"},
+ {"type t func(int)", "t", "type p.t func(int)"},
+ {"type t[P any] struct{f P}", "t", "type p.t[P any] struct{f P}"},
+ {"type t[P any] struct{f P}", "t.P", "type parameter P any"},
+ {"type C interface{m()}; type t[P C] struct{}", "t.P", "type parameter P p.C"},
+
+ {"type t = struct{f int}", "t", "type p.t = struct{f int}"},
+ {"type t = func(int)", "t", "type p.t = func(int)"},
+
+ {"var v int", "v", "var p.v int"},
+
+ {"func f(int) string", "f", "func p.f(int) string"},
+ {"func g[P any](x P){}", "g", "func p.g[P any](x P)"},
+ {"func g[P interface{~int}](x P){}", "g.P", "type parameter P interface{~int}"},
+ {"", "any", "type any = interface{}"},
+}
+
+func TestObjectString(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ for _, test := range testObjects {
+ src := "package p; " + test.src
+ pkg, err := typecheck(src, nil, nil)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+
+ names := strings.Split(test.obj, ".")
+ if len(names) != 1 && len(names) != 2 {
+ t.Errorf("%s: invalid object path %s", test.src, test.obj)
+ continue
+ }
+ _, obj := pkg.Scope().LookupParent(names[0], nopos)
+ if obj == nil {
+ t.Errorf("%s: %s not found", test.src, names[0])
+ continue
+ }
+ if len(names) == 2 {
+ if typ, ok := obj.Type().(interface{ TypeParams() *TypeParamList }); ok {
+ obj = lookupTypeParamObj(typ.TypeParams(), names[1])
+ if obj == nil {
+ t.Errorf("%s: %s not found", test.src, test.obj)
+ continue
+ }
+ } else {
+ t.Errorf("%s: %s has no type parameters", test.src, names[0])
+ continue
+ }
+ }
+
+ if got := obj.String(); got != test.want {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.want)
+ }
+ }
+}
+
+func lookupTypeParamObj(list *TypeParamList, name string) Object {
+ for i := 0; i < list.Len(); i++ {
+ tpar := list.At(i)
+ if tpar.Obj().Name() == name {
+ return tpar.Obj()
+ }
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/objset.go b/src/cmd/compile/internal/types2/objset.go
new file mode 100644
index 0000000..88ff0af
--- /dev/null
+++ b/src/cmd/compile/internal/types2/objset.go
@@ -0,0 +1,31 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements objsets.
+//
+// An objset is similar to a Scope but objset elements
+// are identified by their unique id, instead of their
+// object name.
+
+package types2
+
+// An objset is a set of objects identified by their unique id.
+// The zero value for objset is a ready-to-use empty objset.
+type objset map[string]Object // initialized lazily
+
+// insert attempts to insert an object obj into objset s.
+// If s already contains an alternative object alt with
+// the same name, insert leaves s unchanged and returns alt.
+// Otherwise it inserts obj and returns nil.
+func (s *objset) insert(obj Object) Object {
+ id := obj.Id()
+ if alt := (*s)[id]; alt != nil {
+ return alt
+ }
+ if *s == nil {
+ *s = make(map[string]Object)
+ }
+ (*s)[id] = obj
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
new file mode 100644
index 0000000..3f15100
--- /dev/null
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -0,0 +1,396 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines operands and associated operations.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "go/token"
+ . "internal/types/errors"
+)
+
+// An operandMode specifies the (addressing) mode of an operand.
+type operandMode byte
+
+const (
+ invalid operandMode = iota // operand is invalid
+ novalue // operand represents no value (result of a function call w/o result)
+ builtin // operand is a built-in function
+ typexpr // operand is a type
+ constant_ // operand is a constant; the operand's typ is a Basic type
+ variable // operand is an addressable variable
+ mapindex // operand is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment)
+ value // operand is a computed value
+ nilvalue // operand is the nil value
+ commaok // like value, but operand may be used in a comma,ok expression
+ commaerr // like commaok, but second value is error, not boolean
+ cgofunc // operand is a cgo function
+)
+
+var operandModeString = [...]string{
+ invalid: "invalid operand",
+ novalue: "no value",
+ builtin: "built-in",
+ typexpr: "type",
+ constant_: "constant",
+ variable: "variable",
+ mapindex: "map index expression",
+ value: "value",
+ nilvalue: "nil",
+ commaok: "comma, ok expression",
+ commaerr: "comma, error expression",
+ cgofunc: "cgo function",
+}
+
+// An operand represents an intermediate value during type checking.
+// Operands have an (addressing) mode, the expression evaluating to
+// the operand, the operand's type, a value for constants, and an id
+// for built-in functions.
+// The zero value of operand is a ready to use invalid operand.
+type operand struct {
+ mode operandMode
+ expr syntax.Expr
+ typ Type
+ val constant.Value
+ id builtinId
+}
+
+// Pos returns the position of the expression corresponding to x.
+// If x is invalid the position is nopos.
+func (x *operand) Pos() syntax.Pos {
+ // x.expr may not be set if x is invalid
+ if x.expr == nil {
+ return nopos
+ }
+ return x.expr.Pos()
+}
+
+// Operand string formats
+// (not all "untyped" cases can appear due to the type system,
+// but they fall out naturally here)
+//
+// mode format
+//
+// invalid <expr> ( <mode> )
+// novalue <expr> ( <mode> )
+// builtin <expr> ( <mode> )
+// typexpr <expr> ( <mode> )
+//
+// constant <expr> (<untyped kind> <mode> )
+// constant <expr> ( <mode> of type <typ>)
+// constant <expr> (<untyped kind> <mode> <val> )
+// constant <expr> ( <mode> <val> of type <typ>)
+//
+// variable <expr> (<untyped kind> <mode> )
+// variable <expr> ( <mode> of type <typ>)
+//
+// mapindex <expr> (<untyped kind> <mode> )
+// mapindex <expr> ( <mode> of type <typ>)
+//
+// value <expr> (<untyped kind> <mode> )
+// value <expr> ( <mode> of type <typ>)
+//
+// nilvalue untyped nil
+// nilvalue nil ( of type <typ>)
+//
+// commaok <expr> (<untyped kind> <mode> )
+// commaok <expr> ( <mode> of type <typ>)
+//
+// commaerr <expr> (<untyped kind> <mode> )
+// commaerr <expr> ( <mode> of type <typ>)
+//
+// cgofunc <expr> (<untyped kind> <mode> )
+// cgofunc <expr> ( <mode> of type <typ>)
+func operandString(x *operand, qf Qualifier) string {
+ // special-case nil
+ if x.mode == nilvalue {
+ switch x.typ {
+ case nil, Typ[Invalid]:
+ return "nil (with invalid type)"
+ case Typ[UntypedNil]:
+ return "nil"
+ default:
+ return fmt.Sprintf("nil (of type %s)", TypeString(x.typ, qf))
+ }
+ }
+
+ var buf bytes.Buffer
+
+ var expr string
+ if x.expr != nil {
+ expr = syntax.String(x.expr)
+ } else {
+ switch x.mode {
+ case builtin:
+ expr = predeclaredFuncs[x.id].name
+ case typexpr:
+ expr = TypeString(x.typ, qf)
+ case constant_:
+ expr = x.val.String()
+ }
+ }
+
+ // <expr> (
+ if expr != "" {
+ buf.WriteString(expr)
+ buf.WriteString(" (")
+ }
+
+ // <untyped kind>
+ hasType := false
+ switch x.mode {
+ case invalid, novalue, builtin, typexpr:
+ // no type
+ default:
+ // should have a type, but be cautious (don't crash during printing)
+ if x.typ != nil {
+ if isUntyped(x.typ) {
+ buf.WriteString(x.typ.(*Basic).name)
+ buf.WriteByte(' ')
+ break
+ }
+ hasType = true
+ }
+ }
+
+ // <mode>
+ buf.WriteString(operandModeString[x.mode])
+
+ // <val>
+ if x.mode == constant_ {
+ if s := x.val.String(); s != expr {
+ buf.WriteByte(' ')
+ buf.WriteString(s)
+ }
+ }
+
+ // <typ>
+ if hasType {
+ if isValid(x.typ) {
+ var intro string
+ if isGeneric(x.typ) {
+ intro = " of generic type "
+ } else {
+ intro = " of type "
+ }
+ buf.WriteString(intro)
+ WriteType(&buf, x.typ, qf)
+ if tpar, _ := x.typ.(*TypeParam); tpar != nil {
+ buf.WriteString(" constrained by ")
+ WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here
+ // If we have the type set and it's empty, say so for better error messages.
+ if hasEmptyTypeset(tpar) {
+ buf.WriteString(" with empty type set")
+ }
+ }
+ } else {
+ buf.WriteString(" with invalid type")
+ }
+ }
+
+ // )
+ if expr != "" {
+ buf.WriteByte(')')
+ }
+
+ return buf.String()
+}
+
+func (x *operand) String() string {
+ return operandString(x, nil)
+}
+
+// setConst sets x to the untyped constant for literal lit.
+func (x *operand) setConst(k syntax.LitKind, lit string) {
+ var kind BasicKind
+ switch k {
+ case syntax.IntLit:
+ kind = UntypedInt
+ case syntax.FloatLit:
+ kind = UntypedFloat
+ case syntax.ImagLit:
+ kind = UntypedComplex
+ case syntax.RuneLit:
+ kind = UntypedRune
+ case syntax.StringLit:
+ kind = UntypedString
+ default:
+ unreachable()
+ }
+
+ val := constant.MakeFromLiteral(lit, kind2tok[k], 0)
+ if val.Kind() == constant.Unknown {
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+ return
+ }
+ x.mode = constant_
+ x.typ = Typ[kind]
+ x.val = val
+}
+
+// isNil reports whether x is the (untyped) nil value.
+func (x *operand) isNil() bool { return x.mode == nilvalue }
+
+// assignableTo reports whether x is assignable to a variable of type T. If the
+// result is false and a non-nil cause is provided, it may be set to a more
+// detailed explanation of the failure (result != ""). The returned error code
+// is only valid if the (first) result is false. The check parameter may be nil
+// if assignableTo is invoked through an exported API call, i.e., when all
+// methods have been type-checked.
+func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Code) {
+ if x.mode == invalid || !isValid(T) {
+ return true, 0 // avoid spurious errors
+ }
+
+ V := x.typ
+
+ // x's type is identical to T
+ if Identical(V, T) {
+ return true, 0
+ }
+
+ Vu := under(V)
+ Tu := under(T)
+ Vp, _ := V.(*TypeParam)
+ Tp, _ := T.(*TypeParam)
+
+ // x is an untyped value representable by a value of type T.
+ if isUntyped(Vu) {
+ assert(Vp == nil)
+ if Tp != nil {
+ // T is a type parameter: x is assignable to T if it is
+ // representable by each specific type in the type set of T.
+ return Tp.is(func(t *term) bool {
+ if t == nil {
+ return false
+ }
+ // A term may be a tilde term but the underlying
+ // type of an untyped value doesn't change so we
+ // don't need to do anything special.
+ newType, _, _ := check.implicitTypeAndValue(x, t.typ)
+ return newType != nil
+ }), IncompatibleAssign
+ }
+ newType, _, _ := check.implicitTypeAndValue(x, T)
+ return newType != nil, IncompatibleAssign
+ }
+ // Vu is typed
+
+ // x's type V and T have identical underlying types
+ // and at least one of V or T is not a named type
+ // and neither V nor T is a type parameter.
+ if Identical(Vu, Tu) && (!hasName(V) || !hasName(T)) && Vp == nil && Tp == nil {
+ return true, 0
+ }
+
+ // T is an interface type, but not a type parameter, and V implements T.
+ // Also handle the case where T is a pointer to an interface so that we get
+ // the Checker.implements error cause.
+ if _, ok := Tu.(*Interface); ok && Tp == nil || isInterfacePtr(Tu) {
+ if check.implements(x.Pos(), V, T, false, cause) {
+ return true, 0
+ }
+ // V doesn't implement T but V may still be assignable to T if V
+ // is a type parameter; do not report an error in that case yet.
+ if Vp == nil {
+ return false, InvalidIfaceAssign
+ }
+ if cause != nil {
+ *cause = ""
+ }
+ }
+
+ // If V is an interface, check if a missing type assertion is the problem.
+ if Vi, _ := Vu.(*Interface); Vi != nil && Vp == nil {
+ if check.implements(x.Pos(), T, V, false, nil) {
+ // T implements V, so give hint about type assertion.
+ if cause != nil {
+ *cause = "need type assertion"
+ }
+ return false, IncompatibleAssign
+ }
+ }
+
+ // x is a bidirectional channel value, T is a channel
+ // type, x's type V and T have identical element types,
+ // and at least one of V or T is not a named type.
+ if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv {
+ if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) {
+ return !hasName(V) || !hasName(T), InvalidChanAssign
+ }
+ }
+
+ // optimization: if we don't have type parameters, we're done
+ if Vp == nil && Tp == nil {
+ return false, IncompatibleAssign
+ }
+
+ errorf := func(format string, args ...interface{}) {
+ if check != nil && cause != nil {
+ msg := check.sprintf(format, args...)
+ if *cause != "" {
+ msg += "\n\t" + *cause
+ }
+ *cause = msg
+ }
+ }
+
+ // x's type V is not a named type and T is a type parameter, and
+ // x is assignable to each specific type in T's type set.
+ if !hasName(V) && Tp != nil {
+ ok := false
+ code := IncompatibleAssign
+ Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
+ ok, code = x.assignableTo(check, T.typ, cause)
+ if !ok {
+ errorf("cannot assign %s to %s (in %s)", x.typ, T.typ, Tp)
+ return false
+ }
+ return true
+ })
+ return ok, code
+ }
+
+ // x's type V is a type parameter and T is not a named type,
+ // and values x' of each specific type in V's type set are
+ // assignable to T.
+ if Vp != nil && !hasName(T) {
+ x := *x // don't clobber outer x
+ ok := false
+ code := IncompatibleAssign
+ Vp.is(func(V *term) bool {
+ if V == nil {
+ return false // no specific types
+ }
+ x.typ = V.typ
+ ok, code = x.assignableTo(check, T, cause)
+ if !ok {
+ errorf("cannot assign %s (in %s) to %s", V.typ, Vp, T)
+ return false
+ }
+ return true
+ })
+ return ok, code
+ }
+
+ return false, IncompatibleAssign
+}
+
+// kind2tok translates syntax.LitKinds into token.Tokens.
+var kind2tok = [...]token.Token{
+ syntax.IntLit: token.INT,
+ syntax.FloatLit: token.FLOAT,
+ syntax.ImagLit: token.IMAG,
+ syntax.RuneLit: token.CHAR,
+ syntax.StringLit: token.STRING,
+}
diff --git a/src/cmd/compile/internal/types2/package.go b/src/cmd/compile/internal/types2/package.go
new file mode 100644
index 0000000..e08099d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/package.go
@@ -0,0 +1,80 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "fmt"
+)
+
+// A Package describes a Go package.
+type Package struct {
+ path string
+ name string
+ scope *Scope
+ imports []*Package
+ complete bool
+ fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
+ cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go
+ goVersion string // minimum Go version required for package (by Config.GoVersion, typically from go.mod)
+}
+
+// NewPackage returns a new Package for the given package path and name.
+// The package is not complete and contains no explicit imports.
+func NewPackage(path, name string) *Package {
+ scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path))
+ return &Package{path: path, name: name, scope: scope}
+}
+
+// Path returns the package path.
+func (pkg *Package) Path() string { return pkg.path }
+
+// Name returns the package name.
+func (pkg *Package) Name() string { return pkg.name }
+
+// SetName sets the package name.
+func (pkg *Package) SetName(name string) { pkg.name = name }
+
+// GoVersion returns the minimum Go version required by this package.
+// If the minimum version is unknown, GoVersion returns the empty string.
+// Individual source files may specify a different minimum Go version,
+// as reported in the [go/ast.File.GoVersion] field.
+func (pkg *Package) GoVersion() string { return pkg.goVersion }
+
+// Scope returns the (complete or incomplete) package scope
+// holding the objects declared at package level (TypeNames,
+// Consts, Vars, and Funcs).
+// For a nil pkg receiver, Scope returns the Universe scope.
+func (pkg *Package) Scope() *Scope {
+ if pkg != nil {
+ return pkg.scope
+ }
+ return Universe
+}
+
+// A package is complete if its scope contains (at least) all
+// exported objects; otherwise it is incomplete.
+func (pkg *Package) Complete() bool { return pkg.complete }
+
+// MarkComplete marks a package as complete.
+func (pkg *Package) MarkComplete() { pkg.complete = true }
+
+// Imports returns the list of packages directly imported by
+// pkg; the list is in source order.
+//
+// If pkg was loaded from export data, Imports includes packages that
+// provide package-level objects referenced by pkg. This may be more or
+// less than the set of packages directly imported by pkg's source code.
+//
+// If pkg uses cgo and the FakeImportC configuration option
+// was enabled, the imports list may contain a fake "C" package.
+func (pkg *Package) Imports() []*Package { return pkg.imports }
+
+// SetImports sets the list of explicitly imported packages to list.
+// It is the caller's responsibility to make sure list elements are unique.
+func (pkg *Package) SetImports(list []*Package) { pkg.imports = list }
+
+func (pkg *Package) String() string {
+ return fmt.Sprintf("package %s (%q)", pkg.name, pkg.path)
+}
diff --git a/src/cmd/compile/internal/types2/pointer.go b/src/cmd/compile/internal/types2/pointer.go
new file mode 100644
index 0000000..63055fc
--- /dev/null
+++ b/src/cmd/compile/internal/types2/pointer.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Pointer represents a pointer type.
+type Pointer struct {
+ base Type // element type
+}
+
+// NewPointer returns a new pointer type for the given element (base) type.
+func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
+
+// Elem returns the element type for the given pointer p.
+func (p *Pointer) Elem() Type { return p.base }
+
+func (p *Pointer) Underlying() Type { return p }
+func (p *Pointer) String() string { return TypeString(p, nil) }
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
new file mode 100644
index 0000000..7a096e3
--- /dev/null
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -0,0 +1,546 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements commonly used type predicates.
+
+package types2
+
+// isValid reports whether t is a valid type.
+func isValid(t Type) bool { return Unalias(t) != Typ[Invalid] }
+
+// The isX predicates below report whether t is an X.
+// If t is a type parameter the result is false; i.e.,
+// these predicates don't look inside a type parameter.
+
+func isBoolean(t Type) bool { return isBasic(t, IsBoolean) }
+func isInteger(t Type) bool { return isBasic(t, IsInteger) }
+func isUnsigned(t Type) bool { return isBasic(t, IsUnsigned) }
+func isFloat(t Type) bool { return isBasic(t, IsFloat) }
+func isComplex(t Type) bool { return isBasic(t, IsComplex) }
+func isNumeric(t Type) bool { return isBasic(t, IsNumeric) }
+func isString(t Type) bool { return isBasic(t, IsString) }
+func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) }
+func isConstType(t Type) bool { return isBasic(t, IsConstType) }
+
+// isBasic reports whether under(t) is a basic type with the specified info.
+// If t is a type parameter the result is false; i.e.,
+// isBasic does not look inside a type parameter.
+func isBasic(t Type, info BasicInfo) bool {
+ u, _ := under(t).(*Basic)
+ return u != nil && u.info&info != 0
+}
+
+// The allX predicates below report whether t is an X.
+// If t is a type parameter the result is true if isX is true
+// for all specified types of the type parameter's type set.
+// allX is an optimized version of isX(coreType(t)) (which
+// is the same as underIs(t, isX)).
+
+func allBoolean(t Type) bool { return allBasic(t, IsBoolean) }
+func allInteger(t Type) bool { return allBasic(t, IsInteger) }
+func allUnsigned(t Type) bool { return allBasic(t, IsUnsigned) }
+func allNumeric(t Type) bool { return allBasic(t, IsNumeric) }
+func allString(t Type) bool { return allBasic(t, IsString) }
+func allOrdered(t Type) bool { return allBasic(t, IsOrdered) }
+func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) }
+
+// allBasic reports whether under(t) is a basic type with the specified info.
+// If t is a type parameter, the result is true if isBasic(t, info) is true
+// for all specific types of the type parameter's type set.
+// allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
+func allBasic(t Type, info BasicInfo) bool {
+ if tpar, _ := Unalias(t).(*TypeParam); tpar != nil {
+ return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
+ }
+ return isBasic(t, info)
+}
+
+// hasName reports whether t has a name. This includes
+// predeclared types, defined types, and type parameters.
+// hasName may be called with types that are not fully set up.
+func hasName(t Type) bool {
+ switch Unalias(t).(type) {
+ case *Basic, *Named, *TypeParam:
+ return true
+ }
+ return false
+}
+
+// isTypeLit reports whether t is a type literal.
+// This includes all non-defined types, but also basic types.
+// isTypeLit may be called with types that are not fully set up.
+func isTypeLit(t Type) bool {
+ switch Unalias(t).(type) {
+ case *Named, *TypeParam:
+ return false
+ }
+ return true
+}
+
+// isTyped reports whether t is typed; i.e., not an untyped
+// constant or boolean. isTyped may be called with types that
+// are not fully set up.
+func isTyped(t Type) bool {
+ // Alias or Named types cannot denote untyped types,
+ // thus we don't need to call Unalias or under
+ // (which would be unsafe to do for types that are
+ // not fully set up).
+ b, _ := t.(*Basic)
+ return b == nil || b.info&IsUntyped == 0
+}
+
+// isUntyped(t) is the same as !isTyped(t).
+func isUntyped(t Type) bool {
+ return !isTyped(t)
+}
+
+// IsInterface reports whether t is an interface type.
+func IsInterface(t Type) bool {
+ _, ok := under(t).(*Interface)
+ return ok
+}
+
+// isNonTypeParamInterface reports whether t is an interface type but not a type parameter.
+func isNonTypeParamInterface(t Type) bool {
+ return !isTypeParam(t) && IsInterface(t)
+}
+
+// isTypeParam reports whether t is a type parameter.
+func isTypeParam(t Type) bool {
+ _, ok := Unalias(t).(*TypeParam)
+ return ok
+}
+
+// hasEmptyTypeset reports whether t is a type parameter with an empty type set.
+// The function does not force the computation of the type set and so is safe to
+// use anywhere, but it may report a false negative if the type set has not been
+// computed yet.
+func hasEmptyTypeset(t Type) bool {
+ if tpar, _ := Unalias(t).(*TypeParam); tpar != nil && tpar.bound != nil {
+ iface, _ := safeUnderlying(tpar.bound).(*Interface)
+ return iface != nil && iface.tset != nil && iface.tset.IsEmpty()
+ }
+ return false
+}
+
+// isGeneric reports whether a type is a generic, uninstantiated type
+// (generic signatures are not included).
+// TODO(gri) should we include signatures or assert that they are not present?
+func isGeneric(t Type) bool {
+ // A parameterized type is only generic if it doesn't have an instantiation already.
+ named := asNamed(t)
+ return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0
+}
+
+// Comparable reports whether values of type T are comparable.
+func Comparable(T Type) bool {
+ return comparable(T, true, nil, nil)
+}
+
+// If dynamic is set, non-type parameter interfaces are always comparable.
+// If reportf != nil, it may be used to report why T is not comparable.
+func comparable(T Type, dynamic bool, seen map[Type]bool, reportf func(string, ...interface{})) bool {
+ if seen[T] {
+ return true
+ }
+ if seen == nil {
+ seen = make(map[Type]bool)
+ }
+ seen[T] = true
+
+ switch t := under(T).(type) {
+ case *Basic:
+ // assume invalid types to be comparable
+ // to avoid follow-up errors
+ return t.kind != UntypedNil
+ case *Pointer, *Chan:
+ return true
+ case *Struct:
+ for _, f := range t.fields {
+ if !comparable(f.typ, dynamic, seen, nil) {
+ if reportf != nil {
+ reportf("struct containing %s cannot be compared", f.typ)
+ }
+ return false
+ }
+ }
+ return true
+ case *Array:
+ if !comparable(t.elem, dynamic, seen, nil) {
+ if reportf != nil {
+ reportf("%s cannot be compared", t)
+ }
+ return false
+ }
+ return true
+ case *Interface:
+ if dynamic && !isTypeParam(T) || t.typeSet().IsComparable(seen) {
+ return true
+ }
+ if reportf != nil {
+ if t.typeSet().IsEmpty() {
+ reportf("empty type set")
+ } else {
+ reportf("incomparable types in type set")
+ }
+ }
+ // fallthrough
+ }
+ return false
+}
+
+// hasNil reports whether type t includes the nil value.
+func hasNil(t Type) bool {
+ switch u := under(t).(type) {
+ case *Basic:
+ return u.kind == UnsafePointer
+ case *Slice, *Pointer, *Signature, *Map, *Chan:
+ return true
+ case *Interface:
+ return !isTypeParam(t) || u.typeSet().underIs(func(u Type) bool {
+ return u != nil && hasNil(u)
+ })
+ }
+ return false
+}
+
+// An ifacePair is a node in a stack of interface type pairs compared for identity.
+type ifacePair struct {
+ x, y *Interface
+ prev *ifacePair
+}
+
+func (p *ifacePair) identical(q *ifacePair) bool {
+ return p.x == q.x && p.y == q.y || p.x == q.y && p.y == q.x
+}
+
+// A comparer is used to compare types.
+type comparer struct {
+ ignoreTags bool // if set, identical ignores struct tags
+ ignoreInvalids bool // if set, identical treats an invalid type as identical to any type
+}
+
+// For changes to this code the corresponding changes should be made to unifier.nify.
+func (c *comparer) identical(x, y Type, p *ifacePair) bool {
+ x = Unalias(x)
+ y = Unalias(y)
+
+ if x == y {
+ return true
+ }
+
+ if c.ignoreInvalids && (!isValid(x) || !isValid(y)) {
+ return true
+ }
+
+ switch x := x.(type) {
+ case *Basic:
+ // Basic types are singletons except for the rune and byte
+ // aliases, thus we cannot solely rely on the x == y check
+ // above. See also comment in TypeName.IsAlias.
+ if y, ok := y.(*Basic); ok {
+ return x.kind == y.kind
+ }
+
+ case *Array:
+ // Two array types are identical if they have identical element types
+ // and the same array length.
+ if y, ok := y.(*Array); ok {
+ // If one or both array lengths are unknown (< 0) due to some error,
+ // assume they are the same to avoid spurious follow-on errors.
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && c.identical(x.elem, y.elem, p)
+ }
+
+ case *Slice:
+ // Two slice types are identical if they have identical element types.
+ if y, ok := y.(*Slice); ok {
+ return c.identical(x.elem, y.elem, p)
+ }
+
+ case *Struct:
+ // Two struct types are identical if they have the same sequence of fields,
+ // and if corresponding fields have the same names, and identical types,
+ // and identical tags. Two embedded fields are considered to have the same
+ // name. Lower-case field names from different packages are always different.
+ if y, ok := y.(*Struct); ok {
+ if x.NumFields() == y.NumFields() {
+ for i, f := range x.fields {
+ g := y.fields[i]
+ if f.embedded != g.embedded ||
+ !c.ignoreTags && x.Tag(i) != y.Tag(i) ||
+ !f.sameId(g.pkg, g.name) ||
+ !c.identical(f.typ, g.typ, p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Pointer:
+ // Two pointer types are identical if they have identical base types.
+ if y, ok := y.(*Pointer); ok {
+ return c.identical(x.base, y.base, p)
+ }
+
+ case *Tuple:
+ // Two tuples types are identical if they have the same number of elements
+ // and corresponding elements have identical types.
+ if y, ok := y.(*Tuple); ok {
+ if x.Len() == y.Len() {
+ if x != nil {
+ for i, v := range x.vars {
+ w := y.vars[i]
+ if !c.identical(v.typ, w.typ, p) {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ }
+
+ case *Signature:
+ y, _ := y.(*Signature)
+ if y == nil {
+ return false
+ }
+
+ // Two function types are identical if they have the same number of
+ // parameters and result values, corresponding parameter and result types
+ // are identical, and either both functions are variadic or neither is.
+ // Parameter and result names are not required to match, and type
+ // parameters are considered identical modulo renaming.
+
+ if x.TypeParams().Len() != y.TypeParams().Len() {
+ return false
+ }
+
+ // In the case of generic signatures, we will substitute in yparams and
+ // yresults.
+ yparams := y.params
+ yresults := y.results
+
+ if x.TypeParams().Len() > 0 {
+ // We must ignore type parameter names when comparing x and y. The
+ // easiest way to do this is to substitute x's type parameters for y's.
+ xtparams := x.TypeParams().list()
+ ytparams := y.TypeParams().list()
+
+ var targs []Type
+ for i := range xtparams {
+ targs = append(targs, x.TypeParams().At(i))
+ }
+ smap := makeSubstMap(ytparams, targs)
+
+ var check *Checker // ok to call subst on a nil *Checker
+ ctxt := NewContext() // need a non-nil Context for the substitution below
+
+ // Constraints must be pair-wise identical, after substitution.
+ for i, xtparam := range xtparams {
+ ybound := check.subst(nopos, ytparams[i].bound, smap, nil, ctxt)
+ if !c.identical(xtparam.bound, ybound, p) {
+ return false
+ }
+ }
+
+ yparams = check.subst(nopos, y.params, smap, nil, ctxt).(*Tuple)
+ yresults = check.subst(nopos, y.results, smap, nil, ctxt).(*Tuple)
+ }
+
+ return x.variadic == y.variadic &&
+ c.identical(x.params, yparams, p) &&
+ c.identical(x.results, yresults, p)
+
+ case *Union:
+ if y, _ := y.(*Union); y != nil {
+ // TODO(rfindley): can this be reached during type checking? If so,
+ // consider passing a type set map.
+ unionSets := make(map[*Union]*_TypeSet)
+ xset := computeUnionTypeSet(nil, unionSets, nopos, x)
+ yset := computeUnionTypeSet(nil, unionSets, nopos, y)
+ return xset.terms.equal(yset.terms)
+ }
+
+ case *Interface:
+ // Two interface types are identical if they describe the same type sets.
+ // With the existing implementation restriction, this simplifies to:
+ //
+ // Two interface types are identical if they have the same set of methods with
+ // the same names and identical function types, and if any type restrictions
+ // are the same. Lower-case method names from different packages are always
+ // different. The order of the methods is irrelevant.
+ if y, ok := y.(*Interface); ok {
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
+ if !xset.terms.equal(yset.terms) {
+ return false
+ }
+ a := xset.methods
+ b := yset.methods
+ if len(a) == len(b) {
+ // Interface types are the only types where cycles can occur
+ // that are not "terminated" via named types; and such cycles
+ // can only be created via method parameter types that are
+ // anonymous interfaces (directly or indirectly) embedding
+ // the current interface. Example:
+ //
+ // type T interface {
+ // m() interface{T}
+ // }
+ //
+ // If two such (differently named) interfaces are compared,
+ // endless recursion occurs if the cycle is not detected.
+ //
+ // If x and y were compared before, they must be equal
+ // (if they were not, the recursion would have stopped);
+ // search the ifacePair stack for the same pair.
+ //
+ // This is a quadratic algorithm, but in practice these stacks
+ // are extremely short (bounded by the nesting depth of interface
+ // type declarations that recur via parameter types, an extremely
+ // rare occurrence). An alternative implementation might use a
+ // "visited" map, but that is probably less efficient overall.
+ q := &ifacePair{x, y, p}
+ for p != nil {
+ if p.identical(q) {
+ return true // same pair was compared before
+ }
+ p = p.prev
+ }
+ if debug {
+ assertSortedMethods(a)
+ assertSortedMethods(b)
+ }
+ for i, f := range a {
+ g := b[i]
+ if f.Id() != g.Id() || !c.identical(f.typ, g.typ, q) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Map:
+ // Two map types are identical if they have identical key and value types.
+ if y, ok := y.(*Map); ok {
+ return c.identical(x.key, y.key, p) && c.identical(x.elem, y.elem, p)
+ }
+
+ case *Chan:
+ // Two channel types are identical if they have identical value types
+ // and the same direction.
+ if y, ok := y.(*Chan); ok {
+ return x.dir == y.dir && c.identical(x.elem, y.elem, p)
+ }
+
+ case *Named:
+ // Two named types are identical if their type names originate
+ // in the same type declaration; if they are instantiated they
+ // must have identical type argument lists.
+ if y := asNamed(y); y != nil {
+ // check type arguments before origins to match unifier
+ // (for correct source code we need to do all checks so
+ // order doesn't matter)
+ xargs := x.TypeArgs().list()
+ yargs := y.TypeArgs().list()
+ if len(xargs) != len(yargs) {
+ return false
+ }
+ for i, xarg := range xargs {
+ if !Identical(xarg, yargs[i]) {
+ return false
+ }
+ }
+ return identicalOrigin(x, y)
+ }
+
+ case *TypeParam:
+ // nothing to do (x and y being equal is caught in the very beginning of this function)
+
+ case nil:
+ // avoid a crash in case of nil type
+
+ default:
+ unreachable()
+ }
+
+ return false
+}
+
+// identicalOrigin reports whether x and y originated in the same declaration.
+func identicalOrigin(x, y *Named) bool {
+ // TODO(gri) is this correct?
+ return x.Origin().obj == y.Origin().obj
+}
+
+// identicalInstance reports if two type instantiations are identical.
+// Instantiations are identical if their origin and type arguments are
+// identical.
+func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool {
+ if len(xargs) != len(yargs) {
+ return false
+ }
+
+ for i, xa := range xargs {
+ if !Identical(xa, yargs[i]) {
+ return false
+ }
+ }
+
+ return Identical(xorig, yorig)
+}
+
+// Default returns the default "typed" type for an "untyped" type;
+// it returns the incoming type for all other types. The default type
+// for untyped nil is untyped nil.
+func Default(t Type) Type {
+ if t, ok := Unalias(t).(*Basic); ok {
+ switch t.kind {
+ case UntypedBool:
+ return Typ[Bool]
+ case UntypedInt:
+ return Typ[Int]
+ case UntypedRune:
+ return universeRune // use 'rune' name
+ case UntypedFloat:
+ return Typ[Float64]
+ case UntypedComplex:
+ return Typ[Complex128]
+ case UntypedString:
+ return Typ[String]
+ }
+ }
+ return t
+}
+
+// maxType returns the "largest" type that encompasses both x and y.
+// If x and y are different untyped numeric types, the result is the type of x or y
+// that appears later in this list: integer, rune, floating-point, complex.
+// Otherwise, if x != y, the result is nil.
+func maxType(x, y Type) Type {
+ // We only care about untyped types (for now), so == is good enough.
+ // TODO(gri) investigate generalizing this function to simplify code elsewhere
+ if x == y {
+ return x
+ }
+ if isUntyped(x) && isUntyped(y) && isNumeric(x) && isNumeric(y) {
+ // untyped types are basic types
+ if x.(*Basic).kind > y.(*Basic).kind {
+ return x
+ }
+ return y
+ }
+ return nil
+}
+
+// clone makes a "flat copy" of *p and returns a pointer to the copy.
+func clone[P *T, T any](p P) P {
+ c := *p
+ return &c
+}
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
new file mode 100644
index 0000000..0cf7c91
--- /dev/null
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -0,0 +1,776 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ . "internal/types/errors"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// A declInfo describes a package-level const, type, var, or func declaration.
+type declInfo struct {
+ file *Scope // scope of file containing this declaration
+ lhs []*Var // lhs of n:1 variable declarations, or nil
+ vtyp syntax.Expr // type, or nil (for const and var declarations only)
+ init syntax.Expr // init/orig expression, or nil (for const and var declarations only)
+ inherited bool // if set, the init expression is inherited from a previous constant declaration
+ tdecl *syntax.TypeDecl // type declaration, or nil
+ fdecl *syntax.FuncDecl // func declaration, or nil
+
+ // The deps field tracks initialization expression dependencies.
+ deps map[Object]bool // lazily initialized
+}
+
+// hasInitializer reports whether the declared object has an initialization
+// expression or function body.
+func (d *declInfo) hasInitializer() bool {
+ return d.init != nil || d.fdecl != nil && d.fdecl.Body != nil
+}
+
+// addDep adds obj to the set of objects d's init expression depends on.
+func (d *declInfo) addDep(obj Object) {
+ m := d.deps
+ if m == nil {
+ m = make(map[Object]bool)
+ d.deps = m
+ }
+ m[obj] = true
+}
+
+// arity checks that the lhs and rhs of a const or var decl
+// have a matching number of names and initialization values.
+// If inherited is set, the initialization values are from
+// another (constant) declaration.
+func (check *Checker) arity(pos syntax.Pos, names []*syntax.Name, inits []syntax.Expr, constDecl, inherited bool) {
+ l := len(names)
+ r := len(inits)
+
+ const code = WrongAssignCount
+ switch {
+ case l < r:
+ n := inits[l]
+ if inherited {
+ check.errorf(pos, code, "extra init expr at %s", n.Pos())
+ } else {
+ check.errorf(n, code, "extra init expr %s", n)
+ }
+ case l > r && (constDecl || r != 1): // if r == 1 it may be a multi-valued function and we can't say anything yet
+ n := names[r]
+ check.errorf(n, code, "missing init expr for %s", n.Value)
+ }
+}
+
+func validatedImportPath(path string) (string, error) {
+ s, err := strconv.Unquote(path)
+ if err != nil {
+ return "", err
+ }
+ if s == "" {
+ return "", fmt.Errorf("empty string")
+ }
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return s, fmt.Errorf("invalid character %#U", r)
+ }
+ }
+ return s, nil
+}
+
+// declarePkgObj declares obj in the package scope, records its ident -> obj mapping,
+// and updates check.objMap. The object must not be a function or method.
+func (check *Checker) declarePkgObj(ident *syntax.Name, obj Object, d *declInfo) {
+ assert(ident.Value == obj.Name())
+
+ // spec: "A package-scope or file-scope identifier with name init
+ // may only be declared to be a function with this (func()) signature."
+ if ident.Value == "init" {
+ check.error(ident, InvalidInitDecl, "cannot declare init - must be func")
+ return
+ }
+
+ // spec: "The main package must have package name main and declare
+ // a function main that takes no arguments and returns no value."
+ if ident.Value == "main" && check.pkg.name == "main" {
+ check.error(ident, InvalidMainDecl, "cannot declare main - must be func")
+ return
+ }
+
+ check.declare(check.pkg.scope, ident, obj, nopos)
+ check.objMap[obj] = d
+ obj.setOrder(uint32(len(check.objMap)))
+}
+
+// filename returns a filename suitable for debugging output.
+func (check *Checker) filename(fileNo int) string {
+ file := check.files[fileNo]
+ if pos := file.Pos(); pos.IsKnown() {
+ // return check.fset.File(pos).Name()
+ // TODO(gri) do we need the actual file name here?
+ return pos.RelFilename()
+ }
+ return fmt.Sprintf("file[%d]", fileNo)
+}
+
+func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package {
+ // If we already have a package for the given (path, dir)
+ // pair, use it instead of doing a full import.
+ // Checker.impMap only caches packages that are marked Complete
+ // or fake (dummy packages for failed imports). Incomplete but
+ // non-fake packages do require an import to complete them.
+ key := importKey{path, dir}
+ imp := check.impMap[key]
+ if imp != nil {
+ return imp
+ }
+
+ // no package yet => import it
+ if path == "C" && (check.conf.FakeImportC || check.conf.go115UsesCgo) {
+ imp = NewPackage("C", "C")
+ imp.fake = true // package scope is not populated
+ imp.cgo = check.conf.go115UsesCgo
+ } else {
+ // ordinary import
+ var err error
+ if importer := check.conf.Importer; importer == nil {
+ err = fmt.Errorf("Config.Importer not installed")
+ } else if importerFrom, ok := importer.(ImporterFrom); ok {
+ imp, err = importerFrom.ImportFrom(path, dir, 0)
+ if imp == nil && err == nil {
+ err = fmt.Errorf("Config.Importer.ImportFrom(%s, %s, 0) returned nil but no error", path, dir)
+ }
+ } else {
+ imp, err = importer.Import(path)
+ if imp == nil && err == nil {
+ err = fmt.Errorf("Config.Importer.Import(%s) returned nil but no error", path)
+ }
+ }
+ // make sure we have a valid package name
+ // (errors here can only happen through manipulation of packages after creation)
+ if err == nil && imp != nil && (imp.name == "_" || imp.name == "") {
+ err = fmt.Errorf("invalid package name: %q", imp.name)
+ imp = nil // create fake package below
+ }
+ if err != nil {
+ check.errorf(pos, BrokenImport, "could not import %s (%s)", path, err)
+ if imp == nil {
+ // create a new fake package
+ // come up with a sensible package name (heuristic)
+ name := path
+ if i := len(name); i > 0 && name[i-1] == '/' {
+ name = name[:i-1]
+ }
+ if i := strings.LastIndex(name, "/"); i >= 0 {
+ name = name[i+1:]
+ }
+ imp = NewPackage(path, name)
+ }
+ // continue to use the package as best as we can
+ imp.fake = true // avoid follow-up lookup failures
+ }
+ }
+
+ // package should be complete or marked fake, but be cautious
+ if imp.complete || imp.fake {
+ check.impMap[key] = imp
+ // Once we've formatted an error message, keep the pkgPathMap
+ // up-to-date on subsequent imports. It is used for package
+ // qualification in error messages.
+ if check.pkgPathMap != nil {
+ check.markImports(imp)
+ }
+ return imp
+ }
+
+ // something went wrong (importer may have returned incomplete package without error)
+ return nil
+}
+
+// collectObjects collects all file and package objects and inserts them
+// into their respective scopes. It also performs imports and associates
+// methods with receiver base type names.
+func (check *Checker) collectObjects() {
+ pkg := check.pkg
+
+ // pkgImports is the set of packages already imported by any package file seen
+ // so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
+ // it (pkg.imports may not be empty if we are checking test files incrementally).
+ // Note that pkgImports is keyed by package (and thus package path), not by an
+ // importKey value. Two different importKey values may map to the same package
+ // which is why we cannot use the check.impMap here.
+ var pkgImports = make(map[*Package]bool)
+ for _, imp := range pkg.imports {
+ pkgImports[imp] = true
+ }
+
+ type methodInfo struct {
+ obj *Func // method
+ ptr bool // true if pointer receiver
+ recv *syntax.Name // receiver type name
+ }
+ var methods []methodInfo // collected methods with valid receivers and non-blank _ names
+ var fileScopes []*Scope
+ for fileNo, file := range check.files {
+ // The package identifier denotes the current package,
+ // but there is no corresponding package object.
+ check.recordDef(file.PkgName, nil)
+
+ fileScope := NewScope(pkg.scope, syntax.StartPos(file), syntax.EndPos(file), check.filename(fileNo))
+ fileScopes = append(fileScopes, fileScope)
+ check.recordScope(file, fileScope)
+
+ // determine file directory, necessary to resolve imports
+ // FileName may be "" (typically for tests) in which case
+ // we get "." as the directory which is what we would want.
+ fileDir := dir(file.PkgName.Pos().RelFilename()) // TODO(gri) should this be filename?
+
+ first := -1 // index of first ConstDecl in the current group, or -1
+ var last *syntax.ConstDecl // last ConstDecl with init expressions, or nil
+ for index, decl := range file.DeclList {
+ if _, ok := decl.(*syntax.ConstDecl); !ok {
+ first = -1 // we're not in a constant declaration
+ }
+
+ switch s := decl.(type) {
+ case *syntax.ImportDecl:
+ // import package
+ if s.Path == nil || s.Path.Bad {
+ continue // error reported during parsing
+ }
+ path, err := validatedImportPath(s.Path.Value)
+ if err != nil {
+ check.errorf(s.Path, BadImportPath, "invalid import path (%s)", err)
+ continue
+ }
+
+ imp := check.importPackage(s.Path.Pos(), path, fileDir)
+ if imp == nil {
+ continue
+ }
+
+ // local name overrides imported package name
+ name := imp.name
+ if s.LocalPkgName != nil {
+ name = s.LocalPkgName.Value
+ if path == "C" {
+ // match 1.17 cmd/compile (not prescribed by spec)
+ check.error(s.LocalPkgName, ImportCRenamed, `cannot rename import "C"`)
+ continue
+ }
+ }
+
+ if name == "init" {
+ check.error(s, InvalidInitDecl, "cannot import package as init - init must be a func")
+ continue
+ }
+
+ // add package to list of explicit imports
+ // (this functionality is provided as a convenience
+ // for clients; it is not needed for type-checking)
+ if !pkgImports[imp] {
+ pkgImports[imp] = true
+ pkg.imports = append(pkg.imports, imp)
+ }
+
+ pkgName := NewPkgName(s.Pos(), pkg, name, imp)
+ if s.LocalPkgName != nil {
+ // in a dot-import, the dot represents the package
+ check.recordDef(s.LocalPkgName, pkgName)
+ } else {
+ check.recordImplicit(s, pkgName)
+ }
+
+ if imp.fake {
+ // match 1.17 cmd/compile (not prescribed by spec)
+ pkgName.used = true
+ }
+
+ // add import to file scope
+ check.imports = append(check.imports, pkgName)
+ if name == "." {
+ // dot-import
+ if check.dotImportMap == nil {
+ check.dotImportMap = make(map[dotImportKey]*PkgName)
+ }
+ // merge imported scope with file scope
+ for name, obj := range imp.scope.elems {
+ // Note: Avoid eager resolve(name, obj) here, so we only
+ // resolve dot-imported objects as needed.
+
+ // A package scope may contain non-exported objects,
+ // do not import them!
+ if isExported(name) {
+ // declare dot-imported object
+ // (Do not use check.declare because it modifies the object
+ // via Object.setScopePos, which leads to a race condition;
+ // the object may be imported into more than one file scope
+ // concurrently. See go.dev/issue/32154.)
+ if alt := fileScope.Lookup(name); alt != nil {
+ var err error_
+ err.code = DuplicateDecl
+ err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ } else {
+ fileScope.insert(name, obj)
+ check.dotImportMap[dotImportKey{fileScope, name}] = pkgName
+ }
+ }
+ }
+ } else {
+ // declare imported package object in file scope
+ // (no need to provide s.LocalPkgName since we called check.recordDef earlier)
+ check.declare(fileScope, nil, pkgName, nopos)
+ }
+
+ case *syntax.ConstDecl:
+ // iota is the index of the current constDecl within the group
+ if first < 0 || s.Group == nil || file.DeclList[index-1].(*syntax.ConstDecl).Group != s.Group {
+ first = index
+ last = nil
+ }
+ iota := constant.MakeInt64(int64(index - first))
+
+ // determine which initialization expressions to use
+ inherited := true
+ switch {
+ case s.Type != nil || s.Values != nil:
+ last = s
+ inherited = false
+ case last == nil:
+ last = new(syntax.ConstDecl) // make sure last exists
+ inherited = false
+ }
+
+ // declare all constants
+ values := syntax.UnpackListExpr(last.Values)
+ for i, name := range s.NameList {
+ obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
+
+ var init syntax.Expr
+ if i < len(values) {
+ init = values[i]
+ }
+
+ d := &declInfo{file: fileScope, vtyp: last.Type, init: init, inherited: inherited}
+ check.declarePkgObj(name, obj, d)
+ }
+
+ // Constants must always have init values.
+ check.arity(s.Pos(), s.NameList, values, true, inherited)
+
+ case *syntax.VarDecl:
+ lhs := make([]*Var, len(s.NameList))
+ // If there's exactly one rhs initializer, use
+ // the same declInfo d1 for all lhs variables
+ // so that each lhs variable depends on the same
+ // rhs initializer (n:1 var declaration).
+ var d1 *declInfo
+ if _, ok := s.Values.(*syntax.ListExpr); !ok {
+ // The lhs elements are only set up after the for loop below,
+ // but that's ok because declarePkgObj only collects the declInfo
+ // for a later phase.
+ d1 = &declInfo{file: fileScope, lhs: lhs, vtyp: s.Type, init: s.Values}
+ }
+
+ // declare all variables
+ values := syntax.UnpackListExpr(s.Values)
+ for i, name := range s.NameList {
+ obj := NewVar(name.Pos(), pkg, name.Value, nil)
+ lhs[i] = obj
+
+ d := d1
+ if d == nil {
+ // individual assignments
+ var init syntax.Expr
+ if i < len(values) {
+ init = values[i]
+ }
+ d = &declInfo{file: fileScope, vtyp: s.Type, init: init}
+ }
+
+ check.declarePkgObj(name, obj, d)
+ }
+
+ // If we have no type, we must have values.
+ if s.Type == nil || values != nil {
+ check.arity(s.Pos(), s.NameList, values, false, false)
+ }
+
+ case *syntax.TypeDecl:
+ _ = len(s.TParamList) != 0 && check.verifyVersionf(s.TParamList[0], go1_18, "type parameter")
+ obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil)
+ check.declarePkgObj(s.Name, obj, &declInfo{file: fileScope, tdecl: s})
+
+ case *syntax.FuncDecl:
+ name := s.Name.Value
+ obj := NewFunc(s.Name.Pos(), pkg, name, nil)
+ hasTParamError := false // avoid duplicate type parameter errors
+ if s.Recv == nil {
+ // regular function
+ if name == "init" || name == "main" && pkg.name == "main" {
+ code := InvalidInitDecl
+ if name == "main" {
+ code = InvalidMainDecl
+ }
+ if len(s.TParamList) != 0 {
+ check.softErrorf(s.TParamList[0], code, "func %s must have no type parameters", name)
+ hasTParamError = true
+ }
+ if t := s.Type; len(t.ParamList) != 0 || len(t.ResultList) != 0 {
+ check.softErrorf(s.Name, code, "func %s must have no arguments and no return values", name)
+ }
+ }
+ // don't declare init functions in the package scope - they are invisible
+ if name == "init" {
+ obj.parent = pkg.scope
+ check.recordDef(s.Name, obj)
+ // init functions must have a body
+ if s.Body == nil {
+ // TODO(gri) make this error message consistent with the others above
+ check.softErrorf(obj.pos, MissingInitBody, "missing function body")
+ }
+ } else {
+ check.declare(pkg.scope, s.Name, obj, nopos)
+ }
+ } else {
+ // method
+ // d.Recv != nil
+ ptr, recv, _ := check.unpackRecv(s.Recv.Type, false)
+ // Methods with invalid receiver cannot be associated to a type, and
+ // methods with blank _ names are never found; no need to collect any
+ // of them. They will still be type-checked with all the other functions.
+ if recv != nil && name != "_" {
+ methods = append(methods, methodInfo{obj, ptr, recv})
+ }
+ check.recordDef(s.Name, obj)
+ }
+ _ = len(s.TParamList) != 0 && !hasTParamError && check.verifyVersionf(s.TParamList[0], go1_18, "type parameter")
+ info := &declInfo{file: fileScope, fdecl: s}
+ // Methods are not package-level objects but we still track them in the
+ // object map so that we can handle them like regular functions (if the
+ // receiver is invalid); also we need their fdecl info when associating
+ // them with their receiver base type, below.
+ check.objMap[obj] = info
+ obj.setOrder(uint32(len(check.objMap)))
+
+ default:
+ check.errorf(s, InvalidSyntaxTree, "unknown syntax.Decl node %T", s)
+ }
+ }
+ }
+
+ // verify that objects in package and file scopes have different names
+ for _, scope := range fileScopes {
+ for name, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(name); alt != nil {
+ obj = resolve(name, obj)
+ var err error_
+ err.code = DuplicateDecl
+ if pkg, ok := obj.(*PkgName); ok {
+ err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported())
+ err.recordAltDecl(pkg)
+ } else {
+ err.errorf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg())
+ // TODO(gri) dot-imported objects don't have a position; recordAltDecl won't print anything
+ err.recordAltDecl(obj)
+ }
+ check.report(&err)
+ }
+ }
+ }
+
+ // Now that we have all package scope objects and all methods,
+ // associate methods with receiver base type name where possible.
+ // Ignore methods that have an invalid receiver. They will be
+ // type-checked later, with regular functions.
+ if methods != nil {
+ check.methods = make(map[*TypeName][]*Func)
+ for i := range methods {
+ m := &methods[i]
+ // Determine the receiver base type and associate m with it.
+ ptr, base := check.resolveBaseTypeName(m.ptr, m.recv, fileScopes)
+ if base != nil {
+ m.obj.hasPtrRecv_ = ptr
+ check.methods[base] = append(check.methods[base], m.obj)
+ }
+ }
+ }
+}
+
+// unpackRecv unpacks a receiver type and returns its components: ptr indicates whether
+// rtyp is a pointer receiver, rname is the receiver type name, and tparams are its
+// type parameters, if any. The type parameters are only unpacked if unpackParams is
+// set. If rname is nil, the receiver is unusable (i.e., the source has a bug which we
+// cannot easily work around).
+func (check *Checker) unpackRecv(rtyp syntax.Expr, unpackParams bool) (ptr bool, rname *syntax.Name, tparams []*syntax.Name) {
+L: // unpack receiver type
+ // This accepts invalid receivers such as ***T and does not
+ // work for other invalid receivers, but we don't care. The
+ // validity of receiver expressions is checked elsewhere.
+ for {
+ switch t := rtyp.(type) {
+ case *syntax.ParenExpr:
+ rtyp = t.X
+ // case *ast.StarExpr:
+ // ptr = true
+ // rtyp = t.X
+ case *syntax.Operation:
+ if t.Op != syntax.Mul || t.Y != nil {
+ break
+ }
+ ptr = true
+ rtyp = t.X
+ default:
+ break L
+ }
+ }
+
+ // unpack type parameters, if any
+ if ptyp, _ := rtyp.(*syntax.IndexExpr); ptyp != nil {
+ rtyp = ptyp.X
+ if unpackParams {
+ for _, arg := range syntax.UnpackListExpr(ptyp.Index) {
+ var par *syntax.Name
+ switch arg := arg.(type) {
+ case *syntax.Name:
+ par = arg
+ case *syntax.BadExpr:
+ // ignore - error already reported by parser
+ case nil:
+ check.error(ptyp, InvalidSyntaxTree, "parameterized receiver contains nil parameters")
+ default:
+ check.errorf(arg, BadDecl, "receiver type parameter %s must be an identifier", arg)
+ }
+ if par == nil {
+ par = syntax.NewName(arg.Pos(), "_")
+ }
+ tparams = append(tparams, par)
+ }
+
+ }
+ }
+
+ // unpack receiver name
+ if name, _ := rtyp.(*syntax.Name); name != nil {
+ rname = name
+ }
+
+ return
+}
+
+// resolveBaseTypeName returns the non-alias base type name for typ, and whether
+// there was a pointer indirection to get to it. The base type name must be declared
+// in package scope, and there can be at most one pointer indirection. If no such type
+// name exists, the returned base is nil.
+func (check *Checker) resolveBaseTypeName(seenPtr bool, typ syntax.Expr, fileScopes []*Scope) (ptr bool, base *TypeName) {
+ // Algorithm: Starting from a type expression, which may be a name,
+ // we follow that type through alias declarations until we reach a
+ // non-alias type name. If we encounter anything but pointer types or
+ // parentheses we're done. If we encounter more than one pointer type
+ // we're done.
+ ptr = seenPtr
+ var seen map[*TypeName]bool
+ for {
+ // check if we have a pointer type
+ // if pexpr, _ := typ.(*ast.StarExpr); pexpr != nil {
+ if pexpr, _ := typ.(*syntax.Operation); pexpr != nil && pexpr.Op == syntax.Mul && pexpr.Y == nil {
+ // if we've already seen a pointer, we're done
+ if ptr {
+ return false, nil
+ }
+ ptr = true
+ typ = syntax.Unparen(pexpr.X) // continue with pointer base type
+ }
+
+ // typ must be a name, or a C.name cgo selector.
+ var name string
+ switch typ := typ.(type) {
+ case *syntax.Name:
+ name = typ.Value
+ case *syntax.SelectorExpr:
+ // C.struct_foo is a valid type name for packages using cgo.
+ //
+ // Detect this case, and adjust name so that the correct TypeName is
+ // resolved below.
+ if ident, _ := typ.X.(*syntax.Name); ident != nil && ident.Value == "C" {
+ // Check whether "C" actually resolves to an import of "C", by looking
+ // in the appropriate file scope.
+ var obj Object
+ for _, scope := range fileScopes {
+ if scope.Contains(ident.Pos()) {
+ obj = scope.Lookup(ident.Value)
+ }
+ }
+ // If Config.go115UsesCgo is set, the typechecker will resolve Cgo
+ // selectors to their cgo name. We must do the same here.
+ if pname, _ := obj.(*PkgName); pname != nil {
+ if pname.imported.cgo { // only set if Config.go115UsesCgo is set
+ name = "_Ctype_" + typ.Sel.Value
+ }
+ }
+ }
+ if name == "" {
+ return false, nil
+ }
+ default:
+ return false, nil
+ }
+
+ // name must denote an object found in the current package scope
+ // (note that dot-imported objects are not in the package scope!)
+ obj := check.pkg.scope.Lookup(name)
+ if obj == nil {
+ return false, nil
+ }
+
+ // the object must be a type name...
+ tname, _ := obj.(*TypeName)
+ if tname == nil {
+ return false, nil
+ }
+
+ // ... which we have not seen before
+ if seen[tname] {
+ return false, nil
+ }
+
+ // we're done if tdecl defined tname as a new type
+ // (rather than an alias)
+ tdecl := check.objMap[tname].tdecl // must exist for objects in package scope
+ if !tdecl.Alias {
+ return ptr, tname
+ }
+
+ // otherwise, continue resolving
+ typ = tdecl.Type
+ if seen == nil {
+ seen = make(map[*TypeName]bool)
+ }
+ seen[tname] = true
+ }
+}
+
+// packageObjects typechecks all package objects, but not function bodies.
+func (check *Checker) packageObjects() {
+ // process package objects in source order for reproducible results
+ objList := make([]Object, len(check.objMap))
+ i := 0
+ for obj := range check.objMap {
+ objList[i] = obj
+ i++
+ }
+ sort.Sort(inSourceOrder(objList))
+
+ // add new methods to already type-checked types (from a prior Checker.Files call)
+ for _, obj := range objList {
+ if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil {
+ check.collectMethods(obj)
+ }
+ }
+
+ if check.enableAlias {
+ // With Alias nodes we can process declarations in any order.
+ for _, obj := range objList {
+ check.objDecl(obj, nil)
+ }
+ } else {
+ // Without Alias nodes, we process non-alias type declarations first, followed by
+ // alias declarations, and then everything else. This appears to avoid most situations
+ // where the type of an alias is needed before it is available.
+ // There may still be cases where this is not good enough (see also go.dev/issue/25838).
+ // In those cases Checker.ident will report an error ("invalid use of type alias").
+ var aliasList []*TypeName
+ var othersList []Object // everything that's not a type
+ // phase 1: non-alias type declarations
+ for _, obj := range objList {
+ if tname, _ := obj.(*TypeName); tname != nil {
+ if check.objMap[tname].tdecl.Alias {
+ aliasList = append(aliasList, tname)
+ } else {
+ check.objDecl(obj, nil)
+ }
+ } else {
+ othersList = append(othersList, obj)
+ }
+ }
+ // phase 2: alias type declarations
+ for _, obj := range aliasList {
+ check.objDecl(obj, nil)
+ }
+ // phase 3: all other declarations
+ for _, obj := range othersList {
+ check.objDecl(obj, nil)
+ }
+ }
+
+ // At this point we may have a non-empty check.methods map; this means that not all
+ // entries were deleted at the end of typeDecl because the respective receiver base
+ // types were not found. In that case, an error was reported when declaring those
+ // methods. We can now safely discard this map.
+ check.methods = nil
+}
+
+// inSourceOrder implements the sort.Sort interface.
+type inSourceOrder []Object
+
+func (a inSourceOrder) Len() int { return len(a) }
+func (a inSourceOrder) Less(i, j int) bool { return a[i].order() < a[j].order() }
+func (a inSourceOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// unusedImports checks for unused imports.
+func (check *Checker) unusedImports() {
+ // If function bodies are not checked, packages' uses are likely missing - don't check.
+ if check.conf.IgnoreFuncBodies {
+ return
+ }
+
+ // spec: "It is illegal (...) to directly import a package without referring to
+ // any of its exported identifiers. To import a package solely for its side-effects
+ // (initialization), use the blank identifier as explicit package name."
+
+ for _, obj := range check.imports {
+ if !obj.used && obj.name != "_" {
+ check.errorUnusedPkg(obj)
+ }
+ }
+}
+
+func (check *Checker) errorUnusedPkg(obj *PkgName) {
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ path := obj.imported.path
+ elem := path
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if obj.name == "" || obj.name == "." || obj.name == elem {
+ check.softErrorf(obj, UnusedImport, "%q imported and not used", path)
+ } else {
+ check.softErrorf(obj, UnusedImport, "%q imported as %s and not used", path, obj.name)
+ }
+}
+
+// dir makes a good-faith attempt to return the directory
+// portion of path. If path is empty, the result is ".".
+// (Per the go/build package dependency tests, we cannot import
+// path/filepath and simply use filepath.Dir.)
+func dir(path string) string {
+ if i := strings.LastIndexAny(path, `/\`); i > 0 {
+ return path[:i]
+ }
+ // i <= 0
+ return "."
+}
diff --git a/src/cmd/compile/internal/types2/resolver_test.go b/src/cmd/compile/internal/types2/resolver_test.go
new file mode 100644
index 0000000..8105d8a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/resolver_test.go
@@ -0,0 +1,218 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "internal/testenv"
+ "sort"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+type resolveTestImporter struct {
+ importer ImporterFrom
+ imported map[string]bool
+}
+
+func (imp *resolveTestImporter) Import(string) (*Package, error) {
+ panic("should not be called")
+}
+
+func (imp *resolveTestImporter) ImportFrom(path, srcDir string, mode ImportMode) (*Package, error) {
+ if mode != 0 {
+ panic("mode must be 0")
+ }
+ if imp.importer == nil {
+ imp.importer = defaultImporter().(ImporterFrom)
+ imp.imported = make(map[string]bool)
+ }
+ pkg, err := imp.importer.ImportFrom(path, srcDir, mode)
+ if err != nil {
+ return nil, err
+ }
+ imp.imported[path] = true
+ return pkg, nil
+}
+
+func TestResolveIdents(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ sources := []string{
+ `
+ package p
+ import "fmt"
+ import "math"
+ const pi = math.Pi
+ func sin(x float64) float64 {
+ return math.Sin(x)
+ }
+ var Println = fmt.Println
+ `,
+ `
+ package p
+ import "fmt"
+ type errorStringer struct { fmt.Stringer; error }
+ func f() string {
+ _ = "foo"
+ return fmt.Sprintf("%d", g())
+ }
+ func g() (x int) { return }
+ `,
+ `
+ package p
+ import . "go/parser"
+ import "sync"
+ func h() Mode { return ImportsOnly }
+ var _, x int = 1, 2
+ func init() {}
+ type T struct{ *sync.Mutex; a, b, c int}
+ type I interface{ m() }
+ var _ = T{a: 1, b: 2, c: 3}
+ func (_ T) m() {}
+ func (T) _() {}
+ var i I
+ var _ = i.m
+ func _(s []int) { for i, x := range s { _, _ = i, x } }
+ func _(x interface{}) {
+ switch x := x.(type) {
+ case int:
+ _ = x
+ }
+ switch {} // implicit 'true' tag
+ }
+ `,
+ `
+ package p
+ type S struct{}
+ func (T) _() {}
+ func (T) _() {}
+ `,
+ `
+ package p
+ func _() {
+ L0:
+ L1:
+ goto L0
+ for {
+ goto L1
+ }
+ if true {
+ goto L2
+ }
+ L2:
+ }
+ `,
+ }
+
+ pkgnames := []string{
+ "fmt",
+ "math",
+ }
+
+ // parse package files
+ var files []*syntax.File
+ for _, src := range sources {
+ files = append(files, mustParse(src))
+ }
+
+ // resolve and type-check package AST
+ importer := new(resolveTestImporter)
+ conf := Config{Importer: importer}
+ uses := make(map[*syntax.Name]Object)
+ defs := make(map[*syntax.Name]Object)
+ _, err := conf.Check("testResolveIdents", files, &Info{Defs: defs, Uses: uses})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check that all packages were imported
+ for _, name := range pkgnames {
+ if !importer.imported[name] {
+ t.Errorf("package %s not imported", name)
+ }
+ }
+
+ // check that qualified identifiers are resolved
+ for _, f := range files {
+ syntax.Inspect(f, func(n syntax.Node) bool {
+ if s, ok := n.(*syntax.SelectorExpr); ok {
+ if x, ok := s.X.(*syntax.Name); ok {
+ obj := uses[x]
+ if obj == nil {
+ t.Errorf("%s: unresolved qualified identifier %s", x.Pos(), x.Value)
+ return false
+ }
+ if _, ok := obj.(*PkgName); ok && uses[s.Sel] == nil {
+ t.Errorf("%s: unresolved selector %s", s.Sel.Pos(), s.Sel.Value)
+ return false
+ }
+ return false
+ }
+ return true
+ }
+ return true
+ })
+ }
+
+ for id, obj := range uses {
+ if obj == nil {
+ t.Errorf("%s: Uses[%s] == nil", id.Pos(), id.Value)
+ }
+ }
+
+ // Check that each identifier in the source is found in uses or defs or both.
+ // We need the foundUses/Defs maps (rather than just deleting the found objects
+ // from the uses and defs maps) because syntax.Walk traverses shared nodes multiple
+ // times (e.g. types in field lists such as "a, b, c int").
+ foundUses := make(map[*syntax.Name]bool)
+ foundDefs := make(map[*syntax.Name]bool)
+ var both []string
+ for _, f := range files {
+ syntax.Inspect(f, func(n syntax.Node) bool {
+ if x, ok := n.(*syntax.Name); ok {
+ var objects int
+ if _, found := uses[x]; found {
+ objects |= 1
+ foundUses[x] = true
+ }
+ if _, found := defs[x]; found {
+ objects |= 2
+ foundDefs[x] = true
+ }
+ switch objects {
+ case 0:
+ t.Errorf("%s: unresolved identifier %s", x.Pos(), x.Value)
+ case 3:
+ both = append(both, x.Value)
+ }
+ return false
+ }
+ return true
+ })
+ }
+
+ // check the expected set of idents that are simultaneously uses and defs
+ sort.Strings(both)
+ if got, want := fmt.Sprint(both), "[Mutex Stringer error]"; got != want {
+ t.Errorf("simultaneous uses/defs = %s, want %s", got, want)
+ }
+
+ // any left-over identifiers didn't exist in the source
+ for x := range uses {
+ if !foundUses[x] {
+ t.Errorf("%s: identifier %s not present in source", x.Pos(), x.Value)
+ }
+ }
+ for x := range defs {
+ if !foundDefs[x] {
+ t.Errorf("%s: identifier %s not present in source", x.Pos(), x.Value)
+ }
+ }
+
+ // TODO(gri) add tests to check ImplicitObj callbacks
+}
diff --git a/src/cmd/compile/internal/types2/return.go b/src/cmd/compile/internal/types2/return.go
new file mode 100644
index 0000000..01988b0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/return.go
@@ -0,0 +1,184 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements isTerminating.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+)
+
+// isTerminating reports if s is a terminating statement.
+// If s is labeled, label is the label name; otherwise s
+// is "".
+func (check *Checker) isTerminating(s syntax.Stmt, label string) bool {
+ switch s := s.(type) {
+ default:
+ unreachable()
+
+ case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.SendStmt,
+ *syntax.AssignStmt, *syntax.CallStmt:
+ // no chance
+
+ case *syntax.LabeledStmt:
+ return check.isTerminating(s.Stmt, s.Label.Value)
+
+ case *syntax.ExprStmt:
+ // calling the predeclared (possibly parenthesized) panic() function is terminating
+ if call, ok := syntax.Unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] {
+ return true
+ }
+
+ case *syntax.ReturnStmt:
+ return true
+
+ case *syntax.BranchStmt:
+ if s.Tok == syntax.Goto || s.Tok == syntax.Fallthrough {
+ return true
+ }
+
+ case *syntax.BlockStmt:
+ return check.isTerminatingList(s.List, "")
+
+ case *syntax.IfStmt:
+ if s.Else != nil &&
+ check.isTerminating(s.Then, "") &&
+ check.isTerminating(s.Else, "") {
+ return true
+ }
+
+ case *syntax.SwitchStmt:
+ return check.isTerminatingSwitch(s.Body, label)
+
+ case *syntax.SelectStmt:
+ for _, cc := range s.Body {
+ if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) {
+ return false
+ }
+
+ }
+ return true
+
+ case *syntax.ForStmt:
+ if _, ok := s.Init.(*syntax.RangeClause); ok {
+ // Range clauses guarantee that the loop terminates,
+ // so the loop is not a terminating statement. See go.dev/issue/49003.
+ break
+ }
+ if s.Cond == nil && !hasBreak(s.Body, label, true) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (check *Checker) isTerminatingList(list []syntax.Stmt, label string) bool {
+ // trailing empty statements are permitted - skip them
+ for i := len(list) - 1; i >= 0; i-- {
+ if _, ok := list[i].(*syntax.EmptyStmt); !ok {
+ return check.isTerminating(list[i], label)
+ }
+ }
+ return false // all statements are empty
+}
+
+func (check *Checker) isTerminatingSwitch(body []*syntax.CaseClause, label string) bool {
+ hasDefault := false
+ for _, cc := range body {
+ if cc.Cases == nil {
+ hasDefault = true
+ }
+ if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) {
+ return false
+ }
+ }
+ return hasDefault
+}
+
+// TODO(gri) For nested breakable statements, the current implementation of hasBreak
+// will traverse the same subtree repeatedly, once for each label. Replace
+// with a single-pass label/break matching phase.
+
+// hasBreak reports if s is or contains a break statement
+// referring to the label-ed statement or implicit-ly the
+// closest outer breakable statement.
+func hasBreak(s syntax.Stmt, label string, implicit bool) bool {
+ switch s := s.(type) {
+ default:
+ unreachable()
+
+ case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.ExprStmt,
+ *syntax.SendStmt, *syntax.AssignStmt, *syntax.CallStmt,
+ *syntax.ReturnStmt:
+ // no chance
+
+ case *syntax.LabeledStmt:
+ return hasBreak(s.Stmt, label, implicit)
+
+ case *syntax.BranchStmt:
+ if s.Tok == syntax.Break {
+ if s.Label == nil {
+ return implicit
+ }
+ if s.Label.Value == label {
+ return true
+ }
+ }
+
+ case *syntax.BlockStmt:
+ return hasBreakList(s.List, label, implicit)
+
+ case *syntax.IfStmt:
+ if hasBreak(s.Then, label, implicit) ||
+ s.Else != nil && hasBreak(s.Else, label, implicit) {
+ return true
+ }
+
+ case *syntax.SwitchStmt:
+ if label != "" && hasBreakCaseList(s.Body, label, false) {
+ return true
+ }
+
+ case *syntax.SelectStmt:
+ if label != "" && hasBreakCommList(s.Body, label, false) {
+ return true
+ }
+
+ case *syntax.ForStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func hasBreakList(list []syntax.Stmt, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreak(s, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
+
+func hasBreakCaseList(list []*syntax.CaseClause, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreakList(s.Body, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
+
+func hasBreakCommList(list []*syntax.CommClause, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreakList(s.Body, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go
new file mode 100644
index 0000000..25bde6a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/scope.go
@@ -0,0 +1,292 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Scopes.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// A Scope maintains a set of objects and links to its containing
+// (parent) and contained (children) scopes. Objects may be inserted
+// and looked up by name. The zero value for Scope is a ready-to-use
+// empty scope.
+type Scope struct {
+ parent *Scope
+ children []*Scope
+ number int // parent.children[number-1] is this scope; 0 if there is no parent
+ elems map[string]Object // lazily allocated
+ pos, end syntax.Pos // scope extent; may be invalid
+ comment string // for debugging only
+ isFunc bool // set if this is a function scope (internal use only)
+}
+
+// NewScope returns a new, empty scope contained in the given parent
+// scope, if any. The comment is for debugging only.
+func NewScope(parent *Scope, pos, end syntax.Pos, comment string) *Scope {
+ s := &Scope{parent, nil, 0, nil, pos, end, comment, false}
+ // don't add children to Universe scope!
+ if parent != nil && parent != Universe {
+ parent.children = append(parent.children, s)
+ s.number = len(parent.children)
+ }
+ return s
+}
+
+// Parent returns the scope's containing (parent) scope.
+func (s *Scope) Parent() *Scope { return s.parent }
+
+// Len returns the number of scope elements.
+func (s *Scope) Len() int { return len(s.elems) }
+
+// Names returns the scope's element names in sorted order.
+func (s *Scope) Names() []string {
+ names := make([]string, len(s.elems))
+ i := 0
+ for name := range s.elems {
+ names[i] = name
+ i++
+ }
+ sort.Strings(names)
+ return names
+}
+
+// NumChildren returns the number of scopes nested in s.
+func (s *Scope) NumChildren() int { return len(s.children) }
+
+// Child returns the i'th child scope for 0 <= i < NumChildren().
+func (s *Scope) Child(i int) *Scope { return s.children[i] }
+
+// Lookup returns the object in scope s with the given name if such an
+// object exists; otherwise the result is nil.
+func (s *Scope) Lookup(name string) Object {
+ return resolve(name, s.elems[name])
+}
+
+// LookupParent follows the parent chain of scopes starting with s until
+// it finds a scope where Lookup(name) returns a non-nil object, and then
+// returns that scope and object. If a valid position pos is provided,
+// only objects that were declared at or before pos are considered.
+// If no such scope and object exists, the result is (nil, nil).
+//
+// Note that obj.Parent() may be different from the returned scope if the
+// object was inserted into the scope and already had a parent at that
+// time (see Insert). This can only happen for dot-imported objects
+// whose scope is the scope of the package that exported them.
+func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
+ for ; s != nil; s = s.parent {
+ if obj := s.Lookup(name); obj != nil && (!pos.IsKnown() || cmpPos(obj.scopePos(), pos) <= 0) {
+ return s, obj
+ }
+ }
+ return nil, nil
+}
+
+// Insert attempts to insert an object obj into scope s.
+// If s already contains an alternative object alt with
+// the same name, Insert leaves s unchanged and returns alt.
+// Otherwise it inserts obj, sets the object's parent scope
+// if not already set, and returns nil.
+func (s *Scope) Insert(obj Object) Object {
+ name := obj.Name()
+ if alt := s.Lookup(name); alt != nil {
+ return alt
+ }
+ s.insert(name, obj)
+ if obj.Parent() == nil {
+ obj.setParent(s)
+ }
+ return nil
+}
+
+// InsertLazy is like Insert, but allows deferring construction of the
+// inserted object until it's accessed with Lookup. The Object
+// returned by resolve must have the same name as given to InsertLazy.
+// If s already contains an alternative object with the same name,
+// InsertLazy leaves s unchanged and returns false. Otherwise it
+// records the binding and returns true. The object's parent scope
+// will be set to s after resolve is called.
+func (s *Scope) InsertLazy(name string, resolve func() Object) bool {
+ if s.elems[name] != nil {
+ return false
+ }
+ s.insert(name, &lazyObject{parent: s, resolve: resolve})
+ return true
+}
+
+func (s *Scope) insert(name string, obj Object) {
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+}
+
+// Squash merges s with its parent scope p by adding all
+// objects of s to p, adding all children of s to the
+// children of p, and removing s from p's children.
+// The function f is called for each object obj in s which
+// has an object alt in p. s should be discarded after
+// having been squashed.
+func (s *Scope) Squash(err func(obj, alt Object)) {
+ p := s.parent
+ assert(p != nil)
+ for name, obj := range s.elems {
+ obj = resolve(name, obj)
+ obj.setParent(nil)
+ if alt := p.Insert(obj); alt != nil {
+ err(obj, alt)
+ }
+ }
+
+ j := -1 // index of s in p.children
+ for i, ch := range p.children {
+ if ch == s {
+ j = i
+ break
+ }
+ }
+ assert(j >= 0)
+ k := len(p.children) - 1
+ p.children[j] = p.children[k]
+ p.children = p.children[:k]
+
+ p.children = append(p.children, s.children...)
+
+ s.children = nil
+ s.elems = nil
+}
+
+// Pos and End describe the scope's source code extent [pos, end).
+// The results are guaranteed to be valid only if the type-checked
+// AST has complete position information. The extent is undefined
+// for Universe and package scopes.
+func (s *Scope) Pos() syntax.Pos { return s.pos }
+func (s *Scope) End() syntax.Pos { return s.end }
+
+// Contains reports whether pos is within the scope's extent.
+// The result is guaranteed to be valid only if the type-checked
+// AST has complete position information.
+func (s *Scope) Contains(pos syntax.Pos) bool {
+ return cmpPos(s.pos, pos) <= 0 && cmpPos(pos, s.end) < 0
+}
+
+// Innermost returns the innermost (child) scope containing
+// pos. If pos is not within any scope, the result is nil.
+// The result is also nil for the Universe scope.
+// The result is guaranteed to be valid only if the type-checked
+// AST has complete position information.
+func (s *Scope) Innermost(pos syntax.Pos) *Scope {
+ // Package scopes do not have extents since they may be
+ // discontiguous, so iterate over the package's files.
+ if s.parent == Universe {
+ for _, s := range s.children {
+ if inner := s.Innermost(pos); inner != nil {
+ return inner
+ }
+ }
+ }
+
+ if s.Contains(pos) {
+ for _, s := range s.children {
+ if s.Contains(pos) {
+ return s.Innermost(pos)
+ }
+ }
+ return s
+ }
+ return nil
+}
+
+// WriteTo writes a string representation of the scope to w,
+// with the scope elements sorted by name.
+// The level of indentation is controlled by n >= 0, with
+// n == 0 for no indentation.
+// If recurse is set, it also writes nested (children) scopes.
+func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) {
+ const ind = ". "
+ indn := strings.Repeat(ind, n)
+
+ fmt.Fprintf(w, "%s%s scope %p {\n", indn, s.comment, s)
+
+ indn1 := indn + ind
+ for _, name := range s.Names() {
+ fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name))
+ }
+
+ if recurse {
+ for _, s := range s.children {
+ s.WriteTo(w, n+1, recurse)
+ }
+ }
+
+ fmt.Fprintf(w, "%s}\n", indn)
+}
+
+// String returns a string representation of the scope, for debugging.
+func (s *Scope) String() string {
+ var buf strings.Builder
+ s.WriteTo(&buf, 0, false)
+ return buf.String()
+}
+
+// A lazyObject represents an imported Object that has not been fully
+// resolved yet by its importer.
+type lazyObject struct {
+ parent *Scope
+ resolve func() Object
+ obj Object
+ once sync.Once
+}
+
+// resolve returns the Object represented by obj, resolving lazy
+// objects as appropriate.
+func resolve(name string, obj Object) Object {
+ if lazy, ok := obj.(*lazyObject); ok {
+ lazy.once.Do(func() {
+ obj := lazy.resolve()
+
+ if _, ok := obj.(*lazyObject); ok {
+ panic("recursive lazy object")
+ }
+ if obj.Name() != name {
+ panic("lazy object has unexpected name")
+ }
+
+ if obj.Parent() == nil {
+ obj.setParent(lazy.parent)
+ }
+ lazy.obj = obj
+ })
+
+ obj = lazy.obj
+ }
+ return obj
+}
+
+// stub implementations so *lazyObject implements Object and we can
+// store them directly into Scope.elems.
+func (*lazyObject) Parent() *Scope { panic("unreachable") }
+func (*lazyObject) Pos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) Pkg() *Package { panic("unreachable") }
+func (*lazyObject) Name() string { panic("unreachable") }
+func (*lazyObject) Type() Type { panic("unreachable") }
+func (*lazyObject) Exported() bool { panic("unreachable") }
+func (*lazyObject) Id() string { panic("unreachable") }
+func (*lazyObject) String() string { panic("unreachable") }
+func (*lazyObject) order() uint32 { panic("unreachable") }
+func (*lazyObject) color() color { panic("unreachable") }
+func (*lazyObject) setType(Type) { panic("unreachable") }
+func (*lazyObject) setOrder(uint32) { panic("unreachable") }
+func (*lazyObject) setColor(color color) { panic("unreachable") }
+func (*lazyObject) setParent(*Scope) { panic("unreachable") }
+func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") }
+func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") }
diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go
new file mode 100644
index 0000000..dfbf3a0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/selection.go
@@ -0,0 +1,180 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Selections.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// SelectionKind describes the kind of a selector expression x.f
+// (excluding qualified identifiers).
+//
+// If x is a struct or *struct, a selector expression x.f may denote a
+// sequence of selection operations x.a.b.c.f. The SelectionKind
+// describes the kind of the final (explicit) operation; all the
+// previous (implicit) operations are always field selections.
+// Each element of Indices specifies an implicit field (a, b, c)
+// by its index in the struct type of the field selection operand.
+//
+// For a FieldVal operation, the final selection refers to the field
+// specified by Selection.Obj.
+//
+// For a MethodVal operation, the final selection refers to a method.
+// If the "pointerness" of the method's declared receiver does not
+// match that of the effective receiver after implicit field
+// selection, then an & or * operation is implicitly applied to the
+// receiver variable or value.
+// So, x.f denotes (&x.a.b.c).f when f requires a pointer receiver but
+// x.a.b.c is a non-pointer variable; and it denotes (*x.a.b.c).f when
+// f requires a non-pointer receiver but x.a.b.c is a pointer value.
+//
+// All pointer indirections, whether due to implicit or explicit field
+// selections or * operations inserted for "pointerness", panic if
+// applied to a nil pointer, so a method call x.f() may panic even
+// before the function call.
+//
+// By contrast, a MethodExpr operation T.f is essentially equivalent
+// to a function literal of the form:
+//
+// func(x T, args) (results) { return x.f(args) }
+//
+// Consequently, any implicit field selections and * operations
+// inserted for "pointerness" are not evaluated until the function is
+// called, so a T.f or (*T).f expression never panics.
+type SelectionKind int
+
+const (
+ FieldVal SelectionKind = iota // x.f is a struct field selector
+ MethodVal // x.f is a method selector
+ MethodExpr // x.f is a method expression
+)
+
+// A Selection describes a selector expression x.f.
+// For the declarations:
+//
+// type T struct{ x int; E }
+// type E struct{}
+// func (e E) m() {}
+// var p *T
+//
+// the following relations exist:
+//
+// Selector Kind Recv Obj Type Index Indirect
+//
+// p.x FieldVal T x int {0} true
+// p.m MethodVal *T m func() {1, 0} true
+// T.m MethodExpr T m func(T) {1, 0} false
+type Selection struct {
+ kind SelectionKind
+ recv Type // type of x
+ obj Object // object denoted by x.f
+ index []int // path from x to x.f
+ indirect bool // set if there was any pointer indirection on the path
+}
+
+// Kind returns the selection kind.
+func (s *Selection) Kind() SelectionKind { return s.kind }
+
+// Recv returns the type of x in x.f.
+func (s *Selection) Recv() Type { return s.recv }
+
+// Obj returns the object denoted by x.f; a *Var for
+// a field selection, and a *Func in all other cases.
+func (s *Selection) Obj() Object { return s.obj }
+
+// Type returns the type of x.f, which may be different from the type of f.
+// See Selection for more information.
+func (s *Selection) Type() Type {
+ switch s.kind {
+ case MethodVal:
+ // The type of x.f is a method with its receiver type set
+ // to the type of x.
+ sig := *s.obj.(*Func).typ.(*Signature)
+ recv := *sig.recv
+ recv.typ = s.recv
+ sig.recv = &recv
+ return &sig
+
+ case MethodExpr:
+ // The type of x.f is a function (without receiver)
+ // and an additional first argument with the same type as x.
+ // TODO(gri) Similar code is already in call.go - factor!
+ // TODO(gri) Compute this eagerly to avoid allocations.
+ sig := *s.obj.(*Func).typ.(*Signature)
+ arg0 := *sig.recv
+ sig.recv = nil
+ arg0.typ = s.recv
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ sig.params = NewTuple(append([]*Var{&arg0}, params...)...)
+ return &sig
+ }
+
+ // In all other cases, the type of x.f is the type of x.
+ return s.obj.Type()
+}
+
+// Index describes the path from x to f in x.f.
+// The last index entry is the field or method index of the type declaring f;
+// either:
+//
+// 1. the list of declared methods of a named type; or
+// 2. the list of methods of an interface type; or
+// 3. the list of fields of a struct type.
+//
+// The earlier index entries are the indices of the embedded fields implicitly
+// traversed to get from (the type of) x to f, starting at embedding depth 0.
+func (s *Selection) Index() []int { return s.index }
+
+// Indirect reports whether any pointer indirection was required to get from
+// x to f in x.f.
+//
+// Beware: Indirect spuriously returns true (Go issue #8353) for a
+// MethodVal selection in which the receiver argument and parameter
+// both have type *T so there is no indirection.
+// Unfortunately, a fix is too risky.
+func (s *Selection) Indirect() bool { return s.indirect }
+
+func (s *Selection) String() string { return SelectionString(s, nil) }
+
+// SelectionString returns the string form of s.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+//
+// Examples:
+//
+// "field (T) f int"
+// "method (T) f(X) Y"
+// "method expr (T) f(X) Y"
+func SelectionString(s *Selection, qf Qualifier) string {
+ var k string
+ switch s.kind {
+ case FieldVal:
+ k = "field "
+ case MethodVal:
+ k = "method "
+ case MethodExpr:
+ k = "method expr "
+ default:
+ unreachable()
+ }
+ var buf bytes.Buffer
+ buf.WriteString(k)
+ buf.WriteByte('(')
+ WriteType(&buf, s.Recv(), qf)
+ fmt.Fprintf(&buf, ") %s", s.obj.Name())
+ if T := s.Type(); s.kind == FieldVal {
+ buf.WriteByte(' ')
+ WriteType(&buf, T, qf)
+ } else {
+ WriteSignature(&buf, T.(*Signature), qf)
+ }
+ return buf.String()
+}
diff --git a/src/cmd/compile/internal/types2/self_test.go b/src/cmd/compile/internal/types2/self_test.go
new file mode 100644
index 0000000..3c8bec1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/self_test.go
@@ -0,0 +1,118 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "internal/testenv"
+ "path"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ . "cmd/compile/internal/types2"
+)
+
+func TestSelf(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // The Go command is needed for the importer to determine the locations of stdlib .a files.
+
+ files, err := pkgFiles(".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conf := Config{Importer: defaultImporter()}
+ _, err = conf.Check("cmd/compile/internal/types2", files, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func BenchmarkCheck(b *testing.B) {
+ testenv.MustHaveGoBuild(b) // The Go command is needed for the importer to determine the locations of stdlib .a files.
+
+ for _, p := range []string{
+ filepath.Join("src", "net", "http"),
+ filepath.Join("src", "go", "parser"),
+ filepath.Join("src", "go", "constant"),
+ filepath.Join("src", "runtime"),
+ filepath.Join("src", "go", "internal", "gcimporter"),
+ } {
+ b.Run(path.Base(p), func(b *testing.B) {
+ path := filepath.Join(runtime.GOROOT(), p)
+ for _, ignoreFuncBodies := range []bool{false, true} {
+ name := "funcbodies"
+ if ignoreFuncBodies {
+ name = "nofuncbodies"
+ }
+ b.Run(name, func(b *testing.B) {
+ b.Run("info", func(b *testing.B) {
+ runbench(b, path, ignoreFuncBodies, true)
+ })
+ b.Run("noinfo", func(b *testing.B) {
+ runbench(b, path, ignoreFuncBodies, false)
+ })
+ })
+ }
+ })
+ }
+}
+
+func runbench(b *testing.B, path string, ignoreFuncBodies, writeInfo bool) {
+ files, err := pkgFiles(path)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ // determine line count
+ var lines uint
+ for _, f := range files {
+ lines += f.EOF.Line()
+ }
+
+ b.ResetTimer()
+ start := time.Now()
+ for i := 0; i < b.N; i++ {
+ conf := Config{
+ IgnoreFuncBodies: ignoreFuncBodies,
+ Importer: defaultImporter(),
+ }
+ var info *Info
+ if writeInfo {
+ info = &Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Implicits: make(map[syntax.Node]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ Scopes: make(map[syntax.Node]*Scope),
+ }
+ }
+ if _, err := conf.Check(path, files, info); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StopTimer()
+ b.ReportMetric(float64(lines)*float64(b.N)/time.Since(start).Seconds(), "lines/s")
+}
+
+func pkgFiles(path string) ([]*syntax.File, error) {
+ filenames, err := pkgFilenames(path, true) // from stdlib_test.go
+ if err != nil {
+ return nil, err
+ }
+
+ var files []*syntax.File
+ for _, filename := range filenames {
+ file, err := syntax.ParseFile(filename, nil, nil, 0)
+ if err != nil {
+ return nil, err
+ }
+ files = append(files, file)
+ }
+
+ return files, nil
+}
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
new file mode 100644
index 0000000..18a64ec
--- /dev/null
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -0,0 +1,332 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ . "internal/types/errors"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Signature represents a (non-builtin) function or method type.
+// The receiver is ignored when comparing signatures for identity.
+type Signature struct {
+ // We need to keep the scope in Signature (rather than passing it around
+ // and store it in the Func Object) because when type-checking a function
+ // literal we call the general type checker which returns a general Type.
+ // We then unpack the *Signature and use the scope for the literal body.
+ rparams *TypeParamList // receiver type parameters from left to right, or nil
+ tparams *TypeParamList // type parameters from left to right, or nil
+ scope *Scope // function scope for package-local and non-instantiated signatures; nil otherwise
+ recv *Var // nil if not a method
+ params *Tuple // (incoming) parameters from left to right; or nil
+ results *Tuple // (outgoing) results from left to right; or nil
+ variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
+}
+
+// NewSignatureType creates a new function type for the given receiver,
+// receiver type parameters, type parameters, parameters, and results. If
+// variadic is set, params must hold at least one parameter and the last
+// parameter's core type must be of unnamed slice or bytestring type.
+// If recv is non-nil, typeParams must be empty. If recvTypeParams is
+// non-empty, recv must be non-nil.
+func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params, results *Tuple, variadic bool) *Signature {
+ if variadic {
+ n := params.Len()
+ if n == 0 {
+ panic("variadic function must have at least one parameter")
+ }
+ core := coreString(params.At(n - 1).typ)
+ if _, ok := core.(*Slice); !ok && !isString(core) {
+ panic(fmt.Sprintf("got %s, want variadic parameter with unnamed slice type or string as core type", core.String()))
+ }
+ }
+ sig := &Signature{recv: recv, params: params, results: results, variadic: variadic}
+ if len(recvTypeParams) != 0 {
+ if recv == nil {
+ panic("function with receiver type parameters must have a receiver")
+ }
+ sig.rparams = bindTParams(recvTypeParams)
+ }
+ if len(typeParams) != 0 {
+ if recv != nil {
+ panic("function with type parameters cannot have a receiver")
+ }
+ sig.tparams = bindTParams(typeParams)
+ }
+ return sig
+}
+
+// Recv returns the receiver of signature s (if a method), or nil if a
+// function. It is ignored when comparing signatures for identity.
+//
+// For an abstract method, Recv returns the enclosing interface either
+// as a *Named or an *Interface. Due to embedding, an interface may
+// contain methods whose receiver type is a different interface.
+func (s *Signature) Recv() *Var { return s.recv }
+
+// TypeParams returns the type parameters of signature s, or nil.
+func (s *Signature) TypeParams() *TypeParamList { return s.tparams }
+
+// SetTypeParams sets the type parameters of signature s.
+func (s *Signature) SetTypeParams(tparams []*TypeParam) { s.tparams = bindTParams(tparams) }
+
+// RecvTypeParams returns the receiver type parameters of signature s, or nil.
+func (s *Signature) RecvTypeParams() *TypeParamList { return s.rparams }
+
+// Params returns the parameters of signature s, or nil.
+func (s *Signature) Params() *Tuple { return s.params }
+
+// Results returns the results of signature s, or nil.
+func (s *Signature) Results() *Tuple { return s.results }
+
+// Variadic reports whether the signature s is variadic.
+func (s *Signature) Variadic() bool { return s.variadic }
+
+func (s *Signature) Underlying() Type { return s }
+func (s *Signature) String() string { return TypeString(s, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ if recvPar != nil {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.Type, true)
+ if len(rparams) > 0 {
+ // The scope of the type parameter T in "func (r T[T]) f()"
+ // starts after f, not at "r"; see #52038.
+ scopePos := ftyp.Pos()
+ tparams := make([]*TypeParam, len(rparams))
+ for i, rparam := range rparams {
+ tparams[i] = check.declareTypeParam(rparam, scopePos)
+ }
+ sig.rparams = bindTParams(tparams)
+ // Blank identifiers don't get declared, so naive type-checking of the
+ // receiver type expression would fail in Checker.collectParams below,
+ // when Checker.ident cannot resolve the _ to a type.
+ //
+ // Checker.recvTParamMap maps these blank identifiers to their type parameter
+ // types, so that they may be resolved in Checker.ident when they fail
+ // lookup in the scope.
+ for i, p := range rparams {
+ if p.Value == "_" {
+ if check.recvTParamMap == nil {
+ check.recvTParamMap = make(map[*syntax.Name]*TypeParam)
+ }
+ check.recvTParamMap[p] = tparams[i]
+ }
+ }
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeParam
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv := asNamed(check.genericType(rname, nil)); recv != nil {
+ recvTParams = recv.TypeParams().list()
+ }
+ }
+ // provide type parameter bounds
+ if len(tparams) == len(recvTParams) {
+ smap := makeRenameMap(recvTParams, tparams)
+ for i, tpar := range tparams {
+ recvTPar := recvTParams[i]
+ check.mono.recordCanon(tpar, recvTPar)
+ // recvTPar.bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the current
+ // context.
+ tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil, check.context())
+ }
+ } else if len(tparams) < len(recvTParams) {
+ // Reporting an error here is a stop-gap measure to avoid crashes in the
+ // compiler when a type parameter/argument cannot be inferred later. It
+ // may lead to follow-on errors (see issues go.dev/issue/51339, go.dev/issue/51343).
+ // TODO(gri) find a better solution
+ got := measure(len(tparams), "type parameter")
+ check.errorf(recvPar, BadRecv, "got %s, but receiver base type declares %d", got, len(recvTParams))
+ }
+ }
+ }
+
+ if tparams != nil {
+ // The parser will complain about invalid type parameters for methods.
+ check.collectTypeParams(&sig.tparams, tparams)
+ }
+
+ // Use a temporary scope for all parameter declarations and then
+ // squash that scope into the parent scope (and report any
+ // redeclarations at that time).
+ //
+ // TODO(adonovan): now that each declaration has the correct
+ // scopePos, there should be no need for scope squashing.
+ // Audit to ensure all lookups honor scopePos and simplify.
+ scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
+ scopePos := syntax.EndPos(ftyp) // all parameters' scopes start after the signature
+ var recvList []*Var // TODO(gri) remove the need for making a list here
+ if recvPar != nil {
+ recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false, scopePos) // use rewritten receiver type, if any
+ }
+ params, variadic := check.collectParams(scope, ftyp.ParamList, true, scopePos)
+ results, _ := check.collectParams(scope, ftyp.ResultList, false, scopePos)
+ scope.Squash(func(obj, alt Object) {
+ var err error_
+ err.code = DuplicateDecl
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1].Pos(), InvalidRecv, "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+ sig.recv = recv
+
+ // Delay validation of receiver type as it may cause premature expansion
+ // of types the receiver type is dependent on (see issues go.dev/issue/51232, go.dev/issue/51233).
+ check.later(func() {
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ rtyp, _ := deref(recv.typ)
+ atyp := Unalias(rtyp)
+ if !isValid(atyp) {
+ return // error was reported before
+ }
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ switch T := atyp.(type) {
+ case *Named:
+ // The receiver type may be an instantiated type referred to
+ // by an alias (which cannot have receiver parameters for now).
+ if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
+ check.errorf(recv, InvalidRecv, "cannot define new methods on instantiated type %s", rtyp)
+ break
+ }
+ if T.obj.pkg != check.pkg {
+ check.errorf(recv, InvalidRecv, "cannot define new methods on non-local type %s", rtyp)
+ break
+ }
+ var cause string
+ switch u := T.under().(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ cause = "unsafe.Pointer"
+ }
+ case *Pointer, *Interface:
+ cause = "pointer or interface type"
+ case *TypeParam:
+ // The underlying type of a receiver base type cannot be a
+ // type parameter: "type T[P any] P" is not a valid declaration.
+ unreachable()
+ }
+ if cause != "" {
+ check.errorf(recv, InvalidRecv, "invalid receiver type %s (%s)", rtyp, cause)
+ }
+ case *Basic:
+ check.errorf(recv, InvalidRecv, "cannot define new methods on non-local type %s", rtyp)
+ default:
+ check.errorf(recv, InvalidRecv, "invalid receiver type %s", recv.typ)
+ }
+ }).describef(recv, "validate receiver %s", recv)
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list.
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool, scopePos syntax.Pos) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+
+ var typ Type
+ var prev syntax.Expr
+ for i, field := range list {
+ ftype := field.Type
+ // type-check type of grouped fields only once
+ if ftype != prev {
+ prev = ftype
+ if t, _ := ftype.(*syntax.DotsType); t != nil {
+ ftype = t.Elem
+ if variadicOk && i == len(list)-1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, MisplacedDotDotDot, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ = check.varType(ftype)
+ }
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if field.Name != nil {
+ // named parameter
+ name := field.Name.Value
+ if name == "" {
+ check.error(field.Name, InvalidSyntaxTree, "anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(field.Name.Pos(), check.pkg, name, typ)
+ check.declare(scope, field.Name, par, scopePos)
+ params = append(params, par)
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(field.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.error(list[0], InvalidSyntaxTree, "list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retro-actively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
new file mode 100644
index 0000000..740dbc9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Signal size changes of important structures.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = ^uint(0)>>32 != 0
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ // Types
+ {Basic{}, 16, 32},
+ {Array{}, 16, 24},
+ {Slice{}, 8, 16},
+ {Struct{}, 24, 48},
+ {Pointer{}, 8, 16},
+ {Tuple{}, 12, 24},
+ {Signature{}, 28, 56},
+ {Union{}, 12, 24},
+ {Interface{}, 40, 80},
+ {Map{}, 16, 32},
+ {Chan{}, 12, 24},
+ {Named{}, 60, 112},
+ {TypeParam{}, 28, 48},
+ {term{}, 12, 24},
+
+ // Objects
+ {PkgName{}, 64, 104},
+ {Const{}, 64, 104},
+ {TypeName{}, 56, 88},
+ {Var{}, 64, 104},
+ {Func{}, 64, 104},
+ {Label{}, 60, 96},
+ {Builtin{}, 60, 96},
+ {Nil{}, 56, 88},
+
+ // Misc
+ {Scope{}, 60, 104},
+ {Package{}, 44, 88},
+ {_TypeSet{}, 28, 56},
+ }
+
+ for _, test := range tests {
+ got := reflect.TypeOf(test.val).Size()
+ want := test._32bit
+ if _64bit {
+ want = test._64bit
+ }
+ if got != want {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", test.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
new file mode 100644
index 0000000..486c05c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -0,0 +1,340 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Sizes.
+
+package types2
+
+// Sizes defines the sizing functions for package unsafe.
+type Sizes interface {
+ // Alignof returns the alignment of a variable of type T.
+ // Alignof must implement the alignment guarantees required by the spec.
+ // The result must be >= 1.
+ Alignof(T Type) int64
+
+ // Offsetsof returns the offsets of the given struct fields, in bytes.
+ // Offsetsof must implement the offset guarantees required by the spec.
+ // A negative entry in the result indicates that the struct is too large.
+ Offsetsof(fields []*Var) []int64
+
+ // Sizeof returns the size of a variable of type T.
+ // Sizeof must implement the size guarantees required by the spec.
+ // A negative result indicates that T is too large.
+ Sizeof(T Type) int64
+}
+
+// StdSizes is a convenience type for creating commonly used Sizes.
+// It makes the following simplifying assumptions:
+//
+// - The size of explicitly sized basic types (int16, etc.) is the
+// specified size.
+// - The size of strings and interfaces is 2*WordSize.
+// - The size of slices is 3*WordSize.
+// - The size of an array of n elements corresponds to the size of
+// a struct of n consecutive fields of the array's element type.
+// - The size of a struct is the offset of the last field plus that
+// field's size. As with all element types, if the struct is used
+// in an array its size must first be aligned to a multiple of the
+// struct's alignment.
+// - All other types have size WordSize.
+// - Arrays and structs are aligned per spec definition; all other
+// types are naturally aligned with a maximum alignment MaxAlign.
+//
+// *StdSizes implements Sizes.
+type StdSizes struct {
+ WordSize int64 // word size in bytes - must be >= 4 (32bits)
+ MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *StdSizes) Alignof(T Type) (result int64) {
+ defer func() {
+ assert(result >= 1)
+ }()
+
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := under(T).(type) {
+ case *Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return s.Alignof(t.elem)
+ case *Struct:
+ if len(t.fields) == 0 && IsSyncAtomicAlign64(T) {
+ // Special case: sync/atomic.align64 is an
+ // empty struct we recognize as a signal that
+ // the struct it contains must be
+ // 64-bit-aligned.
+ //
+ // This logic is equivalent to the logic in
+ // cmd/compile/internal/types/size.go:calcStructOffset
+ return 8
+ }
+
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for _, f := range t.fields {
+ if a := s.Alignof(f.typ); a > max {
+ max = a
+ }
+ }
+ return max
+ case *Slice, *Interface:
+ // Multiword data structures are effectively structs
+ // in which each element has size WordSize.
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Alignof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize
+ case *Basic:
+ // Strings are like slices and interfaces.
+ if t.Info()&IsString != 0 {
+ return s.WordSize
+ }
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ a := s.Sizeof(T) // may be 0 or negative
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ // complex{64,128} are aligned like [2]float{32,64}.
+ if isComplex(T) {
+ a /= 2
+ }
+ if a > s.MaxAlign {
+ return s.MaxAlign
+ }
+ return a
+}
+
+func IsSyncAtomicAlign64(T Type) bool {
+ named := asNamed(T)
+ if named == nil {
+ return false
+ }
+ obj := named.Obj()
+ return obj.Name() == "align64" &&
+ obj.Pkg() != nil &&
+ (obj.Pkg().Path() == "sync/atomic" ||
+ obj.Pkg().Path() == "runtime/internal/atomic")
+}
+
+func (s *StdSizes) Offsetsof(fields []*Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var offs int64
+ for i, f := range fields {
+ if offs < 0 {
+ // all remaining offsets are too large
+ offsets[i] = -1
+ continue
+ }
+ // offs >= 0
+ a := s.Alignof(f.typ)
+ offs = align(offs, a) // possibly < 0 if align overflows
+ offsets[i] = offs
+ if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 {
+ offs += d // ok to overflow to < 0
+ } else {
+ offs = -1 // f.typ or offs is too large
+ }
+ }
+ return offsets
+}
+
+var basicSizes = [...]byte{
+ Bool: 1,
+ Int8: 1,
+ Int16: 2,
+ Int32: 4,
+ Int64: 8,
+ Uint8: 1,
+ Uint16: 2,
+ Uint32: 4,
+ Uint64: 8,
+ Float32: 4,
+ Float64: 8,
+ Complex64: 8,
+ Complex128: 16,
+}
+
+func (s *StdSizes) Sizeof(T Type) int64 {
+ switch t := under(T).(type) {
+ case *Basic:
+ assert(isTyped(T))
+ k := t.kind
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ if k == String {
+ return s.WordSize * 2
+ }
+ case *Array:
+ n := t.len
+ if n <= 0 {
+ return 0
+ }
+ // n > 0
+ esize := s.Sizeof(t.elem)
+ if esize < 0 {
+ return -1 // element too large
+ }
+ if esize == 0 {
+ return 0 // 0-size element
+ }
+ // esize > 0
+ a := s.Alignof(t.elem)
+ ea := align(esize, a) // possibly < 0 if align overflows
+ if ea < 0 {
+ return -1
+ }
+ // ea >= 1
+ n1 := n - 1 // n1 >= 0
+ // Final size is ea*n1 + esize; and size must be <= maxInt64.
+ const maxInt64 = 1<<63 - 1
+ if n1 > 0 && ea > maxInt64/n1 {
+ return -1 // ea*n1 overflows
+ }
+ return ea*n1 + esize // may still overflow to < 0 which is ok
+ case *Slice:
+ return s.WordSize * 3
+ case *Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ offsets := s.Offsetsof(t.fields)
+ offs := offsets[n-1]
+ size := s.Sizeof(t.fields[n-1].typ)
+ if offs < 0 || size < 0 {
+ return -1 // type too large
+ }
+ return offs + size // may overflow to < 0 which is ok
+ case *Interface:
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Sizeof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize * 2
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ return s.WordSize // catch-all
+}
+
+// common architecture word sizes and alignments
+var gcArchSizes = map[string]*gcSizes{
+ "386": {4, 4},
+ "amd64": {8, 8},
+ "amd64p32": {4, 8},
+ "arm": {4, 4},
+ "arm64": {8, 8},
+ "loong64": {8, 8},
+ "mips": {4, 4},
+ "mipsle": {4, 4},
+ "mips64": {8, 8},
+ "mips64le": {8, 8},
+ "ppc64": {8, 8},
+ "ppc64le": {8, 8},
+ "riscv64": {8, 8},
+ "s390x": {8, 8},
+ "sparc64": {8, 8},
+ "wasm": {8, 8},
+ // When adding more architectures here,
+ // update the doc string of SizesFor below.
+}
+
+// SizesFor returns the Sizes used by a compiler for an architecture.
+// The result is nil if a compiler/architecture pair is not known.
+//
+// Supported architectures for compiler "gc":
+// "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle",
+// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
+func SizesFor(compiler, arch string) Sizes {
+ switch compiler {
+ case "gc":
+ if s := gcSizesFor(compiler, arch); s != nil {
+ return Sizes(s)
+ }
+ case "gccgo":
+ if s, ok := gccgoArchSizes[arch]; ok {
+ return Sizes(s)
+ }
+ }
+ return nil
+}
+
+// stdSizes is used if Config.Sizes == nil.
+var stdSizes = SizesFor("gc", "amd64")
+
+func (conf *Config) alignof(T Type) int64 {
+ f := stdSizes.Alignof
+ if conf.Sizes != nil {
+ f = conf.Sizes.Alignof
+ }
+ if a := f(T); a >= 1 {
+ return a
+ }
+ panic("implementation of alignof returned an alignment < 1")
+}
+
+func (conf *Config) offsetsof(T *Struct) []int64 {
+ var offsets []int64
+ if T.NumFields() > 0 {
+ // compute offsets on demand
+ f := stdSizes.Offsetsof
+ if conf.Sizes != nil {
+ f = conf.Sizes.Offsetsof
+ }
+ offsets = f(T.fields)
+ // sanity checks
+ if len(offsets) != T.NumFields() {
+ panic("implementation of offsetsof returned the wrong number of offsets")
+ }
+ }
+ return offsets
+}
+
+// offsetof returns the offset of the field specified via
+// the index sequence relative to T. All embedded fields
+// must be structs (rather than pointers to structs).
+// If the offset is too large (because T is too large),
+// the result is negative.
+func (conf *Config) offsetof(T Type, index []int) int64 {
+ var offs int64
+ for _, i := range index {
+ s := under(T).(*Struct)
+ d := conf.offsetsof(s)[i]
+ if d < 0 {
+ return -1
+ }
+ offs += d
+ if offs < 0 {
+ return -1
+ }
+ T = s.fields[i].typ
+ }
+ return offs
+}
+
+// sizeof returns the size of T.
+// If T is too large, the result is negative.
+func (conf *Config) sizeof(T Type) int64 {
+ f := stdSizes.Sizeof
+ if conf.Sizes != nil {
+ f = conf.Sizes.Sizeof
+ }
+ return f(T)
+}
+
+// align returns the smallest y >= x such that y % a == 0.
+// a must be within 1 and 8 and it must be a power of 2.
+// The result may be negative due to overflow.
+func align(x, a int64) int64 {
+ assert(x >= 0 && 1 <= a && a <= 8 && a&(a-1) == 0)
+ return (x + a - 1) &^ (a - 1)
+}
diff --git a/src/cmd/compile/internal/types2/sizes_test.go b/src/cmd/compile/internal/types2/sizes_test.go
new file mode 100644
index 0000000..9a772f4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/sizes_test.go
@@ -0,0 +1,194 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for sizes.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "internal/testenv"
+ "testing"
+)
+
+// findStructType typechecks src and returns the first struct type encountered.
+func findStructType(t *testing.T, src string) *types2.Struct {
+ return findStructTypeConfig(t, src, &types2.Config{})
+}
+
+func findStructTypeConfig(t *testing.T, src string, conf *types2.Config) *types2.Struct {
+ types := make(map[syntax.Expr]types2.TypeAndValue)
+ mustTypecheck(src, nil, &types2.Info{Types: types})
+ for _, tv := range types {
+ if ts, ok := tv.Type.(*types2.Struct); ok {
+ return ts
+ }
+ }
+ t.Fatalf("failed to find a struct type in src:\n%s\n", src)
+ return nil
+}
+
+// go.dev/issue/16316
+func TestMultipleSizeUse(t *testing.T) {
+ const src = `
+package main
+
+type S struct {
+ i int
+ b bool
+ s string
+ n int
+}
+`
+ ts := findStructType(t, src)
+ sizes := types2.StdSizes{WordSize: 4, MaxAlign: 4}
+ if got := sizes.Sizeof(ts); got != 20 {
+ t.Errorf("Sizeof(%v) with WordSize 4 = %d want 20", ts, got)
+ }
+ sizes = types2.StdSizes{WordSize: 8, MaxAlign: 8}
+ if got := sizes.Sizeof(ts); got != 40 {
+ t.Errorf("Sizeof(%v) with WordSize 8 = %d want 40", ts, got)
+ }
+}
+
+// go.dev/issue/16464
+func TestAlignofNaclSlice(t *testing.T) {
+ const src = `
+package main
+
+var s struct {
+ x *int
+ y []byte
+}
+`
+ ts := findStructType(t, src)
+ sizes := &types2.StdSizes{WordSize: 4, MaxAlign: 8}
+ var fields []*types2.Var
+ // Make a copy manually :(
+ for i := 0; i < ts.NumFields(); i++ {
+ fields = append(fields, ts.Field(i))
+ }
+ offsets := sizes.Offsetsof(fields)
+ if offsets[0] != 0 || offsets[1] != 4 {
+ t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, []int{0, 4})
+ }
+}
+
+func TestIssue16902(t *testing.T) {
+ const src = `
+package a
+
+import "unsafe"
+
+const _ = unsafe.Offsetof(struct{ x int64 }{}.x)
+`
+ info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)}
+ conf := types2.Config{
+ Importer: defaultImporter(),
+ Sizes: &types2.StdSizes{WordSize: 8, MaxAlign: 8},
+ }
+ mustTypecheck(src, &conf, &info)
+ for _, tv := range info.Types {
+ _ = conf.Sizes.Sizeof(tv.Type)
+ _ = conf.Sizes.Alignof(tv.Type)
+ }
+}
+
+// go.dev/issue/53884.
+func TestAtomicAlign(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // The Go command is needed for the importer to determine the locations of stdlib .a files.
+
+ const src = `
+package main
+
+import "sync/atomic"
+
+var s struct {
+ x int32
+ y atomic.Int64
+ z int64
+}
+`
+
+ want := []int64{0, 8, 16}
+ for _, arch := range []string{"386", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ conf := types2.Config{
+ Importer: defaultImporter(),
+ Sizes: types2.SizesFor("gc", arch),
+ }
+ ts := findStructTypeConfig(t, src, &conf)
+ var fields []*types2.Var
+ // Make a copy manually :(
+ for i := 0; i < ts.NumFields(); i++ {
+ fields = append(fields, ts.Field(i))
+ }
+
+ offsets := conf.Sizes.Offsetsof(fields)
+ if offsets[0] != want[0] || offsets[1] != want[1] || offsets[2] != want[2] {
+ t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, want)
+ }
+ })
+ }
+}
+
+type gcSizeTest struct {
+ name string
+ src string
+}
+
+var gcSizesTests = []gcSizeTest{
+ {
+ "issue60431",
+ `
+package main
+
+import "unsafe"
+
+// The foo struct size is expected to be rounded up to 16 bytes.
+type foo struct {
+ a int64
+ b bool
+}
+
+func main() {
+ assert(unsafe.Sizeof(foo{}) == 16)
+}`,
+ },
+ {
+ "issue60734",
+ `
+package main
+
+import (
+ "unsafe"
+)
+
+// The Data struct size is expected to be rounded up to 16 bytes.
+type Data struct {
+ Value uint32 // 4 bytes
+ Label [10]byte // 10 bytes
+ Active bool // 1 byte
+ // padded with 1 byte to make it align
+}
+
+func main() {
+ assert(unsafe.Sizeof(Data{}) == 16)
+}
+`,
+ },
+}
+
+func TestGCSizes(t *testing.T) {
+ types2.DefPredeclaredTestFuncs()
+ for _, tc := range gcSizesTests {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ conf := types2.Config{Importer: defaultImporter(), Sizes: types2.SizesFor("gc", "amd64")}
+ mustTypecheck(tc.src, &conf, nil)
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/types2/slice.go b/src/cmd/compile/internal/types2/slice.go
new file mode 100644
index 0000000..9c22a6f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/slice.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Slice represents a slice type.
+type Slice struct {
+ elem Type
+}
+
+// NewSlice returns a new slice type for the given element type.
+func NewSlice(elem Type) *Slice { return &Slice{elem: elem} }
+
+// Elem returns the element type of slice s.
+func (s *Slice) Elem() Type { return s.elem }
+
+func (s *Slice) Underlying() Type { return s }
+func (s *Slice) String() string { return TypeString(s, nil) }
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
new file mode 100644
index 0000000..405af78
--- /dev/null
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -0,0 +1,488 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests types2.Check by using it to
+// typecheck the standard library and tests.
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+ "go/build"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ . "cmd/compile/internal/types2"
+)
+
+var stdLibImporter = defaultImporter()
+
+func TestStdlib(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in short mode")
+ }
+
+ testenv.MustHaveGoBuild(t)
+
+ // Collect non-test files.
+ dirFiles := make(map[string][]string)
+ root := filepath.Join(testenv.GOROOT(t), "src")
+ walkPkgDirs(root, func(dir string, filenames []string) {
+ dirFiles[dir] = filenames
+ }, t.Error)
+
+ c := &stdlibChecker{
+ dirFiles: dirFiles,
+ pkgs: make(map[string]*futurePackage),
+ }
+
+ start := time.Now()
+
+ // Though we read files while parsing, type-checking is otherwise CPU bound.
+ //
+ // This doesn't achieve great CPU utilization as many packages may block
+ // waiting for a common import, but in combination with the non-deterministic
+ // map iteration below this should provide decent coverage of concurrent
+ // type-checking (see golang/go#47729).
+ cpulimit := make(chan struct{}, runtime.GOMAXPROCS(0))
+ var wg sync.WaitGroup
+
+ for dir := range dirFiles {
+ dir := dir
+
+ cpulimit <- struct{}{}
+ wg.Add(1)
+ go func() {
+ defer func() {
+ wg.Done()
+ <-cpulimit
+ }()
+
+ _, err := c.getDirPackage(dir)
+ if err != nil {
+ t.Errorf("error checking %s: %v", dir, err)
+ }
+ }()
+ }
+
+ wg.Wait()
+
+ if testing.Verbose() {
+ fmt.Println(len(dirFiles), "packages typechecked in", time.Since(start))
+ }
+}
+
+// stdlibChecker implements concurrent type-checking of the packages defined by
+// dirFiles, which must define a closed set of packages (such as GOROOT/src).
+type stdlibChecker struct {
+ dirFiles map[string][]string // non-test files per directory; must be pre-populated
+
+ mu sync.Mutex
+ pkgs map[string]*futurePackage // future cache of type-checking results
+}
+
+// A futurePackage is a future result of type-checking.
+type futurePackage struct {
+ done chan struct{} // guards pkg and err
+ pkg *Package
+ err error
+}
+
+func (c *stdlibChecker) Import(path string) (*Package, error) {
+ panic("unimplemented: use ImportFrom")
+}
+
+func (c *stdlibChecker) ImportFrom(path, dir string, _ ImportMode) (*Package, error) {
+ if path == "unsafe" {
+ // unsafe cannot be type checked normally.
+ return Unsafe, nil
+ }
+
+ p, err := build.Default.Import(path, dir, build.FindOnly)
+ if err != nil {
+ return nil, err
+ }
+
+ pkg, err := c.getDirPackage(p.Dir)
+ if pkg != nil {
+ // As long as pkg is non-nil, avoid redundant errors related to failed
+ // imports. TestStdlib will collect errors once for each package.
+ return pkg, nil
+ }
+ return nil, err
+}
+
+// getDirPackage gets the package defined in dir from the future cache.
+//
+// If this is the first goroutine requesting the package, getDirPackage
+// type-checks.
+func (c *stdlibChecker) getDirPackage(dir string) (*Package, error) {
+ c.mu.Lock()
+ fut, ok := c.pkgs[dir]
+ if !ok {
+ // First request for this package dir; type check.
+ fut = &futurePackage{
+ done: make(chan struct{}),
+ }
+ c.pkgs[dir] = fut
+ files, ok := c.dirFiles[dir]
+ c.mu.Unlock()
+ if !ok {
+ fut.err = fmt.Errorf("no files for %s", dir)
+ } else {
+ // Using dir as the package path here may be inconsistent with the behavior
+ // of a normal importer, but is sufficient as dir is by construction unique
+ // to this package.
+ fut.pkg, fut.err = typecheckFiles(dir, files, c)
+ }
+ close(fut.done)
+ } else {
+ // Otherwise, await the result.
+ c.mu.Unlock()
+ <-fut.done
+ }
+ return fut.pkg, fut.err
+}
+
+// firstComment returns the contents of the first non-empty comment in
+// the given file, "skip", or the empty string. No matter the present
+// comments, if any of them contains a build tag, the result is always
+// "skip". Only comments within the first 4K of the file are considered.
+// TODO(gri) should only read until we see "package" token.
+func firstComment(filename string) (first string) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return ""
+ }
+ defer f.Close()
+
+ // read at most 4KB
+ var buf [4 << 10]byte
+ n, _ := f.Read(buf[:])
+ src := bytes.NewBuffer(buf[:n])
+
+ // TODO(gri) we need a better way to terminate CommentsDo
+ defer func() {
+ if p := recover(); p != nil {
+ if s, ok := p.(string); ok {
+ first = s
+ }
+ }
+ }()
+
+ syntax.CommentsDo(src, func(_, _ uint, text string) {
+ if text[0] != '/' {
+ return // not a comment
+ }
+
+ // extract comment text
+ if text[1] == '*' {
+ text = text[:len(text)-2]
+ }
+ text = strings.TrimSpace(text[2:])
+
+ if strings.HasPrefix(text, "go:build ") {
+ panic("skip")
+ }
+ if first == "" {
+ first = text // text may be "" but that's ok
+ }
+ // continue as we may still see build tags
+ })
+
+ return
+}
+
+func testTestDir(t *testing.T, path string, ignore ...string) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ // cmd/distpack deletes GOROOT/test, so skip the test if it isn't present.
+ // cmd/distpack also requires GOROOT/VERSION to exist, so use that to
+ // suppress false-positive skips.
+ if _, err := os.Stat(filepath.Join(testenv.GOROOT(t), "test")); os.IsNotExist(err) {
+ if _, err := os.Stat(filepath.Join(testenv.GOROOT(t), "VERSION")); err == nil {
+ t.Skipf("skipping: GOROOT/test not present")
+ }
+ }
+ t.Fatal(err)
+ }
+
+ excluded := make(map[string]bool)
+ for _, filename := range ignore {
+ excluded[filename] = true
+ }
+
+ for _, f := range files {
+ // filter directory contents
+ if f.IsDir() || !strings.HasSuffix(f.Name(), ".go") || excluded[f.Name()] {
+ continue
+ }
+
+ // get per-file instructions
+ expectErrors := false
+ filename := filepath.Join(path, f.Name())
+ goVersion := ""
+ if comment := firstComment(filename); comment != "" {
+ if strings.Contains(comment, "-goexperiment") {
+ continue // ignore this file
+ }
+ fields := strings.Fields(comment)
+ switch fields[0] {
+ case "skip", "compiledir":
+ continue // ignore this file
+ case "errorcheck":
+ expectErrors = true
+ for _, arg := range fields[1:] {
+ if arg == "-0" || arg == "-+" || arg == "-std" {
+ // Marked explicitly as not expecting errors (-0),
+ // or marked as compiling runtime/stdlib, which is only done
+ // to trigger runtime/stdlib-only error output.
+ // In both cases, the code should typecheck.
+ expectErrors = false
+ break
+ }
+ const prefix = "-lang="
+ if strings.HasPrefix(arg, prefix) {
+ goVersion = arg[len(prefix):]
+ }
+ }
+ }
+ }
+
+ // parse and type-check file
+ if testing.Verbose() {
+ fmt.Println("\t", filename)
+ }
+ file, err := syntax.ParseFile(filename, nil, nil, 0)
+ if err == nil {
+ conf := Config{
+ GoVersion: goVersion,
+ Importer: stdLibImporter,
+ }
+ _, err = conf.Check(filename, []*syntax.File{file}, nil)
+ }
+
+ if expectErrors {
+ if err == nil {
+ t.Errorf("expected errors but found none in %s", filename)
+ }
+ } else {
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ }
+}
+
+func TestStdTest(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+
+ testTestDir(t, filepath.Join(testenv.GOROOT(t), "test"),
+ "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore
+ "directive.go", // tests compiler rejection of bad directive placement - ignore
+ "directive2.go", // tests compiler rejection of bad directive placement - ignore
+ "embedfunc.go", // tests //go:embed
+ "embedvers.go", // tests //go:embed
+ "linkname2.go", // types2 doesn't check validity of //go:xxx directives
+ "linkname3.go", // types2 doesn't check validity of //go:xxx directives
+ )
+}
+
+func TestStdFixed(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+
+ testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "fixedbugs"),
+ "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
+ "bug398.go", // types2 doesn't check for anonymous interface cycles (go.dev/issue/56103)
+ "issue6889.go", // gc-specific test
+ "issue11362.go", // canonical import path check
+ "issue16369.go", // types2 handles this correctly - not an issue
+ "issue18459.go", // types2 doesn't check validity of //go:xxx directives
+ "issue18882.go", // types2 doesn't check validity of //go:xxx directives
+ "issue20529.go", // types2 does not have constraints on stack size
+ "issue22200.go", // types2 does not have constraints on stack size
+ "issue22200b.go", // types2 does not have constraints on stack size
+ "issue25507.go", // types2 does not have constraints on stack size
+ "issue20780.go", // types2 does not have constraints on stack size
+ "issue42058a.go", // types2 does not have constraints on channel element size
+ "issue42058b.go", // types2 does not have constraints on channel element size
+ "issue48097.go", // go/types doesn't check validity of //go:xxx directives, and non-init bodyless function
+ "issue48230.go", // go/types doesn't check validity of //go:xxx directives
+ "issue49767.go", // go/types does not have constraints on channel element size
+ "issue49814.go", // go/types does not have constraints on array size
+ "issue56103.go", // anonymous interface cycles; will be a type checker error in 1.22
+ "issue52697.go", // types2 does not have constraints on stack size
+
+ // These tests requires runtime/cgo.Incomplete, which is only available on some platforms.
+ // However, types2 does not know about build constraints.
+ "bug514.go",
+ "issue40954.go",
+ "issue42032.go",
+ "issue42076.go",
+ "issue46903.go",
+ "issue51733.go",
+ "notinheap2.go",
+ "notinheap3.go",
+ )
+}
+
+func TestStdKen(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "ken"))
+}
+
+// Package paths of excluded packages.
+var excluded = map[string]bool{
+ "builtin": true,
+
+ // go.dev/issue/46027: some imports are missing for this submodule.
+ "crypto/internal/edwards25519/field/_asm": true,
+ "crypto/internal/bigmod/_asm": true,
+}
+
+// printPackageMu synchronizes the printing of type-checked package files in
+// the typecheckFiles function.
+//
+// Without synchronization, package files may be interleaved during concurrent
+// type-checking.
+var printPackageMu sync.Mutex
+
+// typecheckFiles typechecks the given package files.
+func typecheckFiles(path string, filenames []string, importer Importer) (*Package, error) {
+ // Parse package files.
+ var files []*syntax.File
+ for _, filename := range filenames {
+ var errs []error
+ errh := func(err error) { errs = append(errs, err) }
+ file, err := syntax.ParseFile(filename, errh, nil, 0)
+ if err != nil {
+ return nil, errors.Join(errs...)
+ }
+
+ files = append(files, file)
+ }
+
+ if testing.Verbose() {
+ printPackageMu.Lock()
+ fmt.Println("package", files[0].PkgName.Value)
+ for _, filename := range filenames {
+ fmt.Println("\t", filename)
+ }
+ printPackageMu.Unlock()
+ }
+
+ // Typecheck package files.
+ var errs []error
+ conf := Config{
+ Error: func(err error) {
+ errs = append(errs, err)
+ },
+ Importer: importer,
+ }
+ info := Info{Uses: make(map[*syntax.Name]Object)}
+ pkg, _ := conf.Check(path, files, &info)
+ err := errors.Join(errs...)
+ if err != nil {
+ return pkg, err
+ }
+
+ // Perform checks of API invariants.
+
+ // All Objects have a package, except predeclared ones.
+ errorError := Universe.Lookup("error").Type().Underlying().(*Interface).ExplicitMethod(0) // (error).Error
+ for id, obj := range info.Uses {
+ predeclared := obj == Universe.Lookup(obj.Name()) || obj == errorError
+ if predeclared == (obj.Pkg() != nil) {
+ posn := id.Pos()
+ if predeclared {
+ return nil, fmt.Errorf("%s: predeclared object with package: %s", posn, obj)
+ } else {
+ return nil, fmt.Errorf("%s: user-defined object without package: %s", posn, obj)
+ }
+ }
+ }
+
+ return pkg, nil
+}
+
+// pkgFilenames returns the list of package filenames for the given directory.
+func pkgFilenames(dir string, includeTest bool) ([]string, error) {
+ ctxt := build.Default
+ ctxt.CgoEnabled = false
+ pkg, err := ctxt.ImportDir(dir, 0)
+ if err != nil {
+ if _, nogo := err.(*build.NoGoError); nogo {
+ return nil, nil // no *.go files, not an error
+ }
+ return nil, err
+ }
+ if excluded[pkg.ImportPath] {
+ return nil, nil
+ }
+ var filenames []string
+ for _, name := range pkg.GoFiles {
+ filenames = append(filenames, filepath.Join(pkg.Dir, name))
+ }
+ if includeTest {
+ for _, name := range pkg.TestGoFiles {
+ filenames = append(filenames, filepath.Join(pkg.Dir, name))
+ }
+ }
+ return filenames, nil
+}
+
+func walkPkgDirs(dir string, pkgh func(dir string, filenames []string), errh func(args ...interface{})) {
+ w := walker{pkgh, errh}
+ w.walk(dir)
+}
+
+type walker struct {
+ pkgh func(dir string, filenames []string)
+ errh func(args ...any)
+}
+
+func (w *walker) walk(dir string) {
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ w.errh(err)
+ return
+ }
+
+ // apply pkgh to the files in directory dir
+
+ // Don't get test files as these packages are imported.
+ pkgFiles, err := pkgFilenames(dir, false)
+ if err != nil {
+ w.errh(err)
+ return
+ }
+ if pkgFiles != nil {
+ w.pkgh(dir, pkgFiles)
+ }
+
+ // traverse subdirectories, but don't walk into testdata
+ for _, f := range files {
+ if f.IsDir() && f.Name() != "testdata" {
+ w.walk(filepath.Join(dir, f.Name()))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
new file mode 100644
index 0000000..c9713da
--- /dev/null
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -0,0 +1,1059 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of statements.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+ "internal/buildcfg"
+ . "internal/types/errors"
+ "sort"
+)
+
+func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body *syntax.BlockStmt, iota constant.Value) {
+ if check.conf.IgnoreFuncBodies {
+ panic("function body not ignored")
+ }
+
+ if check.conf.Trace {
+ check.trace(body.Pos(), "-- %s: %s", name, sig)
+ }
+
+ // save/restore current environment and set up function environment
+ // (and use 0 indentation at function start)
+ defer func(env environment, indent int) {
+ check.environment = env
+ check.indent = indent
+ }(check.environment, check.indent)
+ check.environment = environment{
+ decl: decl,
+ scope: sig.scope,
+ iota: iota,
+ sig: sig,
+ }
+ check.indent = 0
+
+ check.stmtList(0, body.List)
+
+ if check.hasLabel && !check.conf.IgnoreBranchErrors {
+ check.labels(body)
+ }
+
+ if sig.results.Len() > 0 && !check.isTerminating(body, "") {
+ check.error(body.Rbrace, MissingReturn, "missing return")
+ }
+
+ // spec: "Implementation restriction: A compiler may make it illegal to
+ // declare a variable inside a function body if the variable is never used."
+ check.usage(sig.scope)
+}
+
+func (check *Checker) usage(scope *Scope) {
+ var unused []*Var
+ for name, elem := range scope.elems {
+ elem = resolve(name, elem)
+ if v, _ := elem.(*Var); v != nil && !v.used {
+ unused = append(unused, v)
+ }
+ }
+ sort.Slice(unused, func(i, j int) bool {
+ return cmpPos(unused[i].pos, unused[j].pos) < 0
+ })
+ for _, v := range unused {
+ check.softErrorf(v.pos, UnusedVar, "%s declared and not used", v.name)
+ }
+
+ for _, scope := range scope.children {
+ // Don't go inside function literal scopes a second time;
+ // they are handled explicitly by funcBody.
+ if !scope.isFunc {
+ check.usage(scope)
+ }
+ }
+}
+
+// stmtContext is a bitset describing which
+// control-flow statements are permissible,
+// and provides additional context information
+// for better error messages.
+type stmtContext uint
+
+const (
+ // permissible control-flow statements
+ breakOk stmtContext = 1 << iota
+ continueOk
+ fallthroughOk
+
+ // additional context information
+ finalSwitchCase
+ inTypeSwitch
+)
+
+func (check *Checker) simpleStmt(s syntax.Stmt) {
+ if s != nil {
+ check.stmt(0, s)
+ }
+}
+
+func trimTrailingEmptyStmts(list []syntax.Stmt) []syntax.Stmt {
+ for i := len(list); i > 0; i-- {
+ if _, ok := list[i-1].(*syntax.EmptyStmt); !ok {
+ return list[:i]
+ }
+ }
+ return nil
+}
+
+func (check *Checker) stmtList(ctxt stmtContext, list []syntax.Stmt) {
+ ok := ctxt&fallthroughOk != 0
+ inner := ctxt &^ fallthroughOk
+ list = trimTrailingEmptyStmts(list) // trailing empty statements are "invisible" to fallthrough analysis
+ for i, s := range list {
+ inner := inner
+ if ok && i+1 == len(list) {
+ inner |= fallthroughOk
+ }
+ check.stmt(inner, s)
+ }
+}
+
+func (check *Checker) multipleSwitchDefaults(list []*syntax.CaseClause) {
+ var first *syntax.CaseClause
+ for _, c := range list {
+ if c.Cases == nil {
+ if first != nil {
+ check.errorf(c, DuplicateDefault, "multiple defaults (first at %s)", first.Pos())
+ // TODO(gri) probably ok to bail out after first error (and simplify this code)
+ } else {
+ first = c
+ }
+ }
+ }
+}
+
+func (check *Checker) multipleSelectDefaults(list []*syntax.CommClause) {
+ var first *syntax.CommClause
+ for _, c := range list {
+ if c.Comm == nil {
+ if first != nil {
+ check.errorf(c, DuplicateDefault, "multiple defaults (first at %s)", first.Pos())
+ // TODO(gri) probably ok to bail out after first error (and simplify this code)
+ } else {
+ first = c
+ }
+ }
+ }
+}
+
+func (check *Checker) openScope(node syntax.Node, comment string) {
+ check.openScopeUntil(node, syntax.EndPos(node), comment)
+}
+
+func (check *Checker) openScopeUntil(node syntax.Node, end syntax.Pos, comment string) {
+ scope := NewScope(check.scope, node.Pos(), end, comment)
+ check.recordScope(node, scope)
+ check.scope = scope
+}
+
+func (check *Checker) closeScope() {
+ check.scope = check.scope.Parent()
+}
+
+func (check *Checker) suspendedCall(keyword string, call syntax.Expr) {
+ code := InvalidDefer
+ if keyword == "go" {
+ code = InvalidGo
+ }
+
+ if _, ok := call.(*syntax.CallExpr); !ok {
+ check.errorf(call, code, "expression in %s must be function call", keyword)
+ check.use(call)
+ return
+ }
+
+ var x operand
+ var msg string
+ switch check.rawExpr(nil, &x, call, nil, false) {
+ case conversion:
+ msg = "requires function call, not conversion"
+ case expression:
+ msg = "discards result of"
+ code = UnusedResults
+ case statement:
+ return
+ default:
+ unreachable()
+ }
+ check.errorf(&x, code, "%s %s %s", keyword, msg, &x)
+}
+
+// goVal returns the Go value for val, or nil.
+func goVal(val constant.Value) interface{} {
+ // val should exist, but be conservative and check
+ if val == nil {
+ return nil
+ }
+ // Match implementation restriction of other compilers.
+ // gc only checks duplicates for integer, floating-point
+ // and string values, so only create Go values for these
+ // types.
+ switch val.Kind() {
+ case constant.Int:
+ if x, ok := constant.Int64Val(val); ok {
+ return x
+ }
+ if x, ok := constant.Uint64Val(val); ok {
+ return x
+ }
+ case constant.Float:
+ if x, ok := constant.Float64Val(val); ok {
+ return x
+ }
+ case constant.String:
+ return constant.StringVal(val)
+ }
+ return nil
+}
+
+// A valueMap maps a case value (of a basic Go type) to a list of positions
+// where the same case value appeared, together with the corresponding case
+// types.
+// Since two case values may have the same "underlying" value but different
+// types we need to also check the value's types (e.g., byte(1) vs myByte(1))
+// when the switch expression is of interface type.
+type (
+ valueMap map[interface{}][]valueType // underlying Go value -> valueType
+ valueType struct {
+ pos syntax.Pos
+ typ Type
+ }
+)
+
+func (check *Checker) caseValues(x *operand, values []syntax.Expr, seen valueMap) {
+L:
+ for _, e := range values {
+ var v operand
+ check.expr(nil, &v, e)
+ if x.mode == invalid || v.mode == invalid {
+ continue L
+ }
+ check.convertUntyped(&v, x.typ)
+ if v.mode == invalid {
+ continue L
+ }
+ // Order matters: By comparing v against x, error positions are at the case values.
+ res := v // keep original v unchanged
+ check.comparison(&res, x, syntax.Eql, true)
+ if res.mode == invalid {
+ continue L
+ }
+ if v.mode != constant_ {
+ continue L // we're done
+ }
+ // look for duplicate values
+ if val := goVal(v.val); val != nil {
+ // look for duplicate types for a given value
+ // (quadratic algorithm, but these lists tend to be very short)
+ for _, vt := range seen[val] {
+ if Identical(v.typ, vt.typ) {
+ var err error_
+ err.code = DuplicateCase
+ err.errorf(&v, "duplicate case %s in expression switch", &v)
+ err.errorf(vt.pos, "previous case")
+ check.report(&err)
+ continue L
+ }
+ }
+ seen[val] = append(seen[val], valueType{v.Pos(), v.typ})
+ }
+ }
+}
+
+// isNil reports whether the expression e denotes the predeclared value nil.
+func (check *Checker) isNil(e syntax.Expr) bool {
+ // The only way to express the nil value is by literally writing nil (possibly in parentheses).
+ if name, _ := syntax.Unparen(e).(*syntax.Name); name != nil {
+ _, ok := check.lookup(name.Value).(*Nil)
+ return ok
+ }
+ return false
+}
+
+// If the type switch expression is invalid, x is nil.
+func (check *Checker) caseTypes(x *operand, types []syntax.Expr, seen map[Type]syntax.Expr) (T Type) {
+ var dummy operand
+L:
+ for _, e := range types {
+ // The spec allows the value nil instead of a type.
+ if check.isNil(e) {
+ T = nil
+ check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings
+ } else {
+ T = check.varType(e)
+ if !isValid(T) {
+ continue L
+ }
+ }
+ // look for duplicate types
+ // (quadratic algorithm, but type switches tend to be reasonably small)
+ for t, other := range seen {
+ if T == nil && t == nil || T != nil && t != nil && Identical(T, t) {
+ // talk about "case" rather than "type" because of nil case
+ Ts := "nil"
+ if T != nil {
+ Ts = TypeString(T, check.qualifier)
+ }
+ var err error_
+ err.code = DuplicateCase
+ err.errorf(e, "duplicate case %s in type switch", Ts)
+ err.errorf(other, "previous case")
+ check.report(&err)
+ continue L
+ }
+ }
+ seen[T] = e
+ if x != nil && T != nil {
+ check.typeAssertion(e, x, T, true)
+ }
+ }
+ return
+}
+
+// TODO(gri) Once we are certain that typeHash is correct in all situations, use this version of caseTypes instead.
+// (Currently it may be possible that different types have identical names and import paths due to ImporterFrom.)
+//
+// func (check *Checker) caseTypes(x *operand, xtyp *Interface, types []syntax.Expr, seen map[string]syntax.Expr) (T Type) {
+// var dummy operand
+// L:
+// for _, e := range types {
+// // The spec allows the value nil instead of a type.
+// var hash string
+// if check.isNil(e) {
+// check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings
+// T = nil
+// hash = "<nil>" // avoid collision with a type named nil
+// } else {
+// T = check.varType(e)
+// if !isValid(T) {
+// continue L
+// }
+// hash = typeHash(T, nil)
+// }
+// // look for duplicate types
+// if other := seen[hash]; other != nil {
+// // talk about "case" rather than "type" because of nil case
+// Ts := "nil"
+// if T != nil {
+// Ts = TypeString(T, check.qualifier)
+// }
+// var err error_
+// err.code = _DuplicateCase
+// err.errorf(e, "duplicate case %s in type switch", Ts)
+// err.errorf(other, "previous case")
+// check.report(&err)
+// continue L
+// }
+// seen[hash] = e
+// if T != nil {
+// check.typeAssertion(e, x, xtyp, T, true)
+// }
+// }
+// return
+// }
+
+// stmt typechecks statement s.
+func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
+ // statements must end with the same top scope as they started with
+ if debug {
+ defer func(scope *Scope) {
+ // don't check if code is panicking
+ if p := recover(); p != nil {
+ panic(p)
+ }
+ assert(scope == check.scope)
+ }(check.scope)
+ }
+
+ // process collected function literals before scope changes
+ defer check.processDelayed(len(check.delayed))
+
+ // reset context for statements of inner blocks
+ inner := ctxt &^ (fallthroughOk | finalSwitchCase | inTypeSwitch)
+
+ switch s := s.(type) {
+ case *syntax.EmptyStmt:
+ // ignore
+
+ case *syntax.DeclStmt:
+ check.declStmt(s.DeclList)
+
+ case *syntax.LabeledStmt:
+ check.hasLabel = true
+ check.stmt(ctxt, s.Stmt)
+
+ case *syntax.ExprStmt:
+ // spec: "With the exception of specific built-in functions,
+ // function and method calls and receive operations can appear
+ // in statement context. Such statements may be parenthesized."
+ var x operand
+ kind := check.rawExpr(nil, &x, s.X, nil, false)
+ var msg string
+ var code Code
+ switch x.mode {
+ default:
+ if kind == statement {
+ return
+ }
+ msg = "is not used"
+ code = UnusedExpr
+ case builtin:
+ msg = "must be called"
+ code = UncalledBuiltin
+ case typexpr:
+ msg = "is not an expression"
+ code = NotAnExpr
+ }
+ check.errorf(&x, code, "%s %s", &x, msg)
+
+ case *syntax.SendStmt:
+ var ch, val operand
+ check.expr(nil, &ch, s.Chan)
+ check.expr(nil, &val, s.Value)
+ if ch.mode == invalid || val.mode == invalid {
+ return
+ }
+ u := coreType(ch.typ)
+ if u == nil {
+ check.errorf(s, InvalidSend, invalidOp+"cannot send to %s: no core type", &ch)
+ return
+ }
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(s, InvalidSend, invalidOp+"cannot send to non-channel %s", &ch)
+ return
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(s, InvalidSend, invalidOp+"cannot send to receive-only channel %s", &ch)
+ return
+ }
+ check.assignment(&val, uch.elem, "send")
+
+ case *syntax.AssignStmt:
+ if s.Rhs == nil {
+ // x++ or x--
+ // (no need to call unpackExpr as s.Lhs must be single-valued)
+ var x operand
+ check.expr(nil, &x, s.Lhs)
+ if x.mode == invalid {
+ return
+ }
+ if !allNumeric(x.typ) {
+ check.errorf(s.Lhs, NonNumericIncDec, invalidOp+"%s%s%s (non-numeric type %s)", s.Lhs, s.Op, s.Op, x.typ)
+ return
+ }
+ check.assignVar(s.Lhs, nil, &x, "assignment")
+ return
+ }
+
+ lhs := syntax.UnpackListExpr(s.Lhs)
+ rhs := syntax.UnpackListExpr(s.Rhs)
+ switch s.Op {
+ case 0:
+ check.assignVars(lhs, rhs)
+ return
+ case syntax.Def:
+ check.shortVarDecl(s.Pos(), lhs, rhs)
+ return
+ }
+
+ // assignment operations
+ if len(lhs) != 1 || len(rhs) != 1 {
+ check.errorf(s, MultiValAssignOp, "assignment operation %s requires single-valued expressions", s.Op)
+ return
+ }
+
+ var x operand
+ check.binary(&x, nil, lhs[0], rhs[0], s.Op)
+ check.assignVar(lhs[0], nil, &x, "assignment")
+
+ case *syntax.CallStmt:
+ kind := "go"
+ if s.Tok == syntax.Defer {
+ kind = "defer"
+ }
+ check.suspendedCall(kind, s.Call)
+
+ case *syntax.ReturnStmt:
+ res := check.sig.results
+ // Return with implicit results allowed for function with named results.
+ // (If one is named, all are named.)
+ results := syntax.UnpackListExpr(s.Results)
+ if len(results) == 0 && res.Len() > 0 && res.vars[0].name != "" {
+ // spec: "Implementation restriction: A compiler may disallow an empty expression
+ // list in a "return" statement if a different entity (constant, type, or variable)
+ // with the same name as a result parameter is in scope at the place of the return."
+ for _, obj := range res.vars {
+ if alt := check.lookup(obj.name); alt != nil && alt != obj {
+ var err error_
+ err.code = OutOfScopeResult
+ err.errorf(s, "result parameter %s not in scope at return", obj.name)
+ err.errorf(alt, "inner declaration of %s", obj)
+ check.report(&err)
+ // ok to continue
+ }
+ }
+ } else {
+ var lhs []*Var
+ if res.Len() > 0 {
+ lhs = res.vars
+ }
+ check.initVars(lhs, results, s)
+ }
+
+ case *syntax.BranchStmt:
+ if s.Label != nil {
+ check.hasLabel = true
+ break // checked in 2nd pass (check.labels)
+ }
+ if check.conf.IgnoreBranchErrors {
+ break
+ }
+ switch s.Tok {
+ case syntax.Break:
+ if ctxt&breakOk == 0 {
+ check.error(s, MisplacedBreak, "break not in for, switch, or select statement")
+ }
+ case syntax.Continue:
+ if ctxt&continueOk == 0 {
+ check.error(s, MisplacedContinue, "continue not in for statement")
+ }
+ case syntax.Fallthrough:
+ if ctxt&fallthroughOk == 0 {
+ var msg string
+ switch {
+ case ctxt&finalSwitchCase != 0:
+ msg = "cannot fallthrough final case in switch"
+ case ctxt&inTypeSwitch != 0:
+ msg = "cannot fallthrough in type switch"
+ default:
+ msg = "fallthrough statement out of place"
+ }
+ check.error(s, MisplacedFallthrough, msg)
+ }
+ case syntax.Goto:
+ // goto's must have labels, should have been caught above
+ fallthrough
+ default:
+ check.errorf(s, InvalidSyntaxTree, "branch statement: %s", s.Tok)
+ }
+
+ case *syntax.BlockStmt:
+ check.openScope(s, "block")
+ defer check.closeScope()
+
+ check.stmtList(inner, s.List)
+
+ case *syntax.IfStmt:
+ check.openScope(s, "if")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ var x operand
+ check.expr(nil, &x, s.Cond)
+ if x.mode != invalid && !allBoolean(x.typ) {
+ check.error(s.Cond, InvalidCond, "non-boolean condition in if statement")
+ }
+ check.stmt(inner, s.Then)
+ // The parser produces a correct AST but if it was modified
+ // elsewhere the else branch may be invalid. Check again.
+ switch s.Else.(type) {
+ case nil:
+ // valid or error already reported
+ case *syntax.IfStmt, *syntax.BlockStmt:
+ check.stmt(inner, s.Else)
+ default:
+ check.error(s.Else, InvalidSyntaxTree, "invalid else branch in if statement")
+ }
+
+ case *syntax.SwitchStmt:
+ inner |= breakOk
+ check.openScope(s, "switch")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+
+ if g, _ := s.Tag.(*syntax.TypeSwitchGuard); g != nil {
+ check.typeSwitchStmt(inner|inTypeSwitch, s, g)
+ } else {
+ check.switchStmt(inner, s)
+ }
+
+ case *syntax.SelectStmt:
+ inner |= breakOk
+
+ check.multipleSelectDefaults(s.Body)
+
+ for i, clause := range s.Body {
+ if clause == nil {
+ continue // error reported before
+ }
+
+ // clause.Comm must be a SendStmt, RecvStmt, or default case
+ valid := false
+ var rhs syntax.Expr // rhs of RecvStmt, or nil
+ switch s := clause.Comm.(type) {
+ case nil, *syntax.SendStmt:
+ valid = true
+ case *syntax.AssignStmt:
+ if _, ok := s.Rhs.(*syntax.ListExpr); !ok {
+ rhs = s.Rhs
+ }
+ case *syntax.ExprStmt:
+ rhs = s.X
+ }
+
+ // if present, rhs must be a receive operation
+ if rhs != nil {
+ if x, _ := syntax.Unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv {
+ valid = true
+ }
+ }
+
+ if !valid {
+ check.error(clause.Comm, InvalidSelectCase, "select case must be send or receive (possibly with assignment)")
+ continue
+ }
+ end := s.Rbrace
+ if i+1 < len(s.Body) {
+ end = s.Body[i+1].Pos()
+ }
+ check.openScopeUntil(clause, end, "case")
+ if clause.Comm != nil {
+ check.stmt(inner, clause.Comm)
+ }
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+
+ case *syntax.ForStmt:
+ inner |= breakOk | continueOk
+
+ if rclause, _ := s.Init.(*syntax.RangeClause); rclause != nil {
+ check.rangeStmt(inner, s, rclause)
+ break
+ }
+
+ check.openScope(s, "for")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ if s.Cond != nil {
+ var x operand
+ check.expr(nil, &x, s.Cond)
+ if x.mode != invalid && !allBoolean(x.typ) {
+ check.error(s.Cond, InvalidCond, "non-boolean condition in for statement")
+ }
+ }
+ check.simpleStmt(s.Post)
+ // spec: "The init statement may be a short variable
+ // declaration, but the post statement must not."
+ if s, _ := s.Post.(*syntax.AssignStmt); s != nil && s.Op == syntax.Def {
+ // The parser already reported an error.
+ check.use(s.Lhs) // avoid follow-up errors
+ }
+ check.stmt(inner, s.Body)
+
+ default:
+ check.error(s, InvalidSyntaxTree, "invalid statement")
+ }
+}
+
+func (check *Checker) switchStmt(inner stmtContext, s *syntax.SwitchStmt) {
+ // init statement already handled
+
+ var x operand
+ if s.Tag != nil {
+ check.expr(nil, &x, s.Tag)
+ // By checking assignment of x to an invisible temporary
+ // (as a compiler would), we get all the relevant checks.
+ check.assignment(&x, nil, "switch expression")
+ if x.mode != invalid && !Comparable(x.typ) && !hasNil(x.typ) {
+ check.errorf(&x, InvalidExprSwitch, "cannot switch on %s (%s is not comparable)", &x, x.typ)
+ x.mode = invalid
+ }
+ } else {
+ // spec: "A missing switch expression is
+ // equivalent to the boolean value true."
+ x.mode = constant_
+ x.typ = Typ[Bool]
+ x.val = constant.MakeBool(true)
+ // TODO(gri) should have a better position here
+ pos := s.Rbrace
+ if len(s.Body) > 0 {
+ pos = s.Body[0].Pos()
+ }
+ x.expr = syntax.NewName(pos, "true")
+ }
+
+ check.multipleSwitchDefaults(s.Body)
+
+ seen := make(valueMap) // map of seen case values to positions and types
+ for i, clause := range s.Body {
+ if clause == nil {
+ check.error(clause, InvalidSyntaxTree, "incorrect expression switch case")
+ continue
+ }
+ end := s.Rbrace
+ inner := inner
+ if i+1 < len(s.Body) {
+ end = s.Body[i+1].Pos()
+ inner |= fallthroughOk
+ } else {
+ inner |= finalSwitchCase
+ }
+ check.caseValues(&x, syntax.UnpackListExpr(clause.Cases), seen)
+ check.openScopeUntil(clause, end, "case")
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+}
+
+func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, guard *syntax.TypeSwitchGuard) {
+ // init statement already handled
+
+ // A type switch guard must be of the form:
+ //
+ // TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" .
+ // \__lhs__/ \___rhs___/
+
+ // check lhs, if any
+ lhs := guard.Lhs
+ if lhs != nil {
+ if lhs.Value == "_" {
+ // _ := x.(type) is an invalid short variable declaration
+ check.softErrorf(lhs, NoNewVar, "no new variable on left side of :=")
+ lhs = nil // avoid declared and not used error below
+ } else {
+ check.recordDef(lhs, nil) // lhs variable is implicitly declared in each cause clause
+ }
+ }
+
+ // check rhs
+ var x operand
+ check.expr(nil, &x, guard.X)
+ if x.mode == invalid {
+ return
+ }
+
+ // TODO(gri) we may want to permit type switches on type parameter values at some point
+ var sx *operand // switch expression against which cases are compared against; nil if invalid
+ if isTypeParam(x.typ) {
+ check.errorf(&x, InvalidTypeSwitch, "cannot use type switch on type parameter value %s", &x)
+ } else {
+ if _, ok := under(x.typ).(*Interface); ok {
+ sx = &x
+ } else {
+ check.errorf(&x, InvalidTypeSwitch, "%s is not an interface", &x)
+ }
+ }
+
+ check.multipleSwitchDefaults(s.Body)
+
+ var lhsVars []*Var // list of implicitly declared lhs variables
+ seen := make(map[Type]syntax.Expr) // map of seen types to positions
+ for i, clause := range s.Body {
+ if clause == nil {
+ check.error(s, InvalidSyntaxTree, "incorrect type switch case")
+ continue
+ }
+ end := s.Rbrace
+ if i+1 < len(s.Body) {
+ end = s.Body[i+1].Pos()
+ }
+ // Check each type in this type switch case.
+ cases := syntax.UnpackListExpr(clause.Cases)
+ T := check.caseTypes(sx, cases, seen)
+ check.openScopeUntil(clause, end, "case")
+ // If lhs exists, declare a corresponding variable in the case-local scope.
+ if lhs != nil {
+ // spec: "The TypeSwitchGuard may include a short variable declaration.
+ // When that form is used, the variable is declared at the beginning of
+ // the implicit block in each clause. In clauses with a case listing
+ // exactly one type, the variable has that type; otherwise, the variable
+ // has the type of the expression in the TypeSwitchGuard."
+ if len(cases) != 1 || T == nil {
+ T = x.typ
+ }
+ obj := NewVar(lhs.Pos(), check.pkg, lhs.Value, T)
+ // TODO(mdempsky): Just use clause.Colon? Why did I even suggest
+ // "at the end of the TypeSwitchCase" in go.dev/issue/16794 instead?
+ scopePos := clause.Pos() // for default clause (len(List) == 0)
+ if n := len(cases); n > 0 {
+ scopePos = syntax.EndPos(cases[n-1])
+ }
+ check.declare(check.scope, nil, obj, scopePos)
+ check.recordImplicit(clause, obj)
+ // For the "declared and not used" error, all lhs variables act as
+ // one; i.e., if any one of them is 'used', all of them are 'used'.
+ // Collect them for later analysis.
+ lhsVars = append(lhsVars, obj)
+ }
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+
+ // If lhs exists, we must have at least one lhs variable that was used.
+ // (We can't use check.usage because that only looks at one scope; and
+ // we don't want to use the same variable for all scopes and change the
+ // variable type underfoot.)
+ if lhs != nil {
+ var used bool
+ for _, v := range lhsVars {
+ if v.used {
+ used = true
+ }
+ v.used = true // avoid usage error when checking entire function
+ }
+ if !used {
+ check.softErrorf(lhs, UnusedVar, "%s declared and not used", lhs.Value)
+ }
+ }
+}
+
+func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) {
+ // Convert syntax form to local variables.
+ type Expr = syntax.Expr
+ type identType = syntax.Name
+ identName := func(n *identType) string { return n.Value }
+ sKey := rclause.Lhs // possibly nil
+ var sValue, sExtra syntax.Expr
+ if p, _ := sKey.(*syntax.ListExpr); p != nil {
+ if len(p.ElemList) < 2 {
+ check.error(s, InvalidSyntaxTree, "invalid lhs in range clause")
+ return
+ }
+ // len(p.ElemList) >= 2
+ sKey = p.ElemList[0]
+ sValue = p.ElemList[1]
+ if len(p.ElemList) > 2 {
+ // delay error reporting until we know more
+ sExtra = p.ElemList[2]
+ }
+ }
+ isDef := rclause.Def
+ rangeVar := rclause.X
+ noNewVarPos := s
+
+ // Do not use rclause anymore.
+ rclause = nil
+
+ // Everything from here on is shared between cmd/compile/internal/types2 and go/types.
+
+ // check expression to iterate over
+ var x operand
+ check.expr(nil, &x, rangeVar)
+
+ // determine key/value types
+ var key, val Type
+ if x.mode != invalid {
+ // Ranging over a type parameter is permitted if it has a core type.
+ k, v, cause, isFunc, ok := rangeKeyVal(x.typ, func(v goVersion) bool {
+ return check.allowVersion(check.pkg, x.expr, v)
+ })
+ switch {
+ case !ok && cause != "":
+ check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s: %s", &x, cause)
+ case !ok:
+ check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x)
+ case k == nil && sKey != nil:
+ check.softErrorf(sKey, InvalidIterVar, "range over %s permits no iteration variables", &x)
+ case v == nil && sValue != nil:
+ check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x)
+ case sExtra != nil:
+ check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables")
+ case isFunc && ((k == nil) != (sKey == nil) || (v == nil) != (sValue == nil)):
+ var count string
+ switch {
+ case k == nil:
+ count = "no iteration variables"
+ case v == nil:
+ count = "one iteration variable"
+ default:
+ count = "two iteration variables"
+ }
+ check.softErrorf(&x, InvalidIterVar, "range over %s must have %s", &x, count)
+ }
+ key, val = k, v
+ }
+
+ // Open the for-statement block scope now, after the range clause.
+ // Iteration variables declared with := need to go in this scope (was go.dev/issue/51437).
+ check.openScope(s, "range")
+ defer check.closeScope()
+
+ // check assignment to/declaration of iteration variables
+ // (irregular assignment, cannot easily map to existing assignment checks)
+
+ // lhs expressions and initialization value (rhs) types
+ lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil
+ rhs := [2]Type{key, val} // key, val may be nil
+
+ constIntRange := x.mode == constant_ && isInteger(x.typ)
+
+ if isDef {
+ // short variable declaration
+ var vars []*Var
+ for i, lhs := range lhs {
+ if lhs == nil {
+ continue
+ }
+
+ // determine lhs variable
+ var obj *Var
+ if ident, _ := lhs.(*identType); ident != nil {
+ // declare new variable
+ name := identName(ident)
+ obj = NewVar(ident.Pos(), check.pkg, name, nil)
+ check.recordDef(ident, obj)
+ // _ variables don't count as new variables
+ if name != "_" {
+ vars = append(vars, obj)
+ }
+ } else {
+ check.errorf(lhs, InvalidSyntaxTree, "cannot declare %s", lhs)
+ obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
+ }
+
+ // initialize lhs variable
+ if constIntRange {
+ check.initVar(obj, &x, "range clause")
+ } else if typ := rhs[i]; typ != nil {
+ x.mode = value
+ x.expr = lhs // we don't have a better rhs expression to use here
+ x.typ = typ
+ check.initVar(obj, &x, "assignment") // error is on variable, use "assignment" not "range clause"
+ } else {
+ obj.typ = Typ[Invalid]
+ obj.used = true // don't complain about unused variable
+ }
+ }
+
+ // declare variables
+ if len(vars) > 0 {
+ scopePos := s.Body.Pos()
+ for _, obj := range vars {
+ check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
+ }
+ } else {
+ check.error(noNewVarPos, NoNewVar, "no new variables on left side of :=")
+ }
+ } else if sKey != nil /* lhs[0] != nil */ {
+ // ordinary assignment
+ for i, lhs := range lhs {
+ if lhs == nil {
+ continue
+ }
+
+ if constIntRange {
+ check.assignVar(lhs, nil, &x, "range clause")
+ } else if typ := rhs[i]; typ != nil {
+ x.mode = value
+ x.expr = lhs // we don't have a better rhs expression to use here
+ x.typ = typ
+ check.assignVar(lhs, nil, &x, "assignment") // error is on variable, use "assignment" not "range clause"
+ }
+ }
+ } else if constIntRange {
+ // If we don't have any iteration variables, we still need to
+ // check that a (possibly untyped) integer range expression x
+ // is valid.
+ // We do this by checking the assignment _ = x. This ensures
+ // that an untyped x can be converted to a value of type int.
+ check.assignment(&x, nil, "range clause")
+ }
+
+ check.stmt(inner, s.Body)
+}
+
+// RangeKeyVal returns the key and value types for a range over typ.
+// Exported for use by the compiler (does not exist in go/types).
+func RangeKeyVal(typ Type) (Type, Type) {
+ key, val, _, _, _ := rangeKeyVal(typ, nil)
+ return key, val
+}
+
+// rangeKeyVal returns the key and value type produced by a range clause
+// over an expression of type typ.
+// If allowVersion != nil, it is used to check the required language version.
+// If the range clause is not permitted, rangeKeyVal returns ok = false.
+// When ok = false, rangeKeyVal may also return a reason in cause.
+func rangeKeyVal(typ Type, allowVersion func(goVersion) bool) (key, val Type, cause string, isFunc, ok bool) {
+ bad := func(cause string) (Type, Type, string, bool, bool) {
+ return Typ[Invalid], Typ[Invalid], cause, false, false
+ }
+ toSig := func(t Type) *Signature {
+ sig, _ := coreType(t).(*Signature)
+ return sig
+ }
+
+ orig := typ
+ switch typ := arrayPtrDeref(coreType(typ)).(type) {
+ case nil:
+ return bad("no core type")
+ case *Basic:
+ if isString(typ) {
+ return Typ[Int], universeRune, "", false, true // use 'rune' name
+ }
+ if isInteger(typ) {
+ if allowVersion != nil && !allowVersion(go1_22) {
+ return bad("requires go1.22 or later")
+ }
+ return orig, nil, "", false, true
+ }
+ case *Array:
+ return Typ[Int], typ.elem, "", false, true
+ case *Slice:
+ return Typ[Int], typ.elem, "", false, true
+ case *Map:
+ return typ.key, typ.elem, "", false, true
+ case *Chan:
+ if typ.dir == SendOnly {
+ return bad("receive from send-only channel")
+ }
+ return typ.elem, nil, "", false, true
+ case *Signature:
+ // TODO(gri) when this becomes enabled permanently, add version check
+ if !buildcfg.Experiment.RangeFunc {
+ break
+ }
+ assert(typ.Recv() == nil)
+ switch {
+ case typ.Params().Len() != 1:
+ return bad("func must be func(yield func(...) bool): wrong argument count")
+ case toSig(typ.Params().At(0).Type()) == nil:
+ return bad("func must be func(yield func(...) bool): argument is not func")
+ case typ.Results().Len() != 0:
+ return bad("func must be func(yield func(...) bool): unexpected results")
+ }
+ cb := toSig(typ.Params().At(0).Type())
+ assert(cb.Recv() == nil)
+ switch {
+ case cb.Params().Len() > 2:
+ return bad("func must be func(yield func(...) bool): yield func has too many parameters")
+ case cb.Results().Len() != 1 || !isBoolean(cb.Results().At(0).Type()):
+ return bad("func must be func(yield func(...) bool): yield func does not return bool")
+ }
+ if cb.Params().Len() >= 1 {
+ key = cb.Params().At(0).Type()
+ }
+ if cb.Params().Len() >= 2 {
+ val = cb.Params().At(1).Type()
+ }
+ return key, val, "", true, true
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
new file mode 100644
index 0000000..9e46b34
--- /dev/null
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -0,0 +1,230 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+ "strconv"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Struct represents a struct type.
+type Struct struct {
+ fields []*Var // fields != nil indicates the struct is set up (possibly with len(fields) == 0)
+ tags []string // field tags; nil if there are no tags
+}
+
+// NewStruct returns a new struct with the given fields and corresponding field tags.
+// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be
+// only as long as required to hold the tag with the largest index i. Consequently,
+// if no field has a tag, tags may be nil.
+func NewStruct(fields []*Var, tags []string) *Struct {
+ var fset objset
+ for _, f := range fields {
+ if f.name != "_" && fset.insert(f) != nil {
+ panic("multiple fields with the same name")
+ }
+ }
+ if len(tags) > len(fields) {
+ panic("more tags than fields")
+ }
+ s := &Struct{fields: fields, tags: tags}
+ s.markComplete()
+ return s
+}
+
+// NumFields returns the number of fields in the struct (including blank and embedded fields).
+func (s *Struct) NumFields() int { return len(s.fields) }
+
+// Field returns the i'th field for 0 <= i < NumFields().
+func (s *Struct) Field(i int) *Var { return s.fields[i] }
+
+// Tag returns the i'th field tag for 0 <= i < NumFields().
+func (s *Struct) Tag(i int) string {
+ if i < len(s.tags) {
+ return s.tags[i]
+ }
+ return ""
+}
+
+func (s *Struct) Underlying() Type { return s }
+func (s *Struct) String() string { return TypeString(s, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (s *Struct) markComplete() {
+ if s.fields == nil {
+ s.fields = make([]*Var, 0)
+ }
+}
+
+func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
+ if e.FieldList == nil {
+ styp.markComplete()
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Value
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (go.dev/issue/25627).
+ addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ var prev syntax.Expr
+ for i, f := range e.FieldList {
+ // Fields declared syntactically with the same type (e.g.: a, b, c T)
+ // share the same type expression. Only check type if it's a new type.
+ if i == 0 || f.Type != prev {
+ typ = check.varType(f.Type)
+ prev = f.Type
+ }
+ tag = ""
+ if i < len(e.TagList) {
+ tag = check.tag(e.TagList[i])
+ }
+ if f.Name != nil {
+ // named field
+ add(f.Name, false, f.Name.Pos())
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := syntax.StartPos(f.Type) // position of type, for errors
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ check.errorf(pos, InvalidSyntaxTree, "invalid embedded field type %s", f.Type)
+ name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, name.Pos()) // struct{p.T} field has position of T
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+ embeddedTyp := typ // for closure below
+ embeddedPos := pos
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch u := under(t).(type) {
+ case *Basic:
+ if !isValid(t) {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ check.error(embeddedPos, InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.error(embeddedPos, InvalidPtrEmbed, "embedded field type cannot be a pointer")
+ case *Interface:
+ if isTypeParam(t) {
+ // The error code here is inconsistent with other error codes for
+ // invalid embedding, because this restriction may be relaxed in the
+ // future, and so it did not warrant a new error code.
+ check.error(embeddedPos, MisplacedTypeParam, "embedded field type cannot be a (pointer to a) type parameter")
+ break
+ }
+ if isPtr {
+ check.error(embeddedPos, InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ }).describef(embeddedPos, "check embedded type %s", embeddedTyp)
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+ styp.markComplete()
+}
+
+func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
+ switch e := e.(type) {
+ case *syntax.Name:
+ return e
+ case *syntax.Operation:
+ if base := ptrBase(e); base != nil {
+ // *T is valid, but **T is not
+ if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
+ return embeddedFieldIdent(e.X)
+ }
+ }
+ case *syntax.SelectorExpr:
+ return e.Sel
+ case *syntax.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
+
+func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ var err error_
+ err.code = DuplicateDecl
+ err.errorf(pos, "%s redeclared", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *syntax.BasicLit) string {
+ // If t.Bad, an error was reported during parsing.
+ if t != nil && !t.Bad {
+ if t.Kind == syntax.StringLit {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.errorf(t, InvalidSyntaxTree, "incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
+
+func ptrBase(x *syntax.Operation) syntax.Expr {
+ if x.Op == syntax.Mul && x.Y == nil {
+ return x.X
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
new file mode 100644
index 0000000..09dc585
--- /dev/null
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -0,0 +1,428 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type parameter substitution.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+)
+
+type substMap map[*TypeParam]Type
+
+// makeSubstMap creates a new substitution map mapping tpars[i] to targs[i].
+// If targs[i] is nil, tpars[i] is not substituted.
+func makeSubstMap(tpars []*TypeParam, targs []Type) substMap {
+ assert(len(tpars) == len(targs))
+ proj := make(substMap, len(tpars))
+ for i, tpar := range tpars {
+ proj[tpar] = targs[i]
+ }
+ return proj
+}
+
+// makeRenameMap is like makeSubstMap, but creates a map used to rename type
+// parameters in from with the type parameters in to.
+func makeRenameMap(from, to []*TypeParam) substMap {
+ assert(len(from) == len(to))
+ proj := make(substMap, len(from))
+ for i, tpar := range from {
+ proj[tpar] = to[i]
+ }
+ return proj
+}
+
+func (m substMap) empty() bool {
+ return len(m) == 0
+}
+
+func (m substMap) lookup(tpar *TypeParam) Type {
+ if t := m[tpar]; t != nil {
+ return t
+ }
+ return tpar
+}
+
+// subst returns the type typ with its type parameters tpars replaced by the
+// corresponding type arguments targs, recursively. subst doesn't modify the
+// incoming type. If a substitution took place, the result type is different
+// from the incoming type.
+//
+// If expanding is non-nil, it is the instance type currently being expanded.
+// One of expanding or ctxt must be non-nil.
+func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, expanding *Named, ctxt *Context) Type {
+ assert(expanding != nil || ctxt != nil)
+
+ if smap.empty() {
+ return typ
+ }
+
+ // common cases
+ switch t := typ.(type) {
+ case *Basic:
+ return typ // nothing to do
+ case *TypeParam:
+ return smap.lookup(t)
+ }
+
+ // general case
+ subst := subster{
+ pos: pos,
+ smap: smap,
+ check: check,
+ expanding: expanding,
+ ctxt: ctxt,
+ }
+ return subst.typ(typ)
+}
+
+type subster struct {
+ pos syntax.Pos
+ smap substMap
+ check *Checker // nil if called via Instantiate
+ expanding *Named // if non-nil, the instance that is being expanded
+ ctxt *Context
+}
+
+func (subst *subster) typ(typ Type) Type {
+ switch t := typ.(type) {
+ case nil:
+ // Call typOrNil if it's possible that typ is nil.
+ panic("nil typ")
+
+ case *Basic:
+ // nothing to do
+
+ case *Array:
+ elem := subst.typOrNil(t.elem)
+ if elem != t.elem {
+ return &Array{len: t.len, elem: elem}
+ }
+
+ case *Slice:
+ elem := subst.typOrNil(t.elem)
+ if elem != t.elem {
+ return &Slice{elem: elem}
+ }
+
+ case *Struct:
+ if fields, copied := subst.varList(t.fields); copied {
+ s := &Struct{fields: fields, tags: t.tags}
+ s.markComplete()
+ return s
+ }
+
+ case *Pointer:
+ base := subst.typ(t.base)
+ if base != t.base {
+ return &Pointer{base: base}
+ }
+
+ case *Tuple:
+ return subst.tuple(t)
+
+ case *Signature:
+ // Preserve the receiver: it is handled during *Interface and *Named type
+ // substitution.
+ //
+ // Naively doing the substitution here can lead to an infinite recursion in
+ // the case where the receiver is an interface. For example, consider the
+ // following declaration:
+ //
+ // type T[A any] struct { f interface{ m() } }
+ //
+ // In this case, the type of f is an interface that is itself the receiver
+ // type of all of its methods. Because we have no type name to break
+ // cycles, substituting in the recv results in an infinite loop of
+ // recv->interface->recv->interface->...
+ recv := t.recv
+
+ params := subst.tuple(t.params)
+ results := subst.tuple(t.results)
+ if params != t.params || results != t.results {
+ return &Signature{
+ rparams: t.rparams,
+ // TODO(gri) why can't we nil out tparams here, rather than in instantiate?
+ tparams: t.tparams,
+ // instantiated signatures have a nil scope
+ recv: recv,
+ params: params,
+ results: results,
+ variadic: t.variadic,
+ }
+ }
+
+ case *Union:
+ terms, copied := subst.termlist(t.terms)
+ if copied {
+ // term list substitution may introduce duplicate terms (unlikely but possible).
+ // This is ok; lazy type set computation will determine the actual type set
+ // in normal form.
+ return &Union{terms}
+ }
+
+ case *Interface:
+ methods, mcopied := subst.funcList(t.methods)
+ embeddeds, ecopied := subst.typeList(t.embeddeds)
+ if mcopied || ecopied {
+ iface := subst.check.newInterface()
+ iface.embeddeds = embeddeds
+ iface.embedPos = t.embedPos
+ iface.implicit = t.implicit
+ assert(t.complete) // otherwise we are copying incomplete data
+ iface.complete = t.complete
+ // If we've changed the interface type, we may need to replace its
+ // receiver if the receiver type is the original interface. Receivers of
+ // *Named type are replaced during named type expansion.
+ //
+ // Notably, it's possible to reach here and not create a new *Interface,
+ // even though the receiver type may be parameterized. For example:
+ //
+ // type T[P any] interface{ m() }
+ //
+ // In this case the interface will not be substituted here, because its
+ // method signatures do not depend on the type parameter P, but we still
+ // need to create new interface methods to hold the instantiated
+ // receiver. This is handled by Named.expandUnderlying.
+ iface.methods, _ = replaceRecvType(methods, t, iface)
+
+ // If check != nil, check.newInterface will have saved the interface for later completion.
+ if subst.check == nil { // golang/go#61561: all newly created interfaces must be completed
+ iface.typeSet()
+ }
+ return iface
+ }
+
+ case *Map:
+ key := subst.typ(t.key)
+ elem := subst.typ(t.elem)
+ if key != t.key || elem != t.elem {
+ return &Map{key: key, elem: elem}
+ }
+
+ case *Chan:
+ elem := subst.typ(t.elem)
+ if elem != t.elem {
+ return &Chan{dir: t.dir, elem: elem}
+ }
+
+ case *Named:
+ // dump is for debugging
+ dump := func(string, ...interface{}) {}
+ if subst.check != nil && subst.check.conf.Trace {
+ subst.check.indent++
+ defer func() {
+ subst.check.indent--
+ }()
+ dump = func(format string, args ...interface{}) {
+ subst.check.trace(subst.pos, format, args...)
+ }
+ }
+
+ // subst is called during expansion, so in this function we need to be
+ // careful not to call any methods that would cause t to be expanded: doing
+ // so would result in deadlock.
+ //
+ // So we call t.Origin().TypeParams() rather than t.TypeParams().
+ orig := t.Origin()
+ n := orig.TypeParams().Len()
+ if n == 0 {
+ dump(">>> %s is not parameterized", t)
+ return t // type is not parameterized
+ }
+
+ var newTArgs []Type
+ if t.TypeArgs().Len() != n {
+ return Typ[Invalid] // error reported elsewhere
+ }
+
+ // already instantiated
+ dump(">>> %s already instantiated", t)
+ // For each (existing) type argument targ, determine if it needs
+ // to be substituted; i.e., if it is or contains a type parameter
+ // that has a type argument for it.
+ for i, targ := range t.TypeArgs().list() {
+ dump(">>> %d targ = %s", i, targ)
+ new_targ := subst.typ(targ)
+ if new_targ != targ {
+ dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
+ if newTArgs == nil {
+ newTArgs = make([]Type, n)
+ copy(newTArgs, t.TypeArgs().list())
+ }
+ newTArgs[i] = new_targ
+ }
+ }
+
+ if newTArgs == nil {
+ dump(">>> nothing to substitute in %s", t)
+ return t // nothing to substitute
+ }
+
+ // Create a new instance and populate the context to avoid endless
+ // recursion. The position used here is irrelevant because validation only
+ // occurs on t (we don't call validType on named), but we use subst.pos to
+ // help with debugging.
+ return subst.check.instance(subst.pos, orig, newTArgs, subst.expanding, subst.ctxt)
+
+ case *TypeParam:
+ return subst.smap.lookup(t)
+
+ default:
+ unreachable()
+ }
+
+ return typ
+}
+
+// typOrNil is like typ but if the argument is nil it is replaced with Typ[Invalid].
+// A nil type may appear in pathological cases such as type T[P any] []func(_ T([]_))
+// where an array/slice element is accessed before it is set up.
+func (subst *subster) typOrNil(typ Type) Type {
+ if typ == nil {
+ return Typ[Invalid]
+ }
+ return subst.typ(typ)
+}
+
+func (subst *subster) var_(v *Var) *Var {
+ if v != nil {
+ if typ := subst.typ(v.typ); typ != v.typ {
+ return substVar(v, typ)
+ }
+ }
+ return v
+}
+
+func substVar(v *Var, typ Type) *Var {
+ copy := *v
+ copy.typ = typ
+ copy.origin = v.Origin()
+ return &copy
+}
+
+func (subst *subster) tuple(t *Tuple) *Tuple {
+ if t != nil {
+ if vars, copied := subst.varList(t.vars); copied {
+ return &Tuple{vars: vars}
+ }
+ }
+ return t
+}
+
+func (subst *subster) varList(in []*Var) (out []*Var, copied bool) {
+ out = in
+ for i, v := range in {
+ if w := subst.var_(v); w != v {
+ if !copied {
+ // first variable that got substituted => allocate new out slice
+ // and copy all variables
+ new := make([]*Var, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = w
+ }
+ }
+ return
+}
+
+func (subst *subster) func_(f *Func) *Func {
+ if f != nil {
+ if typ := subst.typ(f.typ); typ != f.typ {
+ return substFunc(f, typ)
+ }
+ }
+ return f
+}
+
+func substFunc(f *Func, typ Type) *Func {
+ copy := *f
+ copy.typ = typ
+ copy.origin = f.Origin()
+ return &copy
+}
+
+func (subst *subster) funcList(in []*Func) (out []*Func, copied bool) {
+ out = in
+ for i, f := range in {
+ if g := subst.func_(f); g != f {
+ if !copied {
+ // first function that got substituted => allocate new out slice
+ // and copy all functions
+ new := make([]*Func, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = g
+ }
+ }
+ return
+}
+
+func (subst *subster) typeList(in []Type) (out []Type, copied bool) {
+ out = in
+ for i, t := range in {
+ if u := subst.typ(t); u != t {
+ if !copied {
+ // first function that got substituted => allocate new out slice
+ // and copy all functions
+ new := make([]Type, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = u
+ }
+ }
+ return
+}
+
+func (subst *subster) termlist(in []*Term) (out []*Term, copied bool) {
+ out = in
+ for i, t := range in {
+ if u := subst.typ(t.typ); u != t.typ {
+ if !copied {
+ // first function that got substituted => allocate new out slice
+ // and copy all functions
+ new := make([]*Term, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = NewTerm(t.tilde, u)
+ }
+ }
+ return
+}
+
+// replaceRecvType updates any function receivers that have type old to have
+// type new. It does not modify the input slice; if modifications are required,
+// the input slice and any affected signatures will be copied before mutating.
+//
+// The resulting out slice contains the updated functions, and copied reports
+// if anything was modified.
+func replaceRecvType(in []*Func, old, new Type) (out []*Func, copied bool) {
+ out = in
+ for i, method := range in {
+ sig := method.Type().(*Signature)
+ if sig.recv != nil && sig.recv.Type() == old {
+ if !copied {
+ // Allocate a new methods slice before mutating for the first time.
+ // This is defensive, as we may share methods across instantiations of
+ // a given interface type if they do not get substituted.
+ out = make([]*Func, len(in))
+ copy(out, in)
+ copied = true
+ }
+ newsig := *sig
+ newsig.recv = substVar(sig.recv, new)
+ out[i] = substFunc(method, &newsig)
+ }
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/types2/termlist.go b/src/cmd/compile/internal/types2/termlist.go
new file mode 100644
index 0000000..196f8ab
--- /dev/null
+++ b/src/cmd/compile/internal/types2/termlist.go
@@ -0,0 +1,161 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "strings"
+
+// A termlist represents the type set represented by the union
+// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// termSep is the separator used between individual terms.
+const termSep = " | "
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf strings.Builder
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(termSep)
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset yl
+ }
+ }
+ return true
+}
diff --git a/src/cmd/compile/internal/types2/termlist_test.go b/src/cmd/compile/internal/types2/termlist_test.go
new file mode 100644
index 0000000..3005d0e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/termlist_test.go
@@ -0,0 +1,284 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "strings"
+ "testing"
+)
+
+// maketl makes a term list from a string of the term list.
+func maketl(s string) termlist {
+ s = strings.ReplaceAll(s, " ", "")
+ names := strings.Split(s, "|")
+ r := make(termlist, len(names))
+ for i, n := range names {
+ r[i] = testTerm(n)
+ }
+ return r
+}
+
+func TestTermlistAll(t *testing.T) {
+ if !allTermlist.isAll() {
+ t.Errorf("allTermlist is not the set of all types")
+ }
+}
+
+func TestTermlistString(t *testing.T) {
+ for _, want := range []string{
+ "∅",
+ "𝓤",
+ "int",
+ "~int",
+ "myInt",
+ "∅ | ∅",
+ "𝓤 | 𝓤",
+ "∅ | 𝓤 | int",
+ "∅ | 𝓤 | int | myInt",
+ } {
+ if got := maketl(want).String(); got != want {
+ t.Errorf("(%v).String() == %v", want, got)
+ }
+ }
+}
+
+func TestTermlistIsEmpty(t *testing.T) {
+ for test, want := range map[string]bool{
+ "∅": true,
+ "∅ | ∅": true,
+ "∅ | ∅ | 𝓤": false,
+ "∅ | ∅ | myInt": false,
+ "𝓤": false,
+ "𝓤 | int": false,
+ "𝓤 | myInt | ∅": false,
+ } {
+ xl := maketl(test)
+ got := xl.isEmpty()
+ if got != want {
+ t.Errorf("(%v).isEmpty() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistIsAll(t *testing.T) {
+ for test, want := range map[string]bool{
+ "∅": false,
+ "∅ | ∅": false,
+ "int | ~string": false,
+ "~int | myInt": false,
+ "∅ | ∅ | 𝓤": true,
+ "𝓤": true,
+ "𝓤 | int": true,
+ "myInt | 𝓤": true,
+ } {
+ xl := maketl(test)
+ got := xl.isAll()
+ if got != want {
+ t.Errorf("(%v).isAll() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistNorm(t *testing.T) {
+ for _, test := range []struct {
+ xl, want string
+ }{
+ {"∅", "∅"},
+ {"∅ | ∅", "∅"},
+ {"∅ | int", "int"},
+ {"∅ | myInt", "myInt"},
+ {"𝓤 | int", "𝓤"},
+ {"𝓤 | myInt", "𝓤"},
+ {"int | myInt", "int | myInt"},
+ {"~int | int", "~int"},
+ {"~int | myInt", "~int"},
+ {"int | ~string | int", "int | ~string"},
+ {"~int | string | 𝓤 | ~string | int", "𝓤"},
+ {"~int | string | myInt | ~string | int", "~int | ~string"},
+ } {
+ xl := maketl(test.xl)
+ got := maketl(test.xl).norm()
+ if got.String() != test.want {
+ t.Errorf("(%v).norm() = %v; want %v", xl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistUnion(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl, want string
+ }{
+
+ {"∅", "∅", "∅"},
+ {"∅", "𝓤", "𝓤"},
+ {"∅", "int", "int"},
+ {"𝓤", "~int", "𝓤"},
+ {"int", "~int", "~int"},
+ {"int", "string", "int | string"},
+ {"int", "myInt", "int | myInt"},
+ {"~int", "myInt", "~int"},
+ {"int | string", "~string", "int | ~string"},
+ {"~int | string", "~string | int", "~int | ~string"},
+ {"~int | string | ∅", "~string | int", "~int | ~string"},
+ {"~int | myInt | ∅", "~string | int", "~int | ~string"},
+ {"~int | string | 𝓤", "~string | int", "𝓤"},
+ {"~int | string | myInt", "~string | int", "~int | ~string"},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.union(yl).String()
+ if got != test.want {
+ t.Errorf("(%v).union(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistIntersect(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl, want string
+ }{
+
+ {"∅", "∅", "∅"},
+ {"∅", "𝓤", "∅"},
+ {"∅", "int", "∅"},
+ {"∅", "myInt", "∅"},
+ {"𝓤", "~int", "~int"},
+ {"𝓤", "myInt", "myInt"},
+ {"int", "~int", "int"},
+ {"int", "string", "∅"},
+ {"int", "myInt", "∅"},
+ {"~int", "myInt", "myInt"},
+ {"int | string", "~string", "string"},
+ {"~int | string", "~string | int", "int | string"},
+ {"~int | string | ∅", "~string | int", "int | string"},
+ {"~int | myInt | ∅", "~string | int", "int"},
+ {"~int | string | 𝓤", "~string | int", "int | ~string"},
+ {"~int | string | myInt", "~string | int", "int | string"},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.intersect(yl).String()
+ if got != test.want {
+ t.Errorf("(%v).intersect(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistEqual(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", false},
+ {"𝓤", "𝓤", true},
+ {"𝓤 | int", "𝓤", true},
+ {"𝓤 | int", "string | 𝓤", true},
+ {"𝓤 | myInt", "string | 𝓤", true},
+ {"int | ~string", "string | int", false},
+ {"~int | string", "string | myInt", false},
+ {"int | ~string | ∅", "string | int | ~string", true},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.equal(yl)
+ if got != test.want {
+ t.Errorf("(%v).equal(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistIncludes(t *testing.T) {
+ for _, test := range []struct {
+ xl, typ string
+ want bool
+ }{
+ {"∅", "int", false},
+ {"𝓤", "int", true},
+ {"~int", "int", true},
+ {"int", "string", false},
+ {"~int", "string", false},
+ {"~int", "myInt", true},
+ {"int | string", "string", true},
+ {"~int | string", "int", true},
+ {"~int | string", "myInt", true},
+ {"~int | myInt | ∅", "myInt", true},
+ {"myInt | ∅ | 𝓤", "int", true},
+ } {
+ xl := maketl(test.xl)
+ yl := testTerm(test.typ).typ
+ got := xl.includes(yl)
+ if got != test.want {
+ t.Errorf("(%v).includes(%v) = %v; want %v", test.xl, yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistSupersetOf(t *testing.T) {
+ for _, test := range []struct {
+ xl, typ string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", false},
+ {"∅", "int", false},
+ {"𝓤", "∅", true},
+ {"𝓤", "𝓤", true},
+ {"𝓤", "int", true},
+ {"𝓤", "~int", true},
+ {"𝓤", "myInt", true},
+ {"~int", "int", true},
+ {"~int", "~int", true},
+ {"~int", "myInt", true},
+ {"int", "~int", false},
+ {"myInt", "~int", false},
+ {"int", "string", false},
+ {"~int", "string", false},
+ {"int | string", "string", true},
+ {"int | string", "~string", false},
+ {"~int | string", "int", true},
+ {"~int | string", "myInt", true},
+ {"~int | string | ∅", "string", true},
+ {"~string | ∅ | 𝓤", "myInt", true},
+ } {
+ xl := maketl(test.xl)
+ y := testTerm(test.typ)
+ got := xl.supersetOf(y)
+ if got != test.want {
+ t.Errorf("(%v).supersetOf(%v) = %v; want %v", test.xl, y, got, test.want)
+ }
+ }
+}
+
+func TestTermlistSubsetOf(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", true},
+ {"𝓤", "∅", false},
+ {"𝓤", "𝓤", true},
+ {"int", "int | string", true},
+ {"~int", "int | string", false},
+ {"~int", "myInt | string", false},
+ {"myInt", "~int | string", true},
+ {"~int", "string | string | int | ~int", true},
+ {"myInt", "string | string | ~int", true},
+ {"int | string", "string", false},
+ {"int | string", "string | int", true},
+ {"int | ~string", "string | int", false},
+ {"myInt | ~string", "string | int | 𝓤", true},
+ {"int | ~string", "string | int | ∅ | string", false},
+ {"int | myInt", "string | ~int | ∅ | string", true},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.subsetOf(yl)
+ if got != test.want {
+ t.Errorf("(%v).subsetOf(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/local/issue47996.go b/src/cmd/compile/internal/types2/testdata/local/issue47996.go
new file mode 100644
index 0000000..375a931
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/local/issue47996.go
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// don't crash
+func T /* ERROR "missing" */ [P] /* ERROR "missing" */ m /* ERROR "unexpected" */ () /* ERROR ")" */ { /* ERROR "{" */ } /* ERROR "}" */
diff --git a/src/cmd/compile/internal/types2/testdata/manual.go b/src/cmd/compile/internal/types2/testdata/manual.go
new file mode 100644
index 0000000..57dcc22
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/manual.go
@@ -0,0 +1,8 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
diff --git a/src/cmd/compile/internal/types2/tuple.go b/src/cmd/compile/internal/types2/tuple.go
new file mode 100644
index 0000000..1356aae
--- /dev/null
+++ b/src/cmd/compile/internal/types2/tuple.go
@@ -0,0 +1,34 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
+// Tuples are used as components of signatures and to represent the type of multiple
+// assignments; they are not first class types of Go.
+type Tuple struct {
+ vars []*Var
+}
+
+// NewTuple returns a new tuple for the given variables.
+func NewTuple(x ...*Var) *Tuple {
+ if len(x) > 0 {
+ return &Tuple{vars: x}
+ }
+ return nil
+}
+
+// Len returns the number variables of tuple t.
+func (t *Tuple) Len() int {
+ if t != nil {
+ return len(t.vars)
+ }
+ return 0
+}
+
+// At returns the i'th variable of tuple t.
+func (t *Tuple) At(i int) *Var { return t.vars[i] }
+
+func (t *Tuple) Underlying() Type { return t }
+func (t *Tuple) String() string { return TypeString(t, nil) }
diff --git a/src/cmd/compile/internal/types2/type.go b/src/cmd/compile/internal/types2/type.go
new file mode 100644
index 0000000..bd19421
--- /dev/null
+++ b/src/cmd/compile/internal/types2/type.go
@@ -0,0 +1,11 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// A Type represents a type of Go.
+// All types implement the Type interface.
+type Type = syntax.Type
diff --git a/src/cmd/compile/internal/types2/typelists.go b/src/cmd/compile/internal/types2/typelists.go
new file mode 100644
index 0000000..a2aba4a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typelists.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// TypeParamList holds a list of type parameters.
+type TypeParamList struct{ tparams []*TypeParam }
+
+// Len returns the number of type parameters in the list.
+// It is safe to call on a nil receiver.
+func (l *TypeParamList) Len() int { return len(l.list()) }
+
+// At returns the i'th type parameter in the list.
+func (l *TypeParamList) At(i int) *TypeParam { return l.tparams[i] }
+
+// list is for internal use where we expect a []*TypeParam.
+// TODO(rfindley): list should probably be eliminated: we can pass around a
+// TypeParamList instead.
+func (l *TypeParamList) list() []*TypeParam {
+ if l == nil {
+ return nil
+ }
+ return l.tparams
+}
+
+// TypeList holds a list of types.
+type TypeList struct{ types []Type }
+
+// newTypeList returns a new TypeList with the types in list.
+func newTypeList(list []Type) *TypeList {
+ if len(list) == 0 {
+ return nil
+ }
+ return &TypeList{list}
+}
+
+// Len returns the number of types in the list.
+// It is safe to call on a nil receiver.
+func (l *TypeList) Len() int { return len(l.list()) }
+
+// At returns the i'th type in the list.
+func (l *TypeList) At(i int) Type { return l.types[i] }
+
+// list is for internal use where we expect a []Type.
+// TODO(rfindley): list should probably be eliminated: we can pass around a
+// TypeList instead.
+func (l *TypeList) list() []Type {
+ if l == nil {
+ return nil
+ }
+ return l.types
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func bindTParams(list []*TypeParam) *TypeParamList {
+ if len(list) == 0 {
+ return nil
+ }
+ for i, typ := range list {
+ if typ.index >= 0 {
+ panic("type parameter bound more than once")
+ }
+ typ.index = i
+ }
+ return &TypeParamList{tparams: list}
+}
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
new file mode 100644
index 0000000..5c6030b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "sync/atomic"
+
+// Note: This is a uint32 rather than a uint64 because the
+// respective 64 bit atomic instructions are not available
+// on all platforms.
+var lastID atomic.Uint32
+
+// nextID returns a value increasing monotonically by 1 with
+// each call, starting with 1. It may be called concurrently.
+func nextID() uint64 { return uint64(lastID.Add(1)) }
+
+// A TypeParam represents a type parameter type.
+type TypeParam struct {
+ check *Checker // for lazy type bound completion
+ id uint64 // unique id, for debugging only
+ obj *TypeName // corresponding type name
+ index int // type parameter index in source order, starting at 0
+ bound Type // any type, but underlying is eventually *Interface for correct programs (see TypeParam.iface)
+}
+
+// NewTypeParam returns a new TypeParam. Type parameters may be set on a Named
+// or Signature type by calling SetTypeParams. Setting a type parameter on more
+// than one type will result in a panic.
+//
+// The constraint argument can be nil, and set later via SetConstraint. If the
+// constraint is non-nil, it must be fully defined.
+func NewTypeParam(obj *TypeName, constraint Type) *TypeParam {
+ return (*Checker)(nil).newTypeParam(obj, constraint)
+}
+
+// check may be nil
+func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+ typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: constraint}
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ // iface may mutate typ.bound, so we must ensure that iface() is called
+ // at least once before the resulting TypeParam escapes.
+ if check != nil {
+ check.needsCleanup(typ)
+ } else if constraint != nil {
+ typ.iface()
+ }
+ return typ
+}
+
+// Obj returns the type name for the type parameter t.
+func (t *TypeParam) Obj() *TypeName { return t.obj }
+
+// Index returns the index of the type param within its param list, or -1 if
+// the type parameter has not yet been bound to a type.
+func (t *TypeParam) Index() int {
+ return t.index
+}
+
+// Constraint returns the type constraint specified for t.
+func (t *TypeParam) Constraint() Type {
+ return t.bound
+}
+
+// SetConstraint sets the type constraint for t.
+//
+// It must be called by users of NewTypeParam after the bound's underlying is
+// fully defined, and before using the type parameter in any way other than to
+// form other types. Once SetConstraint returns the receiver, t is safe for
+// concurrent use.
+func (t *TypeParam) SetConstraint(bound Type) {
+ if bound == nil {
+ panic("nil constraint")
+ }
+ t.bound = bound
+ // iface may mutate t.bound (if bound is not an interface), so ensure that
+ // this is done before returning.
+ t.iface()
+}
+
+func (t *TypeParam) Underlying() Type {
+ return t.iface()
+}
+
+func (t *TypeParam) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (t *TypeParam) cleanup() {
+ t.iface()
+ t.check = nil
+}
+
+// iface returns the constraint interface of t.
+func (t *TypeParam) iface() *Interface {
+ bound := t.bound
+
+ // determine constraint interface
+ var ityp *Interface
+ switch u := under(bound).(type) {
+ case *Basic:
+ if !isValid(u) {
+ // error is reported elsewhere
+ return &emptyInterface
+ }
+ case *Interface:
+ if isTypeParam(bound) {
+ // error is reported in Checker.collectTypeParams
+ return &emptyInterface
+ }
+ ityp = u
+ }
+
+ // If we don't have an interface, wrap constraint into an implicit interface.
+ if ityp == nil {
+ ityp = NewInterfaceType(nil, []Type{bound})
+ ityp.implicit = true
+ t.bound = ityp // update t.bound for next time (optimization)
+ }
+
+ // compute type set if necessary
+ if ityp.tset == nil {
+ // pos is used for tracing output; start with the type parameter position.
+ pos := t.obj.pos
+ // use the (original or possibly instantiated) type bound position if we have one
+ if n := asNamed(bound); n != nil {
+ pos = n.obj.pos
+ }
+ computeInterfaceTypeSet(t.check, pos, ityp)
+ }
+
+ return ityp
+}
+
+// is calls f with the specific type terms of t's constraint and reports whether
+// all calls to f returned true. If there are no specific terms, is
+// returns the result of f(nil).
+func (t *TypeParam) is(f func(*term) bool) bool {
+ return t.iface().typeSet().is(f)
+}
+
+// underIs calls f with the underlying types of the specific type terms
+// of t's constraint and reports whether all calls to f returned true.
+// If there are no specific terms, underIs returns the result of f(nil).
+func (t *TypeParam) underIs(f func(Type) bool) bool {
+ return t.iface().typeSet().underIs(f)
+}
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
new file mode 100644
index 0000000..a6ccfdb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -0,0 +1,415 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+ "sort"
+ "strings"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A _TypeSet represents the type set of an interface.
+// Because of existing language restrictions, methods can be "factored out"
+// from the terms. The actual type set is the intersection of the type set
+// implied by the methods and the type set described by the terms and the
+// comparable bit. To test whether a type is included in a type set
+// ("implements" relation), the type must implement all methods _and_ be
+// an element of the type set described by the terms and the comparable bit.
+// If the term list describes the set of all types and comparable is true,
+// only comparable types are meant; in all other cases comparable is false.
+type _TypeSet struct {
+ methods []*Func // all methods of the interface; sorted by unique ID
+ terms termlist // type terms of the type set
+ comparable bool // invariant: !comparable || terms.isAll()
+}
+
+// IsEmpty reports whether type set s is the empty set.
+func (s *_TypeSet) IsEmpty() bool { return s.terms.isEmpty() }
+
+// IsAll reports whether type set s is the set of all types (corresponding to the empty interface).
+func (s *_TypeSet) IsAll() bool { return s.IsMethodSet() && len(s.methods) == 0 }
+
+// IsMethodSet reports whether the interface t is fully described by its method set.
+func (s *_TypeSet) IsMethodSet() bool { return !s.comparable && s.terms.isAll() }
+
+// IsComparable reports whether each type in the set is comparable.
+func (s *_TypeSet) IsComparable(seen map[Type]bool) bool {
+ if s.terms.isAll() {
+ return s.comparable
+ }
+ return s.is(func(t *term) bool {
+ return t != nil && comparable(t.typ, false, seen, nil)
+ })
+}
+
+// NumMethods returns the number of methods available.
+func (s *_TypeSet) NumMethods() int { return len(s.methods) }
+
+// Method returns the i'th method of type set s for 0 <= i < s.NumMethods().
+// The methods are ordered by their unique ID.
+func (s *_TypeSet) Method(i int) *Func { return s.methods[i] }
+
+// LookupMethod returns the index of and method with matching package and name, or (-1, nil).
+func (s *_TypeSet) LookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
+ return lookupMethod(s.methods, pkg, name, foldCase)
+}
+
+func (s *_TypeSet) String() string {
+ switch {
+ case s.IsEmpty():
+ return "∅"
+ case s.IsAll():
+ return "𝓤"
+ }
+
+ hasMethods := len(s.methods) > 0
+ hasTerms := s.hasTerms()
+
+ var buf strings.Builder
+ buf.WriteByte('{')
+ if s.comparable {
+ buf.WriteString("comparable")
+ if hasMethods || hasTerms {
+ buf.WriteString("; ")
+ }
+ }
+ for i, m := range s.methods {
+ if i > 0 {
+ buf.WriteString("; ")
+ }
+ buf.WriteString(m.String())
+ }
+ if hasMethods && hasTerms {
+ buf.WriteString("; ")
+ }
+ if hasTerms {
+ buf.WriteString(s.terms.String())
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// hasTerms reports whether the type set has specific type terms.
+func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll() }
+
+// subsetOf reports whether s1 ⊆ s2.
+func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) }
+
+// TODO(gri) TypeSet.is and TypeSet.underIs should probably also go into termlist.go
+
+// is calls f with the specific type terms of s and reports whether
+// all calls to f returned true. If there are no specific terms, is
+// returns the result of f(nil).
+func (s *_TypeSet) is(f func(*term) bool) bool {
+ if !s.hasTerms() {
+ return f(nil)
+ }
+ for _, t := range s.terms {
+ assert(t.typ != nil)
+ if !f(t) {
+ return false
+ }
+ }
+ return true
+}
+
+// underIs calls f with the underlying types of the specific type terms
+// of s and reports whether all calls to f returned true. If there are
+// no specific terms, underIs returns the result of f(nil).
+func (s *_TypeSet) underIs(f func(Type) bool) bool {
+ if !s.hasTerms() {
+ return f(nil)
+ }
+ for _, t := range s.terms {
+ assert(t.typ != nil)
+ // x == under(x) for ~x terms
+ u := t.typ
+ if !t.tilde {
+ u = under(u)
+ }
+ if debug {
+ assert(Identical(u, under(u)))
+ }
+ if !f(u) {
+ return false
+ }
+ }
+ return true
+}
+
+// topTypeSet may be used as type set for the empty interface.
+var topTypeSet = _TypeSet{terms: allTermlist}
+
+// computeInterfaceTypeSet may be called with check == nil.
+func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_TypeSet {
+ if ityp.tset != nil {
+ return ityp.tset
+ }
+
+ // If the interface is not fully set up yet, the type set will
+ // not be complete, which may lead to errors when using the
+ // type set (e.g. missing method). Don't compute a partial type
+ // set (and don't store it!), so that we still compute the full
+ // type set eventually. Instead, return the top type set and
+ // let any follow-on errors play out.
+ if !ityp.complete {
+ return &topTypeSet
+ }
+
+ if check != nil && check.conf.Trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsKnown() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "-- type set for %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s ", ityp.typeSet())
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.tset = &_TypeSet{terms: allTermlist} // TODO(gri) is this sufficient?
+
+ var unionSets map[*Union]*_TypeSet
+ if check != nil {
+ if check.unionTypeSets == nil {
+ check.unionTypeSets = make(map[*Union]*_TypeSet)
+ }
+ unionSets = check.unionTypeSets
+ } else {
+ unionSets = make(map[*Union]*_TypeSet)
+ }
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also go.dev/issue/34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var seen objset
+ var allMethods []*Func
+ mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ allMethods = append(allMethods, m)
+ mpos[m] = pos
+ case explicit:
+ if check != nil {
+ var err error_
+ err.code = DuplicateDecl
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ }
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (go.dev/issue/33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check != nil {
+ check.later(func() {
+ if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) {
+ var err error_
+ err.code = DuplicateDecl
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ }
+ }).describef(pos, "duplicate method check for %s", m.name)
+ }
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ allTerms := allTermlist
+ allComparable := false
+ for i, typ := range ityp.embeddeds {
+ // The embedding position is nil for imported interfaces
+ // and also for interface copies after substitution (but
+ // in that case we don't need to report errors again).
+ var pos syntax.Pos // embedding position
+ if ityp.embedPos != nil {
+ pos = (*ityp.embedPos)[i]
+ }
+ var comparable bool
+ var terms termlist
+ switch u := under(typ).(type) {
+ case *Interface:
+ // For now we don't permit type parameters as constraints.
+ assert(!isTypeParam(typ))
+ tset := computeInterfaceTypeSet(check, pos, u)
+ // If typ is local, an error was already reported where typ is specified/defined.
+ if check != nil && check.isImportedConstraint(typ) && !check.verifyVersionf(pos, go1_18, "embedding constraint interface %s", typ) {
+ continue
+ }
+ comparable = tset.comparable
+ for _, m := range tset.methods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ terms = tset.terms
+ case *Union:
+ if check != nil && !check.verifyVersionf(pos, go1_18, "embedding interface element %s", u) {
+ continue
+ }
+ tset := computeUnionTypeSet(check, unionSets, pos, u)
+ if tset == &invalidTypeSet {
+ continue // ignore invalid unions
+ }
+ assert(!tset.comparable)
+ assert(len(tset.methods) == 0)
+ terms = tset.terms
+ default:
+ if !isValid(u) {
+ continue
+ }
+ if check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) {
+ continue
+ }
+ terms = termlist{{false, typ}}
+ }
+
+ // The type set of an interface is the intersection of the type sets of all its elements.
+ // Due to language restrictions, only embedded interfaces can add methods, they are handled
+ // separately. Here we only need to intersect the term lists and comparable bits.
+ allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
+ }
+
+ ityp.tset.comparable = allComparable
+ if len(allMethods) != 0 {
+ sortMethods(allMethods)
+ ityp.tset.methods = allMethods
+ }
+ ityp.tset.terms = allTerms
+
+ return ityp.tset
+}
+
+// TODO(gri) The intersectTermLists function belongs to the termlist implementation.
+// The comparable type set may also be best represented as a term (using
+// a special type).
+
+// intersectTermLists computes the intersection of two term lists and respective comparable bits.
+// xcomp, ycomp are valid only if xterms.isAll() and yterms.isAll() respectively.
+func intersectTermLists(xterms termlist, xcomp bool, yterms termlist, ycomp bool) (termlist, bool) {
+ terms := xterms.intersect(yterms)
+ // If one of xterms or yterms is marked as comparable,
+ // the result must only include comparable types.
+ comp := xcomp || ycomp
+ if comp && !terms.isAll() {
+ // only keep comparable terms
+ i := 0
+ for _, t := range terms {
+ assert(t.typ != nil)
+ if comparable(t.typ, false /* strictly comparable */, nil, nil) {
+ terms[i] = t
+ i++
+ }
+ }
+ terms = terms[:i]
+ if !terms.isAll() {
+ comp = false
+ }
+ }
+ assert(!comp || terms.isAll()) // comparable invariant
+ return terms, comp
+}
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(&a[j].object) }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// invalidTypeSet is a singleton type set to signal an invalid type set
+// due to an error. It's also a valid empty type set, so consumers of
+// type sets may choose to ignore it.
+var invalidTypeSet _TypeSet
+
+// computeUnionTypeSet may be called with check == nil.
+// The result is &invalidTypeSet if the union overflows.
+func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syntax.Pos, utyp *Union) *_TypeSet {
+ if tset, _ := unionSets[utyp]; tset != nil {
+ return tset
+ }
+
+ // avoid infinite recursion (see also computeInterfaceTypeSet)
+ unionSets[utyp] = new(_TypeSet)
+
+ var allTerms termlist
+ for _, t := range utyp.terms {
+ var terms termlist
+ u := under(t.typ)
+ if ui, _ := u.(*Interface); ui != nil {
+ // For now we don't permit type parameters as constraints.
+ assert(!isTypeParam(t.typ))
+ terms = computeInterfaceTypeSet(check, pos, ui).terms
+ } else if !isValid(u) {
+ continue
+ } else {
+ if t.tilde && !Identical(t.typ, u) {
+ // There is no underlying type which is t.typ.
+ // The corresponding type set is empty.
+ t = nil // ∅ term
+ }
+ terms = termlist{(*term)(t)}
+ }
+ // The type set of a union expression is the union
+ // of the type sets of each term.
+ allTerms = allTerms.union(terms)
+ if len(allTerms) > maxTermCount {
+ if check != nil {
+ check.errorf(pos, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ }
+ unionSets[utyp] = &invalidTypeSet
+ return unionSets[utyp]
+ }
+ }
+ unionSets[utyp].terms = allTerms
+
+ return unionSets[utyp]
+}
diff --git a/src/cmd/compile/internal/types2/typeset_test.go b/src/cmd/compile/internal/types2/typeset_test.go
new file mode 100644
index 0000000..40ca28e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeset_test.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strings"
+ "testing"
+)
+
+func TestInvalidTypeSet(t *testing.T) {
+ if !invalidTypeSet.IsEmpty() {
+ t.Error("invalidTypeSet is not empty")
+ }
+}
+
+func TestTypeSetString(t *testing.T) {
+ for body, want := range map[string]string{
+ "{}": "𝓤",
+ "{int}": "{int}",
+ "{~int}": "{~int}",
+ "{int|string}": "{int | string}",
+ "{int; string}": "∅",
+
+ "{comparable}": "{comparable}",
+ "{comparable; int}": "{int}",
+ "{~int; comparable}": "{~int}",
+ "{int|string; comparable}": "{int | string}",
+ "{comparable; int; string}": "∅",
+
+ "{m()}": "{func (p.T).m()}",
+ "{m1(); m2() int }": "{func (p.T).m1(); func (p.T).m2() int}",
+ "{error}": "{func (error).Error() string}",
+ "{m(); comparable}": "{comparable; func (p.T).m()}",
+ "{m1(); comparable; m2() int }": "{comparable; func (p.T).m1(); func (p.T).m2() int}",
+ "{comparable; error}": "{comparable; func (error).Error() string}",
+
+ "{m(); comparable; int|float32|string}": "{func (p.T).m(); int | float32 | string}",
+ "{m1(); int; m2(); comparable }": "{func (p.T).m1(); func (p.T).m2(); int}",
+
+ "{E}; type E interface{}": "𝓤",
+ "{E}; type E interface{int;string}": "∅",
+ "{E}; type E interface{comparable}": "{comparable}",
+ } {
+ // parse
+ errh := func(error) {} // dummy error handler so that parsing continues in presence of errors
+ src := "package p; type T interface" + body
+ file, err := syntax.Parse(nil, strings.NewReader(src), errh, nil, 0)
+ if err != nil {
+ t.Fatalf("%s: %v (invalid test case)", body, err)
+ }
+
+ // type check
+ var conf Config
+ pkg, err := conf.Check(file.PkgName.Value, []*syntax.File{file}, nil)
+ if err != nil {
+ t.Fatalf("%s: %v (invalid test case)", body, err)
+ }
+
+ // lookup T
+ obj := pkg.scope.Lookup("T")
+ if obj == nil {
+ t.Fatalf("%s: T not found (invalid test case)", body)
+ }
+ T, ok := under(obj.Type()).(*Interface)
+ if !ok {
+ t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj)
+ }
+
+ // verify test case
+ got := T.typeSet().String()
+ if got != want {
+ t.Errorf("%s: got %s; want %s", body, got, want)
+ }
+ }
+}
+
+// TODO(gri) add more tests
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
new file mode 100644
index 0000000..4b410af
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -0,0 +1,504 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of types.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Qualifier controls how named package-level objects are printed in
+// calls to TypeString, ObjectString, and SelectionString.
+//
+// These three formatting routines call the Qualifier for each
+// package-level object O, and if the Qualifier returns a non-empty
+// string p, the object is printed in the form p.O.
+// If it returns an empty string, only the object name O is printed.
+//
+// Using a nil Qualifier is equivalent to using (*Package).Path: the
+// object is qualified by the import path, e.g., "encoding/json.Marshal".
+type Qualifier func(*Package) string
+
+// RelativeTo returns a Qualifier that fully qualifies members of
+// all packages other than pkg.
+func RelativeTo(pkg *Package) Qualifier {
+ if pkg == nil {
+ return nil
+ }
+ return func(other *Package) string {
+ if pkg == other {
+ return "" // same package; unqualified
+ }
+ return other.Path()
+ }
+}
+
+// TypeString returns the string representation of typ.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func TypeString(typ Type, qf Qualifier) string {
+ var buf bytes.Buffer
+ WriteType(&buf, typ, qf)
+ return buf.String()
+}
+
+// WriteType writes the string representation of typ to buf.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func WriteType(buf *bytes.Buffer, typ Type, qf Qualifier) {
+ newTypeWriter(buf, qf).typ(typ)
+}
+
+// WriteSignature writes the representation of the signature sig to buf,
+// without a leading "func" keyword. The Qualifier controls the printing
+// of package-level objects, and may be nil.
+func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) {
+ newTypeWriter(buf, qf).signature(sig)
+}
+
+type typeWriter struct {
+ buf *bytes.Buffer
+ seen map[Type]bool
+ qf Qualifier
+ ctxt *Context // if non-nil, we are type hashing
+ tparams *TypeParamList // local type parameters
+ paramNames bool // if set, write function parameter names, otherwise, write types only
+ tpSubscripts bool // if set, write type parameter indices as subscripts
+ pkgInfo bool // package-annotate first unexported-type field to avoid confusing type description
+}
+
+func newTypeWriter(buf *bytes.Buffer, qf Qualifier) *typeWriter {
+ return &typeWriter{buf, make(map[Type]bool), qf, nil, nil, true, false, false}
+}
+
+func newTypeHasher(buf *bytes.Buffer, ctxt *Context) *typeWriter {
+ assert(ctxt != nil)
+ return &typeWriter{buf, make(map[Type]bool), nil, ctxt, nil, false, false, false}
+}
+
+func (w *typeWriter) byte(b byte) {
+ if w.ctxt != nil {
+ if b == ' ' {
+ b = '#'
+ }
+ w.buf.WriteByte(b)
+ return
+ }
+ w.buf.WriteByte(b)
+ if b == ',' || b == ';' {
+ w.buf.WriteByte(' ')
+ }
+}
+
+func (w *typeWriter) string(s string) {
+ w.buf.WriteString(s)
+}
+
+func (w *typeWriter) error(msg string) {
+ if w.ctxt != nil {
+ panic(msg)
+ }
+ w.buf.WriteString("<" + msg + ">")
+}
+
+func (w *typeWriter) typ(typ Type) {
+ if w.seen[typ] {
+ w.error("cycle to " + goTypeName(typ))
+ return
+ }
+ w.seen[typ] = true
+ defer delete(w.seen, typ)
+
+ switch t := typ.(type) {
+ case nil:
+ w.error("nil")
+
+ case *Basic:
+ // exported basic types go into package unsafe
+ // (currently this is just unsafe.Pointer)
+ if isExported(t.name) {
+ if obj, _ := Unsafe.scope.Lookup(t.name).(*TypeName); obj != nil {
+ w.typeName(obj)
+ break
+ }
+ }
+ w.string(t.name)
+
+ case *Array:
+ w.byte('[')
+ w.string(strconv.FormatInt(t.len, 10))
+ w.byte(']')
+ w.typ(t.elem)
+
+ case *Slice:
+ w.string("[]")
+ w.typ(t.elem)
+
+ case *Struct:
+ w.string("struct{")
+ for i, f := range t.fields {
+ if i > 0 {
+ w.byte(';')
+ }
+
+ // If disambiguating one struct for another, look for the first unexported field.
+ // Do this first in case of nested structs; tag the first-outermost field.
+ pkgAnnotate := false
+ if w.qf == nil && w.pkgInfo && !isExported(f.name) {
+ // note for embedded types, type name is field name, and "string" etc are lower case hence unexported.
+ pkgAnnotate = true
+ w.pkgInfo = false // only tag once
+ }
+
+ // This doesn't do the right thing for embedded type
+ // aliases where we should print the alias name, not
+ // the aliased type (see go.dev/issue/44410).
+ if !f.embedded {
+ w.string(f.name)
+ w.byte(' ')
+ }
+ w.typ(f.typ)
+ if pkgAnnotate {
+ w.string(" /* package ")
+ w.string(f.pkg.Path())
+ w.string(" */ ")
+ }
+ if tag := t.Tag(i); tag != "" {
+ w.byte(' ')
+ // TODO(gri) If tag contains blanks, replacing them with '#'
+ // in Context.TypeHash may produce another tag
+ // accidentally.
+ w.string(strconv.Quote(tag))
+ }
+ }
+ w.byte('}')
+
+ case *Pointer:
+ w.byte('*')
+ w.typ(t.base)
+
+ case *Tuple:
+ w.tuple(t, false)
+
+ case *Signature:
+ w.string("func")
+ w.signature(t)
+
+ case *Union:
+ // Unions only appear as (syntactic) embedded elements
+ // in interfaces and syntactically cannot be empty.
+ if t.Len() == 0 {
+ w.error("empty union")
+ break
+ }
+ for i, t := range t.terms {
+ if i > 0 {
+ w.string(termSep)
+ }
+ if t.tilde {
+ w.byte('~')
+ }
+ w.typ(t.typ)
+ }
+
+ case *Interface:
+ if w.ctxt == nil {
+ if t == universeAny.Type() {
+ // When not hashing, we can try to improve type strings by writing "any"
+ // for a type that is pointer-identical to universeAny. This logic should
+ // be deprecated by more robust handling for aliases.
+ w.string("any")
+ break
+ }
+ if t == asNamed(universeComparable.Type()).underlying {
+ w.string("interface{comparable}")
+ break
+ }
+ }
+ if t.implicit {
+ if len(t.methods) == 0 && len(t.embeddeds) == 1 {
+ w.typ(t.embeddeds[0])
+ break
+ }
+ // Something's wrong with the implicit interface.
+ // Print it as such and continue.
+ w.string("/* implicit */ ")
+ }
+ w.string("interface{")
+ first := true
+ if w.ctxt != nil {
+ w.typeSet(t.typeSet())
+ } else {
+ for _, m := range t.methods {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.string(m.name)
+ w.signature(m.typ.(*Signature))
+ }
+ for _, typ := range t.embeddeds {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.typ(typ)
+ }
+ }
+ w.byte('}')
+
+ case *Map:
+ w.string("map[")
+ w.typ(t.key)
+ w.byte(']')
+ w.typ(t.elem)
+
+ case *Chan:
+ var s string
+ var parens bool
+ switch t.dir {
+ case SendRecv:
+ s = "chan "
+ // chan (<-chan T) requires parentheses
+ if c, _ := t.elem.(*Chan); c != nil && c.dir == RecvOnly {
+ parens = true
+ }
+ case SendOnly:
+ s = "chan<- "
+ case RecvOnly:
+ s = "<-chan "
+ default:
+ w.error("unknown channel direction")
+ }
+ w.string(s)
+ if parens {
+ w.byte('(')
+ }
+ w.typ(t.elem)
+ if parens {
+ w.byte(')')
+ }
+
+ case *Named:
+ // If hashing, write a unique prefix for t to represent its identity, since
+ // named type identity is pointer identity.
+ if w.ctxt != nil {
+ w.string(strconv.Itoa(w.ctxt.getID(t)))
+ }
+ w.typeName(t.obj) // when hashing written for readability of the hash only
+ if t.inst != nil {
+ // instantiated type
+ w.typeList(t.inst.targs.list())
+ } else if w.ctxt == nil && t.TypeParams().Len() != 0 { // For type hashing, don't need to format the TypeParams
+ // parameterized type
+ w.tParamList(t.TypeParams().list())
+ }
+
+ case *TypeParam:
+ if t.obj == nil {
+ w.error("unnamed type parameter")
+ break
+ }
+ if i := tparamIndex(w.tparams.list(), t); i >= 0 {
+ // The names of type parameters that are declared by the type being
+ // hashed are not part of the type identity. Replace them with a
+ // placeholder indicating their index.
+ w.string(fmt.Sprintf("$%d", i))
+ } else {
+ w.string(t.obj.name)
+ if w.tpSubscripts || w.ctxt != nil {
+ w.string(subscript(t.id))
+ }
+ // If the type parameter name is the same as a predeclared object
+ // (say int), point out where it is declared to avoid confusing
+ // error messages. This doesn't need to be super-elegant; we just
+ // need a clear indication that this is not a predeclared name.
+ if w.ctxt == nil && Universe.Lookup(t.obj.name) != nil {
+ w.string(fmt.Sprintf(" /* with %s declared at %s */", t.obj.name, t.obj.Pos()))
+ }
+ }
+
+ case *Alias:
+ w.typeName(t.obj)
+ if w.ctxt != nil {
+ // TODO(gri) do we need to print the alias type name, too?
+ w.typ(Unalias(t.obj.typ))
+ }
+
+ default:
+ // For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
+ w.string(t.String())
+ }
+}
+
+// typeSet writes a canonical hash for an interface type set.
+func (w *typeWriter) typeSet(s *_TypeSet) {
+ assert(w.ctxt != nil)
+ first := true
+ for _, m := range s.methods {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.string(m.name)
+ w.signature(m.typ.(*Signature))
+ }
+ switch {
+ case s.terms.isAll():
+ // nothing to do
+ case s.terms.isEmpty():
+ w.string(s.terms.String())
+ default:
+ var termHashes []string
+ for _, term := range s.terms {
+ // terms are not canonically sorted, so we sort their hashes instead.
+ var buf bytes.Buffer
+ if term.tilde {
+ buf.WriteByte('~')
+ }
+ newTypeHasher(&buf, w.ctxt).typ(term.typ)
+ termHashes = append(termHashes, buf.String())
+ }
+ sort.Strings(termHashes)
+ if !first {
+ w.byte(';')
+ }
+ w.string(strings.Join(termHashes, "|"))
+ }
+}
+
+func (w *typeWriter) typeList(list []Type) {
+ w.byte('[')
+ for i, typ := range list {
+ if i > 0 {
+ w.byte(',')
+ }
+ w.typ(typ)
+ }
+ w.byte(']')
+}
+
+func (w *typeWriter) tParamList(list []*TypeParam) {
+ w.byte('[')
+ var prev Type
+ for i, tpar := range list {
+ // Determine the type parameter and its constraint.
+ // list is expected to hold type parameter names,
+ // but don't crash if that's not the case.
+ if tpar == nil {
+ w.error("nil type parameter")
+ continue
+ }
+ if i > 0 {
+ if tpar.bound != prev {
+ // bound changed - write previous one before advancing
+ w.byte(' ')
+ w.typ(prev)
+ }
+ w.byte(',')
+ }
+ prev = tpar.bound
+ w.typ(tpar)
+ }
+ if prev != nil {
+ w.byte(' ')
+ w.typ(prev)
+ }
+ w.byte(']')
+}
+
+func (w *typeWriter) typeName(obj *TypeName) {
+ w.string(packagePrefix(obj.pkg, w.qf))
+ w.string(obj.name)
+}
+
+func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
+ w.byte('(')
+ if tup != nil {
+ for i, v := range tup.vars {
+ if i > 0 {
+ w.byte(',')
+ }
+ // parameter names are ignored for type identity and thus type hashes
+ if w.ctxt == nil && v.name != "" && w.paramNames {
+ w.string(v.name)
+ w.byte(' ')
+ }
+ typ := v.typ
+ if variadic && i == len(tup.vars)-1 {
+ if s, ok := typ.(*Slice); ok {
+ w.string("...")
+ typ = s.elem
+ } else {
+ // special case:
+ // append(s, "foo"...) leads to signature func([]byte, string...)
+ if t, _ := under(typ).(*Basic); t == nil || t.kind != String {
+ w.error("expected string type")
+ continue
+ }
+ w.typ(typ)
+ w.string("...")
+ continue
+ }
+ }
+ w.typ(typ)
+ }
+ }
+ w.byte(')')
+}
+
+func (w *typeWriter) signature(sig *Signature) {
+ if sig.TypeParams().Len() != 0 {
+ if w.ctxt != nil {
+ assert(w.tparams == nil)
+ w.tparams = sig.TypeParams()
+ defer func() {
+ w.tparams = nil
+ }()
+ }
+ w.tParamList(sig.TypeParams().list())
+ }
+
+ w.tuple(sig.params, sig.variadic)
+
+ n := sig.results.Len()
+ if n == 0 {
+ // no result
+ return
+ }
+
+ w.byte(' ')
+ if n == 1 && (w.ctxt != nil || sig.results.vars[0].name == "") {
+ // single unnamed result (if type hashing, name must be ignored)
+ w.typ(sig.results.vars[0].typ)
+ return
+ }
+
+ // multiple or named result(s)
+ w.tuple(sig.results, false)
+}
+
+// subscript returns the decimal (utf8) representation of x using subscript digits.
+func subscript(x uint64) string {
+ const w = len("₀") // all digits 0...9 have the same utf8 width
+ var buf [32 * w]byte
+ i := len(buf)
+ for {
+ i -= w
+ utf8.EncodeRune(buf[i:], '₀'+rune(x%10)) // '₀' == U+2080
+ x /= 10
+ if x == 0 {
+ break
+ }
+ }
+ return string(buf[i:])
+}
diff --git a/src/cmd/compile/internal/types2/typestring_test.go b/src/cmd/compile/internal/types2/typestring_test.go
new file mode 100644
index 0000000..c2be40d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typestring_test.go
@@ -0,0 +1,166 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "internal/testenv"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+const filename = "<src>"
+
+type testEntry struct {
+ src, str string
+}
+
+// dup returns a testEntry where both src and str are the same.
+func dup(s string) testEntry {
+ return testEntry{s, s}
+}
+
+// types that don't depend on any other type declarations
+var independentTestTypes = []testEntry{
+ // basic types
+ dup("int"),
+ dup("float32"),
+ dup("string"),
+
+ // arrays
+ dup("[10]int"),
+
+ // slices
+ dup("[]int"),
+ dup("[][]int"),
+
+ // structs
+ dup("struct{}"),
+ dup("struct{x int}"),
+ {`struct {
+ x, y int
+ z float32 "foo"
+ }`, `struct{x int; y int; z float32 "foo"}`},
+ {`struct {
+ string
+ elems []complex128
+ }`, `struct{string; elems []complex128}`},
+
+ // pointers
+ dup("*int"),
+ dup("***struct{}"),
+ dup("*struct{a int; b float32}"),
+
+ // functions
+ dup("func()"),
+ dup("func(x int)"),
+ {"func(x, y int)", "func(x int, y int)"},
+ {"func(x, y int, z string)", "func(x int, y int, z string)"},
+ dup("func(int)"),
+ {"func(int, string, byte)", "func(int, string, byte)"},
+
+ dup("func() int"),
+ {"func() (string)", "func() string"},
+ dup("func() (u int)"),
+ {"func() (u, v int, w string)", "func() (u int, v int, w string)"},
+
+ dup("func(int) string"),
+ dup("func(x int) string"),
+ dup("func(x int) (u string)"),
+ {"func(x, y int) (u string)", "func(x int, y int) (u string)"},
+
+ dup("func(...int) string"),
+ dup("func(x ...int) string"),
+ dup("func(x ...int) (u string)"),
+ {"func(x int, y ...int) (u string)", "func(x int, y ...int) (u string)"},
+
+ // interfaces
+ dup("interface{}"),
+ dup("interface{m()}"),
+ dup(`interface{String() string; m(int) float32}`),
+ dup("interface{int | float32 | complex128}"),
+ dup("interface{int | ~float32 | ~complex128}"),
+ dup("any"),
+ dup("interface{comparable}"),
+ {"comparable", "interface{comparable}"},
+ {"error", "interface{Error() string}"},
+
+ // maps
+ dup("map[string]int"),
+ {"map[struct{x, y int}][]byte", "map[struct{x int; y int}][]byte"},
+
+ // channels
+ dup("chan<- chan int"),
+ dup("chan<- <-chan int"),
+ dup("<-chan <-chan int"),
+ dup("chan (<-chan int)"),
+ dup("chan<- func()"),
+ dup("<-chan []func() int"),
+}
+
+// types that depend on other type declarations (src in TestTypes)
+var dependentTestTypes = []testEntry{
+ // interfaces
+ dup(`interface{io.Reader; io.Writer}`),
+ dup(`interface{m() int; io.Writer}`),
+ {`interface{m() interface{T}}`, `interface{m() interface{generic_p.T}}`},
+}
+
+func TestTypeString(t *testing.T) {
+ // The Go command is needed for the importer to determine the locations of stdlib .a files.
+ testenv.MustHaveGoBuild(t)
+
+ var tests []testEntry
+ tests = append(tests, independentTestTypes...)
+ tests = append(tests, dependentTestTypes...)
+
+ for _, test := range tests {
+ src := `package generic_p; import "io"; type _ io.Writer; type T ` + test.src
+ pkg, err := typecheck(src, nil, nil)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+ obj := pkg.Scope().Lookup("T")
+ if obj == nil {
+ t.Errorf("%s: T not found", test.src)
+ continue
+ }
+ typ := obj.Type().Underlying()
+ if got := typ.String(); got != test.str {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.str)
+ }
+ }
+}
+
+func TestQualifiedTypeString(t *testing.T) {
+ p := mustTypecheck("package p; type T int", nil, nil)
+ q := mustTypecheck("package q", nil, nil)
+
+ pT := p.Scope().Lookup("T").Type()
+ for _, test := range []struct {
+ typ Type
+ this *Package
+ want string
+ }{
+ {nil, nil, "<nil>"},
+ {pT, nil, "p.T"},
+ {pT, p, "T"},
+ {pT, q, "p.T"},
+ {NewPointer(pT), p, "*T"},
+ {NewPointer(pT), q, "*p.T"},
+ } {
+ qualifier := func(pkg *Package) string {
+ if pkg != test.this {
+ return pkg.Name()
+ }
+ return ""
+ }
+ if got := TypeString(test.typ, qualifier); got != test.want {
+ t.Errorf("TypeString(%s, %s) = %s, want %s",
+ test.this, test.typ, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go
new file mode 100644
index 0000000..9779132
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeterm.go
@@ -0,0 +1,165 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !Identical(ux, uy)
+}
diff --git a/src/cmd/compile/internal/types2/typeterm_test.go b/src/cmd/compile/internal/types2/typeterm_test.go
new file mode 100644
index 0000000..6d9c8db
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeterm_test.go
@@ -0,0 +1,239 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "strings"
+ "testing"
+)
+
+var myInt = func() Type {
+ tname := NewTypeName(nopos, nil, "myInt", nil)
+ return NewNamed(tname, Typ[Int], nil)
+}()
+
+var testTerms = map[string]*term{
+ "∅": nil,
+ "𝓤": {},
+ "int": {false, Typ[Int]},
+ "~int": {true, Typ[Int]},
+ "string": {false, Typ[String]},
+ "~string": {true, Typ[String]},
+ "myInt": {false, myInt},
+}
+
+func TestTermString(t *testing.T) {
+ for want, x := range testTerms {
+ if got := x.String(); got != want {
+ t.Errorf("%v.String() == %v; want %v", x, got, want)
+ }
+ }
+}
+
+func split(s string, n int) []string {
+ r := strings.Split(s, " ")
+ if len(r) != n {
+ panic("invalid test case: " + s)
+ }
+ return r
+}
+
+func testTerm(name string) *term {
+ r, ok := testTerms[name]
+ if !ok {
+ panic("invalid test argument: " + name)
+ }
+ return r
+}
+
+func TestTermEqual(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "𝓤 𝓤 T",
+ "int int T",
+ "~int ~int T",
+ "myInt myInt T",
+ "∅ 𝓤 F",
+ "∅ int F",
+ "∅ ~int F",
+ "𝓤 int F",
+ "𝓤 ~int F",
+ "𝓤 myInt F",
+ "int ~int F",
+ "int myInt F",
+ "~int myInt F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ // equal is symmetric
+ x, y = y, x
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermUnion(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅ ∅",
+ "∅ 𝓤 𝓤 ∅",
+ "∅ int int ∅",
+ "∅ ~int ~int ∅",
+ "∅ myInt myInt ∅",
+ "𝓤 𝓤 𝓤 ∅",
+ "𝓤 int 𝓤 ∅",
+ "𝓤 ~int 𝓤 ∅",
+ "𝓤 myInt 𝓤 ∅",
+ "int int int ∅",
+ "int ~int ~int ∅",
+ "int string int string",
+ "int ~string int ~string",
+ "int myInt int myInt",
+ "~int ~string ~int ~string",
+ "~int myInt ~int ∅",
+
+ // union is symmetric, but the result order isn't - repeat symmetric cases explicitly
+ "𝓤 ∅ 𝓤 ∅",
+ "int ∅ int ∅",
+ "~int ∅ ~int ∅",
+ "myInt ∅ myInt ∅",
+ "int 𝓤 𝓤 ∅",
+ "~int 𝓤 𝓤 ∅",
+ "myInt 𝓤 𝓤 ∅",
+ "~int int ~int ∅",
+ "string int string int",
+ "~string int ~string int",
+ "myInt int myInt int",
+ "~string ~int ~string ~int",
+ "myInt ~int ~int ∅",
+ } {
+ args := split(test, 4)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want1 := testTerm(args[2])
+ want2 := testTerm(args[3])
+ if got1, got2 := x.union(y); !got1.equal(want1) || !got2.equal(want2) {
+ t.Errorf("%v.union(%v) = %v, %v; want %v, %v", x, y, got1, got2, want1, want2)
+ }
+ }
+}
+
+func TestTermIntersection(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅",
+ "∅ 𝓤 ∅",
+ "∅ int ∅",
+ "∅ ~int ∅",
+ "∅ myInt ∅",
+ "𝓤 𝓤 𝓤",
+ "𝓤 int int",
+ "𝓤 ~int ~int",
+ "𝓤 myInt myInt",
+ "int int int",
+ "int ~int int",
+ "int string ∅",
+ "int ~string ∅",
+ "int string ∅",
+ "~int ~string ∅",
+ "~int myInt myInt",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := testTerm(args[2])
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ // intersect is symmetric
+ x, y = y, x
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermIncludes(t *testing.T) {
+ for _, test := range []string{
+ "∅ int F",
+ "𝓤 int T",
+ "int int T",
+ "~int int T",
+ "~int myInt T",
+ "string int F",
+ "~string int F",
+ "myInt int F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1]).typ
+ want := args[2] == "T"
+ if got := x.includes(y); got != want {
+ t.Errorf("%v.includes(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermSubsetOf(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "𝓤 𝓤 T",
+ "int int T",
+ "~int ~int T",
+ "myInt myInt T",
+ "∅ 𝓤 T",
+ "∅ int T",
+ "∅ ~int T",
+ "∅ myInt T",
+ "𝓤 int F",
+ "𝓤 ~int F",
+ "𝓤 myInt F",
+ "int ~int T",
+ "int myInt F",
+ "~int myInt F",
+ "myInt int F",
+ "myInt ~int T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.subsetOf(y); got != want {
+ t.Errorf("%v.subsetOf(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermDisjoint(t *testing.T) {
+ for _, test := range []string{
+ "int int F",
+ "~int ~int F",
+ "int ~int F",
+ "int string T",
+ "int ~string T",
+ "int myInt T",
+ "~int ~string T",
+ "~int myInt F",
+ "string myInt T",
+ "~string myInt T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ // disjoint is symmetric
+ x, y = y, x
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
new file mode 100644
index 0000000..81adcbd
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -0,0 +1,551 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type-checking of identifiers and type expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ . "internal/types/errors"
+ "strings"
+)
+
+// ident type-checks identifier e and initializes x with the value or type of e.
+// If an error occurred, x.mode is set to invalid.
+// For the meaning of def, see Checker.definedType, below.
+// If wantType is set, the identifier e is expected to denote a type.
+func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType bool) {
+ x.mode = invalid
+ x.expr = e
+
+ // Note that we cannot use check.lookup here because the returned scope
+ // may be different from obj.Parent(). See also Scope.LookupParent doc.
+ scope, obj := check.scope.LookupParent(e.Value, check.pos)
+ switch obj {
+ case nil:
+ if e.Value == "_" {
+ // Blank identifiers are never declared, but the current identifier may
+ // be a placeholder for a receiver type parameter. In this case we can
+ // resolve its type and object from Checker.recvTParamMap.
+ if tpar := check.recvTParamMap[e]; tpar != nil {
+ x.mode = typexpr
+ x.typ = tpar
+ } else {
+ check.error(e, InvalidBlank, "cannot use _ as value or type")
+ }
+ } else {
+ check.errorf(e, UndeclaredName, "undefined: %s", e.Value)
+ }
+ return
+ case universeAny, universeComparable:
+ if !check.verifyVersionf(e, go1_18, "predeclared %s", e.Value) {
+ return // avoid follow-on errors
+ }
+ }
+ check.recordUse(e, obj)
+
+ // Type-check the object.
+ // Only call Checker.objDecl if the object doesn't have a type yet
+ // (in which case we must actually determine it) or the object is a
+ // TypeName and we also want a type (in which case we might detect
+ // a cycle which needs to be reported). Otherwise we can skip the
+ // call and avoid a possible cycle error in favor of the more
+ // informative "not a type/value" error that this function's caller
+ // will issue (see go.dev/issue/25790).
+ typ := obj.Type()
+ if _, gotType := obj.(*TypeName); typ == nil || gotType && wantType {
+ check.objDecl(obj, def)
+ typ = obj.Type() // type must have been assigned by Checker.objDecl
+ }
+ assert(typ != nil)
+
+ // The object may have been dot-imported.
+ // If so, mark the respective package as used.
+ // (This code is only needed for dot-imports. Without them,
+ // we only have to mark variables, see *Var case below).
+ if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil {
+ pkgName.used = true
+ }
+
+ switch obj := obj.(type) {
+ case *PkgName:
+ check.errorf(e, InvalidPkgUse, "use of package %s not in selector", obj.name)
+ return
+
+ case *Const:
+ check.addDeclDep(obj)
+ if !isValid(typ) {
+ return
+ }
+ if obj == universeIota {
+ if check.iota == nil {
+ check.error(e, InvalidIota, "cannot use iota outside constant declaration")
+ return
+ }
+ x.val = check.iota
+ } else {
+ x.val = obj.val
+ }
+ assert(x.val != nil)
+ x.mode = constant_
+
+ case *TypeName:
+ if !check.enableAlias && check.isBrokenAlias(obj) {
+ check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", obj.name)
+ return
+ }
+ x.mode = typexpr
+
+ case *Var:
+ // It's ok to mark non-local variables, but ignore variables
+ // from other packages to avoid potential race conditions with
+ // dot-imported variables.
+ if obj.pkg == check.pkg {
+ obj.used = true
+ }
+ check.addDeclDep(obj)
+ if !isValid(typ) {
+ return
+ }
+ x.mode = variable
+
+ case *Func:
+ check.addDeclDep(obj)
+ x.mode = value
+
+ case *Builtin:
+ x.id = obj.id
+ x.mode = builtin
+
+ case *Nil:
+ x.mode = nilvalue
+
+ default:
+ unreachable()
+ }
+
+ x.typ = typ
+}
+
+// typ type-checks the type expression e and returns its type, or Typ[Invalid].
+// The type must not be an (uninstantiated) generic type.
+func (check *Checker) typ(e syntax.Expr) Type {
+ return check.definedType(e, nil)
+}
+
+// varType type-checks the type expression e and returns its type, or Typ[Invalid].
+// The type must not be an (uninstantiated) generic type and it must not be a
+// constraint interface.
+func (check *Checker) varType(e syntax.Expr) Type {
+ typ := check.definedType(e, nil)
+ check.validVarType(e, typ)
+ return typ
+}
+
+// validVarType reports an error if typ is a constraint interface.
+// The expression e is used for error reporting, if any.
+func (check *Checker) validVarType(e syntax.Expr, typ Type) {
+ // If we have a type parameter there's nothing to do.
+ if isTypeParam(typ) {
+ return
+ }
+
+ // We don't want to call under() or complete interfaces while we are in
+ // the middle of type-checking parameter declarations that might belong
+ // to interface methods. Delay this check to the end of type-checking.
+ check.later(func() {
+ if t, _ := under(typ).(*Interface); t != nil {
+ pos := syntax.StartPos(e)
+ tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
+ if !tset.IsMethodSet() {
+ if tset.comparable {
+ check.softErrorf(pos, MisplacedConstraintIface, "cannot use type %s outside a type constraint: interface is (or embeds) comparable", typ)
+ } else {
+ check.softErrorf(pos, MisplacedConstraintIface, "cannot use type %s outside a type constraint: interface contains type constraints", typ)
+ }
+ }
+ }
+ }).describef(e, "check var type %s", typ)
+}
+
+// definedType is like typ but also accepts a type name def.
+// If def != nil, e is the type specification for the type named def, declared
+// in a type declaration, and def.typ.underlying will be set to the type of e
+// before any components of e are type-checked.
+func (check *Checker) definedType(e syntax.Expr, def *TypeName) Type {
+ typ := check.typInternal(e, def)
+ assert(isTyped(typ))
+ if isGeneric(typ) {
+ check.errorf(e, WrongTypeArgCount, "cannot use generic type %s without instantiation", typ)
+ typ = Typ[Invalid]
+ }
+ check.recordTypeAndValue(e, typexpr, typ, nil)
+ return typ
+}
+
+// genericType is like typ but the type must be an (uninstantiated) generic
+// type. If cause is non-nil and the type expression was a valid type but not
+// generic, cause will be populated with a message describing the error.
+func (check *Checker) genericType(e syntax.Expr, cause *string) Type {
+ typ := check.typInternal(e, nil)
+ assert(isTyped(typ))
+ if isValid(typ) && !isGeneric(typ) {
+ if cause != nil {
+ *cause = check.sprintf("%s is not a generic type", typ)
+ }
+ typ = Typ[Invalid]
+ }
+ // TODO(gri) what is the correct call below?
+ check.recordTypeAndValue(e, typexpr, typ, nil)
+ return typ
+}
+
+// goTypeName returns the Go type name for typ and
+// removes any occurrences of "types2." from that name.
+func goTypeName(typ Type) string {
+ return strings.ReplaceAll(fmt.Sprintf("%T", typ), "types2.", "")
+}
+
+// typInternal drives type checking of types.
+// Must only be called by definedType or genericType.
+func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) {
+ if check.conf.Trace {
+ check.trace(e0.Pos(), "-- type %s", e0)
+ check.indent++
+ defer func() {
+ check.indent--
+ var under Type
+ if T != nil {
+ // Calling under() here may lead to endless instantiations.
+ // Test case: type T[P any] *T[P]
+ under = safeUnderlying(T)
+ }
+ if T == under {
+ check.trace(e0.Pos(), "=> %s // %s", T, goTypeName(T))
+ } else {
+ check.trace(e0.Pos(), "=> %s (under = %s) // %s", T, under, goTypeName(T))
+ }
+ }()
+ }
+
+ switch e := e0.(type) {
+ case *syntax.BadExpr:
+ // ignore - error reported before
+
+ case *syntax.Name:
+ var x operand
+ check.ident(&x, e, def, true)
+
+ switch x.mode {
+ case typexpr:
+ typ := x.typ
+ setDefType(def, typ)
+ return typ
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(&x, NotAType, "%s used as type", &x)
+ default:
+ check.errorf(&x, NotAType, "%s is not a type", &x)
+ }
+
+ case *syntax.SelectorExpr:
+ var x operand
+ check.selector(&x, e, def, true)
+
+ switch x.mode {
+ case typexpr:
+ typ := x.typ
+ setDefType(def, typ)
+ return typ
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(&x, NotAType, "%s used as type", &x)
+ default:
+ check.errorf(&x, NotAType, "%s is not a type", &x)
+ }
+
+ case *syntax.IndexExpr:
+ check.verifyVersionf(e, go1_18, "type instantiation")
+ return check.instantiatedType(e.X, syntax.UnpackListExpr(e.Index), def)
+
+ case *syntax.ParenExpr:
+ // Generic types must be instantiated before they can be used in any form.
+ // Consequently, generic types cannot be parenthesized.
+ return check.definedType(e.X, def)
+
+ case *syntax.ArrayType:
+ typ := new(Array)
+ setDefType(def, typ)
+ if e.Len != nil {
+ typ.len = check.arrayLength(e.Len)
+ } else {
+ // [...]array
+ check.error(e, BadDotDotDotSyntax, "invalid use of [...] array (outside a composite literal)")
+ typ.len = -1
+ }
+ typ.elem = check.varType(e.Elem)
+ if typ.len >= 0 {
+ return typ
+ }
+ // report error if we encountered [...]
+
+ case *syntax.SliceType:
+ typ := new(Slice)
+ setDefType(def, typ)
+ typ.elem = check.varType(e.Elem)
+ return typ
+
+ case *syntax.DotsType:
+ // dots are handled explicitly where they are legal
+ // (array composite literals and parameter lists)
+ check.error(e, InvalidDotDotDot, "invalid use of '...'")
+ check.use(e.Elem)
+
+ case *syntax.StructType:
+ typ := new(Struct)
+ setDefType(def, typ)
+ check.structType(typ, e)
+ return typ
+
+ case *syntax.Operation:
+ if e.Op == syntax.Mul && e.Y == nil {
+ typ := new(Pointer)
+ typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration
+ setDefType(def, typ)
+ typ.base = check.varType(e.X)
+ // If typ.base is invalid, it's unlikely that *base is particularly
+ // useful - even a valid dereferenciation will lead to an invalid
+ // type again, and in some cases we get unexpected follow-on errors
+ // (e.g., go.dev/issue/49005). Return an invalid type instead.
+ if !isValid(typ.base) {
+ return Typ[Invalid]
+ }
+ return typ
+ }
+
+ check.errorf(e0, NotAType, "%s is not a type", e0)
+ check.use(e0)
+
+ case *syntax.FuncType:
+ typ := new(Signature)
+ setDefType(def, typ)
+ check.funcType(typ, nil, nil, e)
+ return typ
+
+ case *syntax.InterfaceType:
+ typ := check.newInterface()
+ setDefType(def, typ)
+ check.interfaceType(typ, e, def)
+ return typ
+
+ case *syntax.MapType:
+ typ := new(Map)
+ setDefType(def, typ)
+
+ typ.key = check.varType(e.Key)
+ typ.elem = check.varType(e.Value)
+
+ // spec: "The comparison operators == and != must be fully defined
+ // for operands of the key type; thus the key type must not be a
+ // function, map, or slice."
+ //
+ // Delay this check because it requires fully setup types;
+ // it is safe to continue in any case (was go.dev/issue/6667).
+ check.later(func() {
+ if !Comparable(typ.key) {
+ var why string
+ if isTypeParam(typ.key) {
+ why = " (missing comparable constraint)"
+ }
+ check.errorf(e.Key, IncomparableMapKey, "invalid map key type %s%s", typ.key, why)
+ }
+ }).describef(e.Key, "check map key %s", typ.key)
+
+ return typ
+
+ case *syntax.ChanType:
+ typ := new(Chan)
+ setDefType(def, typ)
+
+ dir := SendRecv
+ switch e.Dir {
+ case 0:
+ // nothing to do
+ case syntax.SendOnly:
+ dir = SendOnly
+ case syntax.RecvOnly:
+ dir = RecvOnly
+ default:
+ check.errorf(e, InvalidSyntaxTree, "unknown channel direction %d", e.Dir)
+ // ok to continue
+ }
+
+ typ.dir = dir
+ typ.elem = check.varType(e.Elem)
+ return typ
+
+ default:
+ check.errorf(e0, NotAType, "%s is not a type", e0)
+ check.use(e0)
+ }
+
+ typ := Typ[Invalid]
+ setDefType(def, typ)
+ return typ
+}
+
+func setDefType(def *TypeName, typ Type) {
+ if def != nil {
+ switch t := def.typ.(type) {
+ case *Alias:
+ // t.fromRHS should always be set, either to an invalid type
+ // in the beginning, or to typ in certain cyclic declarations.
+ if t.fromRHS != Typ[Invalid] && t.fromRHS != typ {
+ panic(sprintf(nil, true, "t.fromRHS = %s, typ = %s\n", t.fromRHS, typ))
+ }
+ t.fromRHS = typ
+ case *Basic:
+ assert(t == Typ[Invalid])
+ case *Named:
+ t.underlying = typ
+ default:
+ panic(fmt.Sprintf("unexpected type %T", t))
+ }
+ }
+}
+
+func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *TypeName) (res Type) {
+ if check.conf.Trace {
+ check.trace(x.Pos(), "-- instantiating type %s with %s", x, xlist)
+ check.indent++
+ defer func() {
+ check.indent--
+ // Don't format the underlying here. It will always be nil.
+ check.trace(x.Pos(), "=> %s", res)
+ }()
+ }
+
+ var cause string
+ gtyp := check.genericType(x, &cause)
+ if cause != "" {
+ check.errorf(x, NotAGenericType, invalidOp+"%s%s (%s)", x, xlist, cause)
+ }
+ if !isValid(gtyp) {
+ return gtyp // error already reported
+ }
+
+ orig := asNamed(gtyp)
+ if orig == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
+ }
+
+ // evaluate arguments
+ targs := check.typeList(xlist)
+ if targs == nil {
+ setDefType(def, Typ[Invalid]) // avoid errors later due to lazy instantiation
+ return Typ[Invalid]
+ }
+
+ // create the instance
+ inst := asNamed(check.instance(x.Pos(), orig, targs, nil, check.context()))
+ setDefType(def, inst)
+
+ // orig.tparams may not be set up, so we need to do expansion later.
+ check.later(func() {
+ // This is an instance from the source, not from recursive substitution,
+ // and so it must be resolved during type-checking so that we can report
+ // errors.
+ check.recordInstance(x, inst.TypeArgs().list(), inst)
+
+ if check.validateTArgLen(x.Pos(), inst.obj.name, inst.TypeParams().Len(), inst.TypeArgs().Len()) {
+ if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil {
+ // best position for error reporting
+ pos := x.Pos()
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ check.softErrorf(pos, InvalidTypeArg, "%s", err)
+ } else {
+ check.mono.recordInstance(check.pkg, x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), xlist)
+ }
+ }
+
+ // TODO(rfindley): remove this call: we don't need to call validType here,
+ // as cycles can only occur for types used inside a Named type declaration,
+ // and so it suffices to call validType from declared types.
+ check.validType(inst)
+ }).describef(x, "resolve instance %s", inst)
+
+ return inst
+}
+
+// arrayLength type-checks the array length expression e
+// and returns the constant length >= 0, or a value < 0
+// to indicate an error (and thus an unknown length).
+func (check *Checker) arrayLength(e syntax.Expr) int64 {
+ // If e is an identifier, the array declaration might be an
+ // attempt at a parameterized type declaration with missing
+ // constraint. Provide an error message that mentions array
+ // length.
+ if name, _ := e.(*syntax.Name); name != nil {
+ obj := check.lookup(name.Value)
+ if obj == nil {
+ check.errorf(name, InvalidArrayLen, "undefined array length %s or missing type constraint", name.Value)
+ return -1
+ }
+ if _, ok := obj.(*Const); !ok {
+ check.errorf(name, InvalidArrayLen, "invalid array length %s", name.Value)
+ return -1
+ }
+ }
+
+ var x operand
+ check.expr(nil, &x, e)
+ if x.mode != constant_ {
+ if x.mode != invalid {
+ check.errorf(&x, InvalidArrayLen, "array length %s must be constant", &x)
+ }
+ return -1
+ }
+
+ if isUntyped(x.typ) || isInteger(x.typ) {
+ if val := constant.ToInt(x.val); val.Kind() == constant.Int {
+ if representableConst(val, check, Typ[Int], nil) {
+ if n, ok := constant.Int64Val(val); ok && n >= 0 {
+ return n
+ }
+ }
+ }
+ }
+
+ var msg string
+ if isInteger(x.typ) {
+ msg = "invalid array length %s"
+ } else {
+ msg = "array length %s must be integer"
+ }
+ check.errorf(&x, InvalidArrayLen, msg, &x)
+ return -1
+}
+
+// typeList provides the list of types corresponding to the incoming expression list.
+// If an error occurred, the result is nil, but all list elements were type-checked.
+func (check *Checker) typeList(list []syntax.Expr) []Type {
+ res := make([]Type, len(list)) // res != nil even if len(list) == 0
+ for i, x := range list {
+ t := check.varType(x)
+ if !isValid(t) {
+ res = nil
+ }
+ if res != nil {
+ res[i] = t
+ }
+ }
+ return res
+}
diff --git a/src/cmd/compile/internal/types2/under.go b/src/cmd/compile/internal/types2/under.go
new file mode 100644
index 0000000..6b24399
--- /dev/null
+++ b/src/cmd/compile/internal/types2/under.go
@@ -0,0 +1,114 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// under returns the true expanded underlying type.
+// If it doesn't exist, the result is Typ[Invalid].
+// under must only be called when a type is known
+// to be fully set up.
+func under(t Type) Type {
+ if t := asNamed(t); t != nil {
+ return t.under()
+ }
+ return t.Underlying()
+}
+
+// If t is not a type parameter, coreType returns the underlying type.
+// If t is a type parameter, coreType returns the single underlying
+// type of all types in its type set if it exists, or nil otherwise. If the
+// type set contains only unrestricted and restricted channel types (with
+// identical element types), the single underlying type is the restricted
+// channel type if the restrictions are always the same, or nil otherwise.
+func coreType(t Type) Type {
+ tpar, _ := t.(*TypeParam)
+ if tpar == nil {
+ return under(t)
+ }
+
+ var su Type
+ if tpar.underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ if su != nil {
+ u = match(su, u)
+ if u == nil {
+ return false
+ }
+ }
+ // su == nil || match(su, u) != nil
+ su = u
+ return true
+ }) {
+ return su
+ }
+ return nil
+}
+
+// coreString is like coreType but also considers []byte
+// and strings as identical. In this case, if successful and we saw
+// a string, the result is of type (possibly untyped) string.
+func coreString(t Type) Type {
+ tpar, _ := t.(*TypeParam)
+ if tpar == nil {
+ return under(t) // string or untyped string
+ }
+
+ var su Type
+ hasString := false
+ if tpar.underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ if isString(u) {
+ u = NewSlice(universeByte)
+ hasString = true
+ }
+ if su != nil {
+ u = match(su, u)
+ if u == nil {
+ return false
+ }
+ }
+ // su == nil || match(su, u) != nil
+ su = u
+ return true
+ }) {
+ if hasString {
+ return Typ[String]
+ }
+ return su
+ }
+ return nil
+}
+
+// If x and y are identical, match returns x.
+// If x and y are identical channels but for their direction
+// and one of them is unrestricted, match returns the channel
+// with the restricted direction.
+// In all other cases, match returns nil.
+func match(x, y Type) Type {
+ // Common case: we don't have channels.
+ if Identical(x, y) {
+ return x
+ }
+
+ // We may have channels that differ in direction only.
+ if x, _ := x.(*Chan); x != nil {
+ if y, _ := y.(*Chan); y != nil && Identical(x.elem, y.elem) {
+ // We have channels that differ in direction only.
+ // If there's an unrestricted channel, select the restricted one.
+ switch {
+ case x.dir == SendRecv:
+ return y
+ case y.dir == SendRecv:
+ return x
+ }
+ }
+ }
+
+ // types are different
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
new file mode 100644
index 0000000..8218939
--- /dev/null
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -0,0 +1,796 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type unification.
+//
+// Type unification attempts to make two types x and y structurally
+// equivalent by determining the types for a given list of (bound)
+// type parameters which may occur within x and y. If x and y are
+// structurally different (say []T vs chan T), or conflicting
+// types are determined for type parameters, unification fails.
+// If unification succeeds, as a side-effect, the types of the
+// bound type parameters may be determined.
+//
+// Unification typically requires multiple calls u.unify(x, y) to
+// a given unifier u, with various combinations of types x and y.
+// In each call, additional type parameter types may be determined
+// as a side effect and recorded in u.
+// If a call fails (returns false), unification fails.
+//
+// In the unification context, structural equivalence of two types
+// ignores the difference between a defined type and its underlying
+// type if one type is a defined type and the other one is not.
+// It also ignores the difference between an (external, unbound)
+// type parameter and its core type.
+// If two types are not structurally equivalent, they cannot be Go
+// identical types. On the other hand, if they are structurally
+// equivalent, they may be Go identical or at least assignable, or
+// they may be in the type set of a constraint.
+// Whether they indeed are identical or assignable is determined
+// upon instantiation and function argument passing.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+const (
+ // Upper limit for recursion depth. Used to catch infinite recursions
+ // due to implementation issues (e.g., see issues go.dev/issue/48619, go.dev/issue/48656).
+ unificationDepthLimit = 50
+
+ // Whether to panic when unificationDepthLimit is reached.
+ // If disabled, a recursion depth overflow results in a (quiet)
+ // unification failure.
+ panicAtUnificationDepthLimit = true
+
+ // If enableCoreTypeUnification is set, unification will consider
+ // the core types, if any, of non-local (unbound) type parameters.
+ enableCoreTypeUnification = true
+
+ // If traceInference is set, unification will print a trace of its operation.
+ // Interpretation of trace:
+ // x ≡ y attempt to unify types x and y
+ // p ➞ y type parameter p is set to type y (p is inferred to be y)
+ // p ⇄ q type parameters p and q match (p is inferred to be q and vice versa)
+ // x ≢ y types x and y cannot be unified
+ // [p, q, ...] ➞ [x, y, ...] mapping from type parameters to types
+ traceInference = false
+)
+
+// A unifier maintains a list of type parameters and
+// corresponding types inferred for each type parameter.
+// A unifier is created by calling newUnifier.
+type unifier struct {
+ // handles maps each type parameter to its inferred type through
+ // an indirection *Type called (inferred type) "handle".
+ // Initially, each type parameter has its own, separate handle,
+ // with a nil (i.e., not yet inferred) type.
+ // After a type parameter P is unified with a type parameter Q,
+ // P and Q share the same handle (and thus type). This ensures
+ // that inferring the type for a given type parameter P will
+ // automatically infer the same type for all other parameters
+ // unified (joined) with P.
+ handles map[*TypeParam]*Type
+ depth int // recursion depth during unification
+ enableInterfaceInference bool // use shared methods for better inference
+}
+
+// newUnifier returns a new unifier initialized with the given type parameter
+// and corresponding type argument lists. The type argument list may be shorter
+// than the type parameter list, and it may contain nil types. Matching type
+// parameters and arguments must have the same index.
+func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier {
+ assert(len(tparams) >= len(targs))
+ handles := make(map[*TypeParam]*Type, len(tparams))
+ // Allocate all handles up-front: in a correct program, all type parameters
+ // must be resolved and thus eventually will get a handle.
+ // Also, sharing of handles caused by unified type parameters is rare and
+ // so it's ok to not optimize for that case (and delay handle allocation).
+ for i, x := range tparams {
+ var t Type
+ if i < len(targs) {
+ t = targs[i]
+ }
+ handles[x] = &t
+ }
+ return &unifier{handles, 0, enableInterfaceInference}
+}
+
+// unifyMode controls the behavior of the unifier.
+type unifyMode uint
+
+const (
+ // If assign is set, we are unifying types involved in an assignment:
+ // they may match inexactly at the top, but element types must match
+ // exactly.
+ assign unifyMode = 1 << iota
+
+ // If exact is set, types unify if they are identical (or can be
+ // made identical with suitable arguments for type parameters).
+ // Otherwise, a named type and a type literal unify if their
+ // underlying types unify, channel directions are ignored, and
+ // if there is an interface, the other type must implement the
+ // interface.
+ exact
+)
+
+func (m unifyMode) String() string {
+ switch m {
+ case 0:
+ return "inexact"
+ case assign:
+ return "assign"
+ case exact:
+ return "exact"
+ case assign | exact:
+ return "assign, exact"
+ }
+ return fmt.Sprintf("mode %d", m)
+}
+
+// unify attempts to unify x and y and reports whether it succeeded.
+// As a side-effect, types may be inferred for type parameters.
+// The mode parameter controls how types are compared.
+func (u *unifier) unify(x, y Type, mode unifyMode) bool {
+ return u.nify(x, y, mode, nil)
+}
+
+func (u *unifier) tracef(format string, args ...interface{}) {
+ fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, true, format, args...))
+}
+
+// String returns a string representation of the current mapping
+// from type parameters to types.
+func (u *unifier) String() string {
+ // sort type parameters for reproducible strings
+ tparams := make(typeParamsById, len(u.handles))
+ i := 0
+ for tpar := range u.handles {
+ tparams[i] = tpar
+ i++
+ }
+ sort.Sort(tparams)
+
+ var buf bytes.Buffer
+ w := newTypeWriter(&buf, nil)
+ w.byte('[')
+ for i, x := range tparams {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.typ(x)
+ w.string(": ")
+ w.typ(u.at(x))
+ }
+ w.byte(']')
+ return buf.String()
+}
+
+type typeParamsById []*TypeParam
+
+func (s typeParamsById) Len() int { return len(s) }
+func (s typeParamsById) Less(i, j int) bool { return s[i].id < s[j].id }
+func (s typeParamsById) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// join unifies the given type parameters x and y.
+// If both type parameters already have a type associated with them
+// and they are not joined, join fails and returns false.
+func (u *unifier) join(x, y *TypeParam) bool {
+ if traceInference {
+ u.tracef("%s ⇄ %s", x, y)
+ }
+ switch hx, hy := u.handles[x], u.handles[y]; {
+ case hx == hy:
+ // Both type parameters already share the same handle. Nothing to do.
+ case *hx != nil && *hy != nil:
+ // Both type parameters have (possibly different) inferred types. Cannot join.
+ return false
+ case *hx != nil:
+ // Only type parameter x has an inferred type. Use handle of x.
+ u.setHandle(y, hx)
+ // This case is treated like the default case.
+ // case *hy != nil:
+ // // Only type parameter y has an inferred type. Use handle of y.
+ // u.setHandle(x, hy)
+ default:
+ // Neither type parameter has an inferred type. Use handle of y.
+ u.setHandle(x, hy)
+ }
+ return true
+}
+
+// asTypeParam returns x.(*TypeParam) if x is a type parameter recorded with u.
+// Otherwise, the result is nil.
+func (u *unifier) asTypeParam(x Type) *TypeParam {
+ if x, _ := x.(*TypeParam); x != nil {
+ if _, found := u.handles[x]; found {
+ return x
+ }
+ }
+ return nil
+}
+
+// setHandle sets the handle for type parameter x
+// (and all its joined type parameters) to h.
+func (u *unifier) setHandle(x *TypeParam, h *Type) {
+ hx := u.handles[x]
+ assert(hx != nil)
+ for y, hy := range u.handles {
+ if hy == hx {
+ u.handles[y] = h
+ }
+ }
+}
+
+// at returns the (possibly nil) type for type parameter x.
+func (u *unifier) at(x *TypeParam) Type {
+ return *u.handles[x]
+}
+
+// set sets the type t for type parameter x;
+// t must not be nil.
+func (u *unifier) set(x *TypeParam, t Type) {
+ assert(t != nil)
+ if traceInference {
+ u.tracef("%s ➞ %s", x, t)
+ }
+ *u.handles[x] = t
+}
+
+// unknowns returns the number of type parameters for which no type has been set yet.
+func (u *unifier) unknowns() int {
+ n := 0
+ for _, h := range u.handles {
+ if *h == nil {
+ n++
+ }
+ }
+ return n
+}
+
+// inferred returns the list of inferred types for the given type parameter list.
+// The result is never nil and has the same length as tparams; result types that
+// could not be inferred are nil. Corresponding type parameters and result types
+// have identical indices.
+func (u *unifier) inferred(tparams []*TypeParam) []Type {
+ list := make([]Type, len(tparams))
+ for i, x := range tparams {
+ list[i] = u.at(x)
+ }
+ return list
+}
+
+// asInterface returns the underlying type of x as an interface if
+// it is a non-type parameter interface. Otherwise it returns nil.
+func asInterface(x Type) (i *Interface) {
+ if _, ok := x.(*TypeParam); !ok {
+ i, _ = under(x).(*Interface)
+ }
+ return i
+}
+
+// nify implements the core unification algorithm which is an
+// adapted version of Checker.identical. For changes to that
+// code the corresponding changes should be made here.
+// Must not be called directly from outside the unifier.
+func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) {
+ u.depth++
+ if traceInference {
+ u.tracef("%s ≡ %s\t// %s", x, y, mode)
+ }
+ defer func() {
+ if traceInference && !result {
+ u.tracef("%s ≢ %s", x, y)
+ }
+ u.depth--
+ }()
+
+ x = Unalias(x)
+ y = Unalias(y)
+
+ // nothing to do if x == y
+ if x == y {
+ return true
+ }
+
+ // Stop gap for cases where unification fails.
+ if u.depth > unificationDepthLimit {
+ if traceInference {
+ u.tracef("depth %d >= %d", u.depth, unificationDepthLimit)
+ }
+ if panicAtUnificationDepthLimit {
+ panic("unification reached recursion depth limit")
+ }
+ return false
+ }
+
+ // Unification is symmetric, so we can swap the operands.
+ // Ensure that if we have at least one
+ // - defined type, make sure one is in y
+ // - type parameter recorded with u, make sure one is in x
+ if asNamed(x) != nil || u.asTypeParam(y) != nil {
+ if traceInference {
+ u.tracef("%s ≡ %s\t// swap", y, x)
+ }
+ x, y = y, x
+ }
+
+ // Unification will fail if we match a defined type against a type literal.
+ // If we are matching types in an assignment, at the top-level, types with
+ // the same type structure are permitted as long as at least one of them
+ // is not a defined type. To accommodate for that possibility, we continue
+ // unification with the underlying type of a defined type if the other type
+ // is a type literal. This is controlled by the exact unification mode.
+ // We also continue if the other type is a basic type because basic types
+ // are valid underlying types and may appear as core types of type constraints.
+ // If we exclude them, inferred defined types for type parameters may not
+ // match against the core types of their constraints (even though they might
+ // correctly match against some of the types in the constraint's type set).
+ // Finally, if unification (incorrectly) succeeds by matching the underlying
+ // type of a defined type against a basic type (because we include basic types
+ // as type literals here), and if that leads to an incorrectly inferred type,
+ // we will fail at function instantiation or argument assignment time.
+ //
+ // If we have at least one defined type, there is one in y.
+ if ny := asNamed(y); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) {
+ if traceInference {
+ u.tracef("%s ≡ under %s", x, ny)
+ }
+ y = ny.under()
+ // Per the spec, a defined type cannot have an underlying type
+ // that is a type parameter.
+ assert(!isTypeParam(y))
+ // x and y may be identical now
+ if x == y {
+ return true
+ }
+ }
+
+ // Cases where at least one of x or y is a type parameter recorded with u.
+ // If we have at least one type parameter, there is one in x.
+ // If we have exactly one type parameter, because it is in x,
+ // isTypeLit(x) is false and y was not changed above. In other
+ // words, if y was a defined type, it is still a defined type
+ // (relevant for the logic below).
+ switch px, py := u.asTypeParam(x), u.asTypeParam(y); {
+ case px != nil && py != nil:
+ // both x and y are type parameters
+ if u.join(px, py) {
+ return true
+ }
+ // both x and y have an inferred type - they must match
+ return u.nify(u.at(px), u.at(py), mode, p)
+
+ case px != nil:
+ // x is a type parameter, y is not
+ if x := u.at(px); x != nil {
+ // x has an inferred type which must match y
+ if u.nify(x, y, mode, p) {
+ // We have a match, possibly through underlying types.
+ xi := asInterface(x)
+ yi := asInterface(y)
+ xn := asNamed(x) != nil
+ yn := asNamed(y) != nil
+ // If we have two interfaces, what to do depends on
+ // whether they are named and their method sets.
+ if xi != nil && yi != nil {
+ // Both types are interfaces.
+ // If both types are defined types, they must be identical
+ // because unification doesn't know which type has the "right" name.
+ if xn && yn {
+ return Identical(x, y)
+ }
+ // In all other cases, the method sets must match.
+ // The types unified so we know that corresponding methods
+ // match and we can simply compare the number of methods.
+ // TODO(gri) We may be able to relax this rule and select
+ // the more general interface. But if one of them is a defined
+ // type, it's not clear how to choose and whether we introduce
+ // an order dependency or not. Requiring the same method set
+ // is conservative.
+ if len(xi.typeSet().methods) != len(yi.typeSet().methods) {
+ return false
+ }
+ } else if xi != nil || yi != nil {
+ // One but not both of them are interfaces.
+ // In this case, either x or y could be viable matches for the corresponding
+ // type parameter, which means choosing either introduces an order dependence.
+ // Therefore, we must fail unification (go.dev/issue/60933).
+ return false
+ }
+ // If we have inexact unification and one of x or y is a defined type, select the
+ // defined type. This ensures that in a series of types, all matching against the
+ // same type parameter, we infer a defined type if there is one, independent of
+ // order. Type inference or assignment may fail, which is ok.
+ // Selecting a defined type, if any, ensures that we don't lose the type name;
+ // and since we have inexact unification, a value of equally named or matching
+ // undefined type remains assignable (go.dev/issue/43056).
+ //
+ // Similarly, if we have inexact unification and there are no defined types but
+ // channel types, select a directed channel, if any. This ensures that in a series
+ // of unnamed types, all matching against the same type parameter, we infer the
+ // directed channel if there is one, independent of order.
+ // Selecting a directional channel, if any, ensures that a value of another
+ // inexactly unifying channel type remains assignable (go.dev/issue/62157).
+ //
+ // If we have multiple defined channel types, they are either identical or we
+ // have assignment conflicts, so we can ignore directionality in this case.
+ //
+ // If we have defined and literal channel types, a defined type wins to avoid
+ // order dependencies.
+ if mode&exact == 0 {
+ switch {
+ case xn:
+ // x is a defined type: nothing to do.
+ case yn:
+ // x is not a defined type and y is a defined type: select y.
+ u.set(px, y)
+ default:
+ // Neither x nor y are defined types.
+ if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv {
+ // y is a directed channel type: select y.
+ u.set(px, y)
+ }
+ }
+ }
+ return true
+ }
+ return false
+ }
+ // otherwise, infer type from y
+ u.set(px, y)
+ return true
+ }
+
+ // x != y if we get here
+ assert(x != y)
+
+ // If u.EnableInterfaceInference is set and we don't require exact unification,
+ // if both types are interfaces, one interface must have a subset of the
+ // methods of the other and corresponding method signatures must unify.
+ // If only one type is an interface, all its methods must be present in the
+ // other type and corresponding method signatures must unify.
+ if u.enableInterfaceInference && mode&exact == 0 {
+ // One or both interfaces may be defined types.
+ // Look under the name, but not under type parameters (go.dev/issue/60564).
+ xi := asInterface(x)
+ yi := asInterface(y)
+ // If we have two interfaces, check the type terms for equivalence,
+ // and unify common methods if possible.
+ if xi != nil && yi != nil {
+ xset := xi.typeSet()
+ yset := yi.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
+ // For now we require terms to be equal.
+ // We should be able to relax this as well, eventually.
+ if !xset.terms.equal(yset.terms) {
+ return false
+ }
+ // Interface types are the only types where cycles can occur
+ // that are not "terminated" via named types; and such cycles
+ // can only be created via method parameter types that are
+ // anonymous interfaces (directly or indirectly) embedding
+ // the current interface. Example:
+ //
+ // type T interface {
+ // m() interface{T}
+ // }
+ //
+ // If two such (differently named) interfaces are compared,
+ // endless recursion occurs if the cycle is not detected.
+ //
+ // If x and y were compared before, they must be equal
+ // (if they were not, the recursion would have stopped);
+ // search the ifacePair stack for the same pair.
+ //
+ // This is a quadratic algorithm, but in practice these stacks
+ // are extremely short (bounded by the nesting depth of interface
+ // type declarations that recur via parameter types, an extremely
+ // rare occurrence). An alternative implementation might use a
+ // "visited" map, but that is probably less efficient overall.
+ q := &ifacePair{xi, yi, p}
+ for p != nil {
+ if p.identical(q) {
+ return true // same pair was compared before
+ }
+ p = p.prev
+ }
+ // The method set of x must be a subset of the method set
+ // of y or vice versa, and the common methods must unify.
+ xmethods := xset.methods
+ ymethods := yset.methods
+ // The smaller method set must be the subset, if it exists.
+ if len(xmethods) > len(ymethods) {
+ xmethods, ymethods = ymethods, xmethods
+ }
+ // len(xmethods) <= len(ymethods)
+ // Collect the ymethods in a map for quick lookup.
+ ymap := make(map[string]*Func, len(ymethods))
+ for _, ym := range ymethods {
+ ymap[ym.Id()] = ym
+ }
+ // All xmethods must exist in ymethods and corresponding signatures must unify.
+ for _, xm := range xmethods {
+ if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
+ return false
+ }
+ }
+ return true
+ }
+
+ // We don't have two interfaces. If we have one, make sure it's in xi.
+ if yi != nil {
+ xi = yi
+ y = x
+ }
+
+ // If we have one interface, at a minimum each of the interface methods
+ // must be implemented and thus unify with a corresponding method from
+ // the non-interface type, otherwise unification fails.
+ if xi != nil {
+ // All xi methods must exist in y and corresponding signatures must unify.
+ xmethods := xi.typeSet().methods
+ for _, xm := range xmethods {
+ obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name)
+ if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ // Unless we have exact unification, neither x nor y are interfaces now.
+ // Except for unbound type parameters (see below), x and y must be structurally
+ // equivalent to unify.
+
+ // If we get here and x or y is a type parameter, they are unbound
+ // (not recorded with the unifier).
+ // Ensure that if we have at least one type parameter, it is in x
+ // (the earlier swap checks for _recorded_ type parameters only).
+ // This ensures that the switch switches on the type parameter.
+ //
+ // TODO(gri) Factor out type parameter handling from the switch.
+ if isTypeParam(y) {
+ if traceInference {
+ u.tracef("%s ≡ %s\t// swap", y, x)
+ }
+ x, y = y, x
+ }
+
+ // Type elements (array, slice, etc. elements) use emode for unification.
+ // Element types must match exactly if the types are used in an assignment.
+ emode := mode
+ if mode&assign != 0 {
+ emode |= exact
+ }
+
+ switch x := x.(type) {
+ case *Basic:
+ // Basic types are singletons except for the rune and byte
+ // aliases, thus we cannot solely rely on the x == y check
+ // above. See also comment in TypeName.IsAlias.
+ if y, ok := y.(*Basic); ok {
+ return x.kind == y.kind
+ }
+
+ case *Array:
+ // Two array types unify if they have the same array length
+ // and their element types unify.
+ if y, ok := y.(*Array); ok {
+ // If one or both array lengths are unknown (< 0) due to some error,
+ // assume they are the same to avoid spurious follow-on errors.
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && u.nify(x.elem, y.elem, emode, p)
+ }
+
+ case *Slice:
+ // Two slice types unify if their element types unify.
+ if y, ok := y.(*Slice); ok {
+ return u.nify(x.elem, y.elem, emode, p)
+ }
+
+ case *Struct:
+ // Two struct types unify if they have the same sequence of fields,
+ // and if corresponding fields have the same names, their (field) types unify,
+ // and they have identical tags. Two embedded fields are considered to have the same
+ // name. Lower-case field names from different packages are always different.
+ if y, ok := y.(*Struct); ok {
+ if x.NumFields() == y.NumFields() {
+ for i, f := range x.fields {
+ g := y.fields[i]
+ if f.embedded != g.embedded ||
+ x.Tag(i) != y.Tag(i) ||
+ !f.sameId(g.pkg, g.name) ||
+ !u.nify(f.typ, g.typ, emode, p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Pointer:
+ // Two pointer types unify if their base types unify.
+ if y, ok := y.(*Pointer); ok {
+ return u.nify(x.base, y.base, emode, p)
+ }
+
+ case *Tuple:
+ // Two tuples types unify if they have the same number of elements
+ // and the types of corresponding elements unify.
+ if y, ok := y.(*Tuple); ok {
+ if x.Len() == y.Len() {
+ if x != nil {
+ for i, v := range x.vars {
+ w := y.vars[i]
+ if !u.nify(v.typ, w.typ, mode, p) {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ }
+
+ case *Signature:
+ // Two function types unify if they have the same number of parameters
+ // and result values, corresponding parameter and result types unify,
+ // and either both functions are variadic or neither is.
+ // Parameter and result names are not required to match.
+ // TODO(gri) handle type parameters or document why we can ignore them.
+ if y, ok := y.(*Signature); ok {
+ return x.variadic == y.variadic &&
+ u.nify(x.params, y.params, emode, p) &&
+ u.nify(x.results, y.results, emode, p)
+ }
+
+ case *Interface:
+ assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch
+
+ // Two interface types unify if they have the same set of methods with
+ // the same names, and corresponding function types unify.
+ // Lower-case method names from different packages are always different.
+ // The order of the methods is irrelevant.
+ if y, ok := y.(*Interface); ok {
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
+ if !xset.terms.equal(yset.terms) {
+ return false
+ }
+ a := xset.methods
+ b := yset.methods
+ if len(a) == len(b) {
+ // Interface types are the only types where cycles can occur
+ // that are not "terminated" via named types; and such cycles
+ // can only be created via method parameter types that are
+ // anonymous interfaces (directly or indirectly) embedding
+ // the current interface. Example:
+ //
+ // type T interface {
+ // m() interface{T}
+ // }
+ //
+ // If two such (differently named) interfaces are compared,
+ // endless recursion occurs if the cycle is not detected.
+ //
+ // If x and y were compared before, they must be equal
+ // (if they were not, the recursion would have stopped);
+ // search the ifacePair stack for the same pair.
+ //
+ // This is a quadratic algorithm, but in practice these stacks
+ // are extremely short (bounded by the nesting depth of interface
+ // type declarations that recur via parameter types, an extremely
+ // rare occurrence). An alternative implementation might use a
+ // "visited" map, but that is probably less efficient overall.
+ q := &ifacePair{x, y, p}
+ for p != nil {
+ if p.identical(q) {
+ return true // same pair was compared before
+ }
+ p = p.prev
+ }
+ if debug {
+ assertSortedMethods(a)
+ assertSortedMethods(b)
+ }
+ for i, f := range a {
+ g := b[i]
+ if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Map:
+ // Two map types unify if their key and value types unify.
+ if y, ok := y.(*Map); ok {
+ return u.nify(x.key, y.key, emode, p) && u.nify(x.elem, y.elem, emode, p)
+ }
+
+ case *Chan:
+ // Two channel types unify if their value types unify
+ // and if they have the same direction.
+ // The channel direction is ignored for inexact unification.
+ if y, ok := y.(*Chan); ok {
+ return (mode&exact == 0 || x.dir == y.dir) && u.nify(x.elem, y.elem, emode, p)
+ }
+
+ case *Named:
+ // Two named types unify if their type names originate in the same type declaration.
+ // If they are instantiated, their type argument lists must unify.
+ if y := asNamed(y); y != nil {
+ // Check type arguments before origins so they unify
+ // even if the origins don't match; for better error
+ // messages (see go.dev/issue/53692).
+ xargs := x.TypeArgs().list()
+ yargs := y.TypeArgs().list()
+ if len(xargs) != len(yargs) {
+ return false
+ }
+ for i, xarg := range xargs {
+ if !u.nify(xarg, yargs[i], mode, p) {
+ return false
+ }
+ }
+ return identicalOrigin(x, y)
+ }
+
+ case *TypeParam:
+ // x must be an unbound type parameter (see comment above).
+ if debug {
+ assert(u.asTypeParam(x) == nil)
+ }
+ // By definition, a valid type argument must be in the type set of
+ // the respective type constraint. Therefore, the type argument's
+ // underlying type must be in the set of underlying types of that
+ // constraint. If there is a single such underlying type, it's the
+ // constraint's core type. It must match the type argument's under-
+ // lying type, irrespective of whether the actual type argument,
+ // which may be a defined type, is actually in the type set (that
+ // will be determined at instantiation time).
+ // Thus, if we have the core type of an unbound type parameter,
+ // we know the structure of the possible types satisfying such
+ // parameters. Use that core type for further unification
+ // (see go.dev/issue/50755 for a test case).
+ if enableCoreTypeUnification {
+ // Because the core type is always an underlying type,
+ // unification will take care of matching against a
+ // defined or literal type automatically.
+ // If y is also an unbound type parameter, we will end
+ // up here again with x and y swapped, so we don't
+ // need to take care of that case separately.
+ if cx := coreType(x); cx != nil {
+ if traceInference {
+ u.tracef("core %s ≡ %s", x, y)
+ }
+ // If y is a defined type, it may not match against cx which
+ // is an underlying type (incl. int, string, etc.). Use assign
+ // mode here so that the unifier automatically takes under(y)
+ // if necessary.
+ return u.nify(cx, y, assign, p)
+ }
+ }
+ // x != y and there's nothing to do
+
+ case nil:
+ // avoid a crash in case of nil type
+
+ default:
+ panic(sprintf(nil, true, "u.nify(%s, %s, %d)", x, y, mode))
+ }
+
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
new file mode 100644
index 0000000..1bf4353
--- /dev/null
+++ b/src/cmd/compile/internal/types2/union.go
@@ -0,0 +1,199 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ . "internal/types/errors"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms embedded in an interface.
+type Union struct {
+ terms []*Term // list of syntactical terms (not a canonicalized termlist)
+}
+
+// NewUnion returns a new Union type with the given terms.
+// It is an error to create an empty union; they are syntactically not possible.
+func NewUnion(terms []*Term) *Union {
+ if len(terms) == 0 {
+ panic("empty union")
+ }
+ return &Union{terms}
+}
+
+func (u *Union) Len() int { return len(u.terms) }
+func (u *Union) Term(i int) *Term { return u.terms[i] }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
+// A Term represents a term in a Union.
+type Term term
+
+// NewTerm returns a new union term.
+func NewTerm(tilde bool, typ Type) *Term { return &Term{tilde, typ} }
+
+func (t *Term) Tilde() bool { return t.tilde }
+func (t *Term) Type() Type { return t.typ }
+func (t *Term) String() string { return (*term)(t).String() }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// Avoid excessive type-checking times due to quadratic termlist operations.
+const maxTermCount = 100
+
+// parseUnion parses uexpr as a union of expressions.
+// The result is a Union type, or Typ[Invalid] for some errors.
+func parseUnion(check *Checker, uexpr syntax.Expr) Type {
+ blist, tlist := flattenUnion(nil, uexpr)
+ assert(len(blist) == len(tlist)-1)
+
+ var terms []*Term
+
+ var u Type
+ for i, x := range tlist {
+ term := parseTilde(check, x)
+ if len(tlist) == 1 && !term.tilde {
+ // Single type. Ok to return early because all relevant
+ // checks have been performed in parseTilde (no need to
+ // run through term validity check below).
+ return term.typ // typ already recorded through check.typ in parseTilde
+ }
+ if len(terms) >= maxTermCount {
+ if isValid(u) {
+ check.errorf(x, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ u = Typ[Invalid]
+ }
+ } else {
+ terms = append(terms, term)
+ u = &Union{terms}
+ }
+
+ if i > 0 {
+ check.recordTypeAndValue(blist[i-1], typexpr, u, nil)
+ }
+ }
+
+ if !isValid(u) {
+ return u
+ }
+
+ // Check validity of terms.
+ // Do this check later because it requires types to be set up.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range terms {
+ if !isValid(t.typ) {
+ continue
+ }
+
+ u := under(t.typ)
+ f, _ := u.(*Interface)
+ if t.tilde {
+ if f != nil {
+ check.errorf(tlist[i], InvalidUnion, "invalid use of ~ (%s is an interface)", t.typ)
+ continue // don't report another error for t
+ }
+
+ if !Identical(u, t.typ) {
+ check.errorf(tlist[i], InvalidUnion, "invalid use of ~ (underlying type of %s is %s)", t.typ, u)
+ continue
+ }
+ }
+
+ // Stand-alone embedded interfaces are ok and are handled by the single-type case
+ // in the beginning. Embedded interfaces with tilde are excluded above. If we reach
+ // here, we must have at least two terms in the syntactic term list (but not necessarily
+ // in the term list of the union's type set).
+ if f != nil {
+ tset := f.typeSet()
+ switch {
+ case tset.NumMethods() != 0:
+ check.errorf(tlist[i], InvalidUnion, "cannot use %s in union (%s contains methods)", t, t)
+ case t.typ == universeComparable.Type():
+ check.error(tlist[i], InvalidUnion, "cannot use comparable in union")
+ case tset.comparable:
+ check.errorf(tlist[i], InvalidUnion, "cannot use %s in union (%s embeds comparable)", t, t)
+ }
+ continue // terms with interface types are not subject to the no-overlap rule
+ }
+
+ // Report overlapping (non-disjoint) terms such as
+ // a|a, a|~a, ~a|~a, and ~a|A (where under(A) == a).
+ if j := overlappingTerm(terms[:i], t); j >= 0 {
+ check.softErrorf(tlist[i], InvalidUnion, "overlapping terms %s and %s", t, terms[j])
+ }
+ }
+ }).describef(uexpr, "check term validity %s", uexpr)
+
+ return u
+}
+
+func parseTilde(check *Checker, tx syntax.Expr) *Term {
+ x := tx
+ var tilde bool
+ if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde {
+ x = op.X
+ tilde = true
+ }
+ typ := check.typ(x)
+ // Embedding stand-alone type parameters is not permitted (go.dev/issue/47127).
+ // We don't need this restriction anymore if we make the underlying type of a type
+ // parameter its constraint interface: if we embed a lone type parameter, we will
+ // simply use its underlying type (like we do for other named, embedded interfaces),
+ // and since the underlying type is an interface the embedding is well defined.
+ if isTypeParam(typ) {
+ if tilde {
+ check.errorf(x, MisplacedTypeParam, "type in term %s cannot be a type parameter", tx)
+ } else {
+ check.error(x, MisplacedTypeParam, "term cannot be a type parameter")
+ }
+ typ = Typ[Invalid]
+ }
+ term := NewTerm(tilde, typ)
+ if tilde {
+ check.recordTypeAndValue(tx, typexpr, &Union{[]*Term{term}}, nil)
+ }
+ return term
+}
+
+// overlappingTerm reports the index of the term x in terms which is
+// overlapping (not disjoint) from y. The result is < 0 if there is no
+// such term. The type of term y must not be an interface, and terms
+// with an interface type are ignored in the terms list.
+func overlappingTerm(terms []*Term, y *Term) int {
+ assert(!IsInterface(y.typ))
+ for i, x := range terms {
+ if IsInterface(x.typ) {
+ continue
+ }
+ // disjoint requires non-nil, non-top arguments,
+ // and non-interface types as term types.
+ if debug {
+ if x == nil || x.typ == nil || y == nil || y.typ == nil {
+ panic("empty or top union term")
+ }
+ }
+ if !(*term)(x).disjoint((*term)(y)) {
+ return i
+ }
+ }
+ return -1
+}
+
+// flattenUnion walks a union type expression of the form A | B | C | ...,
+// extracting both the binary exprs (blist) and leaf types (tlist).
+func flattenUnion(list []syntax.Expr, x syntax.Expr) (blist, tlist []syntax.Expr) {
+ if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
+ blist, tlist = flattenUnion(list, o.X)
+ blist = append(blist, o)
+ x = o.Y
+ }
+ return blist, append(tlist, x)
+}
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
new file mode 100644
index 0000000..c8be81b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -0,0 +1,288 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file sets up the universe scope and the unsafe package.
+
+package types2
+
+import (
+ "go/constant"
+ "strings"
+)
+
+// The Universe scope contains all predeclared objects of Go.
+// It is the outermost scope of any chain of nested scopes.
+var Universe *Scope
+
+// The Unsafe package is the package returned by an importer
+// for the import path "unsafe".
+var Unsafe *Package
+
+var (
+ universeIota Object
+ universeByte Type // uint8 alias, but has name "byte"
+ universeRune Type // int32 alias, but has name "rune"
+ universeAny Object
+ universeError Type
+ universeComparable Object
+)
+
+// Typ contains the predeclared *Basic types indexed by their
+// corresponding BasicKind.
+//
+// The *Basic type for Typ[Byte] will have the name "uint8".
+// Use Universe.Lookup("byte").Type() to obtain the specific
+// alias basic type named "byte" (and analogous for "rune").
+var Typ = [...]*Basic{
+ Invalid: {Invalid, 0, "invalid type"},
+
+ Bool: {Bool, IsBoolean, "bool"},
+ Int: {Int, IsInteger, "int"},
+ Int8: {Int8, IsInteger, "int8"},
+ Int16: {Int16, IsInteger, "int16"},
+ Int32: {Int32, IsInteger, "int32"},
+ Int64: {Int64, IsInteger, "int64"},
+ Uint: {Uint, IsInteger | IsUnsigned, "uint"},
+ Uint8: {Uint8, IsInteger | IsUnsigned, "uint8"},
+ Uint16: {Uint16, IsInteger | IsUnsigned, "uint16"},
+ Uint32: {Uint32, IsInteger | IsUnsigned, "uint32"},
+ Uint64: {Uint64, IsInteger | IsUnsigned, "uint64"},
+ Uintptr: {Uintptr, IsInteger | IsUnsigned, "uintptr"},
+ Float32: {Float32, IsFloat, "float32"},
+ Float64: {Float64, IsFloat, "float64"},
+ Complex64: {Complex64, IsComplex, "complex64"},
+ Complex128: {Complex128, IsComplex, "complex128"},
+ String: {String, IsString, "string"},
+ UnsafePointer: {UnsafePointer, 0, "Pointer"},
+
+ UntypedBool: {UntypedBool, IsBoolean | IsUntyped, "untyped bool"},
+ UntypedInt: {UntypedInt, IsInteger | IsUntyped, "untyped int"},
+ UntypedRune: {UntypedRune, IsInteger | IsUntyped, "untyped rune"},
+ UntypedFloat: {UntypedFloat, IsFloat | IsUntyped, "untyped float"},
+ UntypedComplex: {UntypedComplex, IsComplex | IsUntyped, "untyped complex"},
+ UntypedString: {UntypedString, IsString | IsUntyped, "untyped string"},
+ UntypedNil: {UntypedNil, IsUntyped, "untyped nil"},
+}
+
+var aliases = [...]*Basic{
+ {Byte, IsInteger | IsUnsigned, "byte"},
+ {Rune, IsInteger, "rune"},
+}
+
+func defPredeclaredTypes() {
+ for _, t := range Typ {
+ def(NewTypeName(nopos, nil, t.name, t))
+ }
+ for _, t := range aliases {
+ def(NewTypeName(nopos, nil, t.name, t))
+ }
+
+ // type any = interface{}
+ // Note: don't use &emptyInterface for the type of any. Using a unique
+ // pointer allows us to detect any and format it as "any" rather than
+ // interface{}, which clarifies user-facing error messages significantly.
+ def(NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet}))
+
+ // type error interface{ Error() string }
+ {
+ obj := NewTypeName(nopos, nil, "error", nil)
+ obj.setColor(black)
+ typ := NewNamed(obj, nil, nil)
+
+ // error.Error() string
+ recv := NewVar(nopos, nil, "", typ)
+ res := NewVar(nopos, nil, "", Typ[String])
+ sig := NewSignatureType(recv, nil, nil, nil, NewTuple(res), false)
+ err := NewFunc(nopos, nil, "Error", sig)
+
+ // interface{ Error() string }
+ ityp := &Interface{methods: []*Func{err}, complete: true}
+ computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset
+
+ typ.SetUnderlying(ityp)
+ def(obj)
+ }
+
+ // type comparable interface{} // marked as comparable
+ {
+ obj := NewTypeName(nopos, nil, "comparable", nil)
+ obj.setColor(black)
+ typ := NewNamed(obj, nil, nil)
+
+ // interface{} // marked as comparable
+ ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}}
+
+ typ.SetUnderlying(ityp)
+ def(obj)
+ }
+}
+
+var predeclaredConsts = [...]struct {
+ name string
+ kind BasicKind
+ val constant.Value
+}{
+ {"true", UntypedBool, constant.MakeBool(true)},
+ {"false", UntypedBool, constant.MakeBool(false)},
+ {"iota", UntypedInt, constant.MakeInt64(0)},
+}
+
+func defPredeclaredConsts() {
+ for _, c := range predeclaredConsts {
+ def(NewConst(nopos, nil, c.name, Typ[c.kind], c.val))
+ }
+}
+
+func defPredeclaredNil() {
+ def(&Nil{object{name: "nil", typ: Typ[UntypedNil], color_: black}})
+}
+
+// A builtinId is the id of a builtin function.
+type builtinId int
+
+const (
+ // universe scope
+ _Append builtinId = iota
+ _Cap
+ _Clear
+ _Close
+ _Complex
+ _Copy
+ _Delete
+ _Imag
+ _Len
+ _Make
+ _Max
+ _Min
+ _New
+ _Panic
+ _Print
+ _Println
+ _Real
+ _Recover
+
+ // package unsafe
+ _Add
+ _Alignof
+ _Offsetof
+ _Sizeof
+ _Slice
+ _SliceData
+ _String
+ _StringData
+
+ // testing support
+ _Assert
+ _Trace
+)
+
+var predeclaredFuncs = [...]struct {
+ name string
+ nargs int
+ variadic bool
+ kind exprKind
+}{
+ _Append: {"append", 1, true, expression},
+ _Cap: {"cap", 1, false, expression},
+ _Clear: {"clear", 1, false, statement},
+ _Close: {"close", 1, false, statement},
+ _Complex: {"complex", 2, false, expression},
+ _Copy: {"copy", 2, false, statement},
+ _Delete: {"delete", 2, false, statement},
+ _Imag: {"imag", 1, false, expression},
+ _Len: {"len", 1, false, expression},
+ _Make: {"make", 1, true, expression},
+ // To disable max/min, remove the next two lines.
+ _Max: {"max", 1, true, expression},
+ _Min: {"min", 1, true, expression},
+ _New: {"new", 1, false, expression},
+ _Panic: {"panic", 1, false, statement},
+ _Print: {"print", 0, true, statement},
+ _Println: {"println", 0, true, statement},
+ _Real: {"real", 1, false, expression},
+ _Recover: {"recover", 0, false, statement},
+
+ _Add: {"Add", 2, false, expression},
+ _Alignof: {"Alignof", 1, false, expression},
+ _Offsetof: {"Offsetof", 1, false, expression},
+ _Sizeof: {"Sizeof", 1, false, expression},
+ _Slice: {"Slice", 2, false, expression},
+ _SliceData: {"SliceData", 1, false, expression},
+ _String: {"String", 2, false, expression},
+ _StringData: {"StringData", 1, false, expression},
+
+ _Assert: {"assert", 1, false, statement},
+ _Trace: {"trace", 0, true, statement},
+}
+
+func defPredeclaredFuncs() {
+ for i := range predeclaredFuncs {
+ id := builtinId(i)
+ if id == _Assert || id == _Trace {
+ continue // only define these in testing environment
+ }
+ def(newBuiltin(id))
+ }
+}
+
+// DefPredeclaredTestFuncs defines the assert and trace built-ins.
+// These built-ins are intended for debugging and testing of this
+// package only.
+func DefPredeclaredTestFuncs() {
+ if Universe.Lookup("assert") != nil {
+ return // already defined
+ }
+ def(newBuiltin(_Assert))
+ def(newBuiltin(_Trace))
+}
+
+func init() {
+ Universe = NewScope(nil, nopos, nopos, "universe")
+ Unsafe = NewPackage("unsafe", "unsafe")
+ Unsafe.complete = true
+
+ defPredeclaredTypes()
+ defPredeclaredConsts()
+ defPredeclaredNil()
+ defPredeclaredFuncs()
+
+ universeIota = Universe.Lookup("iota")
+ universeByte = Universe.Lookup("byte").Type()
+ universeRune = Universe.Lookup("rune").Type()
+ universeAny = Universe.Lookup("any")
+ universeError = Universe.Lookup("error").Type()
+ universeComparable = Universe.Lookup("comparable")
+}
+
+// Objects with names containing blanks are internal and not entered into
+// a scope. Objects with exported names are inserted in the unsafe package
+// scope; other objects are inserted in the universe scope.
+func def(obj Object) {
+ assert(obj.color() == black)
+ name := obj.Name()
+ if strings.Contains(name, " ") {
+ return // nothing to do
+ }
+ // fix Obj link for named types
+ if typ := asNamed(obj.Type()); typ != nil {
+ typ.obj = obj.(*TypeName)
+ }
+ // exported identifiers go into package unsafe
+ scope := Universe
+ if obj.Exported() {
+ scope = Unsafe.scope
+ // set Pkg field
+ switch obj := obj.(type) {
+ case *TypeName:
+ obj.pkg = Unsafe
+ case *Builtin:
+ obj.pkg = Unsafe
+ default:
+ unreachable()
+ }
+ }
+ if scope.Insert(obj) != nil {
+ panic("double declaration of predeclared identifier")
+ }
+}
diff --git a/src/cmd/compile/internal/types2/util.go b/src/cmd/compile/internal/types2/util.go
new file mode 100644
index 0000000..01da1c1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/util.go
@@ -0,0 +1,22 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains various functionality that is
+// different between go/types and types2. Factoring
+// out this code allows more of the rest of the code
+// to be shared.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// cmpPos compares the positions p and q and returns a result r as follows:
+//
+// r < 0: p is before q
+// r == 0: p and q are the same position (but may not be identical)
+// r > 0: p is after q
+//
+// If p and q are in different files, p is before q if the filename
+// of p sorts lexicographically before the filename of q.
+func cmpPos(p, q syntax.Pos) int { return p.Cmp(q) }
diff --git a/src/cmd/compile/internal/types2/util_test.go b/src/cmd/compile/internal/types2/util_test.go
new file mode 100644
index 0000000..70058aa
--- /dev/null
+++ b/src/cmd/compile/internal/types2/util_test.go
@@ -0,0 +1,17 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file exports various functionality of util.go
+// so that it can be used in (package-external) tests.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+)
+
+func CmpPos(p, q syntax.Pos) int { return cmpPos(p, q) }
+
+func ScopeComment(s *Scope) string { return s.comment }
+func ObjectScopePos(obj Object) syntax.Pos { return obj.scopePos() }
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
new file mode 100644
index 0000000..a880a3d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -0,0 +1,256 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// validType verifies that the given type does not "expand" indefinitely
+// producing a cycle in the type graph.
+// (Cycles involving alias types, as in "type A = [10]A" are detected
+// earlier, via the objDecl cycle detection mechanism.)
+func (check *Checker) validType(typ *Named) {
+ check.validType0(typ, nil, nil)
+}
+
+// validType0 checks if the given type is valid. If typ is a type parameter
+// its value is looked up in the type argument list of the instantiated
+// (enclosing) type, if it exists. Otherwise the type parameter must be from
+// an enclosing function and can be ignored.
+// The nest list describes the stack (the "nest in memory") of types which
+// contain (or embed in the case of interfaces) other types. For instance, a
+// struct named S which contains a field of named type F contains (the memory
+// of) F in S, leading to the nest S->F. If a type appears in its own nest
+// (say S->F->S) we have an invalid recursive type. The path list is the full
+// path of named types in a cycle, it is only needed for error reporting.
+func (check *Checker) validType0(typ Type, nest, path []*Named) bool {
+ switch t := Unalias(typ).(type) {
+ case nil:
+ // We should never see a nil type but be conservative and panic
+ // only in debug mode.
+ if debug {
+ panic("validType0(nil)")
+ }
+
+ case *Array:
+ return check.validType0(t.elem, nest, path)
+
+ case *Struct:
+ for _, f := range t.fields {
+ if !check.validType0(f.typ, nest, path) {
+ return false
+ }
+ }
+
+ case *Union:
+ for _, t := range t.terms {
+ if !check.validType0(t.typ, nest, path) {
+ return false
+ }
+ }
+
+ case *Interface:
+ for _, etyp := range t.embeddeds {
+ if !check.validType0(etyp, nest, path) {
+ return false
+ }
+ }
+
+ case *Named:
+ // Exit early if we already know t is valid.
+ // This is purely an optimization but it prevents excessive computation
+ // times in pathological cases such as testdata/fixedbugs/issue6977.go.
+ // (Note: The valids map could also be allocated locally, once for each
+ // validType call.)
+ if check.valids.lookup(t) != nil {
+ break
+ }
+
+ // Don't report a 2nd error if we already know the type is invalid
+ // (e.g., if a cycle was detected earlier, via under).
+ // Note: ensure that t.orig is fully resolved by calling Underlying().
+ if !isValid(t.Underlying()) {
+ return false
+ }
+
+ // If the current type t is also found in nest, (the memory of) t is
+ // embedded in itself, indicating an invalid recursive type.
+ for _, e := range nest {
+ if Identical(e, t) {
+ // We have a cycle. If t != t.Origin() then t is an instance of
+ // the generic type t.Origin(). Because t is in the nest, t must
+ // occur within the definition (RHS) of the generic type t.Origin(),
+ // directly or indirectly, after expansion of the RHS.
+ // Therefore t.Origin() must be invalid, no matter how it is
+ // instantiated since the instantiation t of t.Origin() happens
+ // inside t.Origin()'s RHS and thus is always the same and always
+ // present.
+ // Therefore we can mark the underlying of both t and t.Origin()
+ // as invalid. If t is not an instance of a generic type, t and
+ // t.Origin() are the same.
+ // Furthermore, because we check all types in a package for validity
+ // before type checking is complete, any exported type that is invalid
+ // will have an invalid underlying type and we can't reach here with
+ // such a type (invalid types are excluded above).
+ // Thus, if we reach here with a type t, both t and t.Origin() (if
+ // different in the first place) must be from the current package;
+ // they cannot have been imported.
+ // Therefore it is safe to change their underlying types; there is
+ // no chance for a race condition (the types of the current package
+ // are not yet available to other goroutines).
+ assert(t.obj.pkg == check.pkg)
+ assert(t.Origin().obj.pkg == check.pkg)
+ t.underlying = Typ[Invalid]
+ t.Origin().underlying = Typ[Invalid]
+
+ // Find the starting point of the cycle and report it.
+ // Because each type in nest must also appear in path (see invariant below),
+ // type t must be in path since it was found in nest. But not every type in path
+ // is in nest. Specifically t may appear in path with an earlier index than the
+ // index of t in nest. Search again.
+ for start, p := range path {
+ if Identical(p, t) {
+ check.cycleError(makeObjList(path[start:]))
+ return false
+ }
+ }
+ panic("cycle start not found")
+ }
+ }
+
+ // No cycle was found. Check the RHS of t.
+ // Every type added to nest is also added to path; thus every type that is in nest
+ // must also be in path (invariant). But not every type in path is in nest, since
+ // nest may be pruned (see below, *TypeParam case).
+ if !check.validType0(t.Origin().fromRHS, append(nest, t), append(path, t)) {
+ return false
+ }
+
+ check.valids.add(t) // t is valid
+
+ case *TypeParam:
+ // A type parameter stands for the type (argument) it was instantiated with.
+ // Check the corresponding type argument for validity if we are in an
+ // instantiated type.
+ if len(nest) > 0 {
+ inst := nest[len(nest)-1] // the type instance
+ // Find the corresponding type argument for the type parameter
+ // and proceed with checking that type argument.
+ for i, tparam := range inst.TypeParams().list() {
+ // The type parameter and type argument lists should
+ // match in length but be careful in case of errors.
+ if t == tparam && i < inst.TypeArgs().Len() {
+ targ := inst.TypeArgs().At(i)
+ // The type argument must be valid in the enclosing
+ // type (where inst was instantiated), hence we must
+ // check targ's validity in the type nest excluding
+ // the current (instantiated) type (see the example
+ // at the end of this file).
+ // For error reporting we keep the full path.
+ return check.validType0(targ, nest[:len(nest)-1], path)
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// makeObjList returns the list of type name objects for the given
+// list of named types.
+func makeObjList(tlist []*Named) []Object {
+ olist := make([]Object, len(tlist))
+ for i, t := range tlist {
+ olist[i] = t.obj
+ }
+ return olist
+}
+
+// Here is an example illustrating why we need to exclude the
+// instantiated type from nest when evaluating the validity of
+// a type parameter. Given the declarations
+//
+// var _ A[A[string]]
+//
+// type A[P any] struct { _ B[P] }
+// type B[P any] struct { _ P }
+//
+// we want to determine if the type A[A[string]] is valid.
+// We start evaluating A[A[string]] outside any type nest:
+//
+// A[A[string]]
+// nest =
+// path =
+//
+// The RHS of A is now evaluated in the A[A[string]] nest:
+//
+// struct{_ B[P₁]}
+// nest = A[A[string]]
+// path = A[A[string]]
+//
+// The struct has a single field of type B[P₁] with which
+// we continue:
+//
+// B[P₁]
+// nest = A[A[string]]
+// path = A[A[string]]
+//
+// struct{_ P₂}
+// nest = A[A[string]]->B[P]
+// path = A[A[string]]->B[P]
+//
+// Eventually we reach the type parameter P of type B (P₂):
+//
+// P₂
+// nest = A[A[string]]->B[P]
+// path = A[A[string]]->B[P]
+//
+// The type argument for P of B is the type parameter P of A (P₁).
+// It must be evaluated in the type nest that existed when B was
+// instantiated:
+//
+// P₁
+// nest = A[A[string]] <== type nest at B's instantiation time
+// path = A[A[string]]->B[P]
+//
+// If we'd use the current nest it would correspond to the path
+// which will be wrong as we will see shortly. P's type argument
+// is A[string], which again must be evaluated in the type nest
+// that existed when A was instantiated with A[string]. That type
+// nest is empty:
+//
+// A[string]
+// nest = <== type nest at A's instantiation time
+// path = A[A[string]]->B[P]
+//
+// Evaluation then proceeds as before for A[string]:
+//
+// struct{_ B[P₁]}
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string]
+//
+// Now we reach B[P] again. If we had not adjusted nest, it would
+// correspond to path, and we would find B[P] in nest, indicating
+// a cycle, which would clearly be wrong since there's no cycle in
+// A[string]:
+//
+// B[P₁]
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string] <== path contains B[P]!
+//
+// But because we use the correct type nest, evaluation proceeds without
+// errors and we get the evaluation sequence:
+//
+// struct{_ P₂}
+// nest = A[string]->B[P]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// P₂
+// nest = A[string]->B[P]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// P₁
+// nest = A[string]
+// path = A[A[string]]->B[P]->A[string]->B[P]
+// string
+// nest =
+// path = A[A[string]]->B[P]->A[string]->B[P]
+//
+// At this point we're done and A[A[string]] and is valid.
diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go
new file mode 100644
index 0000000..5aa3c80
--- /dev/null
+++ b/src/cmd/compile/internal/types2/version.go
@@ -0,0 +1,126 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/version"
+ "internal/goversion"
+ "strings"
+)
+
+// A goVersion is a Go language version string of the form "go1.%d"
+// where d is the minor version number. goVersion strings don't
+// contain release numbers ("go1.20.1" is not a valid goVersion).
+type goVersion string
+
+// asGoVersion returns v as a goVersion (e.g., "go1.20.1" becomes "go1.20").
+// If v is not a valid Go version, the result is the empty string.
+func asGoVersion(v string) goVersion {
+ return goVersion(version.Lang(v))
+}
+
+// isValid reports whether v is a valid Go version.
+func (v goVersion) isValid() bool {
+ return v != ""
+}
+
+// cmp returns -1, 0, or +1 depending on whether x < y, x == y, or x > y,
+// interpreted as Go versions.
+func (x goVersion) cmp(y goVersion) int {
+ return version.Compare(string(x), string(y))
+}
+
+var (
+ // Go versions that introduced language changes
+ go1_9 = asGoVersion("go1.9")
+ go1_13 = asGoVersion("go1.13")
+ go1_14 = asGoVersion("go1.14")
+ go1_17 = asGoVersion("go1.17")
+ go1_18 = asGoVersion("go1.18")
+ go1_20 = asGoVersion("go1.20")
+ go1_21 = asGoVersion("go1.21")
+ go1_22 = asGoVersion("go1.22")
+
+ // current (deployed) Go version
+ go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version))
+)
+
+// langCompat reports an error if the representation of a numeric
+// literal is not compatible with the current language version.
+func (check *Checker) langCompat(lit *syntax.BasicLit) {
+ s := lit.Value
+ if len(s) <= 2 || check.allowVersion(check.pkg, lit, go1_13) {
+ return
+ }
+ // len(s) > 2
+ if strings.Contains(s, "_") {
+ check.versionErrorf(lit, go1_13, "underscores in numeric literals")
+ return
+ }
+ if s[0] != '0' {
+ return
+ }
+ radix := s[1]
+ if radix == 'b' || radix == 'B' {
+ check.versionErrorf(lit, go1_13, "binary literals")
+ return
+ }
+ if radix == 'o' || radix == 'O' {
+ check.versionErrorf(lit, go1_13, "0o/0O-style octal literals")
+ return
+ }
+ if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
+ check.versionErrorf(lit, go1_13, "hexadecimal floating-point literals")
+ }
+}
+
+// allowVersion reports whether the given package is allowed to use version v.
+func (check *Checker) allowVersion(pkg *Package, at poser, v goVersion) bool {
+ // We assume that imported packages have all been checked,
+ // so we only have to check for the local package.
+ if pkg != check.pkg {
+ return true
+ }
+
+ // If no explicit file version is specified,
+ // fileVersion corresponds to the module version.
+ var fileVersion goVersion
+ if pos := at.Pos(); pos.IsKnown() {
+ // We need version.Lang below because file versions
+ // can be (unaltered) Config.GoVersion strings that
+ // may contain dot-release information.
+ fileVersion = asGoVersion(check.versions[base(pos)])
+ }
+ return !fileVersion.isValid() || fileVersion.cmp(v) >= 0
+}
+
+// verifyVersionf is like allowVersion but also accepts a format string and arguments
+// which are used to report a version error if allowVersion returns false. It uses the
+// current package.
+func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args ...interface{}) bool {
+ if !check.allowVersion(check.pkg, at, v) {
+ check.versionErrorf(at, v, format, args...)
+ return false
+ }
+ return true
+}
+
+// base finds the underlying PosBase of the source file containing pos,
+// skipping over intermediate PosBase layers created by //line directives.
+// The positions must be known.
+func base(pos syntax.Pos) *syntax.PosBase {
+ assert(pos.IsKnown())
+ b := pos.Base()
+ for {
+ bb := b.Pos().Base()
+ if bb == nil || bb == b {
+ break
+ }
+ b = bb
+ }
+ return b
+}
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
new file mode 100644
index 0000000..fc3b858
--- /dev/null
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -0,0 +1,733 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "go/constant"
+ "internal/abi"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node.
+func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ var left, right ir.Node
+ switch n.Op() {
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ left, right = n.X, n.Y
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ left, right = n.X, n.Y
+ }
+
+ // Recognize m[k] = append(m[k], ...) so we can reuse
+ // the mapassign call.
+ var mapAppend *ir.CallExpr
+ if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
+ left := left.(*ir.IndexExpr)
+ mapAppend = right.(*ir.CallExpr)
+ if !ir.SameSafeExpr(left, mapAppend.Args[0]) {
+ base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0])
+ }
+ }
+
+ left = walkExpr(left, init)
+ left = safeExpr(left, init)
+ if mapAppend != nil {
+ mapAppend.Args[0] = left
+ }
+
+ if n.Op() == ir.OASOP {
+ // Rewrite x op= y into x = x op y.
+ n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right)))
+ } else {
+ n.(*ir.AssignStmt).X = left
+ }
+ as := n.(*ir.AssignStmt)
+
+ if oaslit(as, init) {
+ return ir.NewBlockStmt(as.Pos(), nil)
+ }
+
+ if as.Y == nil {
+ // TODO(austin): Check all "implicit zeroing"
+ return as
+ }
+
+ if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) {
+ return as
+ }
+
+ switch as.Y.Op() {
+ default:
+ as.Y = walkExpr(as.Y, init)
+
+ case ir.ORECV:
+ // x = <-c; as.Left is x, as.Right.Left is c.
+ // order.stmt made sure x is addressable.
+ recv := as.Y.(*ir.UnaryExpr)
+ recv.X = walkExpr(recv.X, init)
+
+ n1 := typecheck.NodAddr(as.X)
+ r := recv.X // the channel
+ return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
+
+ case ir.OAPPEND:
+ // x = append(...)
+ call := as.Y.(*ir.CallExpr)
+ if call.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
+ }
+ var r ir.Node
+ switch {
+ case isAppendOfMake(call):
+ // x = append(y, make([]T, y)...)
+ r = extendSlice(call, init)
+ case call.IsDDD:
+ r = appendSlice(call, init) // also works for append(slice, string).
+ default:
+ r = walkAppend(call, init, as)
+ }
+ as.Y = r
+ if r.Op() == ir.OAPPEND {
+ r := r.(*ir.CallExpr)
+ // Left in place for back end.
+ // Do not add a new write barrier.
+ // Set up address of type for back end.
+ r.Fun = reflectdata.AppendElemRType(base.Pos, r)
+ return as
+ }
+ // Otherwise, lowered for race detector.
+ // Treat as ordinary assignment.
+ }
+
+ if as.X != nil && as.Y != nil {
+ return convas(as, init)
+ }
+ return as
+}
+
+// walkAssignDotType walks an OAS2DOTTYPE node.
+func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node {
+ walkExprListSafe(n.Lhs, init)
+ n.Rhs[0] = walkExpr(n.Rhs[0], init)
+ return n
+}
+
+// walkAssignFunc walks an OAS2FUNC node.
+func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ r := n.Rhs[0]
+ walkExprListSafe(n.Lhs, init)
+ r = walkExpr(r, init)
+
+ if ir.IsIntrinsicCall(r.(*ir.CallExpr)) {
+ n.Rhs = []ir.Node{r}
+ return n
+ }
+ init.Append(r)
+
+ ll := ascompatet(n.Lhs, r.Type())
+ return ir.NewBlockStmt(src.NoXPos, ll)
+}
+
+// walkAssignList walks an OAS2 node.
+func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+ return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs))
+}
+
+// walkAssignMapRead walks an OAS2MAPR node.
+func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ r := n.Rhs[0].(*ir.IndexExpr)
+ walkExprListSafe(n.Lhs, init)
+ r.X = walkExpr(r.X, init)
+ r.Index = walkExpr(r.Index, init)
+ t := r.X.Type()
+
+ fast := mapfast(t)
+ key := mapKeyArg(fast, r, r.Index, false)
+
+ // from:
+ // a,b = m[i]
+ // to:
+ // var,b = mapaccess2*(t, m, i)
+ // a = *var
+ a := n.Lhs[0]
+
+ var call *ir.CallExpr
+ if w := t.Elem().Size(); w <= abi.ZeroValSize {
+ fn := mapfn(mapaccess2[fast], t, false)
+ call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key)
+ } else {
+ fn := mapfn("mapaccess2_fat", t, true)
+ z := reflectdata.ZeroAddr(w)
+ call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z)
+ }
+
+ // mapaccess2* returns a typed bool, but due to spec changes,
+ // the boolean result of i.(T) is now untyped so we make it the
+ // same type as the variable on the lhs.
+ if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+ call.Type().Field(1).Type = ok.Type()
+ }
+ n.Rhs = []ir.Node{call}
+ n.SetOp(ir.OAS2FUNC)
+
+ // don't generate a = *var if a is _
+ if ir.IsBlank(a) {
+ return walkExpr(typecheck.Stmt(n), init)
+ }
+
+ var_ := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t.Elem()))
+ var_.SetTypecheck(1)
+ var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+
+ n.Lhs[0] = var_
+ init.Append(walkExpr(n, init))
+
+ as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_))
+ return walkExpr(typecheck.Stmt(as), init)
+}
+
+// walkAssignRecv walks an OAS2RECV node.
+func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ r := n.Rhs[0].(*ir.UnaryExpr) // recv
+ walkExprListSafe(n.Lhs, init)
+ r.X = walkExpr(r.X, init)
+ var n1 ir.Node
+ if ir.IsBlank(n.Lhs[0]) {
+ n1 = typecheck.NodNil()
+ } else {
+ n1 = typecheck.NodAddr(n.Lhs[0])
+ }
+ fn := chanfn("chanrecv2", 2, r.X.Type())
+ ok := n.Lhs[1]
+ call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1)
+ return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call))
+}
+
+// walkReturn walks an ORETURN node.
+func walkReturn(n *ir.ReturnStmt) ir.Node {
+ fn := ir.CurFunc
+
+ fn.NumReturns++
+ if len(n.Results) == 0 {
+ return n
+ }
+
+ results := fn.Type().Results()
+ dsts := make([]ir.Node, len(results))
+ for i, v := range results {
+ // TODO(mdempsky): typecheck should have already checked the result variables.
+ dsts[i] = typecheck.AssignExpr(v.Nname.(*ir.Name))
+ }
+
+ n.Results = ascompatee(n.Op(), dsts, n.Results)
+ return n
+}
+
+// check assign type list to
+// an expression list. called in
+//
+// expr-list = func()
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
+ if len(nl) != nr.NumFields() {
+ base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
+ }
+
+ var nn ir.Nodes
+ for i, l := range nl {
+ if ir.IsBlank(l) {
+ continue
+ }
+ r := nr.Field(i)
+
+ // Order should have created autotemps of the appropriate type for
+ // us to store results into.
+ if tmp, ok := l.(*ir.Name); !ok || !tmp.AutoTemp() || !types.Identical(tmp.Type(), r.Type) {
+ base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l)
+ }
+
+ res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
+ res.Index = int64(i)
+ res.SetType(r.Type)
+ res.SetTypecheck(1)
+
+ nn.Append(ir.NewAssignStmt(base.Pos, l, res))
+ }
+ return nn
+}
+
+// check assign expression list to
+// an expression list. called in
+//
+// expr-list = expr-list
+func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
+ // cannot happen: should have been rejected during type checking
+ if len(nl) != len(nr) {
+ base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr))
+ }
+
+ var assigned ir.NameSet
+ var memWrite, deferResultWrite bool
+
+ // affected reports whether expression n could be affected by
+ // the assignments applied so far.
+ affected := func(n ir.Node) bool {
+ if deferResultWrite {
+ return true
+ }
+ return ir.Any(n, func(n ir.Node) bool {
+ if n.Op() == ir.ONAME && assigned.Has(n.(*ir.Name)) {
+ return true
+ }
+ if memWrite && readsMemory(n) {
+ return true
+ }
+ return false
+ })
+ }
+
+ // If a needed expression may be affected by an
+ // earlier assignment, make an early copy of that
+ // expression and use the copy instead.
+ var early ir.Nodes
+ save := func(np *ir.Node) {
+ if n := *np; affected(n) {
+ *np = copyExpr(n, n.Type(), &early)
+ }
+ }
+
+ var late ir.Nodes
+ for i, lorig := range nl {
+ l, r := lorig, nr[i]
+
+ // Do not generate 'x = x' during return. See issue 4014.
+ if op == ir.ORETURN && ir.SameSafeExpr(l, r) {
+ continue
+ }
+
+ // Save subexpressions needed on left side.
+ // Drill through non-dereferences.
+ for {
+ // If an expression has init statements, they must be evaluated
+ // before any of its saved sub-operands (#45706).
+ // TODO(mdempsky): Disallow init statements on lvalues.
+ init := ir.TakeInit(l)
+ walkStmtList(init)
+ early.Append(init...)
+
+ switch ll := l.(type) {
+ case *ir.IndexExpr:
+ if ll.X.Type().IsArray() {
+ save(&ll.Index)
+ l = ll.X
+ continue
+ }
+ case *ir.ParenExpr:
+ l = ll.X
+ continue
+ case *ir.SelectorExpr:
+ if ll.Op() == ir.ODOT {
+ l = ll.X
+ continue
+ }
+ }
+ break
+ }
+
+ var name *ir.Name
+ switch l.Op() {
+ default:
+ base.Fatalf("unexpected lvalue %v", l.Op())
+ case ir.ONAME:
+ name = l.(*ir.Name)
+ case ir.OINDEX, ir.OINDEXMAP:
+ l := l.(*ir.IndexExpr)
+ save(&l.X)
+ save(&l.Index)
+ case ir.ODEREF:
+ l := l.(*ir.StarExpr)
+ save(&l.X)
+ case ir.ODOTPTR:
+ l := l.(*ir.SelectorExpr)
+ save(&l.X)
+ }
+
+ // Save expression on right side.
+ save(&r)
+
+ appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late))
+
+ // Check for reasons why we may need to compute later expressions
+ // before this assignment happens.
+
+ if name == nil {
+ // Not a direct assignment to a declared variable.
+ // Conservatively assume any memory access might alias.
+ memWrite = true
+ continue
+ }
+
+ if name.Class == ir.PPARAMOUT && ir.CurFunc.HasDefer() {
+ // Assignments to a result parameter in a function with defers
+ // becomes visible early if evaluation of any later expression
+ // panics (#43835).
+ deferResultWrite = true
+ continue
+ }
+
+ if ir.IsBlank(name) {
+ // We can ignore assignments to blank or anonymous result parameters.
+ // These can't appear in expressions anyway.
+ continue
+ }
+
+ if name.Addrtaken() || !name.OnStack() {
+ // Global variable, heap escaped, or just addrtaken.
+ // Conservatively assume any memory access might alias.
+ memWrite = true
+ continue
+ }
+
+ // Local, non-addrtaken variable.
+ // Assignments can only alias with direct uses of this variable.
+ assigned.Add(name)
+ }
+
+ early.Append(late.Take()...)
+ return early
+}
+
+// readsMemory reports whether the evaluation n directly reads from
+// memory that might be written to indirectly.
+func readsMemory(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC {
+ return false
+ }
+ return n.Addrtaken() || !n.OnStack()
+
+ case ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.OBITNOT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODIV,
+ ir.ODOT,
+ ir.ODOTTYPE,
+ ir.OLITERAL,
+ ir.OLSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONEG,
+ ir.ONIL,
+ ir.OOR,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.OPLUS,
+ ir.ORSH,
+ ir.OSUB,
+ ir.OXOR:
+ return false
+ }
+
+ // Be conservative.
+ return true
+}
+
+// expand append(l1, l2...) to
+//
+// init {
+// s := l1
+// newLen := s.len + l2.len
+// // Compare as uint so growslice can panic on overflow.
+// if uint(newLen) <= uint(s.cap) {
+// s = s[:newLen]
+// } else {
+// s = growslice(s.ptr, s.len, s.cap, l2.len, T)
+// }
+// memmove(&s[s.len-l2.len], &l2[0], l2.len*sizeof(T))
+// }
+// s
+//
+// l2 is allowed to be a string.
+func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ walkAppendArgs(n, init)
+
+ l1 := n.Args[0]
+ l2 := n.Args[1]
+ l2 = cheapExpr(l2, init)
+ n.Args[1] = l2
+
+ var nodes ir.Nodes
+
+ // var s []T
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
+ nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
+
+ elemtype := s.Type().Elem()
+
+ // Decompose slice.
+ oldPtr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+ oldLen := ir.NewUnaryExpr(base.Pos, ir.OLEN, s)
+ oldCap := ir.NewUnaryExpr(base.Pos, ir.OCAP, s)
+
+ // Number of elements we are adding
+ num := ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)
+
+ // newLen := oldLen + num
+ newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ nodes.Append(ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, oldLen, num)))
+
+ // if uint(newLen) <= uint(oldCap)
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nuint := typecheck.Conv(newLen, types.Types[types.TUINT])
+ scapuint := typecheck.Conv(oldCap, types.Types[types.TUINT])
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, nuint, scapuint)
+ nif.Likely = true
+
+ // then { s = s[:newLen] }
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, newLen, nil)
+ slice.SetBounded(true)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, slice)}
+
+ // else { s = growslice(oldPtr, newLen, oldCap, num, T) }
+ call := walkGrowslice(s, nif.PtrInit(), oldPtr, newLen, oldCap, num)
+ nif.Else = []ir.Node{ir.NewAssignStmt(base.Pos, s, call)}
+
+ nodes.Append(nif)
+
+ // Index to start copying into s.
+ // idx = newLen - len(l2)
+ // We use this expression instead of oldLen because it avoids
+ // a spill/restore of oldLen.
+ // Note: this doesn't work optimally currently because
+ // the compiler optimizer undoes this arithmetic.
+ idx := ir.NewBinaryExpr(base.Pos, ir.OSUB, newLen, ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))
+
+ var ncopy ir.Node
+ if elemtype.HasPointers() {
+ // copy(s[idx:], l2)
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, idx, nil, nil)
+ slice.SetType(s.Type())
+ slice.SetBounded(true)
+
+ ir.CurFunc.SetWBPos(n.Pos())
+
+ // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+ fn := typecheck.LookupRuntime("typedslicecopy", l1.Type().Elem(), l2.Type().Elem())
+ ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+ ptr2, len2 := backingArrayPtrLen(l2)
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.AppendElemRType(base.Pos, n), ptr1, len1, ptr2, len2)
+ } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
+ // rely on runtime to instrument:
+ // copy(s[idx:], l2)
+ // l2 can be a slice or string.
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, idx, nil, nil)
+ slice.SetType(s.Type())
+ slice.SetBounded(true)
+
+ ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+ ptr2, len2 := backingArrayPtrLen(l2)
+
+ fn := typecheck.LookupRuntime("slicecopy", ptr1.Type().Elem(), ptr2.Type().Elem())
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(base.Pos, elemtype.Size()))
+ } else {
+ // memmove(&s[idx], &l2[0], len(l2)*sizeof(T))
+ ix := ir.NewIndexExpr(base.Pos, s, idx)
+ ix.SetBounded(true)
+ addr := typecheck.NodAddr(ix)
+
+ sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2)
+
+ nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes)
+ nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, elemtype.Size()))
+
+ // instantiate func memmove(to *any, frm *any, length uintptr)
+ fn := typecheck.LookupRuntime("memmove", elemtype, elemtype)
+ ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
+ }
+ ln := append(nodes, ncopy)
+
+ typecheck.Stmts(ln)
+ walkStmtList(ln)
+ init.Append(ln...)
+ return s
+}
+
+// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
+// isAppendOfMake assumes n has already been typechecked.
+func isAppendOfMake(n ir.Node) bool {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return false
+ }
+
+ if n.Typecheck() == 0 {
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+
+ if n.Op() != ir.OAPPEND {
+ return false
+ }
+ call := n.(*ir.CallExpr)
+ if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE {
+ return false
+ }
+
+ mk := call.Args[1].(*ir.MakeExpr)
+ if mk.Cap != nil {
+ return false
+ }
+
+ // y must be either an integer constant or the largest possible positive value
+ // of variable y needs to fit into a uint.
+
+ // typecheck made sure that constant arguments to make are not negative and fit into an int.
+
+ // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime.
+ y := mk.Len
+ if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
+ return false
+ }
+
+ return true
+}
+
+// extendSlice rewrites append(l1, make([]T, l2)...) to
+//
+// init {
+// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
+// } else {
+// panicmakeslicelen()
+// }
+// s := l1
+// n := len(s) + l2
+// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+// // cap is a positive int and n can become negative when len(s) + l2
+// // overflows int. Interpreting n when negative as uint makes it larger
+// // than cap(s). growslice will check the int n arg and panic if n is
+// // negative. This prevents the overflow from being undetected.
+// if uint(n) <= uint(cap(s)) {
+// s = s[:n]
+// } else {
+// s = growslice(T, s.ptr, n, s.cap, l2, T)
+// }
+// // clear the new portion of the underlying array.
+// hp := &s[len(s)-l2]
+// hn := l2 * sizeof(T)
+// memclr(hp, hn)
+// }
+// s
+//
+// if T has pointers, the final memclr can go inside the "then" branch, as
+// growslice will have done the clearing for us.
+
+func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ // isAppendOfMake made sure all possible positive values of l2 fit into a uint.
+ // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
+ // check of l2 < 0 at runtime which is generated below.
+ l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT])
+ l2 = typecheck.Expr(l2)
+ n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second().
+
+ walkAppendArgs(n, init)
+
+ l1 := n.Args[0]
+ l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs
+
+ var nodes []ir.Node
+
+ // if l2 >= 0 (likely happens), do nothing
+ nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(base.Pos, 0)), nil, nil)
+ nifneg.Likely = true
+
+ // else panicmakeslicelen()
+ nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+ nodes = append(nodes, nifneg)
+
+ // s := l1
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type())
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
+
+ elemtype := s.Type().Elem()
+
+ // n := s.len + l2
+ nn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
+
+ // if uint(n) <= uint(s.cap)
+ nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+ capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, nuint, capuint), nil, nil)
+ nif.Likely = true
+
+ // then { s = s[:n] }
+ nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil)
+ nt.SetBounded(true)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, nt)}
+
+ // else { s = growslice(s.ptr, n, s.cap, l2, T) }
+ nif.Else = []ir.Node{
+ ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
+ ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
+ nn,
+ ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
+ l2)),
+ }
+
+ nodes = append(nodes, nif)
+
+ // hp := &s[s.len - l2]
+ // TODO: &s[s.len] - hn?
+ ix := ir.NewIndexExpr(base.Pos, s, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2))
+ ix.SetBounded(true)
+ hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+
+ // hn := l2 * sizeof(elem(s))
+ hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(base.Pos, elemtype.Size())), types.Types[types.TUINTPTR])
+
+ clrname := "memclrNoHeapPointers"
+ hasPointers := elemtype.HasPointers()
+ if hasPointers {
+ clrname = "memclrHasPointers"
+ ir.CurFunc.SetWBPos(n.Pos())
+ }
+
+ var clr ir.Nodes
+ clrfn := mkcall(clrname, nil, &clr, hp, hn)
+ clr.Append(clrfn)
+ if hasPointers {
+ // growslice will have cleared the new entries, so only
+ // if growslice isn't called do we need to do the zeroing ourselves.
+ nif.Body = append(nif.Body, clr...)
+ } else {
+ nodes = append(nodes, clr...)
+ }
+
+ typecheck.Stmts(nodes)
+ walkStmtList(nodes)
+ init.Append(nodes...)
+ return s
+}
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
new file mode 100644
index 0000000..37143ba
--- /dev/null
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -0,0 +1,888 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// Rewrite append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to ssagen.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// newLen := s.len + argc
+// if uint(newLen) <= uint(s.cap) {
+// s = s[:newLen]
+// } else {
+// s = growslice(s.ptr, newLen, s.cap, argc, elemType)
+// }
+// s[s.len - argc] = a
+// s[s.len - argc + 1] = b
+// ...
+// }
+// s
+func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
+ if !ir.SameSafeExpr(dst, n.Args[0]) {
+ n.Args[0] = safeExpr(n.Args[0], init)
+ n.Args[0] = walkExpr(n.Args[0], init)
+ }
+ walkExprListSafe(n.Args[1:], init)
+
+ nsrc := n.Args[0]
+
+ // walkExprListSafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ // Using cheapExpr also makes sure that the evaluation
+ // of all arguments (and especially any panics) happen
+ // before we begin to modify the slice in a visible way.
+ ls := n.Args[1:]
+ for i, n := range ls {
+ n = cheapExpr(n, init)
+ if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+ n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
+ n = walkExpr(n, init)
+ }
+ ls[i] = n
+ }
+
+ argc := len(n.Args) - 1
+ if argc < 1 {
+ return nsrc
+ }
+
+ // General case, with no function calls left as arguments.
+ // Leave for ssagen, except that instrumentation requires the old form.
+ if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
+ return n
+ }
+
+ var l []ir.Node
+
+ // s = slice to append to
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, nsrc.Type())
+ l = append(l, ir.NewAssignStmt(base.Pos, s, nsrc))
+
+ // num = number of things to append
+ num := ir.NewInt(base.Pos, int64(argc))
+
+ // newLen := s.len + num
+ newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ l = append(l, ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), num)))
+
+ // if uint(newLen) <= uint(s.cap)
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, typecheck.Conv(newLen, types.Types[types.TUINT]), typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]))
+ nif.Likely = true
+
+ // then { s = s[:n] }
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, newLen, nil)
+ slice.SetBounded(true)
+ nif.Body = []ir.Node{
+ ir.NewAssignStmt(base.Pos, s, slice),
+ }
+
+ // else { s = growslice(s.ptr, n, s.cap, a, T) }
+ nif.Else = []ir.Node{
+ ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
+ ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
+ newLen,
+ ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
+ num)),
+ }
+
+ l = append(l, nif)
+
+ ls = n.Args[1:]
+ for i, n := range ls {
+ // s[s.len-argc+i] = arg
+ ix := ir.NewIndexExpr(base.Pos, s, ir.NewBinaryExpr(base.Pos, ir.OSUB, newLen, ir.NewInt(base.Pos, int64(argc-i))))
+ ix.SetBounded(true)
+ l = append(l, ir.NewAssignStmt(base.Pos, ix, n))
+ }
+
+ typecheck.Stmts(l)
+ walkStmtList(l)
+ init.Append(l...)
+ return s
+}
+
+// growslice(ptr *T, newLen, oldCap, num int, <type>) (ret []T)
+func walkGrowslice(slice *ir.Name, init *ir.Nodes, oldPtr, newLen, oldCap, num ir.Node) *ir.CallExpr {
+ elemtype := slice.Type().Elem()
+ fn := typecheck.LookupRuntime("growslice", elemtype, elemtype)
+ elemtypeptr := reflectdata.TypePtrAt(base.Pos, elemtype)
+ return mkcall1(fn, slice.Type(), init, oldPtr, newLen, oldCap, num, elemtypeptr)
+}
+
+// walkClear walks an OCLEAR node.
+func walkClear(n *ir.UnaryExpr) ir.Node {
+ typ := n.X.Type()
+ switch {
+ case typ.IsSlice():
+ if n := arrayClear(n.X.Pos(), n.X, nil); n != nil {
+ return n
+ }
+ // If n == nil, we are clearing an array which takes zero memory, do nothing.
+ return ir.NewBlockStmt(n.Pos(), nil)
+ case typ.IsMap():
+ return mapClear(n.X, reflectdata.TypePtrAt(n.X.Pos(), n.X.Type()))
+ }
+ panic("unreachable")
+}
+
+// walkClose walks an OCLOSE node.
+func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ // cannot use chanfn - closechan takes any, not chan any
+ fn := typecheck.LookupRuntime("closechan", n.X.Type())
+ return mkcall1(fn, nil, init, n.X)
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+// n := len(a)
+// if n > len(b) { n = len(b) }
+// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
+// }
+// n;
+//
+// Also works if b is a string.
+func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
+ if n.X.Type().Elem().HasPointers() {
+ ir.CurFunc.SetWBPos(n.Pos())
+ fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
+ n.X = cheapExpr(n.X, init)
+ ptrL, lenL := backingArrayPtrLen(n.X)
+ n.Y = cheapExpr(n.Y, init)
+ ptrR, lenR := backingArrayPtrLen(n.Y)
+ return mkcall1(fn, n.Type(), init, reflectdata.CopyElemRType(base.Pos, n), ptrL, lenL, ptrR, lenR)
+ }
+
+ if runtimecall {
+ // rely on runtime to instrument:
+ // copy(n.Left, n.Right)
+ // n.Right can be a slice or string.
+
+ n.X = cheapExpr(n.X, init)
+ ptrL, lenL := backingArrayPtrLen(n.X)
+ n.Y = cheapExpr(n.Y, init)
+ ptrR, lenR := backingArrayPtrLen(n.Y)
+
+ fn := typecheck.LookupRuntime("slicecopy", ptrL.Type().Elem(), ptrR.Type().Elem())
+
+ return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(base.Pos, n.X.Type().Elem().Size()))
+ }
+
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+ nl := typecheck.TempAt(base.Pos, ir.CurFunc, n.X.Type())
+ nr := typecheck.TempAt(base.Pos, ir.CurFunc, n.Y.Type())
+ var l []ir.Node
+ l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
+ l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
+
+ nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
+ nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
+
+ nlen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+
+ // n = len(to)
+ l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
+
+ // if n > len(frm) { n = len(frm) }
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
+ nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
+ l = append(l, nif)
+
+ // if to.ptr != frm.ptr { memmove( ... ) }
+ ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
+ ne.Likely = true
+ l = append(l, ne)
+
+ fn := typecheck.LookupRuntime("memmove", nl.Type().Elem(), nl.Type().Elem())
+ nwid := ir.Node(typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR]))
+ setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
+ ne.Body.Append(setwid)
+ nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, nl.Type().Elem().Size()))
+ call := mkcall1(fn, nil, init, nto, nfrm, nwid)
+ ne.Body.Append(call)
+
+ typecheck.Stmts(l)
+ walkStmtList(l)
+ init.Append(l...)
+ return nlen
+}
+
+// walkDelete walks an ODELETE node.
+func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+ map_ := n.Args[0]
+ key := n.Args[1]
+ map_ = walkExpr(map_, init)
+ key = walkExpr(key, init)
+
+ t := map_.Type()
+ fast := mapfast(t)
+ key = mapKeyArg(fast, n, key, false)
+ return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.DeleteMapRType(base.Pos, n), map_, key)
+}
+
+// walkLenCap walks an OLEN or OCAP node.
+func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ if isRuneCount(n) {
+ // Replace len([]rune(string)) with runtime.countrunes(string).
+ return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
+ }
+ if isByteCount(n) {
+ conv := n.X.(*ir.ConvExpr)
+ walkStmtList(conv.Init())
+ init.Append(ir.TakeInit(conv)...)
+ _, len := backingArrayPtrLen(cheapExpr(conv.X, init))
+ return len
+ }
+
+ n.X = walkExpr(n.X, init)
+
+ // replace len(*[10]int) with 10.
+ // delayed until now to preserve side effects.
+ t := n.X.Type()
+
+ if t.IsPtr() {
+ t = t.Elem()
+ }
+ if t.IsArray() {
+ safeExpr(n.X, init)
+ con := ir.NewConstExpr(constant.MakeInt64(t.NumElem()), n)
+ con.SetTypecheck(1)
+ return con
+ }
+ return n
+}
+
+// walkMakeChan walks an OMAKECHAN node.
+func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ // When size fits into int, use makechan instead of
+ // makechan64, which is faster and shorter on 32 bit platforms.
+ size := n.Len
+ fnname := "makechan64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL size is positive and fits in an int.
+ // The case of size overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makechan during runtime.
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
+ fnname = "makechan"
+ argtype = types.Types[types.TINT]
+ }
+
+ return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.MakeChanRType(base.Pos, n), typecheck.Conv(size, argtype))
+}
+
+// walkMakeMap walks an OMAKEMAP node.
+func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ t := n.Type()
+ hmapType := reflectdata.MapType()
+ hint := n.Len
+
+ // var h *hmap
+ var h ir.Node
+ if n.Esc() == ir.EscNone {
+ // Allocate hmap on stack.
+
+ // var hv hmap
+ // h = &hv
+ h = stackTempAddr(init, hmapType)
+
+ // Allocate one bucket pointed to by hmap.buckets on stack if hint
+ // is not larger than BUCKETSIZE. In case hint is larger than
+ // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
+ // Maximum key and elem size is 128 bytes, larger objects
+ // are stored with an indirection. So max bucket size is 2048+eps.
+ if !ir.IsConst(hint, constant.Int) ||
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+
+ // In case hint is larger than BUCKETSIZE runtime.makemap
+ // will allocate the buckets on the heap, see #20184
+ //
+ // if hint <= BUCKETSIZE {
+ // var bv bmap
+ // b = &bv
+ // h.buckets = b
+ // }
+
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, reflectdata.BUCKETSIZE)), nil, nil)
+ nif.Likely = true
+
+ // var bv bmap
+ // b = &bv
+ b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))
+
+ // h.buckets = b
+ bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
+ na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
+ nif.Body.Append(na)
+ appendWalkStmt(init, nif)
+ }
+ }
+
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+ // Handling make(map[any]any) and
+ // make(map[any]any, hint) where hint <= BUCKETSIZE
+ // special allows for faster map initialization and
+ // improves binary size by using calls with fewer arguments.
+ // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
+ // and no buckets will be allocated by makemap. Therefore,
+ // no buckets need to be allocated in this code path.
+ if n.Esc() == ir.EscNone {
+ // Only need to initialize h.hash0 since
+ // hmap h has been allocated on the stack already.
+ // h.hash0 = rand32()
+ rand := mkcall("rand32", types.Types[types.TUINT32], init)
+ hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
+ return typecheck.ConvNop(h, t)
+ }
+ // Call runtime.makehmap to allocate an
+ // hmap on the heap and initialize hmap's hash0 field.
+ fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
+ return mkcall1(fn, n.Type(), init)
+ }
+
+ if n.Esc() != ir.EscNone {
+ h = typecheck.NodNil()
+ }
+ // Map initialization with a variable or large hint is
+ // more complicated. We therefore generate a call to
+ // runtime.makemap to initialize hmap and allocate the
+ // map buckets.
+
+ // When hint fits into int, use makemap instead of
+ // makemap64, which is faster and shorter on 32 bit platforms.
+ fnname := "makemap64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL hint is positive and fits in an int.
+ // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
+ // The case of hint overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makemap during runtime.
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
+ fnname = "makemap"
+ argtype = types.Types[types.TINT]
+ }
+
+ fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
+ return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
+}
+
+// walkMakeSlice walks an OMAKESLICE node.
+func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ l := n.Len
+ r := n.Cap
+ if r == nil {
+ r = safeExpr(l, init)
+ l = r
+ }
+ t := n.Type()
+ if t.Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+ if n.Esc() == ir.EscNone {
+ if why := escape.HeapAllocReason(n); why != "" {
+ base.Fatalf("%v has EscNone, but %v", n, why)
+ }
+ // var arr [r]T
+ // n = arr[:l]
+ i := typecheck.IndexConst(r)
+ if i < 0 {
+ base.Fatalf("walkExpr: invalid index %v", r)
+ }
+
+ // cap is constrained to [0,2^31) or [0,2^63) depending on whether
+ // we're in 32-bit or 64-bit systems. So it's safe to do:
+ //
+ // if uint64(len) > cap {
+ // if len < 0 { panicmakeslicelen() }
+ // panicmakeslicecap()
+ // }
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(base.Pos, i)), nil, nil)
+ niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(base.Pos, 0)), nil, nil)
+ niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+ nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ init.Append(typecheck.Stmt(nif))
+
+ t = types.NewArray(t.Elem(), i) // [r]T
+ var_ := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp
+ r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
+ // The conv is necessary in case n.Type is named.
+ return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
+ }
+
+ // n escapes; set up a call to makeslice.
+ // When len and cap can fit into int, use makeslice instead of
+ // makeslice64, which is faster and shorter on 32 bit platforms.
+
+ len, cap := l, r
+
+ fnname := "makeslice64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+ // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makeslice during runtime.
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
+ fnname = "makeslice"
+ argtype = types.Types[types.TINT]
+ }
+ fn := typecheck.LookupRuntime(fnname)
+ ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
+ ptr.MarkNonNil()
+ len = typecheck.Conv(len, types.Types[types.TINT])
+ cap = typecheck.Conv(cap, types.Types[types.TINT])
+ sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap)
+ return walkExpr(typecheck.Expr(sh), init)
+}
+
+// walkMakeSliceCopy walks an OMAKESLICECOPY node.
+func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ if n.Esc() == ir.EscNone {
+ base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ }
+
+ t := n.Type()
+ if t.Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+
+ length := typecheck.Conv(n.Len, types.Types[types.TINT])
+ copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
+ copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
+
+ if !t.Elem().HasPointers() && n.Bounded() {
+ // When len(to)==len(from) and elements have no pointers:
+ // replace make+copy with runtime.mallocgc+runtime.memmove.
+
+ // We do not check for overflow of len(to)*elem.Width here
+ // since len(from) is an existing checked slice capacity
+ // with same elem.Width for the from slice.
+ size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(base.Pos, t.Elem().Size()), types.Types[types.TUINTPTR]))
+
+ // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+ fn := typecheck.LookupRuntime("mallocgc")
+ ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(base.Pos, false))
+ ptr.MarkNonNil()
+ sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
+
+ s := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+ r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
+ r = walkExpr(r, init)
+ init.Append(r)
+
+ // instantiate memmove(to *any, frm *any, size uintptr)
+ fn = typecheck.LookupRuntime("memmove", t.Elem(), t.Elem())
+ ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
+ init.Append(walkExpr(typecheck.Stmt(ncopy), init))
+
+ return s
+ }
+ // Replace make+copy with runtime.makeslicecopy.
+ // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+ fn := typecheck.LookupRuntime("makeslicecopy")
+ ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
+ ptr.MarkNonNil()
+ sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
+ return walkExpr(typecheck.Expr(sh), init)
+}
+
+// walkNew walks an ONEW node.
+func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ t := n.Type().Elem()
+ if t.NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
+ }
+ if n.Esc() == ir.EscNone {
+ if t.Size() > ir.MaxImplicitStackVarSize {
+ base.Fatalf("large ONEW with EscNone: %v", n)
+ }
+ return stackTempAddr(init, t)
+ }
+ types.CalcSize(t)
+ n.MarkNonNil()
+ return n
+}
+
+func walkMinMax(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+ walkExprList(n.Args, init)
+ return n
+}
+
+// generate code for print.
+func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ // Hoist all the argument evaluation up before the lock.
+ walkExprListCheap(nn.Args, init)
+
+ // For println, add " " between elements and "\n" at the end.
+ if nn.Op() == ir.OPRINTLN {
+ s := nn.Args
+ t := make([]ir.Node, 0, len(s)*2)
+ for i, n := range s {
+ if i != 0 {
+ t = append(t, ir.NewString(base.Pos, " "))
+ }
+ t = append(t, n)
+ }
+ t = append(t, ir.NewString(base.Pos, "\n"))
+ nn.Args = t
+ }
+
+ // Collapse runs of constant strings.
+ s := nn.Args
+ t := make([]ir.Node, 0, len(s))
+ for i := 0; i < len(s); {
+ var strs []string
+ for i < len(s) && ir.IsConst(s[i], constant.String) {
+ strs = append(strs, ir.StringVal(s[i]))
+ i++
+ }
+ if len(strs) > 0 {
+ t = append(t, ir.NewString(base.Pos, strings.Join(strs, "")))
+ }
+ if i < len(s) {
+ t = append(t, s[i])
+ i++
+ }
+ }
+ nn.Args = t
+
+ calls := []ir.Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.Args {
+ if n.Op() == ir.OLITERAL {
+ if n.Type() == types.UntypedRune {
+ n = typecheck.DefaultLit(n, types.RuneType)
+ }
+
+ switch n.Val().Kind() {
+ case constant.Int:
+ n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+
+ case constant.Float:
+ n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
+ }
+ }
+
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+ }
+ n = typecheck.DefaultLit(n, nil)
+ nn.Args[i] = n
+ if n.Type() == nil || n.Type().Kind() == types.TFORW {
+ continue
+ }
+
+ var on *ir.Name
+ switch n.Type().Kind() {
+ case types.TINTER:
+ if n.Type().IsEmptyInterface() {
+ on = typecheck.LookupRuntime("printeface", n.Type())
+ } else {
+ on = typecheck.LookupRuntime("printiface", n.Type())
+ }
+ case types.TPTR:
+ if n.Type().Elem().NotInHeap() {
+ on = typecheck.LookupRuntime("printuintptr")
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUNSAFEPTR])
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUINTPTR])
+ break
+ }
+ fallthrough
+ case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
+ on = typecheck.LookupRuntime("printpointer", n.Type())
+ case types.TSLICE:
+ on = typecheck.LookupRuntime("printslice", n.Type())
+ case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ if types.RuntimeSymName(n.Type().Sym()) == "hex" {
+ on = typecheck.LookupRuntime("printhex")
+ } else {
+ on = typecheck.LookupRuntime("printuint")
+ }
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
+ on = typecheck.LookupRuntime("printint")
+ case types.TFLOAT32, types.TFLOAT64:
+ on = typecheck.LookupRuntime("printfloat")
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ on = typecheck.LookupRuntime("printcomplex")
+ case types.TBOOL:
+ on = typecheck.LookupRuntime("printbool")
+ case types.TSTRING:
+ cs := ""
+ if ir.IsConst(n, constant.String) {
+ cs = ir.StringVal(n)
+ }
+ switch cs {
+ case " ":
+ on = typecheck.LookupRuntime("printsp")
+ case "\n":
+ on = typecheck.LookupRuntime("printnl")
+ default:
+ on = typecheck.LookupRuntime("printstring")
+ }
+ default:
+ badtype(ir.OPRINT, n.Type(), nil)
+ continue
+ }
+
+ r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
+ if params := on.Type().Params(); len(params) > 0 {
+ t := params[0].Type
+ n = typecheck.Conv(n, t)
+ r.Args.Append(n)
+ }
+ calls = append(calls, r)
+ }
+
+ calls = append(calls, mkcall("printunlock", nil, init))
+
+ typecheck.Stmts(calls)
+ walkExprList(calls, init)
+
+ r := ir.NewBlockStmt(base.Pos, nil)
+ r.List = calls
+ return walkStmt(typecheck.Stmt(r))
+}
+
+// walkRecoverFP walks an ORECOVERFP node.
+func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
+}
+
+// walkUnsafeData walks an OUNSAFESLICEDATA or OUNSAFESTRINGDATA expression.
+func walkUnsafeData(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ slice := walkExpr(n.X, init)
+ res := typecheck.Expr(ir.NewUnaryExpr(n.Pos(), ir.OSPTR, slice))
+ res.SetType(n.Type())
+ return walkExpr(res, init)
+}
+
+func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ ptr := safeExpr(n.X, init)
+ len := safeExpr(n.Y, init)
+ sliceType := n.Type()
+
+ lenType := types.Types[types.TINT64]
+ unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR])
+
+ // If checkptr enabled, call runtime.unsafeslicecheckptr to check ptr and len.
+ // for simplicity, unsafeslicecheckptr always uses int64.
+ // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+ // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in unsafeslice during runtime.
+ if ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ fnname := "unsafeslicecheckptr"
+ fn := typecheck.LookupRuntime(fnname)
+ init.Append(mkcall1(fn, nil, init, reflectdata.UnsafeSliceElemRType(base.Pos, n), unsafePtr, typecheck.Conv(len, lenType)))
+ } else {
+ // Otherwise, open code unsafe.Slice to prevent runtime call overhead.
+ // Keep this code in sync with runtime.unsafeslice{,64}
+ if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
+ lenType = types.Types[types.TINT]
+ } else {
+ // len64 := int64(len)
+ // if int64(int(len64)) != len64 {
+ // panicunsafeslicelen()
+ // }
+ len64 := typecheck.Conv(len, lenType)
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64)
+ nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body))
+ appendWalkStmt(init, nif)
+ }
+
+ // if len < 0 { panicunsafeslicelen() }
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0))
+ nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body))
+ appendWalkStmt(init, nif)
+
+ if sliceType.Elem().Size() == 0 {
+ // if ptr == nil && len > 0 {
+ // panicunsafesliceptrnil()
+ // }
+ nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ isNil := ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
+ gtZero := ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0))
+ nifPtr.Cond =
+ ir.NewLogicalExpr(base.Pos, ir.OANDAND, isNil, gtZero)
+ nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, &nifPtr.Body))
+ appendWalkStmt(init, nifPtr)
+
+ h := ir.NewSliceHeaderExpr(n.Pos(), sliceType,
+ typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
+ typecheck.Conv(len, types.Types[types.TINT]),
+ typecheck.Conv(len, types.Types[types.TINT]))
+ return walkExpr(typecheck.Expr(h), init)
+ }
+
+ // mem, overflow := math.mulUintptr(et.size, len)
+ mem := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
+ overflow := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+
+ decl := types.NewSignature(nil,
+ []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ },
+ []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, nil, types.Types[types.TBOOL]),
+ })
+
+ fn := ir.NewFunc(n.Pos(), n.Pos(), math_MulUintptr, decl)
+
+ call := mkcall1(fn.Nname, fn.Type().ResultsTuple(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
+ appendWalkStmt(init, ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{mem, overflow}, []ir.Node{call}))
+
+ // if overflow || mem > -uintptr(ptr) {
+ // if ptr == nil {
+ // panicunsafesliceptrnil()
+ // }
+ // panicunsafeslicelen()
+ // }
+ nif = ir.NewIfStmt(base.Pos, nil, nil, nil)
+ memCond := ir.NewBinaryExpr(base.Pos, ir.OGT, mem, ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR])))
+ nif.Cond = ir.NewLogicalExpr(base.Pos, ir.OOROR, overflow, memCond)
+ nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
+ nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, &nifPtr.Body))
+ nif.Body.Append(nifPtr, mkcall("panicunsafeslicelen", nil, &nif.Body))
+ appendWalkStmt(init, nif)
+ }
+
+ h := ir.NewSliceHeaderExpr(n.Pos(), sliceType,
+ typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
+ typecheck.Conv(len, types.Types[types.TINT]),
+ typecheck.Conv(len, types.Types[types.TINT]))
+ return walkExpr(typecheck.Expr(h), init)
+}
+
+var math_MulUintptr = &types.Sym{Pkg: types.NewPkg("runtime/internal/math", "math"), Name: "MulUintptr"}
+
+func walkUnsafeString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ ptr := safeExpr(n.X, init)
+ len := safeExpr(n.Y, init)
+
+ lenType := types.Types[types.TINT64]
+ unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR])
+
+ // If checkptr enabled, call runtime.unsafestringcheckptr to check ptr and len.
+ // for simplicity, unsafestringcheckptr always uses int64.
+ // Type checking guarantees that TIDEAL len are positive and fit in an int.
+ if ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ fnname := "unsafestringcheckptr"
+ fn := typecheck.LookupRuntime(fnname)
+ init.Append(mkcall1(fn, nil, init, unsafePtr, typecheck.Conv(len, lenType)))
+ } else {
+ // Otherwise, open code unsafe.String to prevent runtime call overhead.
+ // Keep this code in sync with runtime.unsafestring{,64}
+ if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
+ lenType = types.Types[types.TINT]
+ } else {
+ // len64 := int64(len)
+ // if int64(int(len64)) != len64 {
+ // panicunsafestringlen()
+ // }
+ len64 := typecheck.Conv(len, lenType)
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64)
+ nif.Body.Append(mkcall("panicunsafestringlen", nil, &nif.Body))
+ appendWalkStmt(init, nif)
+ }
+
+ // if len < 0 { panicunsafestringlen() }
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0))
+ nif.Body.Append(mkcall("panicunsafestringlen", nil, &nif.Body))
+ appendWalkStmt(init, nif)
+
+ // if uintpr(len) > -uintptr(ptr) {
+ // if ptr == nil {
+ // panicunsafestringnilptr()
+ // }
+ // panicunsafeslicelen()
+ // }
+ nifLen := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nifLen.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, types.Types[types.TUINTPTR]), ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR])))
+ nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
+ nifPtr.Body.Append(mkcall("panicunsafestringnilptr", nil, &nifPtr.Body))
+ nifLen.Body.Append(nifPtr, mkcall("panicunsafestringlen", nil, &nifLen.Body))
+ appendWalkStmt(init, nifLen)
+ }
+ h := ir.NewStringHeaderExpr(n.Pos(),
+ typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
+ typecheck.Conv(len, types.Types[types.TINT]),
+ )
+ return walkExpr(typecheck.Expr(h), init)
+}
+
+func badtype(op ir.Op, tl, tr *types.Type) {
+ var s string
+ if tl != nil {
+ s += fmt.Sprintf("\n\t%v", tl)
+ }
+ if tr != nil {
+ s += fmt.Sprintf("\n\t%v", tr)
+ }
+
+ // common mistake: *struct and *interface.
+ if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
+ if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
+ s += "\n\t(*struct vs *interface)"
+ } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
+ s += "\n\t(*interface vs *struct)"
+ }
+ }
+
+ base.Errorf("illegal types for operand: %v%s", op, s)
+}
+
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
+ return typecheck.LookupRuntime(name, l, r)
+}
+
+// isRuneCount reports whether n is of the form len([]rune(string)).
+// These are optimized into a call to runtime.countrunes.
+func isRuneCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
+}
+
+// isByteCount reports whether n is of the form len(string([]byte)).
+func isByteCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN &&
+ (n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STR || n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STRTMP)
+}
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
new file mode 100644
index 0000000..38c6c03
--- /dev/null
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -0,0 +1,230 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// directClosureCall rewrites a direct call of a function literal into
+// a normal function call with closure variables passed as arguments.
+// This avoids allocation of a closure object.
+//
+// For illustration, the following call:
+//
+// func(a int) {
+// println(byval)
+// byref++
+// }(42)
+//
+// becomes:
+//
+// func(byval int, &byref *int, a int) {
+// println(byval)
+// (*&byref)++
+// }(byval, &byref, 42)
+func directClosureCall(n *ir.CallExpr) {
+ clo := n.Fun.(*ir.ClosureExpr)
+ clofn := clo.Func
+
+ if ir.IsTrivialClosure(clo) {
+ return // leave for walkClosure to handle
+ }
+
+ // We are going to insert captured variables before input args.
+ var params []*types.Field
+ var decls []*ir.Name
+ for _, v := range clofn.ClosureVars {
+ if !v.Byval() {
+ // If v of type T is captured by reference,
+ // we introduce function param &v *T
+ // and v remains PAUTOHEAP with &v heapaddr
+ // (accesses will implicitly deref &v).
+
+ addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name), types.NewPtr(v.Type()))
+ addr.Curfn = clofn
+ v.Heapaddr = addr
+ v = addr
+ }
+
+ v.Class = ir.PPARAM
+ decls = append(decls, v)
+
+ fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
+ fld.Nname = v
+ params = append(params, fld)
+ }
+
+ // f is ONAME of the actual function.
+ f := clofn.Nname
+ typ := f.Type()
+
+ // Create new function type with parameters prepended, and
+ // then update type and declarations.
+ typ = types.NewSignature(nil, append(params, typ.Params()...), typ.Results())
+ f.SetType(typ)
+ clofn.Dcl = append(decls, clofn.Dcl...)
+
+ // Rewrite call.
+ n.Fun = f
+ n.Args.Prepend(closureArgs(clo)...)
+
+ // Update the call expression's type. We need to do this
+ // because typecheck gave it the result type of the OCLOSURE
+ // node, but we only rewrote the ONAME node's type. Logically,
+ // they're the same, but the stack offsets probably changed.
+ if typ.NumResults() == 1 {
+ n.SetType(typ.Result(0).Type)
+ } else {
+ n.SetType(typ.ResultsTuple())
+ }
+
+ // Add to Closures for enqueueFunc. It's no longer a proper
+ // closure, but we may have already skipped over it in the
+ // functions list as a non-trivial closure, so this just
+ // ensures it's compiled.
+ ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn)
+}
+
+func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
+ clofn := clo.Func
+
+ // If no closure vars, don't bother wrapping.
+ if ir.IsTrivialClosure(clo) {
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(clo.Pos(), "closure converted to global")
+ }
+ return clofn.Nname
+ }
+
+ // The closure is not trivial or directly called, so it's going to stay a closure.
+ ir.ClosureDebugRuntimeCheck(clo)
+ clofn.SetNeedctxt(true)
+
+ // The closure expression may be walked more than once if it appeared in composite
+ // literal initialization (e.g, see issue #49029).
+ //
+ // Don't add the closure function to compilation queue more than once, since when
+ // compiling a function twice would lead to an ICE.
+ if !clofn.Walked() {
+ clofn.SetWalked(true)
+ ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn)
+ }
+
+ typ := typecheck.ClosureType(clo)
+
+ clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, typ, nil)
+ clos.SetEsc(clo.Esc())
+ clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...)
+ for i, value := range clos.List {
+ clos.List[i] = ir.NewStructKeyExpr(base.Pos, typ.Field(i), value)
+ }
+
+ addr := typecheck.NodAddr(clos)
+ addr.SetEsc(clo.Esc())
+
+ // Force type conversion from *struct to the func type.
+ cfn := typecheck.ConvNop(addr, clo.Type())
+
+ // non-escaping temp to use, if any.
+ if x := clo.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
+ panic("closure type does not match order's assigned type")
+ }
+ addr.Prealloc = x
+ clo.Prealloc = nil
+ }
+
+ return walkExpr(cfn, init)
+}
+
+// closureArgs returns a slice of expressions that can be used to
+// initialize the given closure's free variables. These correspond
+// one-to-one with the variables in clo.Func.ClosureVars, and will be
+// either an ONAME node (if the variable is captured by value) or an
+// OADDR-of-ONAME node (if not).
+func closureArgs(clo *ir.ClosureExpr) []ir.Node {
+ fn := clo.Func
+
+ args := make([]ir.Node, len(fn.ClosureVars))
+ for i, v := range fn.ClosureVars {
+ var outer ir.Node
+ outer = v.Outer
+ if !v.Byval() {
+ outer = typecheck.NodAddrAt(fn.Pos(), outer)
+ }
+ args[i] = typecheck.Expr(outer)
+ }
+ return args
+}
+
+func walkMethodValue(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+ // Create closure in the form of a composite literal.
+ // For x.M with receiver (x) type T, the generated code looks like:
+ //
+ // clos = &struct{F uintptr; R T}{T.M·f, x}
+ //
+ // Like walkClosure above.
+
+ if n.X.Type().IsInterface() {
+ // Trigger panic for method on nil interface now.
+ // Otherwise it happens in the wrapper and is confusing.
+ n.X = cheapExpr(n.X, init)
+ n.X = walkExpr(n.X, nil)
+
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)
+ check := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
+ init.Append(typecheck.Stmt(check))
+ }
+
+ typ := typecheck.MethodValueType(n)
+
+ clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, typ, nil)
+ clos.SetEsc(n.Esc())
+ clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, methodValueWrapper(n)), n.X}
+
+ addr := typecheck.NodAddr(clos)
+ addr.SetEsc(n.Esc())
+
+ // Force type conversion from *struct to the func type.
+ cfn := typecheck.ConvNop(addr, n.Type())
+
+ // non-escaping temp to use, if any.
+ if x := n.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
+ panic("partial call type does not match order's assigned type")
+ }
+ addr.Prealloc = x
+ n.Prealloc = nil
+ }
+
+ return walkExpr(cfn, init)
+}
+
+// methodValueWrapper returns the ONAME node representing the
+// wrapper function (*-fm) needed for the given method value. If the
+// wrapper function hasn't already been created yet, it's created and
+// added to typecheck.Target.Decls.
+func methodValueWrapper(dot *ir.SelectorExpr) *ir.Name {
+ if dot.Op() != ir.OMETHVALUE {
+ base.Fatalf("methodValueWrapper: unexpected %v (%v)", dot, dot.Op())
+ }
+
+ meth := dot.Sel
+ rcvrtype := dot.X.Type()
+ sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
+
+ if sym.Uniq() {
+ return sym.Def.(*ir.Name)
+ }
+ sym.SetUniq(true)
+
+ base.FatalfAt(dot.Pos(), "missing wrapper for %v", meth)
+ panic("unreachable")
+}
diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go
new file mode 100644
index 0000000..625cfec
--- /dev/null
+++ b/src/cmd/compile/internal/walk/compare.go
@@ -0,0 +1,514 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "hash/fnv"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/compare"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+func fakePC(n ir.Node) ir.Node {
+ // In order to get deterministic IDs, we include the package path, absolute filename, line number, column number
+ // in the calculation of the fakePC for the IR node.
+ hash := fnv.New32()
+ // We ignore the errors here because the `io.Writer` in the `hash.Hash` interface never returns an error.
+ io.WriteString(hash, base.Ctxt.Pkgpath)
+ io.WriteString(hash, base.Ctxt.PosTable.Pos(n.Pos()).AbsFilename())
+ binary.Write(hash, binary.LittleEndian, int64(n.Pos().Line()))
+ binary.Write(hash, binary.LittleEndian, int64(n.Pos().Col()))
+ // We also include the string representation of the node to distinguish autogenerated expression since
+ // those get the same `src.XPos`
+ io.WriteString(hash, fmt.Sprintf("%v", n))
+
+ return ir.NewInt(base.Pos, int64(hash.Sum32()))
+}
+
+// The result of walkCompare MUST be assigned back to n, e.g.
+//
+// n.Left = walkCompare(n.Left, init)
+func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
+ return walkCompareInterface(n, init)
+ }
+
+ if n.X.Type().IsString() && n.Y.Type().IsString() {
+ return walkCompareString(n, init)
+ }
+
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+
+ // Given mixed interface/concrete comparison,
+ // rewrite into types-equal && data-equal.
+ // This is efficient, avoids allocations, and avoids runtime calls.
+ //
+ // TODO(mdempsky): It would be more general and probably overall
+ // simpler to just extend walkCompareInterface to optimize when one
+ // operand is an OCONVIFACE.
+ if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
+ // Preserve side-effects in case of short-circuiting; see #32187.
+ l := cheapExpr(n.X, init)
+ r := cheapExpr(n.Y, init)
+ // Swap so that l is the interface value and r is the concrete value.
+ if n.Y.Type().IsInterface() {
+ l, r = r, l
+ }
+
+ // Handle both == and !=.
+ eq := n.Op()
+ andor := ir.OOROR
+ if eq == ir.OEQ {
+ andor = ir.OANDAND
+ }
+ // Check for types equal.
+ // For empty interface, this is:
+ // l.tab == type(r)
+ // For non-empty interface, this is:
+ // l.tab != nil && l.tab._type == type(r)
+ //
+ // TODO(mdempsky): For non-empty interface comparisons, just
+ // compare against the itab address directly?
+ var eqtype ir.Node
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
+ rtyp := reflectdata.CompareRType(base.Pos, n)
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
+ tab.SetTypecheck(1)
+ eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
+ } else {
+ nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
+ match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
+ eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
+ }
+ // Check for data equal.
+ eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
+ // Put it all together.
+ expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
+ return finishCompare(n, expr, init)
+ }
+
+ // Must be comparison of array or struct.
+ // Otherwise back end handles it.
+ // While we're here, decide whether to
+ // inline or call an eq alg.
+ t := n.X.Type()
+ var inline bool
+
+ maxcmpsize := int64(4)
+ unalignedLoad := ssagen.Arch.LinkArch.CanMergeLoads
+ if unalignedLoad {
+ // Keep this low enough to generate less code than a function call.
+ maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
+ }
+
+ switch t.Kind() {
+ default:
+ if base.Debug.Libfuzzer != 0 && t.IsInteger() && (n.X.Name() == nil || !n.X.Name().Libfuzzer8BitCounter()) {
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+
+ // If exactly one comparison operand is
+ // constant, invoke the constcmp functions
+ // instead, and arrange for the constant
+ // operand to be the first argument.
+ l, r := n.X, n.Y
+ if r.Op() == ir.OLITERAL {
+ l, r = r, l
+ }
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
+
+ var fn string
+ var paramType *types.Type
+ switch t.Size() {
+ case 1:
+ fn = "libfuzzerTraceCmp1"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp1"
+ }
+ paramType = types.Types[types.TUINT8]
+ case 2:
+ fn = "libfuzzerTraceCmp2"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp2"
+ }
+ paramType = types.Types[types.TUINT16]
+ case 4:
+ fn = "libfuzzerTraceCmp4"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp4"
+ }
+ paramType = types.Types[types.TUINT32]
+ case 8:
+ fn = "libfuzzerTraceCmp8"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp8"
+ }
+ paramType = types.Types[types.TUINT64]
+ default:
+ base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ }
+ init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init), fakePC(n)))
+ }
+ return n
+ case types.TARRAY:
+ // We can compare several elements at once with 2/4/8 byte integer compares
+ inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Size()*t.NumElem() <= maxcmpsize))
+ case types.TSTRUCT:
+ inline = compare.EqStructCost(t) <= 4
+ }
+
+ cmpl := n.X
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.(*ir.ConvExpr).X
+ }
+ cmpr := n.Y
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.(*ir.ConvExpr).X
+ }
+
+ // Chose not to inline. Call equality function directly.
+ if !inline {
+ // eq algs take pointers; cmpl and cmpr must be addressable
+ if !ir.IsAddressable(cmpl) || !ir.IsAddressable(cmpr) {
+ base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ }
+
+ // Should only arrive here with large memory or
+ // a struct/array containing a non-memory field/element.
+ // Small memory is handled inline, and single non-memory
+ // is handled by walkCompare.
+ fn, needsLength := reflectdata.EqFor(t)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args.Append(typecheck.NodAddr(cmpl))
+ call.Args.Append(typecheck.NodAddr(cmpr))
+ if needsLength {
+ call.Args.Append(ir.NewInt(base.Pos, t.Size()))
+ }
+ res := ir.Node(call)
+ if n.Op() != ir.OEQ {
+ res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
+ }
+ return finishCompare(n, res, init)
+ }
+
+ // inline: build boolean expression comparing element by element
+ andor := ir.OANDAND
+ if n.Op() == ir.ONE {
+ andor = ir.OOROR
+ }
+ var expr ir.Node
+ comp := func(el, er ir.Node) {
+ a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
+ }
+ }
+ and := func(cond ir.Node) {
+ if expr == nil {
+ expr = cond
+ } else {
+ expr = ir.NewLogicalExpr(base.Pos, andor, expr, cond)
+ }
+ }
+ cmpl = safeExpr(cmpl, init)
+ cmpr = safeExpr(cmpr, init)
+ if t.IsStruct() {
+ conds, _ := compare.EqStruct(t, cmpl, cmpr)
+ if n.Op() == ir.OEQ {
+ for _, cond := range conds {
+ and(cond)
+ }
+ } else {
+ for _, cond := range conds {
+ notCond := ir.NewUnaryExpr(base.Pos, ir.ONOT, cond)
+ and(notCond)
+ }
+ }
+ } else {
+ step := int64(1)
+ remains := t.NumElem() * t.Elem().Size()
+ combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Size() <= 4 && t.Elem().IsInteger()
+ combine32bit := unalignedLoad && t.Elem().Size() <= 2 && t.Elem().IsInteger()
+ combine16bit := unalignedLoad && t.Elem().Size() == 1 && t.Elem().IsInteger()
+ for i := int64(0); remains > 0; {
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[types.TINT64]
+ step = 8 / t.Elem().Size()
+ case remains >= 4 && combine32bit:
+ convType = types.Types[types.TUINT32]
+ step = 4 / t.Elem().Size()
+ case remains >= 2 && combine16bit:
+ convType = types.Types[types.TUINT16]
+ step = 2 / t.Elem().Size()
+ default:
+ step = 1
+ }
+ if step == 1 {
+ comp(
+ ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i)),
+ ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i)),
+ )
+ i++
+ remains -= t.Elem().Size()
+ } else {
+ elemType := t.Elem().ToUnsigned()
+ cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i)))
+ cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
+ cmplw = typecheck.Conv(cmplw, convType) // widen
+ cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i)))
+ cmprw = typecheck.Conv(cmprw, elemType)
+ cmprw = typecheck.Conv(cmprw, convType)
+ // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will generate a single large load.
+ for offset := int64(1); offset < step; offset++ {
+ lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i+offset)))
+ lb = typecheck.Conv(lb, elemType)
+ lb = typecheck.Conv(lb, convType)
+ lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(base.Pos, 8*t.Elem().Size()*offset))
+ cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
+ rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i+offset)))
+ rb = typecheck.Conv(rb, elemType)
+ rb = typecheck.Conv(rb, convType)
+ rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(base.Pos, 8*t.Elem().Size()*offset))
+ cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
+ }
+ comp(cmplw, cmprw)
+ i += step
+ remains -= step * t.Elem().Size()
+ }
+ }
+ }
+ if expr == nil {
+ expr = ir.NewBool(base.Pos, n.Op() == ir.OEQ)
+ // We still need to use cmpl and cmpr, in case they contain
+ // an expression which might panic. See issue 23837.
+ a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.BlankNode, cmpl))
+ a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.BlankNode, cmpr))
+ init.Append(a1, a2)
+ }
+ return finishCompare(n, expr, init)
+}
+
+func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ n.Y = cheapExpr(n.Y, init)
+ n.X = cheapExpr(n.X, init)
+ eqtab, eqdata := compare.EqInterface(n.X, n.Y)
+ var cmp ir.Node
+ if n.Op() == ir.OEQ {
+ cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
+ } else {
+ eqtab.SetOp(ir.ONE)
+ cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
+ }
+ return finishCompare(n, cmp, init)
+}
+
+func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ if base.Debug.Libfuzzer != 0 {
+ if !ir.IsConst(n.X, constant.String) || !ir.IsConst(n.Y, constant.String) {
+ fn := "libfuzzerHookStrCmp"
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+ paramType := types.Types[types.TSTRING]
+ init.Append(mkcall(fn, nil, init, tracecmpArg(n.X, paramType, init), tracecmpArg(n.Y, paramType, init), fakePC(n)))
+ }
+ }
+ // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
+ var cs, ncs ir.Node // const string, non-const string
+ switch {
+ case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
+ // ignore; will be constant evaluated
+ case ir.IsConst(n.X, constant.String):
+ cs = n.X
+ ncs = n.Y
+ case ir.IsConst(n.Y, constant.String):
+ cs = n.Y
+ ncs = n.X
+ }
+ if cs != nil {
+ cmp := n.Op()
+ // Our comparison below assumes that the non-constant string
+ // is on the left hand side, so rewrite "" cmp x to x cmp "".
+ // See issue 24817.
+ if ir.IsConst(n.X, constant.String) {
+ cmp = brrev(cmp)
+ }
+
+ // maxRewriteLen was chosen empirically.
+ // It is the value that minimizes cmd/go file size
+ // across most architectures.
+ // See the commit description for CL 26758 for details.
+ maxRewriteLen := 6
+ // Some architectures can load unaligned byte sequence as 1 word.
+ // So we can cover longer strings with the same amount of code.
+ canCombineLoads := ssagen.Arch.LinkArch.CanMergeLoads
+ combine64bit := false
+ if canCombineLoads {
+ // Keep this low enough to generate less code than a function call.
+ maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
+ combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
+ }
+
+ var and ir.Op
+ switch cmp {
+ case ir.OEQ:
+ and = ir.OANDAND
+ case ir.ONE:
+ and = ir.OOROR
+ default:
+ // Don't do byte-wise comparisons for <, <=, etc.
+ // They're fairly complicated.
+ // Length-only checks are ok, though.
+ maxRewriteLen = 0
+ }
+ if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
+ if len(s) > 0 {
+ ncs = safeExpr(ncs, init)
+ }
+ r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(base.Pos, int64(len(s)))))
+ remains := len(s)
+ for i := 0; remains > 0; {
+ if remains == 1 || !canCombineLoads {
+ cb := ir.NewInt(base.Pos, int64(s[i]))
+ ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i)))
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
+ remains--
+ i++
+ continue
+ }
+ var step int
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[types.TINT64]
+ step = 8
+ case remains >= 4:
+ convType = types.Types[types.TUINT32]
+ step = 4
+ case remains >= 2:
+ convType = types.Types[types.TUINT16]
+ step = 2
+ }
+ ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i))), convType)
+ csubstr := int64(s[i])
+ // Calculate large constant from bytes as sequence of shifts and ors.
+ // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will combine this into a single large load.
+ for offset := 1; offset < step; offset++ {
+ b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i+offset))), convType)
+ b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(base.Pos, int64(8*offset)))
+ ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
+ csubstr |= int64(s[i+offset]) << uint8(8*offset)
+ }
+ csubstrPart := ir.NewInt(base.Pos, csubstr)
+ // Compare "step" bytes as once
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
+ remains -= step
+ i += step
+ }
+ return finishCompare(n, r, init)
+ }
+ }
+
+ var r ir.Node
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
+ // prepare for rewrite below
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+ eqlen, eqmem := compare.EqString(n.X, n.Y)
+ // quick check of len before full compare for == or !=.
+ // memequal then tests equality up to length len.
+ if n.Op() == ir.OEQ {
+ // len(left) == len(right) && memequal(left, right, len)
+ r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
+ } else {
+ // len(left) != len(right) || !memequal(left, right, len)
+ eqlen.SetOp(ir.ONE)
+ r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
+ }
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
+ r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(base.Pos, 0))
+ }
+
+ return finishCompare(n, r, init)
+}
+
+// The result of finishCompare MUST be assigned back to n, e.g.
+//
+// n.Left = finishCompare(n.Left, x, r, init)
+func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
+ r = typecheck.Expr(r)
+ r = typecheck.Conv(r, n.Type())
+ r = walkExpr(r, init)
+ return r
+}
+
+// brcom returns !(op).
+// For example, brcom(==) is !=.
+func brcom(op ir.Op) ir.Op {
+ switch op {
+ case ir.OEQ:
+ return ir.ONE
+ case ir.ONE:
+ return ir.OEQ
+ case ir.OLT:
+ return ir.OGE
+ case ir.OGT:
+ return ir.OLE
+ case ir.OLE:
+ return ir.OGT
+ case ir.OGE:
+ return ir.OLT
+ }
+ base.Fatalf("brcom: no com for %v\n", op)
+ return op
+}
+
+// brrev returns reverse(op).
+// For example, Brrev(<) is >.
+func brrev(op ir.Op) ir.Op {
+ switch op {
+ case ir.OEQ:
+ return ir.OEQ
+ case ir.ONE:
+ return ir.ONE
+ case ir.OLT:
+ return ir.OGT
+ case ir.OGT:
+ return ir.OLT
+ case ir.OLE:
+ return ir.OGE
+ case ir.OGE:
+ return ir.OLE
+ }
+ base.Fatalf("brrev: no rev for %v\n", op)
+ return op
+}
+
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+ // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
+ n = copyExpr(n, n.Type(), init)
+ }
+
+ return typecheck.Conv(n, t)
+}
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
new file mode 100644
index 0000000..adc44ca
--- /dev/null
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -0,0 +1,684 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// walkCompLit walks a composite literal node:
+// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
+func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
+ if isStaticCompositeLiteral(n) && !ssa.CanSSA(n.Type()) {
+ n := n.(*ir.CompLitExpr) // not OPTRLIT
+ // n can be directly represented in the read-only data section.
+ // Make direct reference to the static data. See issue 12841.
+ vstat := readonlystaticname(n.Type())
+ fixedlit(inInitFunction, initKindStatic, n, vstat, init)
+ return typecheck.Expr(vstat)
+ }
+ var_ := typecheck.TempAt(base.Pos, ir.CurFunc, n.Type())
+ anylit(n, var_, init)
+ return var_
+}
+
+// initContext is the context in which static data is populated.
+// It is either in an init function or in any other function.
+// Static data populated in an init function will be written either
+// zero times (as a readonly, static data symbol) or
+// one time (during init function execution).
+// Either way, there is no opportunity for races or further modification,
+// so the data can be written to a (possibly readonly) data symbol.
+// Static data populated in any other function needs to be local to
+// that function to allow multiple instances of that function
+// to execute concurrently without clobbering each others' data.
+type initContext uint8
+
+const (
+ inInitFunction initContext = iota
+ inNonInitFunction
+)
+
+func (c initContext) String() string {
+ if c == inInitFunction {
+ return "inInitFunction"
+ }
+ return "inNonInitFunction"
+}
+
+// readonlystaticname returns a name backed by a read-only static data symbol.
+func readonlystaticname(t *types.Type) *ir.Name {
+ n := staticinit.StaticName(t)
+ n.MarkReadonly()
+ n.Linksym().Set(obj.AttrContentAddressable, true)
+ n.Linksym().Set(obj.AttrLocal, true)
+ return n
+}
+
+func isSimpleName(nn ir.Node) bool {
+ if nn.Op() != ir.ONAME || ir.IsBlank(nn) {
+ return false
+ }
+ n := nn.(*ir.Name)
+ return n.OnStack()
+}
+
+// initGenType is a bitmap indicating the types of generation that will occur for a static value.
+type initGenType uint8
+
+const (
+ initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
+ initConst // contains some constant values, which may be written into data symbols
+)
+
+// getdyn calculates the initGenType for n.
+// If top is false, getdyn is recursing.
+func getdyn(n ir.Node, top bool) initGenType {
+ switch n.Op() {
+ default:
+ if ir.IsConstNode(n) {
+ return initConst
+ }
+ return initDynamic
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ if !top {
+ return initDynamic
+ }
+ if n.Len/4 > int64(len(n.List)) {
+ // <25% of entries have explicit values.
+ // Very rough estimation, it takes 4 bytes of instructions
+ // to initialize 1 byte of result. So don't use a static
+ // initializer if the dynamic initialization code would be
+ // smaller than the static value.
+ // See issue 23780.
+ return initDynamic
+ }
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ }
+ lit := n.(*ir.CompLitExpr)
+
+ var mode initGenType
+ for _, n1 := range lit.List {
+ switch n1.Op() {
+ case ir.OKEY:
+ n1 = n1.(*ir.KeyExpr).Value
+ case ir.OSTRUCTKEY:
+ n1 = n1.(*ir.StructKeyExpr).Value
+ }
+ mode |= getdyn(n1, false)
+ if mode == initDynamic|initConst {
+ break
+ }
+ }
+ return mode
+}
+
+// isStaticCompositeLiteral reports whether n is a compile-time constant.
+func isStaticCompositeLiteral(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OSLICELIT:
+ return false
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, r := range n.List {
+ if r.Op() == ir.OKEY {
+ r = r.(*ir.KeyExpr).Value
+ }
+ if !isStaticCompositeLiteral(r) {
+ return false
+ }
+ }
+ return true
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, r := range n.List {
+ r := r.(*ir.StructKeyExpr)
+ if !isStaticCompositeLiteral(r.Value) {
+ return false
+ }
+ }
+ return true
+ case ir.OLITERAL, ir.ONIL:
+ return true
+ case ir.OCONVIFACE:
+ // See staticassign's OCONVIFACE case for comments.
+ n := n.(*ir.ConvExpr)
+ val := ir.Node(n)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).X
+ }
+ if val.Type().IsInterface() {
+ return val.Op() == ir.ONIL
+ }
+ if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL {
+ return true
+ }
+ return isStaticCompositeLiteral(val)
+ }
+ return false
+}
+
+// initKind is a kind of static initialization: static, dynamic, or local.
+// Static initialization represents literals and
+// literal components of composite literals.
+// Dynamic initialization represents non-literals and
+// non-literal components of composite literals.
+// LocalCode initialization represents initialization
+// that occurs purely in generated code local to the function of use.
+// Initialization code is sometimes generated in passes,
+// first static then dynamic.
+type initKind uint8
+
+const (
+ initKindStatic initKind = iota + 1
+ initKindDynamic
+ initKindLocalCode
+)
+
+// fixedlit handles struct, array, and slice literals.
+// TODO: expand documentation.
+func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+ isBlank := var_ == ir.BlankNode
+ var splitnode func(ir.Node) (a ir.Node, value ir.Node)
+ switch n.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT:
+ var k int64
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+ if r.Op() == ir.OKEY {
+ kv := r.(*ir.KeyExpr)
+ k = typecheck.IndexConst(kv.Key)
+ if k < 0 {
+ base.Fatalf("fixedlit: invalid index %v", kv.Key)
+ }
+ r = kv.Value
+ }
+ a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(base.Pos, k))
+ k++
+ if isBlank {
+ return ir.BlankNode, r
+ }
+ return a, r
+ }
+ case ir.OSTRUCTLIT:
+ splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
+ r := rn.(*ir.StructKeyExpr)
+ if r.Sym().IsBlank() || isBlank {
+ return ir.BlankNode, r.Value
+ }
+ ir.SetPos(r)
+ return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Value
+ }
+ default:
+ base.Fatalf("fixedlit bad op: %v", n.Op())
+ }
+
+ for _, r := range n.List {
+ a, value := splitnode(r)
+ if a == ir.BlankNode && !staticinit.AnySideEffects(value) {
+ // Discard.
+ continue
+ }
+
+ switch value.Op() {
+ case ir.OSLICELIT:
+ value := value.(*ir.CompLitExpr)
+ if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
+ var sinit ir.Nodes
+ slicelit(ctxt, value, a, &sinit)
+ if kind == initKindStatic {
+ // When doing static initialization, init statements may contain dynamic
+ // expression, which will be initialized later, causing liveness analysis
+ // confuses about variables lifetime. So making sure those expressions
+ // are ordered correctly here. See issue #52673.
+ orderBlock(&sinit, map[string][]*ir.Name{})
+ typecheck.Stmts(sinit)
+ walkStmtList(sinit)
+ }
+ init.Append(sinit...)
+ continue
+ }
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ value := value.(*ir.CompLitExpr)
+ fixedlit(ctxt, kind, value, a, init)
+ continue
+ }
+
+ islit := ir.IsConstNode(value)
+ if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
+ continue
+ }
+
+ // build list of assignments: var[index] = expr
+ ir.SetPos(a)
+ as := ir.NewAssignStmt(base.Pos, a, value)
+ as = typecheck.Stmt(as).(*ir.AssignStmt)
+ switch kind {
+ case initKindStatic:
+ genAsStatic(as)
+ case initKindDynamic, initKindLocalCode:
+ appendWalkStmt(init, orderStmtInPlace(as, map[string][]*ir.Name{}))
+ default:
+ base.Fatalf("fixedlit: bad kind %d", kind)
+ }
+
+ }
+}
+
+func isSmallSliceLit(n *ir.CompLitExpr) bool {
+ if n.Op() != ir.OSLICELIT {
+ return false
+ }
+
+ return n.Type().Elem().Size() == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Size()
+}
+
+func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+ // make an array type corresponding the number of elements we have
+ t := types.NewArray(n.Type().Elem(), n.Len)
+ types.CalcSize(t)
+
+ if ctxt == inNonInitFunction {
+ // put everything into static array
+ vstat := staticinit.StaticName(t)
+
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+ fixedlit(ctxt, initKindDynamic, n, vstat, init)
+
+ // copy static to slice
+ var_ = typecheck.AssignExpr(var_)
+ name, offset, ok := staticinit.StaticLoc(var_)
+ if !ok || name.Class != ir.PEXTERN {
+ base.Fatalf("slicelit: %v", var_)
+ }
+ staticdata.InitSlice(name, offset, vstat.Linksym(), t.NumElem())
+ return
+ }
+
+ // recipe for var = []t{...}
+ // 1. make a static array
+ // var vstat [...]t
+ // 2. assign (data statements) the constant part
+ // vstat = constpart{}
+ // 3. make an auto pointer to array and allocate heap to it
+ // var vauto *[...]t = new([...]t)
+ // 4. copy the static array to the auto array
+ // *vauto = vstat
+ // 5. for each dynamic part assign to the array
+ // vauto[i] = dynamic part
+ // 6. assign slice of allocated heap to var
+ // var = vauto[:]
+ //
+ // an optimization is done if there is no constant part
+ // 3. var vauto *[...]t = new([...]t)
+ // 5. vauto[i] = dynamic part
+ // 6. var = vauto[:]
+
+ // if the literal contains constants,
+ // make static initialized array (1),(2)
+ var vstat ir.Node
+
+ mode := getdyn(n, true)
+ if mode&initConst != 0 && !isSmallSliceLit(n) {
+ if ctxt == inInitFunction {
+ vstat = readonlystaticname(t)
+ } else {
+ vstat = staticinit.StaticName(t)
+ }
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+ }
+
+ // make new auto *array (3 declare)
+ vauto := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t))
+
+ // set auto to point at new temp or heap (3 assign)
+ var a ir.Node
+ if x := n.Prealloc; x != nil {
+ // temp allocated during order.go for dddarg
+ if !types.Identical(t, x.Type()) {
+ panic("dotdotdot base type does not match order's assigned type")
+ }
+ a = initStackTemp(init, x, vstat)
+ } else if n.Esc() == ir.EscNone {
+ a = initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, t), vstat)
+ } else {
+ a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
+
+ if vstat != nil && n.Prealloc == nil && n.Esc() != ir.EscNone {
+ // If we allocated on the heap with ONEW, copy the static to the
+ // heap (4). We skip this for stack temporaries, because
+ // initStackTemp already handled the copy.
+ a = ir.NewStarExpr(base.Pos, vauto)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
+ }
+
+ // put dynamics into array (5)
+ var index int64
+ for _, value := range n.List {
+ if value.Op() == ir.OKEY {
+ kv := value.(*ir.KeyExpr)
+ index = typecheck.IndexConst(kv.Key)
+ if index < 0 {
+ base.Fatalf("slicelit: invalid index %v", kv.Key)
+ }
+ value = kv.Value
+ }
+ a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(base.Pos, index))
+ a.SetBounded(true)
+ index++
+
+ // TODO need to check bounds?
+
+ switch value.Op() {
+ case ir.OSLICELIT:
+ break
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ value := value.(*ir.CompLitExpr)
+ k := initKindDynamic
+ if vstat == nil {
+ // Generate both static and dynamic initializations.
+ // See issue #31987.
+ k = initKindLocalCode
+ }
+ fixedlit(ctxt, k, value, a, init)
+ continue
+ }
+
+ if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value
+ continue
+ }
+
+ // build list of vauto[c] = expr
+ ir.SetPos(value)
+ as := ir.NewAssignStmt(base.Pos, a, value)
+ appendWalkStmt(init, orderStmtInPlace(typecheck.Stmt(as), map[string][]*ir.Name{}))
+ }
+
+ // make slice out of heap (6)
+ a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto, nil, nil, nil))
+ appendWalkStmt(init, orderStmtInPlace(typecheck.Stmt(a), map[string][]*ir.Name{}))
+}
+
+func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
+ // make the map var
+ args := []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(base.Pos, n.Len+int64(len(n.List)))}
+ a := typecheck.Expr(ir.NewCallExpr(base.Pos, ir.OMAKE, nil, args)).(*ir.MakeExpr)
+ a.RType = n.RType
+ a.SetEsc(n.Esc())
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, m, a))
+
+ entries := n.List
+
+ // The order pass already removed any dynamic (runtime-computed) entries.
+ // All remaining entries are static. Double-check that.
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+ base.Fatalf("maplit: entry is not a literal: %v", r)
+ }
+ }
+
+ if len(entries) > 25 {
+ // For a large number of entries, put them in an array and loop.
+
+ // build types [count]Tindex and [count]Tvalue
+ tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+ te := types.NewArray(n.Type().Elem(), int64(len(entries)))
+
+ // TODO(#47904): mark tk and te NoAlg here once the
+ // compiler/linker can handle NoAlg types correctly.
+
+ types.CalcSize(tk)
+ types.CalcSize(te)
+
+ // make and initialize static arrays
+ vstatk := readonlystaticname(tk)
+ vstate := readonlystaticname(te)
+
+ datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+ datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+ datak.List.Append(r.Key)
+ datae.List.Append(r.Value)
+ }
+ fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
+ fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
+
+ // loop adding structure elements to map
+ // for i = 0; i < len(vstatk); i++ {
+ // map[vstatk[i]] = vstate[i]
+ // }
+ i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ rhs := ir.NewIndexExpr(base.Pos, vstate, i)
+ rhs.SetBounded(true)
+
+ kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
+ kidx.SetBounded(true)
+
+ // typechecker rewrites OINDEX to OINDEXMAP
+ lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, kidx)).(*ir.IndexExpr)
+ base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
+ lhs.RType = n.RType
+
+ zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, tk.NumElem()))
+ incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1)))
+
+ var body ir.Node = ir.NewAssignStmt(base.Pos, lhs, rhs)
+ body = typecheck.Stmt(body)
+ body = orderStmtInPlace(body, map[string][]*ir.Name{})
+
+ loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil, false)
+ loop.Body = []ir.Node{body}
+ loop.SetInit([]ir.Node{zero})
+
+ appendWalkStmt(init, loop)
+ return
+ }
+ // For a small number of entries, just add them directly.
+
+ // Build list of var[c] = expr.
+ // Use temporaries so that mapassign1 can have addressable key, elem.
+ // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
+ // TODO(khr): assign these temps in order phase so we can reuse them across multiple maplits?
+ tmpkey := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Key())
+ tmpelem := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Elem())
+
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+ index, elem := r.Key, r.Value
+
+ ir.SetPos(index)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index))
+
+ ir.SetPos(elem)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
+
+ ir.SetPos(tmpelem)
+
+ // typechecker rewrites OINDEX to OINDEXMAP
+ lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, tmpkey)).(*ir.IndexExpr)
+ base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
+ lhs.RType = n.RType
+
+ var a ir.Node = ir.NewAssignStmt(base.Pos, lhs, tmpelem)
+ a = typecheck.Stmt(a)
+ a = orderStmtInPlace(a, map[string][]*ir.Name{})
+ appendWalkStmt(init, a)
+ }
+}
+
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
+ t := n.Type()
+ switch n.Op() {
+ default:
+ base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.SelectorExpr)
+ anylit(n.FuncName(), var_, init)
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ if !t.IsPtr() {
+ base.Fatalf("anylit: not ptr")
+ }
+
+ var r ir.Node
+ if n.Prealloc != nil {
+ // n.Prealloc is stack temporary used as backing store.
+ r = initStackTemp(init, n.Prealloc, nil)
+ } else {
+ r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
+ r.SetEsc(n.Esc())
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r))
+
+ var_ = ir.NewStarExpr(base.Pos, var_)
+ var_ = typecheck.AssignExpr(var_)
+ anylit(n.X, var_, init)
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ if !t.IsStruct() && !t.IsArray() {
+ base.Fatalf("anylit: not struct/array")
+ }
+
+ if isSimpleName(var_) && len(n.List) > 4 {
+ // lay out static data
+ vstat := readonlystaticname(t)
+
+ ctxt := inInitFunction
+ if n.Op() == ir.OARRAYLIT {
+ ctxt = inNonInitFunction
+ }
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+
+ // copy static to var
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat))
+
+ // add expressions to automatic
+ fixedlit(inInitFunction, initKindDynamic, n, var_, init)
+ break
+ }
+
+ var components int64
+ if n.Op() == ir.OARRAYLIT {
+ components = t.NumElem()
+ } else {
+ components = int64(t.NumFields())
+ }
+ // initialization of an array or struct with unspecified components (missing fields or arrays)
+ if isSimpleName(var_) || int64(len(n.List)) < components {
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))
+ }
+
+ fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ slicelit(inInitFunction, n, var_, init)
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ if !t.IsMap() {
+ base.Fatalf("anylit: not map")
+ }
+ maplit(n, var_, init)
+ }
+}
+
+// oaslit handles special composite literal assignments.
+// It returns true if n's effects have been added to init,
+// in which case n should be dropped from the program by the caller.
+func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
+ if n.X == nil || n.Y == nil {
+ // not a special composite literal assignment
+ return false
+ }
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ // not a special composite literal assignment
+ return false
+ }
+ if !isSimpleName(n.X) {
+ // not a special composite literal assignment
+ return false
+ }
+ x := n.X.(*ir.Name)
+ if !types.Identical(n.X.Type(), n.Y.Type()) {
+ // not a special composite literal assignment
+ return false
+ }
+ if x.Addrtaken() {
+ // If x is address-taken, the RHS may (implicitly) uses LHS.
+ // Not safe to do a special composite literal assignment
+ // (which may expand to multiple assignments).
+ return false
+ }
+
+ switch n.Y.Op() {
+ default:
+ // not a special composite literal assignment
+ return false
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ if ir.Any(n.Y, func(y ir.Node) bool { return ir.Uses(y, x) }) {
+ // not safe to do a special composite literal assignment if RHS uses LHS.
+ return false
+ }
+ anylit(n.Y, n.X, init)
+ }
+
+ return true
+}
+
+func genAsStatic(as *ir.AssignStmt) {
+ if as.X.Type() == nil {
+ base.Fatalf("genAsStatic as.Left not typechecked")
+ }
+
+ name, offset, ok := staticinit.StaticLoc(as.X)
+ if !ok || (name.Class != ir.PEXTERN && as.X != ir.BlankNode) {
+ base.Fatalf("genAsStatic: lhs %v", as.X)
+ }
+
+ switch r := as.Y; r.Op() {
+ case ir.OLITERAL:
+ staticdata.InitConst(name, offset, r, int(r.Type().Size()))
+ return
+ case ir.OMETHEXPR:
+ r := r.(*ir.SelectorExpr)
+ staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r.FuncName()))
+ return
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if r.Offset_ != 0 {
+ base.Fatalf("genAsStatic %+v", as)
+ }
+ if r.Class == ir.PFUNC {
+ staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r))
+ return
+ }
+ }
+ base.Fatalf("genAsStatic: rhs %v", as.Y)
+}
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
new file mode 100644
index 0000000..280b3b6
--- /dev/null
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -0,0 +1,536 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "encoding/binary"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/sys"
+)
+
+// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
+func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
+ return n.X
+ }
+ if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
+ return walkCheckPtrArithmetic(n, init)
+ }
+ }
+ param, result := rtconvfn(n.X.Type(), n.Type())
+ if param == types.Txxx {
+ return n
+ }
+ fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
+ return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
+}
+
+// walkConvInterface walks an OCONVIFACE node.
+func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+
+ n.X = walkExpr(n.X, init)
+
+ fromType := n.X.Type()
+ toType := n.Type()
+ if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) {
+ // skip unnamed functions (func _())
+ if fromType.HasShape() {
+ // Unified IR uses OCONVIFACE for converting all derived types
+ // to interface type. Avoid assertion failure in
+ // MarkTypeUsedInInterface, because we've marked used types
+ // separately anyway.
+ } else {
+ reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
+ }
+ }
+
+ if !fromType.IsInterface() {
+ typeWord := reflectdata.ConvIfaceTypeWord(base.Pos, n)
+ l := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, dataWord(n, init))
+ l.SetType(toType)
+ l.SetTypecheck(n.Typecheck())
+ return l
+ }
+ if fromType.IsEmptyInterface() {
+ base.Fatalf("OCONVIFACE can't operate on an empty interface")
+ }
+
+ // Evaluate the input interface.
+ c := typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
+ init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
+
+ if toType.IsEmptyInterface() {
+ // Implement interface to empty interface conversion:
+ //
+ // var res *uint8
+ // res = (*uint8)(unsafe.Pointer(itab))
+ // if res != nil {
+ // res = res.type
+ // }
+
+ // Grab its parts.
+ itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
+ itab.SetType(types.Types[types.TUINTPTR].PtrTo())
+ itab.SetTypecheck(1)
+ data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
+ data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
+ data.SetTypecheck(1)
+
+ typeWord := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(types.Types[types.TUINT8]))
+ init.Append(ir.NewAssignStmt(base.Pos, typeWord, typecheck.Conv(typecheck.Conv(itab, types.Types[types.TUNSAFEPTR]), typeWord.Type())))
+ nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, typeWord, typecheck.NodNil())), nil, nil)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, typeWord, itabType(typeWord))}
+ init.Append(nif)
+
+ // Build the result.
+ // e = iface{typeWord, data}
+ e := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, data)
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
+ e.SetTypecheck(1)
+ return e
+ }
+
+ // Must be converting I2I (more specific to less specific interface).
+ // Use the same code as e, _ = c.(T).
+ var rhs ir.Node
+ if n.TypeWord == nil || n.TypeWord.Op() == ir.OADDR && n.TypeWord.(*ir.AddrExpr).X.Op() == ir.OLINKSYMOFFSET {
+ // Fixed (not loaded from a dictionary) type.
+ ta := ir.NewTypeAssertExpr(base.Pos, c, toType)
+ ta.SetOp(ir.ODOTTYPE2)
+ // Allocate a descriptor for this conversion to pass to the runtime.
+ ta.Descriptor = makeTypeAssertDescriptor(toType, true)
+ rhs = ta
+ } else {
+ ta := ir.NewDynamicTypeAssertExpr(base.Pos, ir.ODYNAMICDOTTYPE2, c, n.TypeWord)
+ rhs = ta
+ }
+ rhs.SetType(toType)
+ rhs.SetTypecheck(1)
+
+ res := typecheck.TempAt(base.Pos, ir.CurFunc, toType)
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2DOTTYPE, []ir.Node{res, ir.BlankNode}, []ir.Node{rhs})
+ init.Append(as)
+ return res
+}
+
+// Returns the data word (the second word) used to represent conv.X in
+// an interface.
+func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ pos, n := conv.Pos(), conv.X
+ fromType := n.Type()
+
+ // If it's a pointer, it is its own representation.
+ if types.IsDirectIface(fromType) {
+ return n
+ }
+
+ isInteger := fromType.IsInteger()
+ isBool := fromType.IsBoolean()
+ if sc := fromType.SoleComponent(); sc != nil {
+ isInteger = sc.IsInteger()
+ isBool = sc.IsBoolean()
+ }
+ // Try a bunch of cases to avoid an allocation.
+ var value ir.Node
+ switch {
+ case fromType.Size() == 0:
+ // n is zero-sized. Use zerobase.
+ cheapExpr(n, init) // Evaluate n for side-effects. See issue 19246.
+ value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR])
+ case isBool || fromType.Size() == 1 && isInteger:
+ // n is a bool/byte. Use staticuint64s[n * 8] on little-endian
+ // and staticuint64s[n * 8 + 7] on big-endian.
+ n = cheapExpr(n, init)
+ n = soleComponent(init, n)
+ // byteindex widens n so that the multiplication doesn't overflow.
+ index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n), ir.NewInt(base.Pos, 3))
+ if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
+ index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(base.Pos, 7))
+ }
+ // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+ // individual bytes.
+ staticuint64s := ir.NewLinksymExpr(base.Pos, ir.Syms.Staticuint64s, types.NewArray(types.Types[types.TUINT8], 256*8))
+ xe := ir.NewIndexExpr(base.Pos, staticuint64s, index)
+ xe.SetBounded(true)
+ value = xe
+ case n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PEXTERN && n.(*ir.Name).Readonly():
+ // n is a readonly global; use it directly.
+ value = n
+ case conv.Esc() == ir.EscNone && fromType.Size() <= 1024:
+ // n does not escape. Use a stack temporary initialized to n.
+ value = typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
+ }
+ if value != nil {
+ // The interface data word is &value.
+ return typecheck.Expr(typecheck.NodAddr(value))
+ }
+
+ // Time to do an allocation. We'll call into the runtime for that.
+ fnname, argType, needsaddr := dataWordFuncName(fromType)
+ var fn *ir.Name
+
+ var args []ir.Node
+ if needsaddr {
+ // Types of large or unknown size are passed by reference.
+ // Orderexpr arranged for n to be a temporary for all
+ // the conversions it could see. Comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to order.stmt, so we
+ // have to fall back on allocating a temp here.
+ if !ir.IsAddressable(n) {
+ n = copyExpr(n, fromType, init)
+ }
+ fn = typecheck.LookupRuntime(fnname, fromType)
+ args = []ir.Node{reflectdata.ConvIfaceSrcRType(base.Pos, conv), typecheck.NodAddr(n)}
+ } else {
+ // Use a specialized conversion routine that takes the type being
+ // converted by value, not by pointer.
+ fn = typecheck.LookupRuntime(fnname)
+ var arg ir.Node
+ switch {
+ case fromType == argType:
+ // already in the right type, nothing to do
+ arg = n
+ case fromType.Kind() == argType.Kind(),
+ fromType.IsPtrShaped() && argType.IsPtrShaped():
+ // can directly convert (e.g. named type to underlying type, or one pointer to another)
+ // TODO: never happens because pointers are directIface?
+ arg = ir.NewConvExpr(pos, ir.OCONVNOP, argType, n)
+ case fromType.IsInteger() && argType.IsInteger():
+ // can directly convert (e.g. int32 to uint32)
+ arg = ir.NewConvExpr(pos, ir.OCONV, argType, n)
+ default:
+ // unsafe cast through memory
+ arg = copyExpr(n, fromType, init)
+ var addr ir.Node = typecheck.NodAddr(arg)
+ addr = ir.NewConvExpr(pos, ir.OCONVNOP, argType.PtrTo(), addr)
+ arg = ir.NewStarExpr(pos, addr)
+ arg.SetType(argType)
+ }
+ args = []ir.Node{arg}
+ }
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args = args
+ return safeExpr(walkExpr(typecheck.Expr(call), init), init)
+}
+
+// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
+func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ // Create temporary buffer for string on stack.
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+ }
+ if n.Op() == ir.ORUNES2STR {
+ // slicerunetostring(*[32]byte, []rune) string
+ return mkcall("slicerunetostring", n.Type(), init, a, n.X)
+ }
+ // slicebytetostring(*[32]byte, ptr *byte, n int) string
+ n.X = cheapExpr(n.X, init)
+ ptr, len := backingArrayPtrLen(n.X)
+ return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
+}
+
+// walkBytesToStringTemp walks an OBYTES2STRTMP node.
+func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ if !base.Flag.Cfg.Instrumenting {
+ // Let the backend handle OBYTES2STRTMP directly
+ // to avoid a function call to slicebytetostringtmp.
+ return n
+ }
+ // slicebytetostringtmp(ptr *byte, n int) string
+ n.X = cheapExpr(n.X, init)
+ ptr, len := backingArrayPtrLen(n.X)
+ return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
+}
+
+// walkRuneToString walks an ORUNESTR node.
+func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ a = stackBufAddr(4, types.Types[types.TUINT8])
+ }
+ // intstring(*[4]byte, rune)
+ return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
+}
+
+// walkStringToBytes walks an OSTR2BYTES node.
+func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ s := n.X
+ if ir.IsConst(s, constant.String) {
+ sc := ir.StringVal(s)
+
+ // Allocate a [n]byte of the right size.
+ t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+ var a ir.Node
+ if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
+ a = stackBufAddr(t.NumElem(), t.Elem())
+ } else {
+ types.CalcSize(t)
+ a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil)
+ a.SetType(types.NewPtr(t))
+ a.SetTypecheck(1)
+ a.MarkNonNil()
+ }
+ p := typecheck.TempAt(base.Pos, ir.CurFunc, t.PtrTo()) // *[n]byte
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
+
+ // Copy from the static string data to the [n]byte.
+ if len(sc) > 0 {
+ sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+ sptr.SetBounded(true)
+ as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(sptr, t.PtrTo())))
+ appendWalkStmt(init, as)
+ }
+
+ // Slice the [n]byte to a []byte.
+ slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p, nil, nil, nil)
+ slice.SetType(n.Type())
+ slice.SetTypecheck(1)
+ return walkExpr(slice, init)
+ }
+
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ // Create temporary buffer for slice on stack.
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+ }
+ // stringtoslicebyte(*32[byte], string) []byte
+ return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
+}
+
+// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
+func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ // []byte(string) conversion that creates a slice
+ // referring to the actual string bytes.
+ // This conversion is handled later by the backend and
+ // is only for use by internal compiler optimizations
+ // that know that the slice won't be mutated.
+ // The only such case today is:
+ // for i, c := range []byte(string)
+ n.X = walkExpr(n.X, init)
+ return n
+}
+
+// walkStringToRunes walks an OSTR2RUNES node.
+func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ // Create temporary buffer for slice on stack.
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32])
+ }
+ // stringtoslicerune(*[32]rune, string) []rune
+ return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
+}
+
+// dataWordFuncName returns the name of the function used to convert a value of type "from"
+// to the data word of an interface.
+// argType is the type the argument needs to be coerced to.
+// needsaddr reports whether the value should be passed (needaddr==false) or its address (needsaddr==true).
+func dataWordFuncName(from *types.Type) (fnname string, argType *types.Type, needsaddr bool) {
+ if from.IsInterface() {
+ base.Fatalf("can only handle non-interfaces")
+ }
+ switch {
+ case from.Size() == 2 && uint8(from.Alignment()) == 2:
+ return "convT16", types.Types[types.TUINT16], false
+ case from.Size() == 4 && uint8(from.Alignment()) == 4 && !from.HasPointers():
+ return "convT32", types.Types[types.TUINT32], false
+ case from.Size() == 8 && uint8(from.Alignment()) == uint8(types.Types[types.TUINT64].Alignment()) && !from.HasPointers():
+ return "convT64", types.Types[types.TUINT64], false
+ }
+ if sc := from.SoleComponent(); sc != nil {
+ switch {
+ case sc.IsString():
+ return "convTstring", types.Types[types.TSTRING], false
+ case sc.IsSlice():
+ return "convTslice", types.NewSlice(types.Types[types.TUINT8]), false // the element type doesn't matter
+ }
+ }
+
+ if from.HasPointers() {
+ return "convT", types.Types[types.TUNSAFEPTR], true
+ }
+ return "convTnoptr", types.Types[types.TUNSAFEPTR], true
+}
+
+// rtconvfn returns the parameter and result types that will be used by a
+// runtime function to convert from type src to type dst. The runtime function
+// name can be derived from the names of the returned types.
+//
+// If no such function is necessary, it returns (Txxx, Txxx).
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
+ if ssagen.Arch.SoftFloat {
+ return types.Txxx, types.Txxx
+ }
+
+ switch ssagen.Arch.LinkArch.Family {
+ case sys.ARM, sys.MIPS:
+ if src.IsFloat() {
+ switch dst.Kind() {
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Kind()
+ }
+ }
+ if dst.IsFloat() {
+ switch src.Kind() {
+ case types.TINT64, types.TUINT64:
+ return src.Kind(), dst.Kind()
+ }
+ }
+
+ case sys.I386:
+ if src.IsFloat() {
+ switch dst.Kind() {
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Kind()
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TFLOAT64, types.TUINT32
+ }
+ }
+ if dst.IsFloat() {
+ switch src.Kind() {
+ case types.TINT64, types.TUINT64:
+ return src.Kind(), dst.Kind()
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TUINT32, types.TFLOAT64
+ }
+ }
+ }
+ return types.Txxx, types.Txxx
+}
+
+func soleComponent(init *ir.Nodes, n ir.Node) ir.Node {
+ if n.Type().SoleComponent() == nil {
+ return n
+ }
+ // Keep in sync with cmd/compile/internal/types/type.go:Type.SoleComponent.
+ for {
+ switch {
+ case n.Type().IsStruct():
+ if n.Type().Field(0).Sym.IsBlank() {
+ // Treat blank fields as the zero value as the Go language requires.
+ n = typecheck.TempAt(base.Pos, ir.CurFunc, n.Type().Field(0).Type)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n, nil))
+ continue
+ }
+ n = typecheck.DotField(n.Pos(), n, 0)
+ case n.Type().IsArray():
+ n = typecheck.Expr(ir.NewIndexExpr(n.Pos(), n, ir.NewInt(base.Pos, 0)))
+ default:
+ return n
+ }
+ }
+}
+
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
+// which is forbidden in user code.
+func byteindex(n ir.Node) ir.Node {
+ // We cannot convert from bool to int directly.
+ // While converting from int8 to int is possible, it would yield
+ // the wrong result for negative values.
+ // Reinterpreting the value as an unsigned byte solves both cases.
+ if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUINT8])
+ n.SetTypecheck(1)
+ }
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TINT])
+ n.SetTypecheck(1)
+ return n
+}
+
+func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ // Calling cheapExpr(n, init) below leads to a recursive call to
+ // walkExpr, which leads us back here again. Use n.Checkptr to
+ // prevent infinite loops.
+ if n.CheckPtr() {
+ return n
+ }
+ n.SetCheckPtr(true)
+ defer n.SetCheckPtr(false)
+
+ // TODO(mdempsky): Make stricter. We only need to exempt
+ // reflect.Value.Pointer and reflect.Value.UnsafeAddr.
+ switch n.X.Op() {
+ case ir.OCALLMETH:
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ return n
+ }
+
+ if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) {
+ return n
+ }
+
+ // Find original unsafe.Pointer operands involved in this
+ // arithmetic expression.
+ //
+ // "It is valid both to add and to subtract offsets from a
+ // pointer in this way. It is also valid to use &^ to round
+ // pointers, usually for alignment."
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OADD:
+ n := n.(*ir.BinaryExpr)
+ walk(n.X)
+ walk(n.Y)
+ case ir.OSUB, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ walk(n.X)
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Type().IsUnsafePtr() {
+ n.X = cheapExpr(n.X, init)
+ originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]))
+ }
+ }
+ }
+ walk(n.X)
+
+ cheap := cheapExpr(n, init)
+
+ slice := typecheck.MakeDotArgs(base.Pos, types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice.SetEsc(ir.EscNone)
+
+ init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
+ // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
+ // the backing store for multiple calls to checkptrArithmetic.
+
+ return cheap
+}
+
+// walkSliceToArray walks an OSLICE2ARR expression.
+func walkSliceToArray(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ // Replace T(x) with *(*T)(x).
+ conv := typecheck.Expr(ir.NewConvExpr(base.Pos, ir.OCONV, types.NewPtr(n.Type()), n.X)).(*ir.ConvExpr)
+ deref := typecheck.Expr(ir.NewStarExpr(base.Pos, conv)).(*ir.StarExpr)
+
+ // The OSLICE2ARRPTR conversion handles checking the slice length,
+ // so the dereference can't fail.
+ //
+ // However, this is more than just an optimization: if T is a
+ // zero-length array, then x (and thus (*T)(x)) can be nil, but T(x)
+ // should *not* panic. So suppressing the nil check here is
+ // necessary for correctness in that case.
+ deref.SetBounded(true)
+
+ return walkExpr(deref, init)
+}
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
new file mode 100644
index 0000000..268f793
--- /dev/null
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -0,0 +1,1096 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "internal/abi"
+ "internal/buildcfg"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/rttype"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+)
+
+// The result of walkExpr MUST be assigned back to n, e.g.
+//
+// n.Left = walkExpr(n.Left, init)
+func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
+ if n == nil {
+ return n
+ }
+
+ if n, ok := n.(ir.InitNode); ok && init == n.PtrInit() {
+ // not okay to use n->ninit when walking n,
+ // because we might replace n with some other node
+ // and would lose the init list.
+ base.Fatalf("walkExpr init == &n->ninit")
+ }
+
+ if len(n.Init()) != 0 {
+ walkStmtList(n.Init())
+ init.Append(ir.TakeInit(n)...)
+ }
+
+ lno := ir.SetPos(n)
+
+ if base.Flag.LowerW > 1 {
+ ir.Dump("before walk expr", n)
+ }
+
+ if n.Typecheck() != 1 {
+ base.Fatalf("missed typecheck: %+v", n)
+ }
+
+ if n.Type().IsUntyped() {
+ base.Fatalf("expression has untyped type: %+v", n)
+ }
+
+ n = walkExpr1(n, init)
+
+ // Eagerly compute sizes of all expressions for the back end.
+ if typ := n.Type(); typ != nil && typ.Kind() != types.TBLANK && !typ.IsFuncArgStruct() {
+ types.CheckSize(typ)
+ }
+ if n, ok := n.(*ir.Name); ok && n.Heapaddr != nil {
+ types.CheckSize(n.Heapaddr.Type())
+ }
+ if ir.IsConst(n, constant.String) {
+ // Emit string symbol now to avoid emitting
+ // any concurrently during the backend.
+ _ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val()))
+ }
+
+ if base.Flag.LowerW != 0 && n != nil {
+ ir.Dump("after walk expr", n)
+ }
+
+ base.Pos = lno
+ return n
+}
+
+func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
+ default:
+ ir.Dump("walk", n)
+ base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op())
+ panic("unreachable")
+
+ case ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP:
+ return n
+
+ case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
+ // TODO(mdempsky): Just return n; see discussion on CL 38655.
+ // Perhaps refactor to use Node.mayBeShared for these instead.
+ // If these return early, make sure to still call
+ // StringSym for constant strings.
+ return n
+
+ case ir.OMETHEXPR:
+ // TODO(mdempsky): Do this right after type checking.
+ n := n.(*ir.SelectorExpr)
+ return n.FuncName()
+
+ case ir.OMIN, ir.OMAX:
+ n := n.(*ir.CallExpr)
+ return walkMinMax(n, init)
+
+ case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
+ n := n.(*ir.UnaryExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.OMAKEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH,
+ ir.OUNSAFEADD:
+ n := n.(*ir.BinaryExpr)
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+ return n
+
+ case ir.OUNSAFESLICE:
+ n := n.(*ir.BinaryExpr)
+ return walkUnsafeSlice(n, init)
+
+ case ir.OUNSAFESTRING:
+ n := n.(*ir.BinaryExpr)
+ return walkUnsafeString(n, init)
+
+ case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA:
+ n := n.(*ir.UnaryExpr)
+ return walkUnsafeData(n, init)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return walkDot(n, init)
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ return walkDotType(n, init)
+
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ return walkDynamicDotType(n, init)
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ return walkLenCap(n, init)
+
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+ return n
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ return walkCompare(n, init)
+
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ return walkLogical(n, init)
+
+ case ir.OPRINT, ir.OPRINTLN:
+ return walkPrint(n.(*ir.CallExpr), init)
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ return mkcall("gopanic", nil, init, n.X)
+
+ case ir.ORECOVERFP:
+ return walkRecoverFP(n.(*ir.CallExpr), init)
+
+ case ir.OCFUNC:
+ return n
+
+ case ir.OCALLINTER, ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ return walkCall(n, init)
+
+ case ir.OAS, ir.OASOP:
+ return walkAssign(init, n)
+
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignList(init, n)
+
+ // a,b,... = fn()
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignFunc(init, n)
+
+ // x, y = <-c
+ // order.stmt made sure x is addressable or blank.
+ case ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignRecv(init, n)
+
+ // a,b = m[i]
+ case ir.OAS2MAPR:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignMapRead(init, n)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ return walkDelete(init, n)
+
+ case ir.OAS2DOTTYPE:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignDotType(n, init)
+
+ case ir.OCONVIFACE:
+ n := n.(*ir.ConvExpr)
+ return walkConvInterface(n, init)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ return walkConv(n, init)
+
+ case ir.OSLICE2ARR:
+ n := n.(*ir.ConvExpr)
+ return walkSliceToArray(n, init)
+
+ case ir.OSLICE2ARRPTR:
+ n := n.(*ir.ConvExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return walkDivMod(n, init)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return walkIndex(n, init)
+
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ return walkIndexMap(n, init)
+
+ case ir.ORECV:
+ base.Fatalf("walkExpr ORECV") // should see inside OAS only
+ panic("unreachable")
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ return walkSliceHeader(n, init)
+
+ case ir.OSTRINGHEADER:
+ n := n.(*ir.StringHeaderExpr)
+ return walkStringHeader(n, init)
+
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ return walkSlice(n, init)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ return walkNew(n, init)
+
+ case ir.OADDSTR:
+ return walkAddString(n.(*ir.AddStringExpr), init)
+
+ case ir.OAPPEND:
+ // order should make sure we only see OAS(node, OAPPEND), which we handle above.
+ base.Fatalf("append outside assignment")
+ panic("unreachable")
+
+ case ir.OCOPY:
+ return walkCopy(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime)
+
+ case ir.OCLEAR:
+ n := n.(*ir.UnaryExpr)
+ return walkClear(n)
+
+ case ir.OCLOSE:
+ n := n.(*ir.UnaryExpr)
+ return walkClose(n, init)
+
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ return walkMakeChan(n, init)
+
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ return walkMakeMap(n, init)
+
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ return walkMakeSlice(n, init)
+
+ case ir.OMAKESLICECOPY:
+ n := n.(*ir.MakeExpr)
+ return walkMakeSliceCopy(n, init)
+
+ case ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ return walkRuneToString(n, init)
+
+ case ir.OBYTES2STR, ir.ORUNES2STR:
+ n := n.(*ir.ConvExpr)
+ return walkBytesRunesToString(n, init)
+
+ case ir.OBYTES2STRTMP:
+ n := n.(*ir.ConvExpr)
+ return walkBytesToStringTemp(n, init)
+
+ case ir.OSTR2BYTES:
+ n := n.(*ir.ConvExpr)
+ return walkStringToBytes(n, init)
+
+ case ir.OSTR2BYTESTMP:
+ n := n.(*ir.ConvExpr)
+ return walkStringToBytesTemp(n, init)
+
+ case ir.OSTR2RUNES:
+ n := n.(*ir.ConvExpr)
+ return walkStringToRunes(n, init)
+
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+ return walkCompLit(n, init)
+
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ return walkSend(n, init)
+
+ case ir.OCLOSURE:
+ return walkClosure(n.(*ir.ClosureExpr), init)
+
+ case ir.OMETHVALUE:
+ return walkMethodValue(n.(*ir.SelectorExpr), init)
+ }
+
+ // No return! Each case must return (or panic),
+ // to avoid confusion about what gets returned
+ // in the presence of type assertions.
+}
+
+// walk the whole tree of the body of an
+// expression or simple statement.
+// the types expressions are calculated.
+// compile-time constants are evaluated.
+// complex side effects like statements are appended to init.
+func walkExprList(s []ir.Node, init *ir.Nodes) {
+ for i := range s {
+ s[i] = walkExpr(s[i], init)
+ }
+}
+
+func walkExprListCheap(s []ir.Node, init *ir.Nodes) {
+ for i, n := range s {
+ s[i] = cheapExpr(n, init)
+ s[i] = walkExpr(s[i], init)
+ }
+}
+
+func walkExprListSafe(s []ir.Node, init *ir.Nodes) {
+ for i, n := range s {
+ s[i] = safeExpr(n, init)
+ s[i] = walkExpr(s[i], init)
+ }
+}
+
+// return side-effect free and cheap n, appending side effects to init.
+// result may not be assignable.
+func cheapExpr(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return n
+ }
+
+ return copyExpr(n, n.Type(), init)
+}
+
+// return side effect-free n, appending side effects to init.
+// result is assignable if n is.
+func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
+ if n == nil {
+ return nil
+ }
+
+ if len(n.Init()) != 0 {
+ walkStmtList(n.Init())
+ init.Append(ir.TakeInit(n)...)
+ }
+
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
+ return n
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ l := safeExpr(n.X, init)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.UnaryExpr)
+ a.X = l
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ l := safeExpr(n.X, init)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.SelectorExpr)
+ a.X = l
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ l := safeExpr(n.X, init)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.StarExpr)
+ a.X = l
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ l := safeExpr(n.X, init)
+ r := safeExpr(n.Index, init)
+ if l == n.X && r == n.Index {
+ return n
+ }
+ a := ir.Copy(n).(*ir.IndexExpr)
+ a.X = l
+ a.Index = r
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ if isStaticCompositeLiteral(n) {
+ return n
+ }
+ }
+
+ // make a copy; must not be used as an lvalue
+ if ir.IsAddressable(n) {
+ base.Fatalf("missing lvalue case in safeExpr: %v", n)
+ }
+ return cheapExpr(n, init)
+}
+
+func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+ l := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
+ return l
+}
+
+func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
+ c := len(n.List)
+
+ if c < 2 {
+ base.Fatalf("walkAddString count %d too small", c)
+ }
+
+ buf := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ sz := int64(0)
+ for _, n1 := range n.List {
+ if n1.Op() == ir.OLITERAL {
+ sz += int64(len(ir.StringVal(n1)))
+ }
+ }
+
+ // Don't allocate the buffer if the result won't fit.
+ if sz < tmpstringbufsize {
+ // Create temporary buffer for result string on stack.
+ buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+ }
+ }
+
+ // build list of string arguments
+ args := []ir.Node{buf}
+ for _, n2 := range n.List {
+ args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING]))
+ }
+
+ var fn string
+ if c <= 5 {
+ // small numbers of strings use direct runtime helpers.
+ // note: order.expr knows this cutoff too.
+ fn = fmt.Sprintf("concatstring%d", c)
+ } else {
+ // large numbers of strings are passed to the runtime as a slice.
+ fn = "concatstrings"
+
+ t := types.NewSlice(types.Types[types.TSTRING])
+ // args[1:] to skip buf arg
+ slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, t, args[1:])
+ slice.Prealloc = n.Prealloc
+ args = []ir.Node{buf, slice}
+ slice.SetEsc(ir.EscNone)
+ }
+
+ cat := typecheck.LookupRuntime(fn)
+ r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil)
+ r.Args = args
+ r1 := typecheck.Expr(r)
+ r1 = walkExpr(r1, init)
+ r1.SetType(n.Type())
+
+ return r1
+}
+
+type hookInfo struct {
+ paramType types.Kind
+ argsNum int
+ runtimeFunc string
+}
+
+var hooks = map[string]hookInfo{
+ "strings.EqualFold": {paramType: types.TSTRING, argsNum: 2, runtimeFunc: "libfuzzerHookEqualFold"},
+}
+
+// walkCall walks an OCALLFUNC or OCALLINTER node.
+func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ if n.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Op() == ir.OCALLINTER || n.Fun.Op() == ir.OMETHEXPR {
+ // We expect both interface call reflect.Type.Method and concrete
+ // call reflect.(*rtype).Method.
+ usemethod(n)
+ }
+ if n.Op() == ir.OCALLINTER {
+ reflectdata.MarkUsedIfaceMethod(n)
+ }
+
+ if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.OCLOSURE {
+ directClosureCall(n)
+ }
+
+ if ir.IsFuncPCIntrinsic(n) {
+ // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite
+ // it to the address of the function of the ABI fn is defined.
+ name := n.Fun.(*ir.Name).Sym().Name
+ arg := n.Args[0]
+ var wantABI obj.ABI
+ switch name {
+ case "FuncPCABI0":
+ wantABI = obj.ABI0
+ case "FuncPCABIInternal":
+ wantABI = obj.ABIInternal
+ }
+ if n.Type() != types.Types[types.TUINTPTR] {
+ base.FatalfAt(n.Pos(), "FuncPC intrinsic should return uintptr, got %v", n.Type()) // as expected by typecheck.FuncPC.
+ }
+ n := ir.FuncPC(n.Pos(), arg, wantABI)
+ return walkExpr(n, init)
+ }
+
+ if name, ok := n.Fun.(*ir.Name); ok {
+ sym := name.Sym()
+ if sym.Pkg.Path == "go.runtime" && sym.Name == "deferrangefunc" {
+ // Call to runtime.deferrangefunc is being shared with a range-over-func
+ // body that might add defers to this frame, so we cannot use open-coded defers
+ // and we need to call deferreturn even if we don't see any other explicit defers.
+ ir.CurFunc.SetHasDefer(true)
+ ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+ }
+ }
+
+ walkCall1(n, init)
+ return n
+}
+
+func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
+ if n.Walked() {
+ return // already walked
+ }
+ n.SetWalked(true)
+
+ if n.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ args := n.Args
+ params := n.Fun.Type().Params()
+
+ n.Fun = walkExpr(n.Fun, init)
+ walkExprList(args, init)
+
+ for i, arg := range args {
+ // Validate argument and parameter types match.
+ param := params[i]
+ if !types.Identical(arg.Type(), param.Type) {
+ base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type)
+ }
+
+ // For any argument whose evaluation might require a function call,
+ // store that argument into a temporary variable,
+ // to prevent that calls from clobbering arguments already on the stack.
+ if mayCall(arg) {
+ // assignment of arg to Temp
+ tmp := typecheck.TempAt(base.Pos, ir.CurFunc, param.Type)
+ init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init))
+ // replace arg with temp
+ args[i] = tmp
+ }
+ }
+
+ funSym := n.Fun.Sym()
+ if base.Debug.Libfuzzer != 0 && funSym != nil {
+ if hook, found := hooks[funSym.Pkg.Path+"."+funSym.Name]; found {
+ if len(args) != hook.argsNum {
+ panic(fmt.Sprintf("%s.%s expects %d arguments, but received %d", funSym.Pkg.Path, funSym.Name, hook.argsNum, len(args)))
+ }
+ var hookArgs []ir.Node
+ for _, arg := range args {
+ hookArgs = append(hookArgs, tracecmpArg(arg, types.Types[hook.paramType], init))
+ }
+ hookArgs = append(hookArgs, fakePC(n))
+ init.Append(mkcall(hook.runtimeFunc, nil, init, hookArgs...))
+ }
+ }
+}
+
+// walkDivMod walks an ODIV or OMOD node.
+func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+
+ // rewrite complex div into function call.
+ et := n.X.Type().Kind()
+
+ if types.IsComplex[et] && n.Op() == ir.ODIV {
+ t := n.Type()
+ call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128]))
+ return typecheck.Conv(call, t)
+ }
+
+ // Nothing to do for float divisions.
+ if types.IsFloat[et] {
+ return n
+ }
+
+ // rewrite 64-bit div and mod on 32-bit architectures.
+ // TODO: Remove this code once we can introduce
+ // runtime calls late in SSA processing.
+ if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) {
+ if n.Y.Op() == ir.OLITERAL {
+ // Leave div/mod by constant powers of 2 or small 16-bit constants.
+ // The SSA backend will handle those.
+ switch et {
+ case types.TINT64:
+ c := ir.Int64Val(n.Y)
+ if c < 0 {
+ c = -c
+ }
+ if c != 0 && c&(c-1) == 0 {
+ return n
+ }
+ case types.TUINT64:
+ c := ir.Uint64Val(n.Y)
+ if c < 1<<16 {
+ return n
+ }
+ if c != 0 && c&(c-1) == 0 {
+ return n
+ }
+ }
+ }
+ var fn string
+ if et == types.TINT64 {
+ fn = "int64"
+ } else {
+ fn = "uint64"
+ }
+ if n.Op() == ir.ODIV {
+ fn += "div"
+ } else {
+ fn += "mod"
+ }
+ return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et]))
+ }
+ return n
+}
+
+// walkDot walks an ODOT or ODOTPTR node.
+func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+ usefield(n)
+ n.X = walkExpr(n.X, init)
+ return n
+}
+
+// walkDotType walks an ODOTTYPE or ODOTTYPE2 node.
+func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ // Set up interface type addresses for back end.
+ if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
+ n.ITab = reflectdata.ITabAddrAt(base.Pos, n.Type(), n.X.Type())
+ }
+ if n.X.Type().IsInterface() && n.Type().IsInterface() && !n.Type().IsEmptyInterface() {
+ // This kind of conversion needs a runtime call. Allocate
+ // a descriptor for that call.
+ n.Descriptor = makeTypeAssertDescriptor(n.Type(), n.Op() == ir.ODOTTYPE2)
+ }
+ return n
+}
+
+func makeTypeAssertDescriptor(target *types.Type, canFail bool) *obj.LSym {
+ // When converting from an interface to a non-empty interface. Needs a runtime call.
+ // Allocate an internal/abi.TypeAssert descriptor for that call.
+ lsym := types.LocalPkg.Lookup(fmt.Sprintf(".typeAssert.%d", typeAssertGen)).LinksymABI(obj.ABI0)
+ typeAssertGen++
+ c := rttype.NewCursor(lsym, 0, rttype.TypeAssert)
+ c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyTypeAssertCache"))
+ c.Field("Inter").WritePtr(reflectdata.TypeSym(target).Linksym())
+ c.Field("CanFail").WriteBool(canFail)
+ objw.Global(lsym, int32(rttype.TypeAssert.Size()), obj.LOCAL)
+ lsym.Gotype = reflectdata.TypeLinksym(rttype.TypeAssert)
+ return lsym
+}
+
+var typeAssertGen int
+
+// walkDynamicDotType walks an ODYNAMICDOTTYPE or ODYNAMICDOTTYPE2 node.
+func walkDynamicDotType(n *ir.DynamicTypeAssertExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.RType = walkExpr(n.RType, init)
+ n.ITab = walkExpr(n.ITab, init)
+ // Convert to non-dynamic if we can.
+ if n.RType != nil && n.RType.Op() == ir.OADDR {
+ addr := n.RType.(*ir.AddrExpr)
+ if addr.X.Op() == ir.OLINKSYMOFFSET {
+ r := ir.NewTypeAssertExpr(n.Pos(), n.X, n.Type())
+ if n.Op() == ir.ODYNAMICDOTTYPE2 {
+ r.SetOp(ir.ODOTTYPE2)
+ }
+ r.SetType(n.Type())
+ r.SetTypecheck(1)
+ return walkExpr(r, init)
+ }
+ }
+ return n
+}
+
+// walkIndex walks an OINDEX node.
+func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+
+ // save the original node for bounds checking elision.
+ // If it was a ODIV/OMOD walk might rewrite it.
+ r := n.Index
+
+ n.Index = walkExpr(n.Index, init)
+
+ // if range of type cannot exceed static array bound,
+ // disable bounds check.
+ if n.Bounded() {
+ return n
+ }
+ t := n.X.Type()
+ if t != nil && t.IsPtr() {
+ t = t.Elem()
+ }
+ if t.IsArray() {
+ n.SetBounded(bounded(r, t.NumElem()))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+ base.Warn("index bounds check elided")
+ }
+ } else if ir.IsConst(n.X, constant.String) {
+ n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X)))))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+ base.Warn("index bounds check elided")
+ }
+ }
+ return n
+}
+
+// mapKeyArg returns an expression for key that is suitable to be passed
+// as the key argument for runtime map* functions.
+// n is the map indexing or delete Node (to provide Pos).
+func mapKeyArg(fast int, n, key ir.Node, assigned bool) ir.Node {
+ if fast == mapslow {
+ // standard version takes key by reference.
+ // orderState.expr made sure key is addressable.
+ return typecheck.NodAddr(key)
+ }
+ if assigned {
+ // mapassign does distinguish pointer vs. integer key.
+ return key
+ }
+ // mapaccess and mapdelete don't distinguish pointer vs. integer key.
+ switch fast {
+ case mapfast32ptr:
+ return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT32], key)
+ case mapfast64ptr:
+ return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT64], key)
+ default:
+ // fast version takes key by value.
+ return key
+ }
+}
+
+// walkIndexMap walks an OINDEXMAP node.
+// It replaces m[k] with *map{access1,assign}(maptype, m, &k)
+func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.Index = walkExpr(n.Index, init)
+ map_ := n.X
+ t := map_.Type()
+ fast := mapfast(t)
+ key := mapKeyArg(fast, n, n.Index, n.Assigned)
+ args := []ir.Node{reflectdata.IndexMapRType(base.Pos, n), map_, key}
+
+ var mapFn ir.Node
+ switch {
+ case n.Assigned:
+ mapFn = mapfn(mapassign[fast], t, false)
+ case t.Elem().Size() > abi.ZeroValSize:
+ args = append(args, reflectdata.ZeroAddr(t.Elem().Size()))
+ mapFn = mapfn("mapaccess1_fat", t, true)
+ default:
+ mapFn = mapfn(mapaccess1[fast], t, false)
+ }
+ call := mkcall1(mapFn, nil, init, args...)
+ call.SetType(types.NewPtr(t.Elem()))
+ call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+ star := ir.NewStarExpr(base.Pos, call)
+ star.SetType(t.Elem())
+ star.SetTypecheck(1)
+ return star
+}
+
+// walkLogical walks an OANDAND or OOROR node.
+func walkLogical(n *ir.LogicalExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+
+ // cannot put side effects from n.Right on init,
+ // because they cannot run before n.Left is checked.
+ // save elsewhere and store on the eventual n.Right.
+ var ll ir.Nodes
+
+ n.Y = walkExpr(n.Y, &ll)
+ n.Y = ir.InitExpr(ll, n.Y)
+ return n
+}
+
+// walkSend walks an OSEND node.
+func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node {
+ n1 := n.Value
+ n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send")
+ n1 = walkExpr(n1, init)
+ n1 = typecheck.NodAddr(n1)
+ return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1)
+}
+
+// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node.
+func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.Low = walkExpr(n.Low, init)
+ if n.Low != nil && ir.IsZero(n.Low) {
+ // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
+ n.Low = nil
+ }
+ n.High = walkExpr(n.High, init)
+ n.Max = walkExpr(n.Max, init)
+
+ if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && n.Low == nil && n.High == nil {
+ // Reduce x[:] to x.
+ if base.Debug.Slice > 0 {
+ base.Warn("slice: omit slice operation")
+ }
+ return n.X
+ }
+ return n
+}
+
+// walkSliceHeader walks an OSLICEHEADER node.
+func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node {
+ n.Ptr = walkExpr(n.Ptr, init)
+ n.Len = walkExpr(n.Len, init)
+ n.Cap = walkExpr(n.Cap, init)
+ return n
+}
+
+// walkStringHeader walks an OSTRINGHEADER node.
+func walkStringHeader(n *ir.StringHeaderExpr, init *ir.Nodes) ir.Node {
+ n.Ptr = walkExpr(n.Ptr, init)
+ n.Len = walkExpr(n.Len, init)
+ return n
+}
+
+// return 1 if integer n must be in range [0, max), 0 otherwise.
+func bounded(n ir.Node, max int64) bool {
+ if n.Type() == nil || !n.Type().IsInteger() {
+ return false
+ }
+
+ sign := n.Type().IsSigned()
+ bits := int32(8 * n.Type().Size())
+
+ if ir.IsSmallIntConst(n) {
+ v := ir.Int64Val(n)
+ return 0 <= v && v < max
+ }
+
+ switch n.Op() {
+ case ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ v := int64(-1)
+ switch {
+ case ir.IsSmallIntConst(n.X):
+ v = ir.Int64Val(n.X)
+ case ir.IsSmallIntConst(n.Y):
+ v = ir.Int64Val(n.Y)
+ if n.Op() == ir.OANDNOT {
+ v = ^v
+ if !sign {
+ v &= 1<<uint(bits) - 1
+ }
+ }
+ }
+ if 0 <= v && v < max {
+ return true
+ }
+
+ case ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ if !sign && ir.IsSmallIntConst(n.Y) {
+ v := ir.Int64Val(n.Y)
+ if 0 <= v && v <= max {
+ return true
+ }
+ }
+
+ case ir.ODIV:
+ n := n.(*ir.BinaryExpr)
+ if !sign && ir.IsSmallIntConst(n.Y) {
+ v := ir.Int64Val(n.Y)
+ for bits > 0 && v >= 2 {
+ bits--
+ v >>= 1
+ }
+ }
+
+ case ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ if !sign && ir.IsSmallIntConst(n.Y) {
+ v := ir.Int64Val(n.Y)
+ if v > int64(bits) {
+ return true
+ }
+ bits -= int32(v)
+ }
+ }
+
+ if !sign && bits <= 62 && 1<<uint(bits) <= max {
+ return true
+ }
+
+ return false
+}
+
+// usemethod checks calls for uses of Method and MethodByName of reflect.Value,
+// reflect.Type, reflect.(*rtype), and reflect.(*interfaceType).
+func usemethod(n *ir.CallExpr) {
+ // Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
+ // Those functions may be alive via the itab, which should not cause all methods
+ // alive. We only want to mark their callers.
+ if base.Ctxt.Pkgpath == "reflect" {
+ // TODO: is there a better way than hardcoding the names?
+ switch fn := ir.CurFunc.Nname.Sym().Name; {
+ case fn == "(*rtype).Method", fn == "(*rtype).MethodByName":
+ return
+ case fn == "(*interfaceType).Method", fn == "(*interfaceType).MethodByName":
+ return
+ case fn == "Value.Method", fn == "Value.MethodByName":
+ return
+ }
+ }
+
+ dot, ok := n.Fun.(*ir.SelectorExpr)
+ if !ok {
+ return
+ }
+
+ // looking for either direct method calls and interface method calls of:
+ // reflect.Type.Method - func(int) reflect.Method
+ // reflect.Type.MethodByName - func(string) (reflect.Method, bool)
+ //
+ // reflect.Value.Method - func(int) reflect.Value
+ // reflect.Value.MethodByName - func(string) reflect.Value
+ methodName := dot.Sel.Name
+ t := dot.Selection.Type
+
+ // Check the number of arguments and return values.
+ if t.NumParams() != 1 || (t.NumResults() != 1 && t.NumResults() != 2) {
+ return
+ }
+
+ // Check the type of the argument.
+ switch pKind := t.Param(0).Type.Kind(); {
+ case methodName == "Method" && pKind == types.TINT,
+ methodName == "MethodByName" && pKind == types.TSTRING:
+
+ default:
+ // not a call to Method or MethodByName of reflect.{Type,Value}.
+ return
+ }
+
+ // Check that first result type is "reflect.Method" or "reflect.Value".
+ // Note that we have to check sym name and sym package separately, as
+ // we can't check for exact string "reflect.Method" reliably
+ // (e.g., see #19028 and #38515).
+ switch s := t.Result(0).Type.Sym(); {
+ case s != nil && types.ReflectSymName(s) == "Method",
+ s != nil && types.ReflectSymName(s) == "Value":
+
+ default:
+ // not a call to Method or MethodByName of reflect.{Type,Value}.
+ return
+ }
+
+ var targetName ir.Node
+ switch dot.Op() {
+ case ir.ODOTINTER:
+ if methodName == "MethodByName" {
+ targetName = n.Args[0]
+ }
+ case ir.OMETHEXPR:
+ if methodName == "MethodByName" {
+ targetName = n.Args[1]
+ }
+ default:
+ base.FatalfAt(dot.Pos(), "usemethod: unexpected dot.Op() %s", dot.Op())
+ }
+
+ if ir.IsConst(targetName, constant.String) {
+ name := constant.StringVal(targetName.Val())
+
+ r := obj.Addrel(ir.CurFunc.LSym)
+ r.Type = objabi.R_USENAMEDMETHOD
+ r.Sym = staticdata.StringSymNoCommon(name)
+ } else {
+ ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
+ }
+}
+
+func usefield(n *ir.SelectorExpr) {
+ if !buildcfg.Experiment.FieldTrack {
+ return
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("usefield %v", n.Op())
+
+ case ir.ODOT, ir.ODOTPTR:
+ break
+ }
+
+ field := n.Selection
+ if field == nil {
+ base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel)
+ }
+ if field.Sym != n.Sel {
+ base.Fatalf("field inconsistency: %v != %v", field.Sym, n.Sel)
+ }
+ if !strings.Contains(field.Note, "go:\"track\"") {
+ return
+ }
+
+ outer := n.X.Type()
+ if outer.IsPtr() {
+ outer = outer.Elem()
+ }
+ if outer.Sym() == nil {
+ base.Errorf("tracked field must be in named struct type")
+ }
+
+ sym := reflectdata.TrackSym(outer, field)
+ if ir.CurFunc.FieldTrack == nil {
+ ir.CurFunc.FieldTrack = make(map[*obj.LSym]struct{})
+ }
+ ir.CurFunc.FieldTrack[sym] = struct{}{}
+}
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
new file mode 100644
index 0000000..179fbdb
--- /dev/null
+++ b/src/cmd/compile/internal/walk/order.go
@@ -0,0 +1,1550 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation. Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
+
+// orderState holds state during the ordering process.
+type orderState struct {
+ out []ir.Node // list of generated statements
+ temp []*ir.Name // stack of temporary variables
+ free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
+ edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
+}
+
+// order rewrites fn.Nbody to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *ir.Func) {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore order %v", fn.Sym())
+ ir.DumpList(s, fn.Body)
+ }
+ ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688.
+ orderBlock(&fn.Body, map[string][]*ir.Name{})
+}
+
+// append typechecks stmt and appends it to out.
+func (o *orderState) append(stmt ir.Node) {
+ o.out = append(o.out, typecheck.Stmt(stmt))
+}
+
+// newTemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, newTemp emits code to zero the temporary.
+func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
+ var v *ir.Name
+ key := t.LinkString()
+ if a := o.free[key]; len(a) > 0 {
+ v = a[len(a)-1]
+ if !types.Identical(t, v.Type()) {
+ base.Fatalf("expected %L to have type %v", v, t)
+ }
+ o.free[key] = a[:len(a)-1]
+ } else {
+ v = typecheck.TempAt(base.Pos, ir.CurFunc, t)
+ }
+ if clear {
+ o.append(ir.NewAssignStmt(base.Pos, v, nil))
+ }
+
+ o.temp = append(o.temp, v)
+ return v
+}
+
+// copyExpr behaves like newTemp but also emits
+// code to initialize the temporary to the value n.
+func (o *orderState) copyExpr(n ir.Node) *ir.Name {
+ return o.copyExpr1(n, false)
+}
+
+// copyExprClear is like copyExpr but clears the temp before assignment.
+// It is provided for use when the evaluation of tmp = n turns into
+// a function call that is passed a pointer to the temporary as the output space.
+// If the call blocks before tmp has been written,
+// the garbage collector will still treat the temporary as live,
+// so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
+ return o.copyExpr1(n, true)
+}
+
+func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
+ t := n.Type()
+ v := o.newTemp(t, clear)
+ o.append(ir.NewAssignStmt(base.Pos, v, n))
+ return v
+}
+
+// cheapExpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, cheapExpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func (o *orderState) cheapExpr(n ir.Node) ir.Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return n
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.UnaryExpr)
+ a.X = l
+ return typecheck.Expr(a)
+ }
+
+ return o.copyExpr(n)
+}
+
+// safeExpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
+func (o *orderState) safeExpr(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return n
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ l := o.safeExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.UnaryExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ l := o.safeExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.SelectorExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.SelectorExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.StarExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ var l ir.Node
+ if n.X.Type().IsArray() {
+ l = o.safeExpr(n.X)
+ } else {
+ l = o.cheapExpr(n.X)
+ }
+ r := o.cheapExpr(n.Index)
+ if l == n.X && r == n.Index {
+ return n
+ }
+ a := ir.Copy(n).(*ir.IndexExpr)
+ a.X = l
+ a.Index = r
+ return typecheck.Expr(a)
+
+ default:
+ base.Fatalf("order.safeExpr %v", n.Op())
+ return nil // not reached
+ }
+}
+
+// addrTemp ensures that n is okay to pass by address to runtime routines.
+// If the original argument n is not okay, addrTemp creates a tmp, emits
+// tmp = n, and then returns tmp.
+// The result of addrTemp MUST be assigned back to n, e.g.
+//
+// n.Left = o.addrTemp(n.Left)
+func (o *orderState) addrTemp(n ir.Node) ir.Node {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
+ // TODO: expand this to all static composite literal nodes?
+ n = typecheck.DefaultLit(n, nil)
+ types.CalcSize(n.Type())
+ vstat := readonlystaticname(n.Type())
+ var s staticinit.Schedule
+ s.StaticAssign(vstat, 0, n, n.Type())
+ if s.Out != nil {
+ base.Fatalf("staticassign of const generated code: %+v", n)
+ }
+ vstat = typecheck.Expr(vstat).(*ir.Name)
+ return vstat
+ }
+
+ // Prevent taking the address of an SSA-able local variable (#63332).
+ //
+ // TODO(mdempsky): Note that OuterValue unwraps OCONVNOPs, but
+ // IsAddressable does not. It should be possible to skip copying for
+ // at least some of these OCONVNOPs (e.g., reinsert them after the
+ // OADDR operation), but at least walkCompare needs to be fixed to
+ // support that (see trybot failures on go.dev/cl/541715, PS1).
+ if ir.IsAddressable(n) {
+ if name, ok := ir.OuterValue(n).(*ir.Name); ok && name.Op() == ir.ONAME {
+ if name.Class == ir.PAUTO && !name.Addrtaken() && ssa.CanSSA(name.Type()) {
+ goto Copy
+ }
+ }
+
+ return n
+ }
+
+Copy:
+ return o.copyExpr(n)
+}
+
+// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
+// The first parameter is the position of n's containing node, for use in case
+// that n's position is not unique (e.g., if n is an ONAME).
+func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
+ pos := outerPos
+ if ir.HasUniquePos(n) {
+ pos = n.Pos()
+ }
+ // Most map calls need to take the address of the key.
+ // Exception: map*_fast* calls. See golang.org/issue/19015.
+ alg := mapfast(t)
+ if alg == mapslow {
+ return o.addrTemp(n)
+ }
+ var kt *types.Type
+ switch alg {
+ case mapfast32:
+ kt = types.Types[types.TUINT32]
+ case mapfast64:
+ kt = types.Types[types.TUINT64]
+ case mapfast32ptr, mapfast64ptr:
+ kt = types.Types[types.TUNSAFEPTR]
+ case mapfaststr:
+ kt = types.Types[types.TSTRING]
+ }
+ nt := n.Type()
+ switch {
+ case nt == kt:
+ return n
+ case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
+ // can directly convert (e.g. named type to underlying type, or one pointer to another)
+ return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, kt, n))
+ case nt.IsInteger() && kt.IsInteger():
+ // can directly convert (e.g. int32 to uint32)
+ if n.Op() == ir.OLITERAL && nt.IsSigned() {
+ // avoid constant overflow error
+ n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
+ n.SetType(kt)
+ return n
+ }
+ return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, kt, n))
+ default:
+ // Unsafe cast through memory.
+ // We'll need to do a load with type kt. Create a temporary of type kt to
+ // ensure sufficient alignment. nt may be under-aligned.
+ if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
+ base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
+ }
+ tmp := o.newTemp(kt, true)
+ // *(*nt)(&tmp) = n
+ var e ir.Node = typecheck.NodAddr(tmp)
+ e = ir.NewConvExpr(pos, ir.OCONVNOP, nt.PtrTo(), e)
+ e = ir.NewStarExpr(pos, e)
+ o.append(ir.NewAssignStmt(pos, e, n))
+ return tmp
+ }
+}
+
+// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
+// in n to avoid string allocations for keys in map lookups.
+// Returns a bool that signals if a modification was made.
+//
+// For:
+//
+// x = m[string(k)]
+// x = m[T1{... Tn{..., string(k), ...}}]
+//
+// where k is []byte, T1 to Tn is a nesting of struct and array literals,
+// the allocation of backing bytes for the string can be avoided
+// by reusing the []byte backing array. These are special cases
+// for avoiding allocations when converting byte slices to strings.
+// It would be nice to handle these generally, but because
+// []byte keys are not allowed in maps, the use of string(k)
+// comes up in important cases in practice. See issue 3512.
+func mapKeyReplaceStrConv(n ir.Node) bool {
+ var replaced bool
+ switch n.Op() {
+ case ir.OBYTES2STR:
+ n := n.(*ir.ConvExpr)
+ n.SetOp(ir.OBYTES2STRTMP)
+ replaced = true
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elem := range n.List {
+ elem := elem.(*ir.StructKeyExpr)
+ if mapKeyReplaceStrConv(elem.Value) {
+ replaced = true
+ }
+ }
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elem := range n.List {
+ if elem.Op() == ir.OKEY {
+ elem = elem.(*ir.KeyExpr).Value
+ }
+ if mapKeyReplaceStrConv(elem) {
+ replaced = true
+ }
+ }
+ }
+ return replaced
+}
+
+type ordermarker int
+
+// markTemp returns the top of the temporary variable stack.
+func (o *orderState) markTemp() ordermarker {
+ return ordermarker(len(o.temp))
+}
+
+// popTemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by markTemp.
+func (o *orderState) popTemp(mark ordermarker) {
+ for _, n := range o.temp[mark:] {
+ key := n.Type().LinkString()
+ o.free[key] = append(o.free[key], n)
+ }
+ o.temp = o.temp[:mark]
+}
+
+// stmtList orders each of the statements in the list.
+func (o *orderState) stmtList(l ir.Nodes) {
+ s := l
+ for i := range s {
+ orderMakeSliceCopy(s[i:])
+ o.stmt(s[i])
+ }
+}
+
+// orderMakeSliceCopy matches the pattern:
+//
+// m = OMAKESLICE([]T, x); OCOPY(m, s)
+//
+// and rewrites it to:
+//
+// m = OMAKESLICECOPY([]T, x, s); nil
+func orderMakeSliceCopy(s []ir.Node) {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return
+ }
+ if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
+ return
+ }
+
+ as := s[0].(*ir.AssignStmt)
+ cp := s[1].(*ir.BinaryExpr)
+ if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
+ as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
+ as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
+ // The line above this one is correct with the differing equality operators:
+ // we want as.X and cp.X to be the same name,
+ // but we want the initial data to be coming from a different name.
+ return
+ }
+
+ mk := as.Y.(*ir.MakeExpr)
+ if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
+ return
+ }
+ mk.SetOp(ir.OMAKESLICECOPY)
+ mk.Cap = cp.Y
+ // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
+ mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
+ as.Y = typecheck.Expr(mk)
+ s[1] = nil // remove separate copy call
+}
+
+// edge inserts coverage instrumentation for libfuzzer.
+func (o *orderState) edge() {
+ if base.Debug.Libfuzzer == 0 {
+ return
+ }
+
+ // Create a new uint8 counter to be allocated in section __sancov_cntrs
+ counter := staticinit.StaticName(types.Types[types.TUINT8])
+ counter.SetLibfuzzer8BitCounter(true)
+ // As well as setting SetLibfuzzer8BitCounter, we preemptively set the
+ // symbol type to SLIBFUZZER_8BIT_COUNTER so that the race detector
+ // instrumentation pass (which does not have access to the flags set by
+ // SetLibfuzzer8BitCounter) knows to ignore them. This information is
+ // lost by the time it reaches the compile step, so SetLibfuzzer8BitCounter
+ // is still necessary.
+ counter.Linksym().Type = objabi.SLIBFUZZER_8BIT_COUNTER
+
+ // We guarantee that the counter never becomes zero again once it has been
+ // incremented once. This implementation follows the NeverZero optimization
+ // presented by the paper:
+ // "AFL++: Combining Incremental Steps of Fuzzing Research"
+ // The NeverZero policy avoids the overflow to 0 by setting the counter to one
+ // after it reaches 255 and so, if an edge is executed at least one time, the entry is
+ // never 0.
+ // Another policy presented in the paper is the Saturated Counters policy which
+ // freezes the counter when it reaches the value of 255. However, a range
+ // of experiments showed that that decreases overall performance.
+ o.append(ir.NewIfStmt(base.Pos,
+ ir.NewBinaryExpr(base.Pos, ir.OEQ, counter, ir.NewInt(base.Pos, 0xff)),
+ []ir.Node{ir.NewAssignStmt(base.Pos, counter, ir.NewInt(base.Pos, 1))},
+ []ir.Node{ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(base.Pos, 1))}))
+}
+
+// orderBlock orders the block of statements in n into a new slice,
+// and then replaces the old slice in n with the new slice.
+// free is a map that can be used to obtain temporary variables by type.
+func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+ if len(*n) != 0 {
+ // Set reasonable position for instrumenting code. See issue 53688.
+ // It would be nice if ir.Nodes had a position (the opening {, probably),
+ // but it doesn't. So we use the first statement's position instead.
+ ir.SetPos((*n)[0])
+ }
+ var order orderState
+ order.free = free
+ mark := order.markTemp()
+ order.edge()
+ order.stmtList(*n)
+ order.popTemp(mark)
+ *n = order.out
+}
+
+// exprInPlace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+// The result of exprInPlace MUST be assigned back to n, e.g.
+//
+// n.Left = o.exprInPlace(n.Left)
+func (o *orderState) exprInPlace(n ir.Node) ir.Node {
+ var order orderState
+ order.free = o.free
+ n = order.expr(n, nil)
+ n = ir.InitExpr(order.out, n)
+
+ // insert new temporaries from order
+ // at head of outer list.
+ o.temp = append(o.temp, order.temp...)
+ return n
+}
+
+// orderStmtInPlace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+// The result of orderStmtInPlace MUST be assigned back to n, e.g.
+//
+// n.Left = orderStmtInPlace(n.Left)
+//
+// free is a map that can be used to obtain temporary variables by type.
+func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
+ var order orderState
+ order.free = free
+ mark := order.markTemp()
+ order.stmt(n)
+ order.popTemp(mark)
+ return ir.NewBlockStmt(src.NoXPos, order.out)
+}
+
+// init moves n's init list to o.out.
+func (o *orderState) init(n ir.Node) {
+ if ir.MayBeShared(n) {
+ // For concurrency safety, don't mutate potentially shared nodes.
+ // First, ensure that no work is required here.
+ if len(n.Init()) > 0 {
+ base.Fatalf("order.init shared node with ninit")
+ }
+ return
+ }
+ o.stmtList(ir.TakeInit(n))
+}
+
+// call orders the call expression n.
+// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func (o *orderState) call(nn ir.Node) {
+ if len(nn.Init()) > 0 {
+ // Caller should have already called o.init(nn).
+ base.Fatalf("%v with unexpected ninit", nn.Op())
+ }
+ if nn.Op() == ir.OCALLMETH {
+ base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ // Builtin functions.
+ if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
+ switch n := nn.(type) {
+ default:
+ base.Fatalf("unexpected call: %+v", n)
+ case *ir.UnaryExpr:
+ n.X = o.expr(n.X, nil)
+ case *ir.ConvExpr:
+ n.X = o.expr(n.X, nil)
+ case *ir.BinaryExpr:
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+ case *ir.MakeExpr:
+ n.Len = o.expr(n.Len, nil)
+ n.Cap = o.expr(n.Cap, nil)
+ case *ir.CallExpr:
+ o.exprList(n.Args)
+ }
+ return
+ }
+
+ n := nn.(*ir.CallExpr)
+ typecheck.AssertFixedCall(n)
+
+ if ir.IsFuncPCIntrinsic(n) && ir.IsIfaceOfFunc(n.Args[0]) != nil {
+ // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
+ // do not introduce temporaries here, so it is easier to rewrite it
+ // to symbol address reference later in walk.
+ return
+ }
+
+ n.Fun = o.expr(n.Fun, nil)
+ o.exprList(n.Args)
+}
+
+// mapAssign appends n to o.out.
+func (o *orderState) mapAssign(n ir.Node) {
+ switch n.Op() {
+ default:
+ base.Fatalf("order.mapAssign %v", n.Op())
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if n.X.Op() == ir.OINDEXMAP {
+ n.Y = o.safeMapRHS(n.Y)
+ }
+ o.out = append(o.out, n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ if n.X.Op() == ir.OINDEXMAP {
+ n.Y = o.safeMapRHS(n.Y)
+ }
+ o.out = append(o.out, n)
+ }
+}
+
+func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
+ // Make sure we evaluate the RHS before starting the map insert.
+ // We need to make sure the RHS won't panic. See issue 22881.
+ if r.Op() == ir.OAPPEND {
+ r := r.(*ir.CallExpr)
+ s := r.Args[1:]
+ for i, n := range s {
+ s[i] = o.cheapExpr(n)
+ }
+ return r
+ }
+ return o.cheapExpr(r)
+}
+
+// stmt orders the statement n, appending to o.out.
+func (o *orderState) stmt(n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ o.init(n)
+
+ switch n.Op() {
+ default:
+ base.Fatalf("order.stmt %v", n.Op())
+
+ case ir.OINLMARK:
+ o.out = append(o.out, n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ t := o.markTemp()
+
+ // There's a delicate interaction here between two OINDEXMAP
+ // optimizations.
+ //
+ // First, we want to handle m[k] = append(m[k], ...) with a single
+ // runtime call to mapassign. This requires the m[k] expressions to
+ // satisfy ir.SameSafeExpr in walkAssign.
+ //
+ // But if k is a slow map key type that's passed by reference (e.g.,
+ // byte), then we want to avoid marking user variables as addrtaken,
+ // if that might prevent the compiler from keeping k in a register.
+ //
+ // TODO(mdempsky): It would be better if walk was responsible for
+ // inserting temporaries as needed.
+ mapAppend := n.X.Op() == ir.OINDEXMAP && n.Y.Op() == ir.OAPPEND &&
+ ir.SameSafeExpr(n.X, n.Y.(*ir.CallExpr).Args[0])
+
+ n.X = o.expr(n.X, nil)
+ if mapAppend {
+ indexLHS := n.X.(*ir.IndexExpr)
+ indexLHS.X = o.cheapExpr(indexLHS.X)
+ indexLHS.Index = o.cheapExpr(indexLHS.Index)
+
+ call := n.Y.(*ir.CallExpr)
+ indexRHS := call.Args[0].(*ir.IndexExpr)
+ indexRHS.X = indexLHS.X
+ indexRHS.Index = indexLHS.Index
+
+ o.exprList(call.Args[1:])
+ } else {
+ n.Y = o.expr(n.Y, n.X)
+ }
+ o.mapAssign(n)
+ o.popTemp(t)
+
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+
+ if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
+ // Rewrite m[k] op= r into m[k] = m[k] op r so
+ // that we can ensure that if op panics
+ // because r is zero, the panic happens before
+ // the map assignment.
+ // DeepCopy is a big hammer here, but safeExpr
+ // makes sure there is nothing too deep being copied.
+ l1 := o.safeExpr(n.X)
+ l2 := ir.DeepCopy(src.NoXPos, l1)
+ if l2.Op() == ir.OINDEXMAP {
+ l2 := l2.(*ir.IndexExpr)
+ l2.Assigned = false
+ }
+ l2 = o.copyExpr(l2)
+ r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
+ as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
+ o.mapAssign(as)
+ o.popTemp(t)
+ return
+ }
+
+ o.mapAssign(n)
+ o.popTemp(t)
+
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ t := o.markTemp()
+ o.exprList(n.Lhs)
+ o.exprList(n.Rhs)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Special: avoid copy of func call n.Right
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ t := o.markTemp()
+ o.exprList(n.Lhs)
+ call := n.Rhs[0]
+ o.init(call)
+ if ic, ok := call.(*ir.InlinedCallExpr); ok {
+ o.stmtList(ic.Body)
+
+ n.SetOp(ir.OAS2)
+ n.Rhs = ic.ReturnVars
+
+ o.exprList(n.Rhs)
+ o.out = append(o.out, n)
+ } else {
+ o.call(call)
+ o.as2func(n)
+ }
+ o.popTemp(t)
+
+ // Special: use temporary variables to hold result,
+ // so that runtime can take address of temporary.
+ // No temporary for blank assignment.
+ //
+ // OAS2MAPR: make sure key is addressable if needed,
+ // and make sure OINDEXMAP is not copied out.
+ case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
+ n := n.(*ir.AssignListStmt)
+ t := o.markTemp()
+ o.exprList(n.Lhs)
+
+ switch r := n.Rhs[0]; r.Op() {
+ case ir.ODOTTYPE2:
+ r := r.(*ir.TypeAssertExpr)
+ r.X = o.expr(r.X, nil)
+ case ir.ODYNAMICDOTTYPE2:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ r.X = o.expr(r.X, nil)
+ r.RType = o.expr(r.RType, nil)
+ r.ITab = o.expr(r.ITab, nil)
+ case ir.ORECV:
+ r := r.(*ir.UnaryExpr)
+ r.X = o.expr(r.X, nil)
+ case ir.OINDEXMAP:
+ r := r.(*ir.IndexExpr)
+ r.X = o.expr(r.X, nil)
+ r.Index = o.expr(r.Index, nil)
+ // See similar conversion for OINDEXMAP below.
+ _ = mapKeyReplaceStrConv(r.Index)
+ r.Index = o.mapKeyTemp(r.Pos(), r.X.Type(), r.Index)
+ default:
+ base.Fatalf("order.stmt: %v", r.Op())
+ }
+
+ o.as2ok(n)
+ o.popTemp(t)
+
+ // Special: does not save n onto out.
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ o.stmtList(n.List)
+
+ // Special: n->left is not an expression; save as is.
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.OTAILCALL:
+ o.out = append(o.out, n)
+
+ // Special: handle call arguments.
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ t := o.markTemp()
+ o.call(n)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ case ir.OINLCALL:
+ n := n.(*ir.InlinedCallExpr)
+ o.stmtList(n.Body)
+
+ // discard results; double-check for no side effects
+ for _, result := range n.ReturnVars {
+ if staticinit.AnySideEffects(result) {
+ base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
+ }
+ }
+
+ case ir.OCHECKNIL, ir.OCLEAR, ir.OCLOSE, ir.OPANIC, ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ case ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP:
+ n := n.(*ir.CallExpr)
+ t := o.markTemp()
+ o.call(n)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Special: order arguments to inner call but not call itself.
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ t := o.markTemp()
+ o.init(n.Call)
+ o.call(n.Call)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ t := o.markTemp()
+ n.Args[0] = o.expr(n.Args[0], nil)
+ n.Args[1] = o.expr(n.Args[1], nil)
+ n.Args[1] = o.mapKeyTemp(n.Pos(), n.Args[0].Type(), n.Args[1])
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Clean temporaries from condition evaluation at
+ // beginning of loop body and after for statement.
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ t := o.markTemp()
+ n.Cond = o.exprInPlace(n.Cond)
+ orderBlock(&n.Body, o.free)
+ n.Post = orderStmtInPlace(n.Post, o.free)
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Clean temporaries from condition at
+ // beginning of both branches.
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ t := o.markTemp()
+ n.Cond = o.exprInPlace(n.Cond)
+ o.popTemp(t)
+ orderBlock(&n.Body, o.free)
+ orderBlock(&n.Else, o.free)
+ o.out = append(o.out, n)
+
+ case ir.ORANGE:
+ // n.Right is the expression being ranged over.
+ // order it, and then make a copy if we need one.
+ // We almost always do, to ensure that we don't
+ // see any value changes made during the loop.
+ // Usually the copy is cheap (e.g., array pointer,
+ // chan, slice, string are all tiny).
+ // The exception is ranging over an array value
+ // (not a slice, not a pointer to array),
+ // which must make a copy to avoid seeing updates made during
+ // the range body. Ranging over an array value is uncommon though.
+
+ // Mark []byte(str) range expression to reuse string backing storage.
+ // It is safe because the storage cannot be mutated.
+ n := n.(*ir.RangeStmt)
+ if x, ok := n.X.(*ir.ConvExpr); ok {
+ switch x.Op() {
+ case ir.OSTR2BYTES:
+ x.SetOp(ir.OSTR2BYTESTMP)
+ fallthrough
+ case ir.OSTR2BYTESTMP:
+ x.MarkNonNil() // "range []byte(nil)" is fine
+ }
+ }
+
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+
+ orderBody := true
+ xt := typecheck.RangeExprType(n.X.Type())
+ switch k := xt.Kind(); {
+ default:
+ base.Fatalf("order.stmt range %v", n.Type())
+
+ case types.IsInt[k]:
+ // Used only once, no need to copy.
+
+ case k == types.TARRAY, k == types.TSLICE:
+ if n.Value == nil || ir.IsBlank(n.Value) {
+ // for i := range x will only use x once, to compute len(x).
+ // No need to copy it.
+ break
+ }
+ fallthrough
+
+ case k == types.TCHAN, k == types.TSTRING:
+ // chan, string, slice, array ranges use value multiple times.
+ // make copy.
+ r := n.X
+
+ if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
+ r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
+ r.SetType(types.Types[types.TSTRING])
+ r = typecheck.Expr(r)
+ }
+
+ n.X = o.copyExpr(r)
+
+ case k == types.TMAP:
+ if isMapClear(n) {
+ // Preserve the body of the map clear pattern so it can
+ // be detected during walk. The loop body will not be used
+ // when optimizing away the range loop to a runtime call.
+ orderBody = false
+ break
+ }
+
+ // copy the map value in case it is a map literal.
+ // TODO(rsc): Make tmp = literal expressions reuse tmp.
+ // For maps tmp is just one word so it hardly matters.
+ r := n.X
+ n.X = o.copyExpr(r)
+
+ // n.Prealloc is the temp for the iterator.
+ // MapIterType contains pointers and needs to be zeroed.
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(), true)
+ }
+ n.Key = o.exprInPlace(n.Key)
+ n.Value = o.exprInPlace(n.Value)
+ if orderBody {
+ orderBlock(&n.Body, o.free)
+ }
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ o.exprList(n.Results)
+ o.out = append(o.out, n)
+
+ // Special: clean case temporaries in each block entry.
+ // Select must enter one of its blocks, so there is no
+ // need for a cleaning at the end.
+ // Doubly special: evaluation order for select is stricter
+ // than ordinary expressions. Even something like p.c
+ // has to be hoisted into a temporary, so that it cannot be
+ // reordered after the channel evaluation for a different
+ // case (if p were nil, then the timing of the fault would
+ // give this away).
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ t := o.markTemp()
+ for _, ncas := range n.Cases {
+ r := ncas.Comm
+ ir.SetPos(ncas)
+
+ // Append any new body prologue to ninit.
+ // The next loop will insert ninit into nbody.
+ if len(ncas.Init()) != 0 {
+ base.Fatalf("order select ninit")
+ }
+ if r == nil {
+ continue
+ }
+ switch r.Op() {
+ default:
+ ir.Dump("select case", r)
+ base.Fatalf("unknown op in select %v", r.Op())
+
+ case ir.OSELRECV2:
+ // case x, ok = <-c
+ r := r.(*ir.AssignListStmt)
+ recv := r.Rhs[0].(*ir.UnaryExpr)
+ recv.X = o.expr(recv.X, nil)
+ if !ir.IsAutoTmp(recv.X) {
+ recv.X = o.copyExpr(recv.X)
+ }
+ init := ir.TakeInit(r)
+
+ colas := r.Def
+ do := func(i int, t *types.Type) {
+ n := r.Lhs[i]
+ if ir.IsBlank(n) {
+ return
+ }
+ // If this is case x := <-ch or case x, y := <-ch, the case has
+ // the ODCL nodes to declare x and y. We want to delay that
+ // declaration (and possible allocation) until inside the case body.
+ // Delete the ODCL nodes here and recreate them inside the body below.
+ if colas {
+ if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
+ init = init[1:]
+
+ // iimport may have added a default initialization assignment,
+ // due to how it handles ODCL statements.
+ if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
+ init = init[1:]
+ }
+ }
+ dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+ ncas.PtrInit().Append(dcl)
+ }
+ tmp := o.newTemp(t, t.HasPointers())
+ as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
+ ncas.PtrInit().Append(as)
+ r.Lhs[i] = tmp
+ }
+ do(0, recv.X.Type().Elem())
+ do(1, types.Types[types.TBOOL])
+ if len(init) != 0 {
+ ir.DumpList("ninit", init)
+ base.Fatalf("ninit on select recv")
+ }
+ orderBlock(ncas.PtrInit(), o.free)
+
+ case ir.OSEND:
+ r := r.(*ir.SendStmt)
+ if len(r.Init()) != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select send")
+ }
+
+ // case c <- x
+ // r->left is c, r->right is x, both are always evaluated.
+ r.Chan = o.expr(r.Chan, nil)
+
+ if !ir.IsAutoTmp(r.Chan) {
+ r.Chan = o.copyExpr(r.Chan)
+ }
+ r.Value = o.expr(r.Value, nil)
+ if !ir.IsAutoTmp(r.Value) {
+ r.Value = o.copyExpr(r.Value)
+ }
+ }
+ }
+ // Now that we have accumulated all the temporaries, clean them.
+ // Also insert any ninit queued during the previous loop.
+ // (The temporary cleaning must follow that ninit work.)
+ for _, cas := range n.Cases {
+ orderBlock(&cas.Body, o.free)
+
+ // TODO(mdempsky): Is this actually necessary?
+ // walkSelect appears to walk Ninit.
+ cas.Body.Prepend(ir.TakeInit(cas)...)
+ }
+
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Special: value being sent is passed as a pointer; make it addressable.
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ t := o.markTemp()
+ n.Chan = o.expr(n.Chan, nil)
+ n.Value = o.expr(n.Value, nil)
+ if base.Flag.Cfg.Instrumenting {
+ // Force copying to the stack so that (chan T)(nil) <- x
+ // is still instrumented as a read of x.
+ n.Value = o.copyExpr(n.Value)
+ } else {
+ n.Value = o.addrTemp(n.Value)
+ }
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // TODO(rsc): Clean temporaries more aggressively.
+ // Note that because walkSwitch will rewrite some of the
+ // switch into a binary search, this is not as easy as it looks.
+ // (If we ran that code here we could invoke order.stmt on
+ // the if-else chain instead.)
+ // For now just clean all the temporaries at the end.
+ // In practice that's fine.
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
+ // Add empty "default:" case for instrumentation.
+ n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
+ }
+
+ t := o.markTemp()
+ n.Tag = o.expr(n.Tag, nil)
+ for _, ncas := range n.Cases {
+ o.exprListInPlace(ncas.List)
+ orderBlock(&ncas.Body, o.free)
+ }
+
+ o.out = append(o.out, n)
+ o.popTemp(t)
+ }
+
+ base.Pos = lno
+}
+
+func hasDefaultCase(n *ir.SwitchStmt) bool {
+ for _, ncas := range n.Cases {
+ if len(ncas.List) == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// exprList orders the expression list l into o.
+func (o *orderState) exprList(l ir.Nodes) {
+ s := l
+ for i := range s {
+ s[i] = o.expr(s[i], nil)
+ }
+}
+
+// exprListInPlace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func (o *orderState) exprListInPlace(l ir.Nodes) {
+ s := l
+ for i := range s {
+ s[i] = o.exprInPlace(s[i])
+ }
+}
+
+func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
+ return o.expr(n, nil)
+}
+
+// expr orders a single expression, appending side
+// effects to o.out as needed.
+// If this is part of an assignment lhs = *np, lhs is given.
+// Otherwise lhs == nil. (When lhs != nil it may be possible
+// to avoid copying the result of the expression to a temporary.)
+// The result of expr MUST be assigned back to n, e.g.
+//
+// n.Left = o.expr(n.Left, lhs)
+func (o *orderState) expr(n, lhs ir.Node) ir.Node {
+ if n == nil {
+ return n
+ }
+ lno := ir.SetPos(n)
+ n = o.expr1(n, lhs)
+ base.Pos = lno
+ return n
+}
+
+func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
+ o.init(n)
+
+ switch n.Op() {
+ default:
+ if o.edit == nil {
+ o.edit = o.exprNoLHS // create closure once
+ }
+ ir.EditChildren(n, o.edit)
+ return n
+
+ // Addition of strings turns into a function call.
+ // Allocate a temporary to hold the strings.
+ // Fewer than 5 strings use direct runtime helpers.
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ o.exprList(n.List)
+
+ if len(n.List) > 5 {
+ t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
+ n.Prealloc = o.newTemp(t, false)
+ }
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String concatenation does not
+ // memorize the strings for later use, so it is safe.
+ // However, we can do it only if there is at least one non-empty string literal.
+ // Otherwise if all other arguments are empty strings,
+ // concatstrings will return the reference to the temp string
+ // to the caller.
+ hasbyte := false
+
+ haslit := false
+ for _, n1 := range n.List {
+ hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
+ haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
+ }
+
+ if haslit && hasbyte {
+ for _, n2 := range n.List {
+ if n2.Op() == ir.OBYTES2STR {
+ n2 := n2.(*ir.ConvExpr)
+ n2.SetOp(ir.OBYTES2STRTMP)
+ }
+ }
+ }
+ return n
+
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ n.X = o.expr(n.X, nil)
+ n.Index = o.expr(n.Index, nil)
+ needCopy := false
+
+ if !n.Assigned {
+ // Enforce that any []byte slices we are not copying
+ // can not be changed before the map index by forcing
+ // the map index to happen immediately following the
+ // conversions. See copyExpr a few lines below.
+ needCopy = mapKeyReplaceStrConv(n.Index)
+
+ if base.Flag.Cfg.Instrumenting {
+ // Race detector needs the copy.
+ needCopy = true
+ }
+ }
+
+ // key may need to be be addressable
+ n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index)
+ if needCopy {
+ return o.copyExpr(n)
+ }
+ return n
+
+ // concrete type (not interface) argument might need an addressable
+ // temporary to pass to the runtime conversion routine.
+ case ir.OCONVIFACE:
+ n := n.(*ir.ConvExpr)
+ n.X = o.expr(n.X, nil)
+ if n.X.Type().IsInterface() {
+ return n
+ }
+ if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
+ // Need a temp if we need to pass the address to the conversion function.
+ // We also process static composite literal node here, making a named static global
+ // whose address we can put directly in an interface (see OCONVIFACE case in walk).
+ n.X = o.addrTemp(n.X)
+ }
+ return n
+
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
+ call := n.X.(*ir.CallExpr)
+ // When reordering unsafe.Pointer(f()) into a separate
+ // statement, the conversion and function call must stay
+ // together. See golang.org/issue/15329.
+ o.init(call)
+ o.call(call)
+ if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
+ return o.copyExpr(n)
+ }
+ } else {
+ n.X = o.expr(n.X, nil)
+ }
+ return n
+
+ case ir.OANDAND, ir.OOROR:
+ // ... = LHS && RHS
+ //
+ // var r bool
+ // r = LHS
+ // if r { // or !r, for OROR
+ // r = RHS
+ // }
+ // ... = r
+
+ n := n.(*ir.LogicalExpr)
+ r := o.newTemp(n.Type(), false)
+
+ // Evaluate left-hand side.
+ lhs := o.expr(n.X, nil)
+ o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
+
+ // Evaluate right-hand side, save generated code.
+ saveout := o.out
+ o.out = nil
+ t := o.markTemp()
+ o.edge()
+ rhs := o.expr(n.Y, nil)
+ o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
+ o.popTemp(t)
+ gen := o.out
+ o.out = saveout
+
+ // If left-hand side doesn't cause a short-circuit, issue right-hand side.
+ nif := ir.NewIfStmt(base.Pos, r, nil, nil)
+ if n.Op() == ir.OANDAND {
+ nif.Body = gen
+ } else {
+ nif.Else = gen
+ }
+ o.out = append(o.out, nif)
+ return r
+
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ panic("unreachable")
+
+ case ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.OMAKESLICE,
+ ir.OMAKESLICECOPY,
+ ir.OMAX,
+ ir.OMIN,
+ ir.ONEW,
+ ir.OREAL,
+ ir.ORECOVERFP,
+ ir.OSTR2BYTES,
+ ir.OSTR2BYTESTMP,
+ ir.OSTR2RUNES:
+
+ if isRuneCount(n) {
+ // len([]rune(s)) is rewritten to runtime.countrunes(s) later.
+ conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
+ conv.X = o.expr(conv.X, nil)
+ } else {
+ o.call(n)
+ }
+
+ if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
+ return o.copyExpr(n)
+ }
+ return n
+
+ case ir.OINLCALL:
+ n := n.(*ir.InlinedCallExpr)
+ o.stmtList(n.Body)
+ return n.SingleResult()
+
+ case ir.OAPPEND:
+ // Check for append(x, make([]T, y)...) .
+ n := n.(*ir.CallExpr)
+ if isAppendOfMake(n) {
+ n.Args[0] = o.expr(n.Args[0], nil) // order x
+ mk := n.Args[1].(*ir.MakeExpr)
+ mk.Len = o.expr(mk.Len, nil) // order y
+ } else {
+ o.exprList(n.Args)
+ }
+
+ if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
+ return o.copyExpr(n)
+ }
+ return n
+
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ n.X = o.expr(n.X, nil)
+ n.Low = o.cheapExpr(o.expr(n.Low, nil))
+ n.High = o.cheapExpr(o.expr(n.High, nil))
+ n.Max = o.cheapExpr(o.expr(n.Max, nil))
+ if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
+ return o.copyExpr(n)
+ }
+ return n
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ if n.Transient() && len(n.Func.ClosureVars) > 0 {
+ n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
+ }
+ return n
+
+ case ir.OMETHVALUE:
+ n := n.(*ir.SelectorExpr)
+ n.X = o.expr(n.X, nil)
+ if n.Transient() {
+ t := typecheck.MethodValueType(n)
+ n.Prealloc = o.newTemp(t, false)
+ }
+ return n
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ o.exprList(n.List)
+ if n.Transient() {
+ t := types.NewArray(n.Type().Elem(), n.Len)
+ n.Prealloc = o.newTemp(t, false)
+ }
+ return n
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ n.X = o.expr(n.X, nil)
+ if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
+ return o.copyExprClear(n)
+ }
+ return n
+
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ n.X = o.expr(n.X, nil)
+ return o.copyExprClear(n)
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+
+ t := n.X.Type()
+ switch {
+ case t.IsString():
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String comparison does not
+ // memorize the strings for later use, so it is safe.
+ if n.X.Op() == ir.OBYTES2STR {
+ n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+ }
+ if n.Y.Op() == ir.OBYTES2STR {
+ n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+ }
+
+ case t.IsStruct() || t.IsArray():
+ // for complex comparisons, we need both args to be
+ // addressable so we can pass them to the runtime.
+ n.X = o.addrTemp(n.X)
+ n.Y = o.addrTemp(n.Y)
+ }
+ return n
+
+ case ir.OMAPLIT:
+ // Order map by converting:
+ // map[int]int{
+ // a(): b(),
+ // c(): d(),
+ // e(): f(),
+ // }
+ // to
+ // m := map[int]int{}
+ // m[a()] = b()
+ // m[c()] = d()
+ // m[e()] = f()
+ // Then order the result.
+ // Without this special case, order would otherwise compute all
+ // the keys and values before storing any of them to the map.
+ // See issue 26552.
+ n := n.(*ir.CompLitExpr)
+ entries := n.List
+ statics := entries[:0]
+ var dynamics []*ir.KeyExpr
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+ dynamics = append(dynamics, r)
+ continue
+ }
+
+ // Recursively ordering some static entries can change them to dynamic;
+ // e.g., OCONVIFACE nodes. See #31777.
+ r = o.expr(r, nil).(*ir.KeyExpr)
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+ dynamics = append(dynamics, r)
+ continue
+ }
+
+ statics = append(statics, r)
+ }
+ n.List = statics
+
+ if len(dynamics) == 0 {
+ return n
+ }
+
+ // Emit the creation of the map (with all its static entries).
+ m := o.newTemp(n.Type(), false)
+ as := ir.NewAssignStmt(base.Pos, m, n)
+ typecheck.Stmt(as)
+ o.stmt(as)
+
+ // Emit eval+insert of dynamic entries, one at a time.
+ for _, r := range dynamics {
+ lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr)
+ base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs)
+ lhs.RType = n.RType
+
+ as := ir.NewAssignStmt(base.Pos, lhs, r.Value)
+ typecheck.Stmt(as)
+ o.stmt(as)
+ }
+
+ // Remember that we issued these assignments so we can include that count
+ // in the map alloc hint.
+ // We're assuming here that all the keys in the map literal are distinct.
+ // If any are equal, this will be an overcount. Probably not worth accounting
+ // for that, as equal keys in map literals are rare, and at worst we waste
+ // a bit of space.
+ n.Len += int64(len(dynamics))
+
+ return m
+ }
+
+ // No return - type-assertions above. Each case must return for itself.
+}
+
+// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
+// The caller should order the right-hand side of the assignment before calling order.as2func.
+// It rewrites,
+//
+// a, b, a = ...
+//
+// as
+//
+// tmp1, tmp2, tmp3 = ...
+// a, b, a = tmp1, tmp2, tmp3
+//
+// This is necessary to ensure left to right assignment order.
+func (o *orderState) as2func(n *ir.AssignListStmt) {
+ results := n.Rhs[0].Type()
+ as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+ for i, nl := range n.Lhs {
+ if !ir.IsBlank(nl) {
+ typ := results.Field(i).Type
+ tmp := o.newTemp(typ, typ.HasPointers())
+ n.Lhs[i] = tmp
+ as.Lhs = append(as.Lhs, nl)
+ as.Rhs = append(as.Rhs, tmp)
+ }
+ }
+
+ o.out = append(o.out, n)
+ o.stmt(typecheck.Stmt(as))
+}
+
+// as2ok orders OAS2XXX with ok.
+// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
+func (o *orderState) as2ok(n *ir.AssignListStmt) {
+ as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+
+ do := func(i int, typ *types.Type) {
+ if nl := n.Lhs[i]; !ir.IsBlank(nl) {
+ var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
+ n.Lhs[i] = tmp
+ as.Lhs = append(as.Lhs, nl)
+ if i == 1 {
+ // The "ok" result is an untyped boolean according to the Go
+ // spec. We need to explicitly convert it to the LHS type in
+ // case the latter is a defined boolean type (#8475).
+ tmp = typecheck.Conv(tmp, nl.Type())
+ }
+ as.Rhs = append(as.Rhs, tmp)
+ }
+ }
+
+ do(0, n.Rhs[0].Type())
+ do(1, types.Types[types.TBOOL])
+
+ o.out = append(o.out, n)
+ o.stmt(typecheck.Stmt(as))
+}
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
new file mode 100644
index 0000000..93898b3
--- /dev/null
+++ b/src/cmd/compile/internal/walk/range.go
@@ -0,0 +1,576 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+func cheapComputableIndex(width int64) bool {
+ switch ssagen.Arch.LinkArch.Family {
+ // MIPS does not have R+R addressing
+ // Arm64 may lack ability to generate this code in our assembler,
+ // but the architecture supports it.
+ case sys.PPC64, sys.S390X:
+ return width == 1
+ case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
+ switch width {
+ case 1, 2, 4, 8:
+ return true
+ }
+ }
+ return false
+}
+
+// walkRange transforms various forms of ORANGE into
+// simpler forms. The result must be assigned back to n.
+// Node n may also be modified in place, and may also be
+// the returned node.
+func walkRange(nrange *ir.RangeStmt) ir.Node {
+ base.Assert(!nrange.DistinctVars) // Should all be rewritten before escape analysis
+ if isMapClear(nrange) {
+ return mapRangeClear(nrange)
+ }
+
+ nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil, nrange.DistinctVars)
+ nfor.SetInit(nrange.Init())
+ nfor.Label = nrange.Label
+
+ // variable name conventions:
+ // ohv1, hv1, hv2: hidden (old) val 1, 2
+ // ha, hit: hidden aggregate, iterator
+ // hn, hp: hidden len, pointer
+ // hb: hidden bool
+ // a, v1, v2: not hidden aggregate, val 1, 2
+
+ a := nrange.X
+ t := a.Type()
+ lno := ir.SetPos(a)
+
+ v1, v2 := nrange.Key, nrange.Value
+
+ if ir.IsBlank(v2) {
+ v2 = nil
+ }
+
+ if ir.IsBlank(v1) && v2 == nil {
+ v1 = nil
+ }
+
+ if v1 == nil && v2 != nil {
+ base.Fatalf("walkRange: v2 != nil while v1 == nil")
+ }
+
+ var body []ir.Node
+ var init []ir.Node
+ switch k := t.Kind(); {
+ default:
+ base.Fatalf("walkRange")
+
+ case types.IsInt[k]:
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+ hn := typecheck.TempAt(base.Pos, ir.CurFunc, t)
+
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+ init = append(init, ir.NewAssignStmt(base.Pos, hn, a))
+
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+ nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))
+
+ if v1 != nil {
+ body = []ir.Node{rangeAssign(nrange, hv1)}
+ }
+
+ case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array
+ if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
+ base.Pos = lno
+ return nn
+ }
+
+ // Element type of the iteration
+ var elem *types.Type
+ switch t.Kind() {
+ case types.TSLICE, types.TARRAY:
+ elem = t.Elem()
+ case types.TPTR:
+ elem = t.Elem().Elem()
+ }
+
+ // order.stmt arranged for a copy of the array/slice variable if needed.
+ ha := a
+
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+ init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
+
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+ nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))
+
+ // for range ha { body }
+ if v1 == nil {
+ break
+ }
+
+ // for v1 := range ha { body }
+ if v2 == nil {
+ body = []ir.Node{rangeAssign(nrange, hv1)}
+ break
+ }
+
+ // for v1, v2 := range ha { body }
+ if cheapComputableIndex(elem.Size()) {
+ // v1, v2 = hv1, ha[hv1]
+ tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
+ tmp.SetBounded(true)
+ body = []ir.Node{rangeAssign2(nrange, hv1, tmp)}
+ break
+ }
+
+ // Slice to iterate over
+ var hs ir.Node
+ if t.IsSlice() {
+ hs = ha
+ } else {
+ var arr ir.Node
+ if t.IsPtr() {
+ arr = ha
+ } else {
+ arr = typecheck.NodAddr(ha)
+ arr.SetType(t.PtrTo())
+ arr.SetTypecheck(1)
+ }
+ hs = ir.NewSliceExpr(base.Pos, ir.OSLICEARR, arr, nil, nil, nil)
+ // old typechecker doesn't know OSLICEARR, so we set types explicitly
+ hs.SetType(types.NewSlice(elem))
+ hs.SetTypecheck(1)
+ }
+
+ // We use a "pointer" to keep track of where we are in the backing array
+ // of the slice hs. This pointer starts at hs.ptr and gets incremented
+ // by the element size each time through the loop.
+ //
+ // It's tricky, though, as on the last iteration this pointer gets
+ // incremented to point past the end of the backing array. We can't
+ // let the garbage collector see that final out-of-bounds pointer.
+ //
+ // To avoid this, we keep the "pointer" alternately in 2 variables, one
+ // pointer typed and one uintptr typed. Most of the time it lives in the
+ // regular pointer variable, but when it might be out of bounds (after it
+ // has been incremented, but before the loop condition has been checked)
+ // it lives briefly in the uintptr variable.
+ //
+ // hp contains the pointer version (of type *T, where T is the element type).
+ // It is guaranteed to always be in range, keeps the backing store alive,
+ // and is updated on stack copies. If a GC occurs when this function is
+ // suspended at any safepoint, this variable ensures correct operation.
+ //
+ // hu contains the equivalent uintptr version. It may point past the
+ // end, but doesn't keep the backing store alive and doesn't get updated
+ // on a stack copy. If a GC occurs while this function is on the top of
+ // the stack, then the last frame is scanned conservatively and hu will
+ // act as a reference to the backing array to ensure it is not collected.
+ //
+ // The "pointer" we're moving across the backing array lives in one
+ // or the other of hp and hu as the loop proceeds.
+ //
+ // hp is live during most of the body of the loop. But it isn't live
+ // at the very top of the loop, when we haven't checked i<n yet, and
+ // it could point off the end of the backing store.
+ // hu is live only at the very top and very bottom of the loop.
+ // In particular, only when it cannot possibly be live across a call.
+ //
+ // So we do
+ // hu = uintptr(unsafe.Pointer(hs.ptr))
+ // for i := 0; i < hs.len; i++ {
+ // hp = (*T)(unsafe.Pointer(hu))
+ // v1, v2 = i, *hp
+ // ... body of loop ...
+ // hu = uintptr(unsafe.Pointer(hp)) + elemsize
+ // }
+ //
+ // Between the assignments to hu and the assignment back to hp, there
+ // must not be any calls.
+
+ // Pointer to current iteration position. Start on entry to the loop
+ // with the pointer in hu.
+ ptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, hs)
+ ptr.SetBounded(true)
+ huVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], ptr)
+ huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
+ hu := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
+ init = append(init, ir.NewAssignStmt(base.Pos, hu, huVal))
+
+ // Convert hu to hp at the top of the loop (after the condition has been checked).
+ hpVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hu)
+ hpVal.SetCheckPtr(true) // disable checkptr on this conversion
+ hpVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, elem.PtrTo(), hpVal)
+ hp := typecheck.TempAt(base.Pos, ir.CurFunc, elem.PtrTo())
+ body = append(body, ir.NewAssignStmt(base.Pos, hp, hpVal))
+
+ // Assign variables on the LHS of the range statement. Use *hp to get the element.
+ e := ir.NewStarExpr(base.Pos, hp)
+ e.SetBounded(true)
+ a := rangeAssign2(nrange, hv1, e)
+ body = append(body, a)
+
+ // Advance pointer for next iteration of the loop.
+ // This reads from hp and writes to hu.
+ huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hp)
+ huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
+ as := ir.NewAssignStmt(base.Pos, hu, ir.NewBinaryExpr(base.Pos, ir.OADD, huVal, ir.NewInt(base.Pos, elem.Size())))
+ nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})
+
+ case k == types.TMAP:
+ // order.stmt allocated the iterator for us.
+ // we only use a once, so no copy needed.
+ ha := a
+
+ hit := nrange.Prealloc
+ th := hit.Type()
+ // depends on layout of iterator struct.
+ // See cmd/compile/internal/reflectdata/reflect.go:MapIterType
+ keysym := th.Field(0).Sym
+ elemsym := th.Field(1).Sym // ditto
+
+ fn := typecheck.LookupRuntime("mapiterinit", t.Key(), t.Elem(), th)
+ init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
+
+ fn = typecheck.LookupRuntime("mapiternext", th)
+ nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))
+
+ key := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), types.NewPtr(t.Key())))
+ if v1 == nil {
+ body = nil
+ } else if v2 == nil {
+ body = []ir.Node{rangeAssign(nrange, key)}
+ } else {
+ elem := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym), types.NewPtr(t.Elem())))
+ body = []ir.Node{rangeAssign2(nrange, key, elem)}
+ }
+
+ case k == types.TCHAN:
+ // order.stmt arranged for a copy of the channel variable.
+ ha := a
+
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t.Elem())
+ hv1.SetTypecheck(1)
+ if t.Elem().HasPointers() {
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+ }
+ hb := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(base.Pos, false))
+ lhs := []ir.Node{hv1, hb}
+ rhs := []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, lhs, rhs)
+ a.SetTypecheck(1)
+ nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)
+ if v1 == nil {
+ body = nil
+ } else {
+ body = []ir.Node{rangeAssign(nrange, hv1)}
+ }
+ // Zero hv1. This prevents hv1 from being the sole, inaccessible
+ // reference to an otherwise GC-able value during the next channel receive.
+ // See issue 15281.
+ body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+ case k == types.TSTRING:
+ // Transform string range statements like "for v1, v2 = range a" into
+ //
+ // ha := a
+ // for hv1 := 0; hv1 < len(ha); {
+ // hv1t := hv1
+ // hv2 := rune(ha[hv1])
+ // if hv2 < utf8.RuneSelf {
+ // hv1++
+ // } else {
+ // hv2, hv1 = decoderune(ha, hv1)
+ // }
+ // v1, v2 = hv1t, hv2
+ // // original body
+ // }
+
+ // order.stmt arranged for a copy of the string variable.
+ ha := a
+
+ hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ hv1t := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ hv2 := typecheck.TempAt(base.Pos, ir.CurFunc, types.RuneType)
+
+ // hv1 := 0
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+ // hv1 < len(ha)
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))
+
+ if v1 != nil {
+ // hv1t = hv1
+ body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
+ }
+
+ // hv2 := rune(ha[hv1])
+ nind := ir.NewIndexExpr(base.Pos, ha, hv1)
+ nind.SetBounded(true)
+ body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))
+
+ // if hv2 < utf8.RuneSelf
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(base.Pos, utf8.RuneSelf))
+
+ // hv1++
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))}
+
+ // } else {
+ // hv2, hv1 = decoderune(ha, hv1)
+ fn := typecheck.LookupRuntime("decoderune")
+ call := mkcall1(fn, fn.Type().ResultsTuple(), &nif.Else, ha, hv1)
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
+ nif.Else.Append(a)
+
+ body = append(body, nif)
+
+ if v1 != nil {
+ if v2 != nil {
+ // v1, v2 = hv1t, hv2
+ body = append(body, rangeAssign2(nrange, hv1t, hv2))
+ } else {
+ // v1 = hv1t
+ body = append(body, rangeAssign(nrange, hv1t))
+ }
+ }
+ }
+
+ typecheck.Stmts(init)
+
+ nfor.PtrInit().Append(init...)
+
+ typecheck.Stmts(nfor.Cond.Init())
+
+ nfor.Cond = typecheck.Expr(nfor.Cond)
+ nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
+ nfor.Post = typecheck.Stmt(nfor.Post)
+ typecheck.Stmts(body)
+ nfor.Body.Append(body...)
+ nfor.Body.Append(nrange.Body...)
+
+ var n ir.Node = nfor
+
+ n = walkStmt(n)
+
+ base.Pos = lno
+ return n
+}
+
+// rangeAssign returns "n.Key = key".
+func rangeAssign(n *ir.RangeStmt, key ir.Node) ir.Node {
+ key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
+ return ir.NewAssignStmt(n.Pos(), n.Key, key)
+}
+
+// rangeAssign2 returns "n.Key, n.Value = key, value".
+func rangeAssign2(n *ir.RangeStmt, key, value ir.Node) ir.Node {
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] = range".
+ key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
+ value = rangeConvert(n, n.Value.Type(), value, n.ValueTypeWord, n.ValueSrcRType)
+ return ir.NewAssignListStmt(n.Pos(), ir.OAS2, []ir.Node{n.Key, n.Value}, []ir.Node{key, value})
+}
+
+// rangeConvert returns src, converted to dst if necessary. If a
+// conversion is necessary, then typeWord and srcRType are copied to
+// their respective ConvExpr fields.
+func rangeConvert(nrange *ir.RangeStmt, dst *types.Type, src, typeWord, srcRType ir.Node) ir.Node {
+ src = typecheck.Expr(src)
+ if dst.Kind() == types.TBLANK || types.Identical(dst, src.Type()) {
+ return src
+ }
+
+ n := ir.NewConvExpr(nrange.Pos(), ir.OCONV, dst, src)
+ n.TypeWord = typeWord
+ n.SrcRType = srcRType
+ return typecheck.Expr(n)
+}
+
+// isMapClear checks if n is of the form:
+//
+// for k := range m {
+// delete(m, k)
+// }
+//
+// where == for keys of map m is reflexive.
+func isMapClear(n *ir.RangeStmt) bool {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return false
+ }
+
+ t := n.X.Type()
+ if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {
+ return false
+ }
+
+ k := n.Key
+ // Require k to be a new variable name.
+ if !ir.DeclaredBy(k, n) {
+ return false
+ }
+
+ if len(n.Body) != 1 {
+ return false
+ }
+
+ stmt := n.Body[0] // only stmt in body
+ if stmt == nil || stmt.Op() != ir.ODELETE {
+ return false
+ }
+
+ m := n.X
+ if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
+ return false
+ }
+
+ // Keys where equality is not reflexive can not be deleted from maps.
+ if !types.IsReflexive(t.Key()) {
+ return false
+ }
+
+ return true
+}
+
+// mapRangeClear constructs a call to runtime.mapclear for the map range idiom.
+func mapRangeClear(nrange *ir.RangeStmt) ir.Node {
+ m := nrange.X
+ origPos := ir.SetPos(m)
+ defer func() { base.Pos = origPos }()
+
+ return mapClear(m, reflectdata.RangeMapRType(base.Pos, nrange))
+}
+
+// mapClear constructs a call to runtime.mapclear for the map m.
+func mapClear(m, rtyp ir.Node) ir.Node {
+ t := m.Type()
+
+ // instantiate mapclear(typ *type, hmap map[any]any)
+ fn := typecheck.LookupRuntime("mapclear", t.Key(), t.Elem())
+ n := mkcallstmt1(fn, rtyp, m)
+ return walkStmt(typecheck.Stmt(n))
+}
+
+// Lower n into runtime·memclr if possible, for
+// fast zeroing of slices and arrays (issue 5373).
+// Look for instances of
+//
+// for i := range a {
+// a[i] = zero
+// }
+//
+// in which the evaluation of a is side-effect-free.
+//
+// Parameters are as in walkRange: "for v1, v2 = range a".
+func arrayRangeClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return nil
+ }
+
+ if v1 == nil || v2 != nil {
+ return nil
+ }
+
+ if len(loop.Body) != 1 || loop.Body[0] == nil {
+ return nil
+ }
+
+ stmt1 := loop.Body[0] // only stmt in body
+ if stmt1.Op() != ir.OAS {
+ return nil
+ }
+ stmt := stmt1.(*ir.AssignStmt)
+ if stmt.X.Op() != ir.OINDEX {
+ return nil
+ }
+ lhs := stmt.X.(*ir.IndexExpr)
+ x := lhs.X
+ if a.Type().IsPtr() && a.Type().Elem().IsArray() {
+ if s, ok := x.(*ir.StarExpr); ok && s.Op() == ir.ODEREF {
+ x = s.X
+ }
+ }
+
+ if !ir.SameSafeExpr(x, a) || !ir.SameSafeExpr(lhs.Index, v1) {
+ return nil
+ }
+
+ if !ir.IsZero(stmt.Y) {
+ return nil
+ }
+
+ return arrayClear(stmt.Pos(), a, loop)
+}
+
+// arrayClear constructs a call to runtime.memclr for fast zeroing of slices and arrays.
+func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
+ elemsize := typecheck.RangeExprType(a.Type()).Elem().Size()
+ if elemsize <= 0 {
+ return nil
+ }
+
+ // Convert to
+ // if len(a) != 0 {
+ // hp = &a[0]
+ // hn = len(a)*sizeof(elem(a))
+ // memclr{NoHeap,Has}Pointers(hp, hn)
+ // i = len(a) - 1
+ // }
+ n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 0))
+
+ // hp = &a[0]
+ hp := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUNSAFEPTR])
+
+ ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(base.Pos, 0))
+ ix.SetBounded(true)
+ addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+ n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
+
+ // hn = len(a) * sizeof(elem(a))
+ hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
+ mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, elemsize)), types.Types[types.TUINTPTR])
+ n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
+
+ var fn ir.Node
+ if a.Type().Elem().HasPointers() {
+ // memclrHasPointers(hp, hn)
+ ir.CurFunc.SetWBPos(wbPos)
+ fn = mkcallstmt("memclrHasPointers", hp, hn)
+ } else {
+ // memclrNoHeapPointers(hp, hn)
+ fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
+ }
+
+ n.Body.Append(fn)
+
+ // For array range clear, also set "i = len(a) - 1"
+ if nrange != nil {
+ idx := ir.NewAssignStmt(base.Pos, nrange.Key, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 1)))
+ n.Body.Append(idx)
+ }
+
+ n.Cond = typecheck.Expr(n.Cond)
+ n.Cond = typecheck.DefaultLit(n.Cond, nil)
+ typecheck.Stmts(n.Body)
+ return walkStmt(n)
+}
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
new file mode 100644
index 0000000..ca6a76a
--- /dev/null
+++ b/src/cmd/compile/internal/walk/select.go
@@ -0,0 +1,302 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func walkSelect(sel *ir.SelectStmt) {
+ lno := ir.SetPos(sel)
+ if sel.Walked() {
+ base.Fatalf("double walkSelect")
+ }
+ sel.SetWalked(true)
+
+ init := ir.TakeInit(sel)
+
+ init = append(init, walkSelectCases(sel.Cases)...)
+ sel.Cases = nil
+
+ sel.Compiled = init
+ walkStmtList(sel.Compiled)
+
+ base.Pos = lno
+}
+
+func walkSelectCases(cases []*ir.CommClause) []ir.Node {
+ ncas := len(cases)
+ sellineno := base.Pos
+
+ // optimization: zero-case select
+ if ncas == 0 {
+ return []ir.Node{mkcallstmt("block")}
+ }
+
+ // optimization: one-case select: single op.
+ if ncas == 1 {
+ cas := cases[0]
+ ir.SetPos(cas)
+ l := cas.Init()
+ if cas.Comm != nil { // not default:
+ n := cas.Comm
+ l = append(l, ir.TakeInit(n)...)
+ switch n.Op() {
+ default:
+ base.Fatalf("select %v", n.Op())
+
+ case ir.OSEND:
+ // already ok
+
+ case ir.OSELRECV2:
+ r := n.(*ir.AssignListStmt)
+ if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) {
+ n = r.Rhs[0]
+ break
+ }
+ r.SetOp(ir.OAS2RECV)
+ }
+
+ l = append(l, n)
+ }
+
+ l = append(l, cas.Body...)
+ l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+ return l
+ }
+
+ // convert case value arguments to addresses.
+ // this rewrite is used by both the general code and the next optimization.
+ var dflt *ir.CommClause
+ for _, cas := range cases {
+ ir.SetPos(cas)
+ n := cas.Comm
+ if n == nil {
+ dflt = cas
+ continue
+ }
+ switch n.Op() {
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ n.Value = typecheck.NodAddr(n.Value)
+ n.Value = typecheck.Expr(n.Value)
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ if !ir.IsBlank(n.Lhs[0]) {
+ n.Lhs[0] = typecheck.NodAddr(n.Lhs[0])
+ n.Lhs[0] = typecheck.Expr(n.Lhs[0])
+ }
+ }
+ }
+
+ // optimization: two-case select but one is default: single non-blocking op.
+ if ncas == 2 && dflt != nil {
+ cas := cases[0]
+ if cas == dflt {
+ cas = cases[1]
+ }
+
+ n := cas.Comm
+ ir.SetPos(n)
+ r := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ r.SetInit(cas.Init())
+ var cond ir.Node
+ switch n.Op() {
+ default:
+ base.Fatalf("select %v", n.Op())
+
+ case ir.OSEND:
+ // if selectnbsend(c, v) { body } else { default body }
+ n := n.(*ir.SendStmt)
+ ch := n.Chan
+ cond = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value)
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ recv := n.Rhs[0].(*ir.UnaryExpr)
+ ch := recv.X
+ elem := n.Lhs[0]
+ if ir.IsBlank(elem) {
+ elem = typecheck.NodNil()
+ }
+ cond = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+ fn := chanfn("selectnbrecv", 2, ch.Type())
+ call := mkcall1(fn, fn.Type().ResultsTuple(), r.PtrInit(), elem, ch)
+ as := ir.NewAssignListStmt(r.Pos(), ir.OAS2, []ir.Node{cond, n.Lhs[1]}, []ir.Node{call})
+ r.PtrInit().Append(typecheck.Stmt(as))
+ }
+
+ r.Cond = typecheck.Expr(cond)
+ r.Body = cas.Body
+ r.Else = append(dflt.Init(), dflt.Body...)
+ return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
+ }
+
+ if dflt != nil {
+ ncas--
+ }
+ casorder := make([]*ir.CommClause, ncas)
+ nsends, nrecvs := 0, 0
+
+ var init []ir.Node
+
+ // generate sel-struct
+ base.Pos = sellineno
+ selv := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(scasetype(), int64(ncas)))
+ init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
+
+ // No initialization for order; runtime.selectgo is responsible for that.
+ order := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
+
+ var pc0, pcs ir.Node
+ if base.Flag.Race {
+ pcs = typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+ pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(base.Pos, 0))))
+ } else {
+ pc0 = typecheck.NodNil()
+ }
+
+ // register cases
+ for _, cas := range cases {
+ ir.SetPos(cas)
+
+ init = append(init, ir.TakeInit(cas)...)
+
+ n := cas.Comm
+ if n == nil { // default:
+ continue
+ }
+
+ var i int
+ var c, elem ir.Node
+ switch n.Op() {
+ default:
+ base.Fatalf("select %v", n.Op())
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ i = nsends
+ nsends++
+ c = n.Chan
+ elem = n.Value
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ nrecvs++
+ i = ncas - nrecvs
+ recv := n.Rhs[0].(*ir.UnaryExpr)
+ c = recv.X
+ elem = n.Lhs[0]
+ }
+
+ casorder[i] = cas
+
+ setField := func(f string, val ir.Node) {
+ r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(base.Pos, int64(i))), typecheck.Lookup(f)), val)
+ init = append(init, typecheck.Stmt(r))
+ }
+
+ c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR])
+ setField("c", c)
+ if !ir.IsBlank(elem) {
+ elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR])
+ setField("elem", elem)
+ }
+
+ // TODO(mdempsky): There should be a cleaner way to
+ // handle this.
+ if base.Flag.Race {
+ r := mkcallstmt("selectsetpc", typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(base.Pos, int64(i)))))
+ init = append(init, r)
+ }
+ }
+ if nsends+nrecvs != ncas {
+ base.Fatalf("walkSelectCases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+ }
+
+ // run the select
+ base.Pos = sellineno
+ chosen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ recvOK := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+ r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+ r.Lhs = []ir.Node{chosen, recvOK}
+ fn := typecheck.LookupRuntime("selectgo")
+ var fnInit ir.Nodes
+ r.Rhs = []ir.Node{mkcall1(fn, fn.Type().ResultsTuple(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(base.Pos, int64(nsends)), ir.NewInt(base.Pos, int64(nrecvs)), ir.NewBool(base.Pos, dflt == nil))}
+ init = append(init, fnInit...)
+ init = append(init, typecheck.Stmt(r))
+
+ // selv, order, and pcs (if race) are no longer alive after selectgo.
+
+ // dispatch cases
+ dispatch := func(cond ir.Node, cas *ir.CommClause) {
+ var list ir.Nodes
+
+ if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 {
+ n := n.(*ir.AssignListStmt)
+ if !ir.IsBlank(n.Lhs[1]) {
+ x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK)
+ list.Append(typecheck.Stmt(x))
+ }
+ }
+
+ list.Append(cas.Body.Take()...)
+ list.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+
+ var r ir.Node
+ if cond != nil {
+ cond = typecheck.Expr(cond)
+ cond = typecheck.DefaultLit(cond, nil)
+ r = ir.NewIfStmt(base.Pos, cond, list, nil)
+ } else {
+ r = ir.NewBlockStmt(base.Pos, list)
+ }
+
+ init = append(init, r)
+ }
+
+ if dflt != nil {
+ ir.SetPos(dflt)
+ dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(base.Pos, 0)), dflt)
+ }
+ for i, cas := range casorder {
+ ir.SetPos(cas)
+ if i == len(casorder)-1 {
+ dispatch(nil, cas)
+ break
+ }
+ dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(base.Pos, int64(i))), cas)
+ }
+
+ return init
+}
+
+// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
+ s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(base.Pos, i)))
+ t := types.NewPtr(types.Types[types.TUINT8])
+ return typecheck.ConvNop(s, t)
+}
+
+var scase *types.Type
+
+// Keep in sync with src/runtime/select.go.
+func scasetype() *types.Type {
+ if scase == nil {
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("scase"))
+ scase = types.NewNamed(n)
+ n.SetType(scase)
+ n.SetTypecheck(1)
+
+ scase.SetUnderlying(types.NewStruct([]*types.Field{
+ types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]),
+ types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]),
+ }))
+ }
+ return scase
+}
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
new file mode 100644
index 0000000..b2a226e
--- /dev/null
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -0,0 +1,229 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// The result of walkStmt MUST be assigned back to n, e.g.
+//
+// n.Left = walkStmt(n.Left)
+func walkStmt(n ir.Node) ir.Node {
+ if n == nil {
+ return n
+ }
+
+ ir.SetPos(n)
+
+ walkStmtList(n.Init())
+
+ switch n.Op() {
+ default:
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ base.Errorf("%v is not a top level statement", n.Sym())
+ } else {
+ base.Errorf("%v is not a top level statement", n.Op())
+ }
+ ir.Dump("nottop", n)
+ return n
+
+ case ir.OAS,
+ ir.OASOP,
+ ir.OAS2,
+ ir.OAS2DOTTYPE,
+ ir.OAS2RECV,
+ ir.OAS2FUNC,
+ ir.OAS2MAPR,
+ ir.OCLEAR,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.OCALLINTER,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.ODELETE,
+ ir.OSEND,
+ ir.OPRINT,
+ ir.OPRINTLN,
+ ir.OPANIC,
+ ir.ORECOVERFP,
+ ir.OGETG:
+ if n.Typecheck() == 0 {
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+
+ init := ir.TakeInit(n)
+ n = walkExpr(n, &init)
+ if n.Op() == ir.ONAME {
+ // copy rewrote to a statement list and a temp for the length.
+ // Throw away the temp to avoid plain values as statements.
+ n = ir.NewBlockStmt(n.Pos(), init)
+ init = nil
+ }
+ if len(init) > 0 {
+ switch n.Op() {
+ case ir.OAS, ir.OAS2, ir.OBLOCK:
+ n.(ir.InitNode).PtrInit().Prepend(init...)
+
+ default:
+ init.Append(n)
+ n = ir.NewBlockStmt(n.Pos(), init)
+ }
+ }
+ return n
+
+ // special case for a receive where we throw away
+ // the value received.
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ return walkRecv(n)
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.OJUMPTABLE,
+ ir.OINTERFACESWITCH,
+ ir.ODCL,
+ ir.OCHECKNIL:
+ return n
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ walkStmtList(n.List)
+ return n
+
+ case ir.OCASE:
+ base.Errorf("case statement out of place")
+ panic("unreachable")
+
+ case ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ ir.CurFunc.SetHasDefer(true)
+ ir.CurFunc.NumDefers++
+ if ir.CurFunc.NumDefers > maxOpenDefers || n.DeferAt != nil {
+ // Don't allow open-coded defers if there are more than
+ // 8 defers in the function, since we use a single
+ // byte to record active defers.
+ // Also don't allow if we need to use deferprocat.
+ ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+ }
+ if n.Esc() != ir.EscNever {
+ // If n.Esc is not EscNever, then this defer occurs in a loop,
+ // so open-coded defers cannot be used in this function.
+ ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+ }
+ fallthrough
+ case ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ return walkGoDefer(n)
+
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ return walkFor(n)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ return walkIf(n)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ return walkReturn(n)
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+
+ var init ir.Nodes
+ n.Call.Fun = walkExpr(n.Call.Fun, &init)
+
+ if len(init) > 0 {
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+ }
+ return n
+
+ case ir.OINLMARK:
+ n := n.(*ir.InlineMarkStmt)
+ return n
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ walkSelect(n)
+ return n
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ walkSwitch(n)
+ return n
+
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ return walkRange(n)
+ }
+
+ // No return! Each case must return (or panic),
+ // to avoid confusion about what gets returned
+ // in the presence of type assertions.
+}
+
+func walkStmtList(s []ir.Node) {
+ for i := range s {
+ s[i] = walkStmt(s[i])
+ }
+}
+
+// walkFor walks an OFOR node.
+func walkFor(n *ir.ForStmt) ir.Node {
+ if n.Cond != nil {
+ init := ir.TakeInit(n.Cond)
+ walkStmtList(init)
+ n.Cond = walkExpr(n.Cond, &init)
+ n.Cond = ir.InitExpr(init, n.Cond)
+ }
+
+ n.Post = walkStmt(n.Post)
+ walkStmtList(n.Body)
+ return n
+}
+
+// validGoDeferCall reports whether call is a valid call to appear in
+// a go or defer statement; that is, whether it's a regular function
+// call without arguments or results.
+func validGoDeferCall(call ir.Node) bool {
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 {
+ sig := call.Fun.Type()
+ return sig.NumParams()+sig.NumResults() == 0
+ }
+ return false
+}
+
+// walkGoDefer walks an OGO or ODEFER node.
+func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
+ if !validGoDeferCall(n.Call) {
+ base.FatalfAt(n.Pos(), "invalid %v call: %v", n.Op(), n.Call)
+ }
+
+ var init ir.Nodes
+
+ call := n.Call.(*ir.CallExpr)
+ call.Fun = walkExpr(call.Fun, &init)
+
+ if len(init) > 0 {
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+ }
+ return n
+}
+
+// walkIf walks an OIF node.
+func walkIf(n *ir.IfStmt) ir.Node {
+ n.Cond = walkExpr(n.Cond, n.PtrInit())
+ walkStmtList(n.Body)
+ walkStmtList(n.Else)
+ return n
+}
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
new file mode 100644
index 0000000..b67d011
--- /dev/null
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -0,0 +1,966 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math/bits"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/rttype"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// walkSwitch walks a switch statement.
+func walkSwitch(sw *ir.SwitchStmt) {
+ // Guard against double walk, see #25776.
+ if sw.Walked() {
+ return // Was fatal, but eliminating every possible source of double-walking is hard
+ }
+ sw.SetWalked(true)
+
+ if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
+ walkSwitchType(sw)
+ } else {
+ walkSwitchExpr(sw)
+ }
+}
+
+// walkSwitchExpr generates an AST implementing sw. sw is an
+// expression switch.
+func walkSwitchExpr(sw *ir.SwitchStmt) {
+ lno := ir.SetPos(sw)
+
+ cond := sw.Tag
+ sw.Tag = nil
+
+ // convert switch {...} to switch true {...}
+ if cond == nil {
+ cond = ir.NewBool(base.Pos, true)
+ cond = typecheck.Expr(cond)
+ cond = typecheck.DefaultLit(cond, nil)
+ }
+
+ // Given "switch string(byteslice)",
+ // with all cases being side-effect free,
+ // use a zero-cost alias of the byte slice.
+ // Do this before calling walkExpr on cond,
+ // because walkExpr will lower the string
+ // conversion into a runtime call.
+ // See issue 24937 for more discussion.
+ if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+ cond := cond.(*ir.ConvExpr)
+ cond.SetOp(ir.OBYTES2STRTMP)
+ }
+
+ cond = walkExpr(cond, sw.PtrInit())
+ if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+ cond = copyExpr(cond, cond.Type(), &sw.Compiled)
+ }
+
+ base.Pos = lno
+
+ s := exprSwitch{
+ pos: lno,
+ exprname: cond,
+ }
+
+ var defaultGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.Cases {
+ label := typecheck.AutoLabel(".s")
+ jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+ // Process case dispatch.
+ if len(ncase.List) == 0 {
+ if defaultGoto != nil {
+ base.Fatalf("duplicate default case not detected during typechecking")
+ }
+ defaultGoto = jmp
+ }
+
+ for i, n1 := range ncase.List {
+ var rtype ir.Node
+ if i < len(ncase.RTypes) {
+ rtype = ncase.RTypes[i]
+ }
+ s.Add(ncase.Pos(), n1, rtype, jmp)
+ }
+
+ // Process body.
+ body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+ body.Append(ncase.Body...)
+ if fall, pos := endsInFallthrough(ncase.Body); !fall {
+ br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+ br.SetPos(pos)
+ body.Append(br)
+ }
+ }
+ sw.Cases = nil
+
+ if defaultGoto == nil {
+ br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+ br.SetPos(br.Pos().WithNotStmt())
+ defaultGoto = br
+ }
+
+ s.Emit(&sw.Compiled)
+ sw.Compiled.Append(defaultGoto)
+ sw.Compiled.Append(body.Take()...)
+ walkStmtList(sw.Compiled)
+}
+
+// An exprSwitch walks an expression switch.
+type exprSwitch struct {
+ pos src.XPos
+ exprname ir.Node // value being switched on
+
+ done ir.Nodes
+ clauses []exprClause
+}
+
+type exprClause struct {
+ pos src.XPos
+ lo, hi ir.Node
+ rtype ir.Node // *runtime._type for OEQ node
+ jmp ir.Node
+}
+
+func (s *exprSwitch) Add(pos src.XPos, expr, rtype, jmp ir.Node) {
+ c := exprClause{pos: pos, lo: expr, hi: expr, rtype: rtype, jmp: jmp}
+ if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
+ s.clauses = append(s.clauses, c)
+ return
+ }
+
+ s.flush()
+ s.clauses = append(s.clauses, c)
+ s.flush()
+}
+
+func (s *exprSwitch) Emit(out *ir.Nodes) {
+ s.flush()
+ out.Append(s.done.Take()...)
+}
+
+func (s *exprSwitch) flush() {
+ cc := s.clauses
+ s.clauses = nil
+ if len(cc) == 0 {
+ return
+ }
+
+ // Caution: If len(cc) == 1, then cc[0] might not an OLITERAL.
+ // The code below is structured to implicitly handle this case
+ // (e.g., sort.Slice doesn't need to invoke the less function
+ // when there's only a single slice element).
+
+ if s.exprname.Type().IsString() && len(cc) >= 2 {
+ // Sort strings by length and then by value. It is
+ // much cheaper to compare lengths than values, and
+ // all we need here is consistency. We respect this
+ // sorting below.
+ sort.Slice(cc, func(i, j int) bool {
+ si := ir.StringVal(cc[i].lo)
+ sj := ir.StringVal(cc[j].lo)
+ if len(si) != len(sj) {
+ return len(si) < len(sj)
+ }
+ return si < sj
+ })
+
+ // runLen returns the string length associated with a
+ // particular run of exprClauses.
+ runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
+
+ // Collapse runs of consecutive strings with the same length.
+ var runs [][]exprClause
+ start := 0
+ for i := 1; i < len(cc); i++ {
+ if runLen(cc[start:]) != runLen(cc[i:]) {
+ runs = append(runs, cc[start:i])
+ start = i
+ }
+ }
+ runs = append(runs, cc[start:])
+
+ // We have strings of more than one length. Generate an
+ // outer switch which switches on the length of the string
+ // and an inner switch in each case which resolves all the
+ // strings of the same length. The code looks something like this:
+
+ // goto outerLabel
+ // len5:
+ // ... search among length 5 strings ...
+ // goto endLabel
+ // len8:
+ // ... search among length 8 strings ...
+ // goto endLabel
+ // ... other lengths ...
+ // outerLabel:
+ // switch len(s) {
+ // case 5: goto len5
+ // case 8: goto len8
+ // ... other lengths ...
+ // }
+ // endLabel:
+
+ outerLabel := typecheck.AutoLabel(".s")
+ endLabel := typecheck.AutoLabel(".s")
+
+ // Jump around all the individual switches for each length.
+ s.done.Append(ir.NewBranchStmt(s.pos, ir.OGOTO, outerLabel))
+
+ var outer exprSwitch
+ outer.exprname = ir.NewUnaryExpr(s.pos, ir.OLEN, s.exprname)
+ outer.exprname.SetType(types.Types[types.TINT])
+
+ for _, run := range runs {
+ // Target label to jump to when we match this length.
+ label := typecheck.AutoLabel(".s")
+
+ // Search within this run of same-length strings.
+ pos := run[0].pos
+ s.done.Append(ir.NewLabelStmt(pos, label))
+ stringSearch(s.exprname, run, &s.done)
+ s.done.Append(ir.NewBranchStmt(pos, ir.OGOTO, endLabel))
+
+ // Add length case to outer switch.
+ cas := ir.NewInt(pos, runLen(run))
+ jmp := ir.NewBranchStmt(pos, ir.OGOTO, label)
+ outer.Add(pos, cas, nil, jmp)
+ }
+ s.done.Append(ir.NewLabelStmt(s.pos, outerLabel))
+ outer.Emit(&s.done)
+ s.done.Append(ir.NewLabelStmt(s.pos, endLabel))
+ return
+ }
+
+ sort.Slice(cc, func(i, j int) bool {
+ return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
+ })
+
+ // Merge consecutive integer cases.
+ if s.exprname.Type().IsInteger() {
+ consecutive := func(last, next constant.Value) bool {
+ delta := constant.BinaryOp(next, token.SUB, last)
+ return constant.Compare(delta, token.EQL, constant.MakeInt64(1))
+ }
+
+ merged := cc[:1]
+ for _, c := range cc[1:] {
+ last := &merged[len(merged)-1]
+ if last.jmp == c.jmp && consecutive(last.hi.Val(), c.lo.Val()) {
+ last.hi = c.lo
+ } else {
+ merged = append(merged, c)
+ }
+ }
+ cc = merged
+ }
+
+ s.search(cc, &s.done)
+}
+
+func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
+ if s.tryJumpTable(cc, out) {
+ return
+ }
+ binarySearch(len(cc), out,
+ func(i int) ir.Node {
+ return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi)
+ },
+ func(i int, nif *ir.IfStmt) {
+ c := &cc[i]
+ nif.Cond = c.test(s.exprname)
+ nif.Body = []ir.Node{c.jmp}
+ },
+ )
+}
+
+// Try to implement the clauses with a jump table. Returns true if successful.
+func (s *exprSwitch) tryJumpTable(cc []exprClause, out *ir.Nodes) bool {
+ const minCases = 8 // have at least minCases cases in the switch
+ const minDensity = 4 // use at least 1 out of every minDensity entries
+
+ if base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable || base.Ctxt.Retpoline {
+ return false
+ }
+ if len(cc) < minCases {
+ return false // not enough cases for it to be worth it
+ }
+ if cc[0].lo.Val().Kind() != constant.Int {
+ return false // e.g. float
+ }
+ if s.exprname.Type().Size() > int64(types.PtrSize) {
+ return false // 64-bit switches on 32-bit archs
+ }
+ min := cc[0].lo.Val()
+ max := cc[len(cc)-1].hi.Val()
+ width := constant.BinaryOp(constant.BinaryOp(max, token.SUB, min), token.ADD, constant.MakeInt64(1))
+ limit := constant.MakeInt64(int64(len(cc)) * minDensity)
+ if constant.Compare(width, token.GTR, limit) {
+ // We disable jump tables if we use less than a minimum fraction of the entries.
+ // i.e. for switch x {case 0: case 1000: case 2000:} we don't want to use a jump table.
+ return false
+ }
+ jt := ir.NewJumpTableStmt(base.Pos, s.exprname)
+ for _, c := range cc {
+ jmp := c.jmp.(*ir.BranchStmt)
+ if jmp.Op() != ir.OGOTO || jmp.Label == nil {
+ panic("bad switch case body")
+ }
+ for i := c.lo.Val(); constant.Compare(i, token.LEQ, c.hi.Val()); i = constant.BinaryOp(i, token.ADD, constant.MakeInt64(1)) {
+ jt.Cases = append(jt.Cases, i)
+ jt.Targets = append(jt.Targets, jmp.Label)
+ }
+ }
+ out.Append(jt)
+ return true
+}
+
+func (c *exprClause) test(exprname ir.Node) ir.Node {
+ // Integer range.
+ if c.hi != c.lo {
+ low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo)
+ high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi)
+ return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high)
+ }
+
+ // Optimize "switch true { ...}" and "switch false { ... }".
+ if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
+ if ir.BoolVal(exprname) {
+ return c.lo
+ } else {
+ return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo)
+ }
+ }
+
+ n := ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
+ n.RType = c.rtype
+ return n
+}
+
+func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
+ // In theory, we could be more aggressive, allowing any
+ // side-effect-free expressions in cases, but it's a bit
+ // tricky because some of that information is unavailable due
+ // to the introduction of temporaries during order.
+ // Restricting to constants is simple and probably powerful
+ // enough.
+
+ for _, ncase := range sw.Cases {
+ for _, v := range ncase.List {
+ if v.Op() != ir.OLITERAL {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
+func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
+ if len(stmts) == 0 {
+ return false, src.NoXPos
+ }
+ i := len(stmts) - 1
+ return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
+}
+
+// walkSwitchType generates an AST that implements sw, where sw is a
+// type switch.
+func walkSwitchType(sw *ir.SwitchStmt) {
+ var s typeSwitch
+ s.srcName = sw.Tag.(*ir.TypeSwitchGuard).X
+ s.srcName = walkExpr(s.srcName, sw.PtrInit())
+ s.srcName = copyExpr(s.srcName, s.srcName.Type(), &sw.Compiled)
+ s.okName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])
+ s.itabName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINT8].PtrTo())
+
+ // Get interface descriptor word.
+ // For empty interfaces this will be the type.
+ // For non-empty interfaces this will be the itab.
+ srcItab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.srcName)
+ srcData := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s.srcName)
+ srcData.SetType(types.Types[types.TUINT8].PtrTo())
+ srcData.SetTypecheck(1)
+
+ // For empty interfaces, do:
+ // if e._type == nil {
+ // do nil case if it exists, otherwise default
+ // }
+ // h := e._type.hash
+ // Use a similar strategy for non-empty interfaces.
+ ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, srcItab, typecheck.NodNil())
+ base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
+ ifNil.Cond = typecheck.Expr(ifNil.Cond)
+ ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
+ // ifNil.Nbody assigned later.
+ sw.Compiled.Append(ifNil)
+
+ // Load hash from type or itab.
+ dotHash := typeHashFieldOf(base.Pos, srcItab)
+ s.hashName = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+
+ // Make a label for each case body.
+ labels := make([]*types.Sym, len(sw.Cases))
+ for i := range sw.Cases {
+ labels[i] = typecheck.AutoLabel(".s")
+ }
+
+ // "jump" to execute if no case matches.
+ br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+
+ // Assemble a list of all the types we're looking for.
+ // This pass flattens the case lists, as well as handles
+ // some unusual cases, like default and nil cases.
+ type oneCase struct {
+ pos src.XPos
+ jmp ir.Node // jump to body of selected case
+
+ // The case we're matching. Normally the type we're looking for
+ // is typ.Type(), but when typ is ODYNAMICTYPE the actual type
+ // we're looking for is not a compile-time constant (typ.Type()
+ // will be its shape).
+ typ ir.Node
+ }
+ var cases []oneCase
+ var defaultGoto, nilGoto ir.Node
+ for i, ncase := range sw.Cases {
+ jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, labels[i])
+ if len(ncase.List) == 0 { // default:
+ if defaultGoto != nil {
+ base.Fatalf("duplicate default case not detected during typechecking")
+ }
+ defaultGoto = jmp
+ }
+ for _, n1 := range ncase.List {
+ if ir.IsNil(n1) { // case nil:
+ if nilGoto != nil {
+ base.Fatalf("duplicate nil case not detected during typechecking")
+ }
+ nilGoto = jmp
+ continue
+ }
+ if n1.Op() == ir.ODYNAMICTYPE {
+ // Convert dynamic to static, if the dynamic is actually static.
+ // TODO: why isn't this OTYPE to begin with?
+ dt := n1.(*ir.DynamicType)
+ if dt.RType != nil && dt.RType.Op() == ir.OADDR {
+ addr := dt.RType.(*ir.AddrExpr)
+ if addr.X.Op() == ir.OLINKSYMOFFSET {
+ n1 = ir.TypeNode(n1.Type())
+ }
+ }
+ if dt.ITab != nil && dt.ITab.Op() == ir.OADDR {
+ addr := dt.ITab.(*ir.AddrExpr)
+ if addr.X.Op() == ir.OLINKSYMOFFSET {
+ n1 = ir.TypeNode(n1.Type())
+ }
+ }
+ }
+ cases = append(cases, oneCase{
+ pos: ncase.Pos(),
+ typ: n1,
+ jmp: jmp,
+ })
+ }
+ }
+ if defaultGoto == nil {
+ defaultGoto = br
+ }
+ if nilGoto == nil {
+ nilGoto = defaultGoto
+ }
+ ifNil.Body = []ir.Node{nilGoto}
+
+ // Now go through the list of cases, processing groups as we find them.
+ var concreteCases []oneCase
+ var interfaceCases []oneCase
+ flush := func() {
+ // Process all the concrete types first. Because we handle shadowing
+ // below, it is correct to do all the concrete types before all of
+ // the interface types.
+ // The concrete cases can all be handled without a runtime call.
+ if len(concreteCases) > 0 {
+ var clauses []typeClause
+ for _, c := range concreteCases {
+ as := ir.NewAssignListStmt(c.pos, ir.OAS2,
+ []ir.Node{ir.BlankNode, s.okName}, // _, ok =
+ []ir.Node{ir.NewTypeAssertExpr(c.pos, s.srcName, c.typ.Type())}) // iface.(type)
+ nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil)
+ clauses = append(clauses, typeClause{
+ hash: types.TypeHash(c.typ.Type()),
+ body: []ir.Node{typecheck.Stmt(as), typecheck.Stmt(nif)},
+ })
+ }
+ s.flush(clauses, &sw.Compiled)
+ concreteCases = concreteCases[:0]
+ }
+
+ // The "any" case, if it exists, must be the last interface case, because
+ // it would shadow all subsequent cases. Strip it off here so the runtime
+ // call only needs to handle non-empty interfaces.
+ var anyGoto ir.Node
+ if len(interfaceCases) > 0 && interfaceCases[len(interfaceCases)-1].typ.Type().IsEmptyInterface() {
+ anyGoto = interfaceCases[len(interfaceCases)-1].jmp
+ interfaceCases = interfaceCases[:len(interfaceCases)-1]
+ }
+
+ // Next, process all the interface types with a single call to the runtime.
+ if len(interfaceCases) > 0 {
+
+ // Build an internal/abi.InterfaceSwitch descriptor to pass to the runtime.
+ lsym := types.LocalPkg.Lookup(fmt.Sprintf(".interfaceSwitch.%d", interfaceSwitchGen)).LinksymABI(obj.ABI0)
+ interfaceSwitchGen++
+ c := rttype.NewCursor(lsym, 0, rttype.InterfaceSwitch)
+ c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyInterfaceSwitchCache"))
+ c.Field("NCases").WriteInt(int64(len(interfaceCases)))
+ array, sizeDelta := c.Field("Cases").ModifyArray(len(interfaceCases))
+ for i, c := range interfaceCases {
+ array.Elem(i).WritePtr(reflectdata.TypeSym(c.typ.Type()).Linksym())
+ }
+ objw.Global(lsym, int32(rttype.InterfaceSwitch.Size()+sizeDelta), obj.LOCAL)
+ // The GC only needs to see the first pointer in the structure (all the others
+ // are to static locations). So the InterfaceSwitch type itself is fine, even
+ // though it might not cover the whole array we wrote above.
+ lsym.Gotype = reflectdata.TypeLinksym(rttype.InterfaceSwitch)
+
+ // Call runtime to do switch
+ // case, itab = runtime.interfaceSwitch(&descriptor, typeof(arg))
+ var typeArg ir.Node
+ if s.srcName.Type().IsEmptyInterface() {
+ typeArg = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINT8].PtrTo(), srcItab)
+ } else {
+ typeArg = itabType(srcItab)
+ }
+ caseVar := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
+ isw := ir.NewInterfaceSwitchStmt(base.Pos, caseVar, s.itabName, typeArg, dotHash, lsym)
+ sw.Compiled.Append(isw)
+
+ // Switch on the result of the call (or cache lookup).
+ var newCases []*ir.CaseClause
+ for i, c := range interfaceCases {
+ newCases = append(newCases, &ir.CaseClause{
+ List: []ir.Node{ir.NewInt(base.Pos, int64(i))},
+ Body: []ir.Node{c.jmp},
+ })
+ }
+ // TODO: add len(newCases) case, mark switch as bounded
+ sw2 := ir.NewSwitchStmt(base.Pos, caseVar, newCases)
+ sw.Compiled.Append(typecheck.Stmt(sw2))
+ interfaceCases = interfaceCases[:0]
+ }
+
+ if anyGoto != nil {
+ // We've already handled the nil case, so everything
+ // that reaches here matches the "any" case.
+ sw.Compiled.Append(anyGoto)
+ }
+ }
+caseLoop:
+ for _, c := range cases {
+ if c.typ.Op() == ir.ODYNAMICTYPE {
+ flush() // process all previous cases
+ dt := c.typ.(*ir.DynamicType)
+ dot := ir.NewDynamicTypeAssertExpr(c.pos, ir.ODYNAMICDOTTYPE, s.srcName, dt.RType)
+ dot.ITab = dt.ITab
+ dot.SetType(c.typ.Type())
+ dot.SetTypecheck(1)
+
+ as := ir.NewAssignListStmt(c.pos, ir.OAS2, nil, nil)
+ as.Lhs = []ir.Node{ir.BlankNode, s.okName} // _, ok =
+ as.Rhs = []ir.Node{dot}
+ typecheck.Stmt(as)
+
+ nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil)
+ sw.Compiled.Append(as, nif)
+ continue
+ }
+
+ // Check for shadowing (a case that will never fire because
+ // a previous case would have always fired first). This check
+ // allows us to reorder concrete and interface cases.
+ // (TODO: these should be vet failures, maybe?)
+ for _, ic := range interfaceCases {
+ // An interface type case will shadow all
+ // subsequent types that implement that interface.
+ if typecheck.Implements(c.typ.Type(), ic.typ.Type()) {
+ continue caseLoop
+ }
+ // Note that we don't need to worry about:
+ // 1. Two concrete types shadowing each other. That's
+ // disallowed by the spec.
+ // 2. A concrete type shadowing an interface type.
+ // That can never happen, as interface types can
+ // be satisfied by an infinite set of concrete types.
+ // The correctness of this step also depends on handling
+ // the dynamic type cases separately, as we do above.
+ }
+
+ if c.typ.Type().IsInterface() {
+ interfaceCases = append(interfaceCases, c)
+ } else {
+ concreteCases = append(concreteCases, c)
+ }
+ }
+ flush()
+
+ sw.Compiled.Append(defaultGoto) // if none of the cases matched
+
+ // Now generate all the case bodies
+ for i, ncase := range sw.Cases {
+ sw.Compiled.Append(ir.NewLabelStmt(ncase.Pos(), labels[i]))
+ if caseVar := ncase.Var; caseVar != nil {
+ val := s.srcName
+ if len(ncase.List) == 1 {
+ // single type. We have to downcast the input value to the target type.
+ if ncase.List[0].Op() == ir.OTYPE { // single compile-time known type
+ t := ncase.List[0].Type()
+ if t.IsInterface() {
+ // This case is an interface. Build case value from input interface.
+ // The data word will always be the same, but the itab/type changes.
+ if t.IsEmptyInterface() {
+ var typ ir.Node
+ if s.srcName.Type().IsEmptyInterface() {
+ // E->E, nothing to do, type is already correct.
+ typ = srcItab
+ } else {
+ // I->E, load type out of itab
+ typ = itabType(srcItab)
+ typ.SetPos(ncase.Pos())
+ }
+ val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, typ, srcData)
+ } else {
+ // The itab we need was returned by a runtime.interfaceSwitch call.
+ val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, s.itabName, srcData)
+ }
+ } else {
+ // This case is a concrete type, just read its value out of the interface.
+ val = ifaceData(ncase.Pos(), s.srcName, t)
+ }
+ } else if ncase.List[0].Op() == ir.ODYNAMICTYPE { // single runtime known type
+ dt := ncase.List[0].(*ir.DynamicType)
+ x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.RType)
+ x.ITab = dt.ITab
+ val = x
+ } else if ir.IsNil(ncase.List[0]) {
+ } else {
+ base.Fatalf("unhandled type switch case %v", ncase.List[0])
+ }
+ val.SetType(caseVar.Type())
+ val.SetTypecheck(1)
+ }
+ l := []ir.Node{
+ ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
+ ir.NewAssignStmt(ncase.Pos(), caseVar, val),
+ }
+ typecheck.Stmts(l)
+ sw.Compiled.Append(l...)
+ }
+ sw.Compiled.Append(ncase.Body...)
+ sw.Compiled.Append(br)
+ }
+
+ walkStmtList(sw.Compiled)
+ sw.Tag = nil
+ sw.Cases = nil
+}
+
+var interfaceSwitchGen int
+
+// typeHashFieldOf returns an expression to select the type hash field
+// from an interface's descriptor word (whether a *runtime._type or
+// *runtime.itab pointer).
+func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr {
+ if itab.Op() != ir.OITAB {
+ base.Fatalf("expected OITAB, got %v", itab.Op())
+ }
+ var hashField *types.Field
+ if itab.X.Type().IsEmptyInterface() {
+ // runtime._type's hash field
+ if rtypeHashField == nil {
+ rtypeHashField = runtimeField("hash", rttype.Type.OffsetOf("Hash"), types.Types[types.TUINT32])
+ }
+ hashField = rtypeHashField
+ } else {
+ // runtime.itab's hash field
+ if itabHashField == nil {
+ itabHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+ }
+ hashField = itabHashField
+ }
+ return boundedDotPtr(pos, itab, hashField)
+}
+
+var rtypeHashField, itabHashField *types.Field
+
+// A typeSwitch walks a type switch.
+type typeSwitch struct {
+ // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
+ srcName ir.Node // value being type-switched on
+ hashName ir.Node // type hash of the value being type-switched on
+ okName ir.Node // boolean used for comma-ok type assertions
+ itabName ir.Node // itab value to use for first word of non-empty interface
+}
+
+type typeClause struct {
+ hash uint32
+ body ir.Nodes
+}
+
+func (s *typeSwitch) flush(cc []typeClause, compiled *ir.Nodes) {
+ if len(cc) == 0 {
+ return
+ }
+
+ sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
+
+ // Combine adjacent cases with the same hash.
+ merged := cc[:1]
+ for _, c := range cc[1:] {
+ last := &merged[len(merged)-1]
+ if last.hash == c.hash {
+ last.body.Append(c.body.Take()...)
+ } else {
+ merged = append(merged, c)
+ }
+ }
+ cc = merged
+
+ if s.tryJumpTable(cc, compiled) {
+ return
+ }
+ binarySearch(len(cc), compiled,
+ func(i int) ir.Node {
+ return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashName, ir.NewInt(base.Pos, int64(cc[i-1].hash)))
+ },
+ func(i int, nif *ir.IfStmt) {
+ // TODO(mdempsky): Omit hash equality check if
+ // there's only one type.
+ c := cc[i]
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashName, ir.NewInt(base.Pos, int64(c.hash)))
+ nif.Body.Append(c.body.Take()...)
+ },
+ )
+}
+
+// Try to implement the clauses with a jump table. Returns true if successful.
+func (s *typeSwitch) tryJumpTable(cc []typeClause, out *ir.Nodes) bool {
+ const minCases = 5 // have at least minCases cases in the switch
+ if base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable || base.Ctxt.Retpoline {
+ return false
+ }
+ if len(cc) < minCases {
+ return false // not enough cases for it to be worth it
+ }
+ hashes := make([]uint32, len(cc))
+ // b = # of bits to use. Start with the minimum number of
+ // bits possible, but try a few larger sizes if needed.
+ b0 := bits.Len(uint(len(cc) - 1))
+ for b := b0; b < b0+3; b++ {
+ pickI:
+ for i := 0; i <= 32-b; i++ { // starting bit position
+ // Compute the hash we'd get from all the cases,
+ // selecting b bits starting at bit i.
+ hashes = hashes[:0]
+ for _, c := range cc {
+ h := c.hash >> i & (1<<b - 1)
+ hashes = append(hashes, h)
+ }
+ // Order by increasing hash.
+ sort.Slice(hashes, func(j, k int) bool {
+ return hashes[j] < hashes[k]
+ })
+ for j := 1; j < len(hashes); j++ {
+ if hashes[j] == hashes[j-1] {
+ // There is a duplicate hash; try a different b/i pair.
+ continue pickI
+ }
+ }
+
+ // All hashes are distinct. Use these values of b and i.
+ h := s.hashName
+ if i != 0 {
+ h = ir.NewBinaryExpr(base.Pos, ir.ORSH, h, ir.NewInt(base.Pos, int64(i)))
+ }
+ h = ir.NewBinaryExpr(base.Pos, ir.OAND, h, ir.NewInt(base.Pos, int64(1<<b-1)))
+ h = typecheck.Expr(h)
+
+ // Build jump table.
+ jt := ir.NewJumpTableStmt(base.Pos, h)
+ jt.Cases = make([]constant.Value, 1<<b)
+ jt.Targets = make([]*types.Sym, 1<<b)
+ out.Append(jt)
+
+ // Start with all hashes going to the didn't-match target.
+ noMatch := typecheck.AutoLabel(".s")
+ for j := 0; j < 1<<b; j++ {
+ jt.Cases[j] = constant.MakeInt64(int64(j))
+ jt.Targets[j] = noMatch
+ }
+ // This statement is not reachable, but it will make it obvious that we don't
+ // fall through to the first case.
+ out.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, noMatch))
+
+ // Emit each of the actual cases.
+ for _, c := range cc {
+ h := c.hash >> i & (1<<b - 1)
+ label := typecheck.AutoLabel(".s")
+ jt.Targets[h] = label
+ out.Append(ir.NewLabelStmt(base.Pos, label))
+ out.Append(c.body...)
+ // We reach here if the hash matches but the type equality test fails.
+ out.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, noMatch))
+ }
+ // Emit point to go to if type doesn't match any case.
+ out.Append(ir.NewLabelStmt(base.Pos, noMatch))
+ return true
+ }
+ }
+ // Couldn't find a perfect hash. Fall back to binary search.
+ return false
+}
+
+// binarySearch constructs a binary search tree for handling n cases,
+// and appends it to out. It's used for efficiently implementing
+// switch statements.
+//
+// less(i) should return a boolean expression. If it evaluates true,
+// then cases before i will be tested; otherwise, cases i and later.
+//
+// leaf(i, nif) should setup nif (an OIF node) to test case i. In
+// particular, it should set nif.Cond and nif.Body.
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
+ const binarySearchMin = 4 // minimum number of cases for binary search
+
+ var do func(lo, hi int, out *ir.Nodes)
+ do = func(lo, hi int, out *ir.Nodes) {
+ n := hi - lo
+ if n < binarySearchMin {
+ for i := lo; i < hi; i++ {
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ leaf(i, nif)
+ base.Pos = base.Pos.WithNotStmt()
+ nif.Cond = typecheck.Expr(nif.Cond)
+ nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+ out.Append(nif)
+ out = &nif.Else
+ }
+ return
+ }
+
+ half := lo + n/2
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = less(half)
+ base.Pos = base.Pos.WithNotStmt()
+ nif.Cond = typecheck.Expr(nif.Cond)
+ nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+ do(lo, half, &nif.Body)
+ do(half, hi, &nif.Else)
+ out.Append(nif)
+ }
+
+ do(0, n, out)
+}
+
+func stringSearch(expr ir.Node, cc []exprClause, out *ir.Nodes) {
+ if len(cc) < 4 {
+ // Short list, just do brute force equality checks.
+ for _, c := range cc {
+ nif := ir.NewIfStmt(base.Pos.WithNotStmt(), typecheck.DefaultLit(typecheck.Expr(c.test(expr)), nil), []ir.Node{c.jmp}, nil)
+ out.Append(nif)
+ out = &nif.Else
+ }
+ return
+ }
+
+ // The strategy here is to find a simple test to divide the set of possible strings
+ // that might match expr approximately in half.
+ // The test we're going to use is to do an ordered comparison of a single byte
+ // of expr to a constant. We will pick the index of that byte and the value we're
+ // comparing against to make the split as even as possible.
+ // if expr[3] <= 'd' { ... search strings with expr[3] at 'd' or lower ... }
+ // else { ... search strings with expr[3] at 'e' or higher ... }
+ //
+ // To add complication, we will do the ordered comparison in the signed domain.
+ // The reason for this is to prevent CSE from merging the load used for the
+ // ordered comparison with the load used for the later equality check.
+ // if expr[3] <= 'd' { ... if expr[0] == 'f' && expr[1] == 'o' && expr[2] == 'o' && expr[3] == 'd' { ... } }
+ // If we did both expr[3] loads in the unsigned domain, they would be CSEd, and that
+ // would in turn defeat the combining of expr[0]...expr[3] into a single 4-byte load.
+ // See issue 48222.
+ // By using signed loads for the ordered comparison and unsigned loads for the
+ // equality comparison, they don't get CSEd and the equality comparisons will be
+ // done using wider loads.
+
+ n := len(ir.StringVal(cc[0].lo)) // Length of the constant strings.
+ bestScore := int64(0) // measure of how good the split is.
+ bestIdx := 0 // split using expr[bestIdx]
+ bestByte := int8(0) // compare expr[bestIdx] against bestByte
+ for idx := 0; idx < n; idx++ {
+ for b := int8(-128); b < 127; b++ {
+ le := 0
+ for _, c := range cc {
+ s := ir.StringVal(c.lo)
+ if int8(s[idx]) <= b {
+ le++
+ }
+ }
+ score := int64(le) * int64(len(cc)-le)
+ if score > bestScore {
+ bestScore = score
+ bestIdx = idx
+ bestByte = b
+ }
+ }
+ }
+
+ // The split must be at least 1:n-1 because we have at least 2 distinct strings; they
+ // have to be different somewhere.
+ // TODO: what if the best split is still pretty bad?
+ if bestScore == 0 {
+ base.Fatalf("unable to split string set")
+ }
+
+ // Convert expr to a []int8
+ slice := ir.NewConvExpr(base.Pos, ir.OSTR2BYTESTMP, types.NewSlice(types.Types[types.TINT8]), expr)
+ slice.SetTypecheck(1) // legacy typechecker doesn't handle this op
+ slice.MarkNonNil()
+ // Load the byte we're splitting on.
+ load := ir.NewIndexExpr(base.Pos, slice, ir.NewInt(base.Pos, int64(bestIdx)))
+ // Compare with the value we're splitting on.
+ cmp := ir.Node(ir.NewBinaryExpr(base.Pos, ir.OLE, load, ir.NewInt(base.Pos, int64(bestByte))))
+ cmp = typecheck.DefaultLit(typecheck.Expr(cmp), nil)
+ nif := ir.NewIfStmt(base.Pos, cmp, nil, nil)
+
+ var le []exprClause
+ var gt []exprClause
+ for _, c := range cc {
+ s := ir.StringVal(c.lo)
+ if int8(s[bestIdx]) <= bestByte {
+ le = append(le, c)
+ } else {
+ gt = append(gt, c)
+ }
+ }
+ stringSearch(expr, le, &nif.Body)
+ stringSearch(expr, gt, &nif.Else)
+ out.Append(nif)
+
+ // TODO: if expr[bestIdx] has enough different possible values, use a jump table.
+}
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
new file mode 100644
index 0000000..886b5be
--- /dev/null
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// initStackTemp appends statements to init to initialize the given
+// temporary variable to val, and then returns the expression &tmp.
+func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr {
+ if val != nil && !types.Identical(tmp.Type(), val.Type()) {
+ base.Fatalf("bad initial value for %L: %L", tmp, val)
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, val))
+ return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
+
+// stackTempAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of the given type. Statements to
+// zero-initialize tmp are appended to init.
+func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
+ return initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, typ), nil)
+}
+
+// stackBufAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of type [len]elem. This variable is
+// initialized, and elem must not contain pointers.
+func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
+ if elem.HasPointers() {
+ base.FatalfAt(base.Pos, "%v has pointers", elem)
+ }
+ tmp := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(elem, len))
+ return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
new file mode 100644
index 0000000..001edcc
--- /dev/null
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -0,0 +1,393 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// The constant is known to runtime.
+const tmpstringbufsize = 32
+
+func Walk(fn *ir.Func) {
+ ir.CurFunc = fn
+ errorsBefore := base.Errors()
+ order(fn)
+ if base.Errors() > errorsBefore {
+ return
+ }
+
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
+ ir.DumpList(s, ir.CurFunc.Body)
+ }
+
+ lno := base.Pos
+
+ base.Pos = lno
+ if base.Errors() > errorsBefore {
+ return
+ }
+ walkStmtList(ir.CurFunc.Body)
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
+ ir.DumpList(s, ir.CurFunc.Body)
+ }
+
+ // Eagerly compute sizes of all variables for SSA.
+ for _, n := range fn.Dcl {
+ types.CalcSize(n.Type())
+ }
+}
+
+// walkRecv walks an ORECV node.
+func walkRecv(n *ir.UnaryExpr) ir.Node {
+ if n.Typecheck() == 0 {
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+ init := ir.TakeInit(n)
+
+ n.X = walkExpr(n.X, &init)
+ call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
+ return ir.InitExpr(init, call)
+}
+
+func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
+ if n.Op() != ir.OAS {
+ base.Fatalf("convas: not OAS %v", n.Op())
+ }
+ n.SetTypecheck(1)
+
+ if n.X == nil || n.Y == nil {
+ return n
+ }
+
+ lt := n.X.Type()
+ rt := n.Y.Type()
+ if lt == nil || rt == nil {
+ return n
+ }
+
+ if ir.IsBlank(n.X) {
+ n.Y = typecheck.DefaultLit(n.Y, nil)
+ return n
+ }
+
+ if !types.Identical(lt, rt) {
+ n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
+ n.Y = walkExpr(n.Y, init)
+ }
+ types.CalcSize(n.Y.Type())
+
+ return n
+}
+
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
+ if init == nil {
+ base.Fatalf("mkcall with nil init: %v", fn)
+ }
+ if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
+ base.Fatalf("mkcall %v %v", fn, fn.Type())
+ }
+
+ n := fn.Type().NumParams()
+ if n != len(va) {
+ base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+ }
+
+ call := typecheck.Call(base.Pos, fn, va, false).(*ir.CallExpr)
+ call.SetType(t)
+ return walkExpr(call, init).(*ir.CallExpr)
+}
+
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+ return vmkcall(typecheck.LookupRuntime(name), t, init, args)
+}
+
+func mkcallstmt(name string, args ...ir.Node) ir.Node {
+ return mkcallstmt1(typecheck.LookupRuntime(name), args...)
+}
+
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+ return vmkcall(fn, t, init, args)
+}
+
+func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
+ var init ir.Nodes
+ n := vmkcall(fn, nil, &init, args)
+ if len(init) == 0 {
+ return n
+ }
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+}
+
+func chanfn(name string, n int, t *types.Type) ir.Node {
+ if !t.IsChan() {
+ base.Fatalf("chanfn %v", t)
+ }
+ switch n {
+ case 1:
+ return typecheck.LookupRuntime(name, t.Elem())
+ case 2:
+ return typecheck.LookupRuntime(name, t.Elem(), t.Elem())
+ }
+ base.Fatalf("chanfn %d", n)
+ return nil
+}
+
+func mapfn(name string, t *types.Type, isfat bool) ir.Node {
+ if !t.IsMap() {
+ base.Fatalf("mapfn %v", t)
+ }
+ if mapfast(t) == mapslow || isfat {
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key(), t.Elem())
+ }
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Elem())
+}
+
+func mapfndel(name string, t *types.Type) ir.Node {
+ if !t.IsMap() {
+ base.Fatalf("mapfn %v", t)
+ }
+ if mapfast(t) == mapslow {
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key())
+ }
+ return typecheck.LookupRuntime(name, t.Key(), t.Elem())
+}
+
+const (
+ mapslow = iota
+ mapfast32
+ mapfast32ptr
+ mapfast64
+ mapfast64ptr
+ mapfaststr
+ nmapfast
+)
+
+type mapnames [nmapfast]string
+
+func mkmapnames(base string, ptr string) mapnames {
+ return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
+}
+
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
+
+func mapfast(t *types.Type) int {
+ // Check runtime/map.go:maxElemSize before changing.
+ if t.Elem().Size() > 128 {
+ return mapslow
+ }
+ switch reflectdata.AlgType(t.Key()) {
+ case types.AMEM32:
+ if !t.Key().HasPointers() {
+ return mapfast32
+ }
+ if types.PtrSize == 4 {
+ return mapfast32ptr
+ }
+ base.Fatalf("small pointer %v", t.Key())
+ case types.AMEM64:
+ if !t.Key().HasPointers() {
+ return mapfast64
+ }
+ if types.PtrSize == 8 {
+ return mapfast64ptr
+ }
+ // Two-word object, at least one of which is a pointer.
+ // Use the slow path.
+ case types.ASTRING:
+ return mapfaststr
+ }
+ return mapslow
+}
+
+func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
+ walkExprListSafe(n.Args, init)
+
+ // walkExprListSafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ ls := n.Args
+ for i1, n1 := range ls {
+ ls[i1] = cheapExpr(n1, init)
+ }
+}
+
+// appendWalkStmt typechecks and walks stmt and then appends it to init.
+func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
+ op := stmt.Op()
+ n := typecheck.Stmt(stmt)
+ if op == ir.OAS || op == ir.OAS2 {
+ // If the assignment has side effects, walkExpr will append them
+ // directly to init for us, while walkStmt will wrap it in an OBLOCK.
+ // We need to append them directly.
+ // TODO(rsc): Clean this up.
+ n = walkExpr(n, init)
+ } else {
+ n = walkStmt(n)
+ }
+ init.Append(n)
+}
+
+// The max number of defers in a function using open-coded defers. We enforce this
+// limit because the deferBits bitmask is currently a single byte (to minimize code size)
+const maxOpenDefers = 8
+
+// backingArrayPtrLen extracts the pointer and length from a slice or string.
+// This constructs two nodes referring to n, so n must be a cheapExpr.
+func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
+ var init ir.Nodes
+ c := cheapExpr(n, &init)
+ if c != n || len(init) != 0 {
+ base.Fatalf("backingArrayPtrLen not cheap: %v", n)
+ }
+ ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
+ if n.Type().IsString() {
+ ptr.SetType(types.Types[types.TUINT8].PtrTo())
+ } else {
+ ptr.SetType(n.Type().Elem().PtrTo())
+ }
+ ptr.SetTypecheck(1)
+ length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
+ length.SetType(types.Types[types.TINT])
+ length.SetTypecheck(1)
+ return ptr, length
+}
+
+// mayCall reports whether evaluating expression n may require
+// function calls, which could clobber function call arguments/results
+// currently on the stack.
+func mayCall(n ir.Node) bool {
+ // When instrumenting, any expression might require function calls.
+ if base.Flag.Cfg.Instrumenting {
+ return true
+ }
+
+ isSoftFloat := func(typ *types.Type) bool {
+ return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
+ }
+
+ return ir.Any(n, func(n ir.Node) bool {
+ // walk should have already moved any Init blocks off of
+ // expressions.
+ if len(n.Init()) != 0 {
+ base.FatalfAt(n.Pos(), "mayCall %+v", n)
+ }
+
+ switch n.Op() {
+ default:
+ base.FatalfAt(n.Pos(), "mayCall %+v", n)
+
+ case ir.OCALLFUNC, ir.OCALLINTER,
+ ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ return true
+
+ case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+ ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODYNAMICDOTTYPE, ir.ODIV, ir.OMOD,
+ ir.OSLICE2ARR, ir.OSLICE2ARRPTR:
+ // These ops might panic, make sure they are done
+ // before we start marshaling args for a call. See issue 16760.
+ return true
+
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ // The RHS expression may have init statements that
+ // should only execute conditionally, and so cannot be
+ // pulled out to the top-level init list. We could try
+ // to be more precise here.
+ return len(n.Y.Init()) != 0
+
+ // When using soft-float, these ops might be rewritten to function calls
+ // so we ensure they are evaluated first.
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
+ return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ n := n.(*ir.BinaryExpr)
+ return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))
+
+ case ir.OMIN, ir.OMAX:
+ // string or float requires runtime call, see (*ssagen.state).minmax method.
+ return n.Type().IsString() || n.Type().IsFloat()
+
+ case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
+ ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OMAKEFACE,
+ ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
+ ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
+ ir.OCONVNOP, ir.ODOT,
+ ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
+ ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OSLICEHEADER, ir.OSTRINGHEADER:
+ // ok: operations that don't require function calls.
+ // Expand as needed.
+ }
+
+ return false
+ })
+}
+
+// itabType loads the _type field from a runtime.itab struct.
+func itabType(itab ir.Node) ir.Node {
+ if itabTypeField == nil {
+ // runtime.itab's _type field
+ itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8]))
+ }
+ return boundedDotPtr(base.Pos, itab, itabTypeField)
+}
+
+var itabTypeField *types.Field
+
+// boundedDotPtr returns a selector expression representing ptr.field
+// and omits nil-pointer checks for ptr.
+func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr {
+ sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym)
+ sel.Selection = field
+ sel.SetType(field.Type)
+ sel.SetTypecheck(1)
+ sel.SetBounded(true) // guaranteed not to fault
+ return sel
+}
+
+func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
+ f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ)
+ f.Offset = offset
+ return f
+}
+
+// ifaceData loads the data field from an interface.
+// The concrete type must be known to have type t.
+// It follows the pointer if !IsDirectIface(t).
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
+ if t.IsInterface() {
+ base.Fatalf("ifaceData interface: %v", t)
+ }
+ ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
+ if types.IsDirectIface(t) {
+ ptr.SetType(t)
+ ptr.SetTypecheck(1)
+ return ptr
+ }
+ ptr.SetType(types.NewPtr(t))
+ ptr.SetTypecheck(1)
+ ind := ir.NewStarExpr(pos, ptr)
+ ind.SetType(t)
+ ind.SetTypecheck(1)
+ ind.SetBounded(true)
+ return ind
+}
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
new file mode 100644
index 0000000..85f34a7
--- /dev/null
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -0,0 +1,623 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/wasm"
+ "internal/buildcfg"
+)
+
+/*
+
+ Wasm implementation
+ -------------------
+
+ Wasm is a strange Go port because the machine isn't
+ a register-based machine, threads are different, code paths
+ are different, etc. We outline those differences here.
+
+ See the design doc for some additional info on this topic.
+ https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4/edit#heading=h.mjo1bish3xni
+
+ PCs:
+
+ Wasm doesn't have PCs in the normal sense that you can jump
+ to or call to. Instead, we simulate these PCs using our own construct.
+
+ A PC in the Wasm implementation is the combination of a function
+ ID and a block ID within that function. The function ID is an index
+ into a function table which transfers control to the start of the
+ function in question, and the block ID is a sequential integer
+ indicating where in the function we are.
+
+ Every function starts with a branch table which transfers control
+ to the place in the function indicated by the block ID. The block
+ ID is provided to the function as the sole Wasm argument.
+
+ Block IDs do not encode every possible PC. They only encode places
+ in the function where it might be suspended. Typically these places
+ are call sites.
+
+ Sometimes we encode the function ID and block ID separately. When
+ recorded together as a single integer, we use the value F<<16+B.
+
+ Threads:
+
+ Wasm doesn't (yet) have threads. We have to simulate threads by
+ keeping goroutine stacks in linear memory and unwinding
+ the Wasm stack each time we want to switch goroutines.
+
+ To support unwinding a stack, each function call returns on the Wasm
+ stack a boolean that tells the function whether it should return
+ immediately or not. When returning immediately, a return address
+ is left on the top of the Go stack indicating where the goroutine
+ should be resumed.
+
+ Stack pointer:
+
+ There is a single global stack pointer which records the stack pointer
+ used by the currently active goroutine. This is just an address in
+ linear memory where the Go runtime is maintaining the stack for that
+ goroutine.
+
+ Functions cache the global stack pointer in a local variable for
+ faster access, but any changes must be spilled to the global variable
+ before any call and restored from the global variable after any call.
+
+ Calling convention:
+
+ All Go arguments and return values are passed on the Go stack, not
+ the wasm stack. In addition, return addresses are pushed on the
+ Go stack at every call point. Return addresses are not used during
+ normal execution, they are used only when resuming goroutines.
+ (So they are not really a "return address", they are a "resume address".)
+
+ All Go functions have the Wasm type (i32)->i32. The argument
+ is the block ID and the return value is the exit immediately flag.
+
+ Callsite:
+ - write arguments to the Go stack (starting at SP+0)
+ - push return address to Go stack (8 bytes)
+ - write local SP to global SP
+ - push 0 (type i32) to Wasm stack
+ - issue Call
+ - restore local SP from global SP
+ - pop int32 from top of Wasm stack. If nonzero, exit function immediately.
+ - use results from Go stack (starting at SP+sizeof(args))
+ - note that the callee will have popped the return address
+
+ Prologue:
+ - initialize local SP from global SP
+ - jump to the location indicated by the block ID argument
+ (which appears in local variable 0)
+ - at block 0
+ - check for Go stack overflow, call morestack if needed
+ - subtract frame size from SP
+ - note that arguments now start at SP+framesize+8
+
+ Normal epilogue:
+ - pop frame from Go stack
+ - pop return address from Go stack
+ - push 0 (type i32) on the Wasm stack
+ - return
+ Exit immediately epilogue:
+ - push 1 (type i32) on the Wasm stack
+ - return
+ - note that the return address and stack frame are left on the Go stack
+
+ The main loop that executes goroutines is wasm_pc_f_loop, in
+ runtime/rt0_js_wasm.s. It grabs the saved return address from
+ the top of the Go stack (actually SP-8?), splits it up into F
+ and B parts, then calls F with its Wasm argument set to B.
+
+ Note that when resuming a goroutine, only the most recent function
+ invocation of that goroutine appears on the Wasm stack. When that
+ Wasm function returns normally, the next most recent frame will
+ then be started up by wasm_pc_f_loop.
+
+ Global 0 is SP (stack pointer)
+ Global 1 is CTXT (closure pointer)
+ Global 2 is GP (goroutine pointer)
+*/
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &wasm.Linkwasm
+ arch.REGSP = wasm.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zeroRange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
+
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt%8 != 0 {
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ }
+
+ for i := int64(0); i < cnt; i += 8 {
+ p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ return pp.Prog(wasm.ANop)
+}
+
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if next != b.Succs[0].Block() {
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+
+ case ssa.BlockIf:
+ switch next {
+ case b.Succs[0].Block():
+ // if false, jump to b.Succs[1]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AI32Eqz)
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ s.Prog(wasm.AEnd)
+ case b.Succs[1].Block():
+ // if true, jump to b.Succs[0]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ s.Prog(wasm.AEnd)
+ default:
+ // if true, jump to b.Succs[0], else jump to b.Succs[1]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ s.Prog(wasm.AEnd)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ }
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockExit, ssa.BlockRetJmp:
+
+ case ssa.BlockDefer:
+ p := s.Prog(wasm.AGet)
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
+ s.Prog(wasm.AI64Eqz)
+ s.Prog(wasm.AI32Eqz)
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ s.Prog(wasm.AEnd)
+ if next != b.Succs[0].Block() {
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+
+ default:
+ panic("unexpected block")
+ }
+
+ // Entry point for the next block. Used by the JMP in goToBlock.
+ s.Prog(wasm.ARESUMEPOINT)
+
+ if s.OnWasmStackSkipped != 0 {
+ panic("wasm: bad stack")
+ }
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall:
+ s.PrepareCall(v)
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
+ // The runtime needs to inject jumps to
+ // deferreturn calls using the address in
+ // _func.deferreturn. Hence, the call to
+ // deferreturn must itself be a resumption
+ // point so it gets a target PC.
+ s.Prog(wasm.ARESUMEPOINT)
+ }
+ if v.Op == ssa.OpWasmLoweredClosureCall {
+ getValue64(s, v.Args[1])
+ setReg(s, wasm.REG_CTXT)
+ }
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
+ sym := call.Fn
+ p := s.Prog(obj.ACALL)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
+ p.Pos = v.Pos
+ if v.Op == ssa.OpWasmLoweredTailCall {
+ p.As = obj.ARET
+ }
+ } else {
+ getValue64(s, v.Args[0])
+ p := s.Prog(obj.ACALL)
+ p.To = obj.Addr{Type: obj.TYPE_NONE}
+ p.Pos = v.Pos
+ }
+
+ case ssa.OpWasmLoweredMove:
+ getValue32(s, v.Args[0])
+ getValue32(s, v.Args[1])
+ i32Const(s, int32(v.AuxInt))
+ s.Prog(wasm.AMemoryCopy)
+
+ case ssa.OpWasmLoweredZero:
+ getValue32(s, v.Args[0])
+ i32Const(s, 0)
+ i32Const(s, int32(v.AuxInt))
+ s.Prog(wasm.AMemoryFill)
+
+ case ssa.OpWasmLoweredNilCheck:
+ getValue64(s, v.Args[0])
+ s.Prog(wasm.AI64Eqz)
+ s.Prog(wasm.AIf)
+ p := s.Prog(wasm.ACALLNORESUME)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
+ s.Prog(wasm.AEnd)
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpWasmLoweredWB:
+ p := s.Prog(wasm.ACall)
+ // AuxInt encodes how many buffer entries we need.
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.GCWriteBarrier[v.AuxInt-1]}
+ setReg(s, v.Reg0()) // move result from wasm stack to register local
+
+ case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
+ getValue32(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
+
+ case ssa.OpStoreReg:
+ getReg(s, wasm.REG_SP)
+ getValue64(s, v.Args[0])
+ p := s.Prog(storeOp(v.Type))
+ ssagen.AddrAuto(&p.To, v)
+
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+
+ default:
+ if v.Type.IsMemory() {
+ return
+ }
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped++
+ // If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
+ // Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
+ return
+ }
+ ssaGenValueOnStack(s, v, true)
+ if s.OnWasmStackSkipped != 0 {
+ panic("wasm: bad stack")
+ }
+ setReg(s, v.Reg())
+ }
+}
+
+func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
+ switch v.Op {
+ case ssa.OpWasmLoweredGetClosurePtr:
+ getReg(s, wasm.REG_CTXT)
+
+ case ssa.OpWasmLoweredGetCallerPC:
+ p := s.Prog(wasm.AI64Load)
+ // Caller PC is stored 8 bytes below first parameter.
+ p.From = obj.Addr{
+ Type: obj.TYPE_MEM,
+ Name: obj.NAME_PARAM,
+ Offset: -8,
+ }
+
+ case ssa.OpWasmLoweredGetCallerSP:
+ p := s.Prog(wasm.AGet)
+ // Caller SP is the address of the first parameter.
+ p.From = obj.Addr{
+ Type: obj.TYPE_ADDR,
+ Name: obj.NAME_PARAM,
+ Reg: wasm.REG_SP,
+ Offset: 0,
+ }
+
+ case ssa.OpWasmLoweredAddr:
+ if v.Aux == nil { // address of off(SP), no symbol
+ getValue64(s, v.Args[0])
+ i64Const(s, v.AuxInt)
+ s.Prog(wasm.AI64Add)
+ break
+ }
+ p := s.Prog(wasm.AGet)
+ p.From.Type = obj.TYPE_ADDR
+ switch v.Aux.(type) {
+ case *obj.LSym:
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ default:
+ panic("wasm: bad LoweredAddr")
+ }
+
+ case ssa.OpWasmLoweredConvert:
+ getValue64(s, v.Args[0])
+
+ case ssa.OpWasmSelect:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ getValue32(s, v.Args[2])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI64AddConst:
+ getValue64(s, v.Args[0])
+ i64Const(s, v.AuxInt)
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI64Const:
+ i64Const(s, v.AuxInt)
+
+ case ssa.OpWasmF32Const:
+ f32Const(s, v.AuxFloat())
+
+ case ssa.OpWasmF64Const:
+ f64Const(s, v.AuxFloat())
+
+ case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
+ getValue32(s, v.Args[0])
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
+
+ case ssa.OpWasmI64Eqz:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+ if extend {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+
+ case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
+ ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
+ ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ s.Prog(v.Op.Asm())
+ if extend {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+
+ case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
+ ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
+ ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI32Rotl:
+ getValue32(s, v.Args[0])
+ getValue32(s, v.Args[1])
+ s.Prog(wasm.AI32Rotl)
+ s.Prog(wasm.AI64ExtendI32U)
+
+ case ssa.OpWasmI64DivS:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ if v.Type.Size() == 8 {
+ // Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
+ break
+ }
+ s.Prog(wasm.AI64DivS)
+
+ case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
+ getValue64(s, v.Args[0])
+ if buildcfg.GOWASM.SatConv {
+ s.Prog(v.Op.Asm())
+ } else {
+ if v.Op == ssa.OpWasmI64TruncSatF32S {
+ s.Prog(wasm.AF64PromoteF32)
+ }
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
+ }
+
+ case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
+ getValue64(s, v.Args[0])
+ if buildcfg.GOWASM.SatConv {
+ s.Prog(v.Op.Asm())
+ } else {
+ if v.Op == ssa.OpWasmI64TruncSatF32U {
+ s.Prog(wasm.AF64PromoteF32)
+ }
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
+ }
+
+ case ssa.OpWasmF32DemoteF64:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmF64PromoteF32:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
+ ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
+ ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
+ ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
+ ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
+ ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpLoadReg:
+ p := s.Prog(loadOp(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+
+ case ssa.OpCopy:
+ getValue64(s, v.Args[0])
+
+ default:
+ v.Fatalf("unexpected op: %s", v.Op)
+
+ }
+}
+
+func isCmp(v *ssa.Value) bool {
+ switch v.Op {
+ case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
+ ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
+ ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
+ return true
+ default:
+ return false
+ }
+}
+
+func getValue32(s *ssagen.State, v *ssa.Value) {
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped--
+ ssaGenValueOnStack(s, v, false)
+ if !isCmp(v) {
+ s.Prog(wasm.AI32WrapI64)
+ }
+ return
+ }
+
+ reg := v.Reg()
+ getReg(s, reg)
+ if reg != wasm.REG_SP {
+ s.Prog(wasm.AI32WrapI64)
+ }
+}
+
+func getValue64(s *ssagen.State, v *ssa.Value) {
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped--
+ ssaGenValueOnStack(s, v, true)
+ return
+ }
+
+ reg := v.Reg()
+ getReg(s, reg)
+ if reg == wasm.REG_SP {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+}
+
+func i32Const(s *ssagen.State, val int32) {
+ p := s.Prog(wasm.AI32Const)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
+}
+
+func i64Const(s *ssagen.State, val int64) {
+ p := s.Prog(wasm.AI64Const)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
+}
+
+func f32Const(s *ssagen.State, val float64) {
+ p := s.Prog(wasm.AF32Const)
+ p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
+}
+
+func f64Const(s *ssagen.State, val float64) {
+ p := s.Prog(wasm.AF64Const)
+ p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
+}
+
+func getReg(s *ssagen.State, reg int16) {
+ p := s.Prog(wasm.AGet)
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+func setReg(s *ssagen.State, reg int16) {
+ p := s.Prog(wasm.ASet)
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+func loadOp(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return wasm.AF32Load
+ case 8:
+ return wasm.AF64Load
+ default:
+ panic("bad load type")
+ }
+ }
+
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return wasm.AI64Load8S
+ }
+ return wasm.AI64Load8U
+ case 2:
+ if t.IsSigned() {
+ return wasm.AI64Load16S
+ }
+ return wasm.AI64Load16U
+ case 4:
+ if t.IsSigned() {
+ return wasm.AI64Load32S
+ }
+ return wasm.AI64Load32U
+ case 8:
+ return wasm.AI64Load
+ default:
+ panic("bad load type")
+ }
+}
+
+func storeOp(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return wasm.AF32Store
+ case 8:
+ return wasm.AF64Store
+ default:
+ panic("bad store type")
+ }
+ }
+
+ switch t.Size() {
+ case 1:
+ return wasm.AI64Store8
+ case 2:
+ return wasm.AI64Store16
+ case 4:
+ return wasm.AI64Store32
+ case 8:
+ return wasm.AI64Store
+ default:
+ panic("bad store type")
+ }
+}
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
new file mode 100644
index 0000000..5565bd3
--- /dev/null
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -0,0 +1,39 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/x86"
+ "fmt"
+ "internal/buildcfg"
+ "os"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &x86.Link386
+ arch.REGSP = x86.REGSP
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.MAXWIDTH = (1 << 32) - 1
+ switch v := buildcfg.GO386; v {
+ case "sse2":
+ case "softfloat":
+ arch.SoftFloat = true
+ case "387":
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
+ base.Exit(1)
+ default:
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
+ base.Exit(1)
+
+ }
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+}
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
new file mode 100644
index 0000000..3ca4797
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -0,0 +1,50 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if *ax == 0 {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *ax = 1
+ }
+
+ if cnt <= int64(4*types.RegSize) {
+ for i := int64(0); i < cnt; i += int64(types.RegSize) {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+ }
+ } else if cnt <= int64(128*types.RegSize) {
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
+ p.To.Sym = ir.Syms.Duffzero
+ } else {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // See comment in ../amd64/ggen.go.
+ p := pp.Prog(x86.AXCHGL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ return p
+}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
new file mode 100644
index 0000000..42ec44a
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -0,0 +1,958 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "fmt"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.Op386MOVLconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = ssa.AuxMark
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() {
+ switch t.Size() {
+ case 1:
+ return x86.AMOVBLZX
+ case 2:
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ default:
+ panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t))
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+//
+// dest := dest(To) op src(From)
+//
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.Op386ADDL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ switch {
+ case r == r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+
+ // 2-address opcode arithmetic
+ case ssa.Op386SUBL,
+ ssa.Op386MULL,
+ ssa.Op386ANDL,
+ ssa.Op386ORL,
+ ssa.Op386XORL,
+ ssa.Op386SHLL,
+ ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
+ ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
+ ssa.Op386ROLL, ssa.Op386ROLW, ssa.Op386ROLB,
+ ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
+ ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
+ ssa.Op386PXOR,
+ ssa.Op386ADCL,
+ ssa.Op386SBBL:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+
+ case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg())
+
+ case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.Op386DIVL, ssa.Op386DIVW,
+ ssa.Op386DIVLU, ssa.Op386DIVWU,
+ ssa.Op386MODL, ssa.Op386MODW,
+ ssa.Op386MODLU, ssa.Op386MODWU:
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and AX is the only output
+ x := v.Args[1].Reg()
+
+ // CPU faults upon signed overflow, which occurs when most
+ // negative int is divided by -1.
+ var j *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW ||
+ v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW {
+
+ if ssa.DivisionNeedsFixUp(v) {
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ c = s.Prog(x86.ACMPL)
+ j = s.Prog(x86.AJEQ)
+
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ c = s.Prog(x86.ACMPW)
+ j = s.Prog(x86.AJEQ)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+ }
+ // sign extend the dividend
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ s.Prog(x86.ACDQ)
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ s.Prog(x86.ACWD)
+ }
+ }
+
+ // for unsigned ints, we sign extend by setting DX = 0
+ // signed ints were sign extended above
+ if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
+ v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
+ c := s.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := s.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
+ // n * -1 = -n
+ n = s.Prog(x86.ANEGL)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_AX
+ } else {
+ // n % -1 == 0
+ n = s.Prog(x86.AXORL)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = x86.REG_DX
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_DX
+ }
+
+ j.To.SetTarget(n)
+ j2.To.SetTarget(s.Pc())
+ }
+
+ case ssa.Op386HMULL, ssa.Op386HMULLU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := s.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.Op386MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.Op386MULLQU:
+ // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.Op386AVGLU:
+ // compute (x+y)/2 unsigned.
+ // Do a 32-bit add, the overflow goes into the carry.
+ // Shift right once and pull the carry back into the 31st bit.
+ p := s.Prog(x86.AADDL)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.From.Reg = v.Args[1].Reg()
+ p = s.Prog(x86.ARCRL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386ADDLconst:
+ r := v.Reg()
+ a := v.Args[0].Reg()
+ if r == a {
+ if v.AuxInt == 1 {
+ p := s.Prog(x86.AINCL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ if v.AuxInt == -1 {
+ p := s.Prog(x86.ADECL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.Op386MULLconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.AddRestSourceReg(v.Args[0].Reg())
+
+ case ssa.Op386SUBLconst,
+ ssa.Op386ADCLconst,
+ ssa.Op386SBBLconst,
+ ssa.Op386ANDLconst,
+ ssa.Op386ORLconst,
+ ssa.Op386XORLconst,
+ ssa.Op386SHLLconst,
+ ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst,
+ ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst,
+ ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386SBBLcarrymask:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(x86.ALEAL)
+ switch v.Op {
+ case ssa.Op386LEAL1:
+ p.From.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386LEAL2:
+ p.From.Scale = 2
+ case ssa.Op386LEAL4:
+ p.From.Scale = 4
+ case ssa.Op386LEAL8:
+ p.From.Scale = 8
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386LEAL:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
+ ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
+ // Go assembler has swapped operands for UCOMISx relative to CMP,
+ // must account for that right here.
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.Op386CMPLload, ssa.Op386CMPWload, ssa.Op386CMPBload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+ case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.From, v, sc.Off64())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val64()
+ case ssa.Op386MOVLconst:
+ x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ break
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst1, ssa.Op386MOVSDconst1:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ f := math.Float64frombits(uint64(v.AuxInt))
+ if v.Op == ssa.Op386MOVSDconst1 {
+ p.From.Sym = base.Ctxt.Float64Sym(f)
+ } else {
+ p.From.Sym = base.Ctxt.Float32Sym(float32(f))
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
+ ssa.Op386MOVSDloadidx8, ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4, ssa.Op386MOVWloadidx2:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Scale = 1
+ case ssa.Op386MOVSDloadidx8:
+ p.From.Scale = 8
+ case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
+ p.From.Scale = 4
+ case ssa.Op386MOVWloadidx2:
+ p.From.Scale = 2
+ }
+ p.From.Reg = r
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
+ ssa.Op386ANDLloadidx4, ssa.Op386ORLloadidx4, ssa.Op386XORLloadidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Index = v.Args[2].Reg()
+ p.From.Scale = 4
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386MULLload,
+ ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload,
+ ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload,
+ ssa.Op386MULSDload, ssa.Op386MULSSload, ssa.Op386DIVSSload, ssa.Op386DIVSDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore,
+ ssa.Op386ADDLmodify, ssa.Op386SUBLmodify, ssa.Op386ANDLmodify, ssa.Op386ORLmodify, ssa.Op386XORLmodify:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.Op386ADDLconstmodify:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off64()
+ val := sc.Val64()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
+ ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.To.Scale = 1
+ case ssa.Op386MOVSDstoreidx8:
+ p.To.Scale = 8
+ case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ p.To.Scale = 4
+ case ssa.Op386MOVWstoreidx2:
+ p.To.Scale = 2
+ }
+ p.To.Reg = r
+ p.To.Index = i
+ ssagen.AddAux(&p.To, v)
+ case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.Op386ADDLconstmodifyidx4:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Scale = 4
+ p.To.Index = v.Args[1].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1,
+ ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ switch v.Op {
+ case ssa.Op386MOVBstoreconstidx1, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVLstoreconstidx1:
+ p.To.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386MOVWstoreconstidx2:
+ p.To.Scale = 2
+ case ssa.Op386MOVLstoreconstidx4,
+ ssa.Op386ADDLconstmodifyidx4, ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
+ p.To.Scale = 4
+ }
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Index = i
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
+ ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
+ ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
+ ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.Op386DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.Op386DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.Op386LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.Op386LoweredGetG:
+ r := v.Reg()
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(base.Ctxt) {
+ // MOVL (TLS), r
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVL TLS, r
+ // MOVL (r)(TLS*1), r
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := s.Prog(x86.AMOVL)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+
+ case ssa.Op386LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -4 // PC is stored 4 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.Arch.FixedFrameSize // 0 on 386, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // AuxInt encodes how many buffer entries we need.
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
+
+ case ssa.Op386LoweredPanicBoundsA, ssa.Op386LoweredPanicBoundsB, ssa.Op386LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+
+ case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+
+ case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
+ s.Call(v)
+ case ssa.Op386CALLtail:
+ s.TailCall(v)
+ case ssa.Op386NEGL,
+ ssa.Op386BSWAPL,
+ ssa.Op386NOTL:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386BSFL, ssa.Op386BSFW,
+ ssa.Op386BSRL, ssa.Op386BSRW,
+ ssa.Op386SQRTSS, ssa.Op386SQRTSD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386SETEQ, ssa.Op386SETNE,
+ ssa.Op386SETL, ssa.Op386SETLE,
+ ssa.Op386SETG, ssa.Op386SETGE,
+ ssa.Op386SETGF, ssa.Op386SETGEF,
+ ssa.Op386SETB, ssa.Op386SETBE,
+ ssa.Op386SETORD, ssa.Op386SETNAN,
+ ssa.Op386SETA, ssa.Op386SETAE,
+ ssa.Op386SETO:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386SETNEF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
+
+ case ssa.Op386SETEQF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
+
+ case ssa.Op386InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.Op386REPSTOSL:
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSL)
+ case ssa.Op386REPMOVSL:
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSL)
+ case ssa.Op386LoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // but it doesn't have false dependency on AX.
+ // Or maybe allocate an output register and use MOVL (reg),reg2 ?
+ // That trades clobbering flags for clobbering a register.
+ p := s.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.Op386LoweredCtz32:
+ // BSFL in, out
+ p := s.Prog(x86.ABSFL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ // JNZ 2(PC)
+ p1 := s.Prog(x86.AJNE)
+ p1.To.Type = obj.TYPE_BRANCH
+
+ // MOVL $32, out
+ p2 := s.Prog(x86.AMOVL)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = 32
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg()
+
+ // NOP (so the JNZ has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ p1.To.SetTarget(nop)
+
+ case ssa.OpClobber:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.Block386EQ: {x86.AJEQ, x86.AJNE},
+ ssa.Block386NE: {x86.AJNE, x86.AJEQ},
+ ssa.Block386LT: {x86.AJLT, x86.AJGE},
+ ssa.Block386GE: {x86.AJGE, x86.AJLT},
+ ssa.Block386LE: {x86.AJLE, x86.AJGT},
+ ssa.Block386GT: {x86.AJGT, x86.AJLE},
+ ssa.Block386OS: {x86.AJOS, x86.AJOC},
+ ssa.Block386OC: {x86.AJOC, x86.AJOS},
+ ssa.Block386ULT: {x86.AJCS, x86.AJCC},
+ ssa.Block386UGE: {x86.AJCC, x86.AJCS},
+ ssa.Block386UGT: {x86.AJHI, x86.AJLS},
+ ssa.Block386ULE: {x86.AJLS, x86.AJHI},
+ ssa.Block386ORD: {x86.AJPC, x86.AJPS},
+ ssa.Block386NAN: {x86.AJPS, x86.AJPC},
+}
+
+var eqfJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in rax:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.Block386EQF:
+ s.CombJump(b, next, &eqfJumps)
+
+ case ssa.Block386NEF:
+ s.CombJump(b, next, &nefJumps)
+
+ case ssa.Block386EQ, ssa.Block386NE,
+ ssa.Block386LT, ssa.Block386GE,
+ ssa.Block386LE, ssa.Block386GT,
+ ssa.Block386OS, ssa.Block386OC,
+ ssa.Block386ULT, ssa.Block386UGT,
+ ssa.Block386ULE, ssa.Block386UGE:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
new file mode 100644
index 0000000..7d38bea
--- /dev/null
+++ b/src/cmd/compile/main.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/compile/internal/amd64"
+ "cmd/compile/internal/arm"
+ "cmd/compile/internal/arm64"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/loong64"
+ "cmd/compile/internal/mips"
+ "cmd/compile/internal/mips64"
+ "cmd/compile/internal/ppc64"
+ "cmd/compile/internal/riscv64"
+ "cmd/compile/internal/s390x"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/wasm"
+ "cmd/compile/internal/x86"
+ "fmt"
+ "internal/buildcfg"
+ "log"
+ "os"
+)
+
+var archInits = map[string]func(*ssagen.ArchInfo){
+ "386": x86.Init,
+ "amd64": amd64.Init,
+ "arm": arm.Init,
+ "arm64": arm64.Init,
+ "loong64": loong64.Init,
+ "mips": mips.Init,
+ "mipsle": mips.Init,
+ "mips64": mips64.Init,
+ "mips64le": mips64.Init,
+ "ppc64": ppc64.Init,
+ "ppc64le": ppc64.Init,
+ "riscv64": riscv64.Init,
+ "s390x": s390x.Init,
+ "wasm": wasm.Init,
+}
+
+func main() {
+ // disable timestamps for reproducible output
+ log.SetFlags(0)
+ log.SetPrefix("compile: ")
+
+ buildcfg.Check()
+ archInit, ok := archInits[buildcfg.GOARCH]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", buildcfg.GOARCH)
+ os.Exit(2)
+ }
+
+ gc.Main(archInit)
+ base.Exit(0)
+}
diff --git a/src/cmd/compile/profile.sh b/src/cmd/compile/profile.sh
new file mode 100755
index 0000000..37d65d8
--- /dev/null
+++ b/src/cmd/compile/profile.sh
@@ -0,0 +1,21 @@
+# Copyright 2023 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script collects a CPU profile of the compiler
+# for building all targets in std and cmd, and puts
+# the profile at cmd/compile/default.pgo.
+
+dir=$(mktemp -d)
+cd $dir
+seed=$(date)
+
+for p in $(go list std cmd); do
+ h=$(echo $seed $p | md5sum | cut -d ' ' -f 1)
+ echo $p $h
+ go build -o /dev/null -gcflags=-cpuprofile=$PWD/prof.$h $p
+done
+
+go tool pprof -proto prof.* > $(go env GOROOT)/src/cmd/compile/default.pgo
+
+rm -r $dir